diff --git a/.circleci/config.yml b/.circleci/config.yml index 49800a54c..3f73fb04f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -14,9 +14,6 @@ executors: - image: tendermintdev/docker-website-deployment environment: AWS_REGION: us-east-1 - protoc: - docker: - - image: tendermintdev/docker-protoc commands: run_test: @@ -75,19 +72,6 @@ jobs: root: "/tmp/bin" paths: - "." - proto-lint: - executor: protoc - steps: - - checkout - - run: - command: make proto-lint - - proto-breakage: - executor: protoc - steps: - - checkout - - run: - command: make proto-check-breaking test_abci_apps: executor: golang @@ -378,13 +362,10 @@ jobs: # command: | # set -x # export PATH=~/.local/bin:$PATH - # # install node and dredd # ./scripts/get_nodejs.sh - # # build the binaries with a proper version of Go # docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks - # # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use # go get github.com/snikch/goodman/cmd/goodman # make contract-tests @@ -407,13 +388,11 @@ workflows: filters: branches: only: - - docs-theme-latest + - docs-staging - setup_dependencies - test_abci_apps: requires: - setup_dependencies - - proto-breakage - - proto-lint - test_abci_cli: requires: - setup_dependencies diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml new file mode 100644 index 000000000..5b449cffd --- /dev/null +++ b/.github/workflows/linkchecker.yml @@ -0,0 +1,12 @@ +name: Check Markdown links +on: + schedule: + - cron: '0 0 * * *' +jobs: + markdown-link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - uses: gaurav-nelson/github-action-markdown-link-check@0.6.0 + with: + folder-path: "docs" diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml new file mode 100644 index 000000000..ddc9ee4c4 --- 
/dev/null +++ b/.github/workflows/proto.yml @@ -0,0 +1,12 @@ +name: Proto check +on: [pull_request] +jobs: + proto-checks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - uses: docker-practice/actions-setup-docker@master + - name: lint + run: make proto-lint + - name: check-breakage + run: make proto-check-breaking-ci diff --git a/CHANGELOG.md b/CHANGELOG.md index 6040849c1..69020d919 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,57 @@ # Changelog +## v0.33.2 + +*March 11, 2020* + +Special thanks to external contributors on this release: +@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912 + +Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). + +### BREAKING CHANGES: + +- CLI/RPC/Config + - [cli] [\#4505](https://github.com/tendermint/tendermint/pull/4505) `tendermint lite` sub-command new syntax (@melekes): + `lite cosmoshub-3 -p 52.57.29.196:26657 -w public-seed-node.cosmoshub.certus.one:26657 + --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948` + +- Go API + - [lite2] [\#4535](https://github.com/tendermint/tendermint/pull/4535) Remove `Start/Stop` (@melekes) + - [lite2] [\#4469](https://github.com/tendermint/tendermint/issues/4469) Remove `RemoveNoLongerTrustedHeaders` and `RemoveNoLongerTrustedHeadersPeriod` option (@cmwaters) + - [lite2] [\#4473](https://github.com/tendermint/tendermint/issues/4473) Return height as a 2nd param in `TrustedValidatorSet` (@melekes) + - [lite2] [\#4536](https://github.com/tendermint/tendermint/pull/4536) `Update` returns a signed header (1st param) (@melekes) + + +### IMPROVEMENTS: + +- [blockchain/v2] [\#4361](https://github.com/tendermint/tendermint/pull/4361) Add reactor (@brapse) +- [cmd] [\#4515](https://github.com/tendermint/tendermint/issues/4515) Change `tendermint debug dump` sub-command archives filename's format (@melekes) +- [consensus] [\#3583](https://github.com/tendermint/tendermint/issues/3583) Reduce 
`non-deterministic signature` log noise (@tau3) +- [examples/kvstore] [\#4507](https://github.com/tendermint/tendermint/issues/4507) ABCI query now returns the proper height (@erikgrinaker) +- [lite2] [\#4462](https://github.com/tendermint/tendermint/issues/4462) Add `NewHTTPClient` and `NewHTTPClientFromTrustedStore` (@cmwaters) +- [lite2] [\#4329](https://github.com/tendermint/tendermint/issues/4329) modified bisection to loop (@cmwaters) +- [lite2] [\#4385](https://github.com/tendermint/tendermint/issues/4385) Disconnect from bad nodes (@melekes) +- [lite2] [\#4398](https://github.com/tendermint/tendermint/issues/4398) Add `VerifyAdjacent` and `VerifyNonAdjacent` funcs (@cmwaters) +- [lite2] [\#4426](https://github.com/tendermint/tendermint/issues/4426) Don't save intermediate headers (@cmwaters) +- [lite2] [\#4464](https://github.com/tendermint/tendermint/issues/4464) Cross-check first header (@cmwaters) +- [lite2] [\#4470](https://github.com/tendermint/tendermint/issues/4470) Fix inconsistent header-validatorset pairing (@melekes) +- [lite2] [\#4488](https://github.com/tendermint/tendermint/issues/4488) Allow local clock drift -10 sec. (@melekes) +- [p2p] [\#4449](https://github.com/tendermint/tendermint/pull/4449) Use `curve25519.X25519()` instead of `ScalarMult` (@erikgrinaker) +- [types] [\#4417](https://github.com/tendermint/tendermint/issues/4417) **VerifyCommitX() functions should return as soon as +2/3 threshold is reached** (@alessio). 
+- [libs/kv] [\#4542](https://github.com/tendermint/tendermint/pull/4542) remove unused type KI64Pair (@tessr) + +### BUG FIXES: + +- [cmd] [\#4303](https://github.com/tendermint/tendermint/issues/4303) Show useful error when Tendermint is not initialized (@melekes) +- [cmd] [\#4515](https://github.com/tendermint/tendermint/issues/4515) **Fix `tendermint debug kill` sub-command** (@melekes) +- [rpc] [\#3935](https://github.com/tendermint/tendermint/issues/3935) **Create buffered subscriptions on `/subscribe`** (@melekes) +- [rpc] [\#4375](https://github.com/tendermint/tendermint/issues/4375) Stop searching for txs in `/tx_search` upon client timeout (@gterzian) +- [rpc] [\#4406](https://github.com/tendermint/tendermint/pull/4406) Fix issue with multiple subscriptions on the websocket (@antho1404) +- [rpc] [\#4432](https://github.com/tendermint/tendermint/issues/4432) Fix `/tx_search` pagination with ordered results (@erikgrinaker) +- [rpc] [\#4492](https://github.com/tendermint/tendermint/issues/4492) Keep the original subscription "id" field when new RPCs come in (@michaelfig) + + ## v0.33.1 *Feburary 13, 2020* @@ -95,7 +147,7 @@ subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/b - Apps - - [tm-bench] Removed tm-bench in favor of [tm-load-test](https://github.com/interchainio/tm-load-test) + - [tm-bench] Removed tm-bench in favor of [tm-load-test](https://github.com/informalsystems/tm-load-test) - Go API diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 8877d31ec..79d66bc05 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,11 +1,12 @@ -## v0.33.2 +## v0.33.3 + +- Nodes are no longer guaranteed to contain all blocks up to the latest height. The ABCI app can now control which blocks to retain through the ABCI field `ResponseCommit.retain_height`, all blocks and associated data below this height will be removed. 
\*\* Special thanks to external contributors on this release: -Friendly reminder, we have a [bug bounty -program](https://hackerone.com/tendermint). +Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). ### BREAKING CHANGES: @@ -13,18 +14,33 @@ program](https://hackerone.com/tendermint). - Apps +- P2P Protocol + - Go API + - [rpc/client] [\#4628](https://github.com/tendermint/tendermint/pull/4628) Split out HTTP and local clients into `http` and `local` packages (@erikgrinaker). + - [lite2] [\#4616](https://github.com/tendermint/tendermint/pull/4616) Make `maxClockDrift` an option (@melekes). + `Verify/VerifyAdjacent/VerifyNonAdjacent` now accept `maxClockDrift time.Duration`. + ### FEATURES: +- [abci] Add `ResponseCommit.retain_height` field, which will automatically remove blocks below this height. +- [rpc] Add `/status` response fields for the earliest block available on the node +- [rpc] [\#4611](https://github.com/tendermint/tendermint/pull/4611) Add `codespace` to `ResultBroadcastTx` (@whylee259) + ### IMPROVEMENTS: -- [types] [\#4417](https://github.com/tendermint/tendermint/issues/4417) VerifyCommitX() functions should return as soon as +2/3 threashold is reached. 
+- [blockchain] Add `Base` to blockchain reactor P2P messages `StatusRequest` and `StatusResponse` +- [example/kvstore] Add `RetainBlocks` option to control block retention +- [p2p] [\#4548](https://github.com/tendermint/tendermint/pull/4548) Add ban list to address book (@cmwaters) +- [privval] \#4534 Add `error` as a return value on`GetPubKey()` +- [Docker] \#4569 Default configuration added to docker image (you can still mount your own config the same way) (@greg-szabo) +- [lite2] [\#4562](https://github.com/tendermint/tendermint/pull/4562) Cache headers when using bisection (@cmwaters) +- [all] [\#4608](https://github.com/tendermint/tendermint/pull/4608) Give reactors descriptive names when they're initialized +- [lite2] [\#4575](https://github.com/tendermint/tendermint/pull/4575) Use bisection for within-range verification (@cmwaters) +- [tools] \#4615 Allow developers to use Docker to generate proto stubs, via `make proto-gen-docker`. ### BUG FIXES: -- [rpc] [\#4493](https://github.com/tendermint/tendermint/pull/4493) Keep the original subscription "id" field when new RPCs come in (@michaelfig) - -- [rpc] [\#4437](https://github.com/tendermint/tendermint/pull/4437) Fix tx_search pagination with ordered results (@erikgrinaker) - -- [rpc] [\#4406](https://github.com/tendermint/tendermint/pull/4406) Fix issue with multiple subscriptions on the websocket (@antho1404) +- [rpc] \#4568 Fix panic when `Subscribe` is called, but HTTP client is not running (@melekes) + `Subscribe`, `Unsubscribe(All)` methods return an error now. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 623cfb53a..e057c32a8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,7 +12,7 @@ landing changes in master. All work on the code base should be motivated by a [Github Issue](https://github.com/tendermint/tendermint/issues). 
[Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) -is a good place start when looking for places to contribute. If you +is a good place start when looking for places to contribute. If you would like to work on an issue which already exists, please indicate so by leaving a comment. @@ -49,8 +49,9 @@ maintainers to take a look. ![Contributing flow](./docs/imgs/contributing.png) Each stage of the process is aimed at creating feedback cycles which align contributors and maintainers to make sure: -* Contributors don’t waste their time implementing/proposing features which won’t land in master. -* Maintainers have the necessary context in order to support and review contributions. + +- Contributors don’t waste their time implementing/proposing features which won’t land in master. +- Maintainers have the necessary context in order to support and review contributions. ## Forking @@ -100,7 +101,28 @@ specify exactly the dependency you want to update, eg. ## Protobuf -When working with [protobuf](https://developers.google.com/protocol-buffers) there are a few things you should know. We use [buf](https://buf.build/) for our linting and breaking changes checking. If you would like to run linting and check if the changes you have made are breaking then you will have to install the needed dependencies with `make buf`. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. To generate new stubs based off of your changes you can run `make proto-gen` (you can do this outside of GOPATH). +We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core. + +For linting and checking breaking changes, we use [buf](https://buf.build/). 
If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. + +There are two ways to generate your proto stubs. + +1. Use Docker, pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker` +2. Run `make proto-gen` after installing `protoc` and gogoproto. + +### Installation Instructions + +To install `protoc`, download an appropriate release (https://github.com/protocolbuffers/protobuf) and then move the provided binaries into your PATH (follow instructions in README included with the download). + +To install `gogoproto`, do the following: + +```sh +$ go get github.com/gogo/protobuf/gogoproto +$ cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things +$ make install +``` + +You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files. ## Vagrant @@ -172,9 +194,23 @@ easy to reference the pull request where a change was introduced. - ensure pull branch is based on a recent `master` - run `make test` to ensure that all tests pass -- squash merge pull request +- [squash](https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git) merge pull request - the `unstable` branch may be used to aggregate pull merges before fixing tests +### Git Commit Style + +We follow the [Go style guide on commit messages](https://tip.golang.org/doc/contribute.html#commit_messages). Write concise commits that start with the package name and have a description that finishes the sentence "This change modifies Tendermint to...". 
For example, + +\``` +cmd/debug: execute p.Signal only when p is not nil + +[potentially longer description in the body] + +Fixes #nnnn +\``` + +Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though! + ### Release Procedure #### Major Release diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index c3c186e05..e3a918749 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -22,17 +22,33 @@ RUN apk update && \ # Run the container with tmuser by default. (UID=100, GID=1000) USER tmuser -# Expose the data directory as a volume since there's mutable state in there -VOLUME [ $TMHOME ] - WORKDIR $TMHOME -# p2p and rpc port -EXPOSE 26656 26657 +# p2p, rpc and prometheus port +EXPOSE 26656 26657 26660 ENTRYPOINT ["/usr/bin/tendermint"] -CMD ["node", "--moniker=`hostname`"] +CMD ["node"] STOPSIGNAL SIGTERM ARG BINARY=tendermint COPY $BINARY /usr/bin/tendermint + +# Create default configuration for docker run. 
+RUN /usr/bin/tendermint init && \ + sed -i \ + -e 's/^proxy_app\s*=.*/proxy_app = "kvstore"/' \ + -e 's/^moniker\s*=.*/moniker = "dockernode"/' \ + -e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \ + -e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \ + -e 's/^index_all_tags\s*=.*/index_all_tags = true/' \ + -e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \ + -e 's/^prometheus\s*=.*/prometheus = true/' \ + $TMHOME/config/config.toml && \ + sed -i \ + -e 's/^\s*"chain_id":.*/ "chain_id": "dockerchain",/' \ + $TMHOME/config/genesis.json + +# Expose the data directory as a volume since there's mutable state in there +VOLUME [ $TMHOME ] + diff --git a/Makefile b/Makefile index 5557a87ae..eb9626bf4 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,8 @@ OUTPUT?=build/tendermint BUILD_TAGS?='tendermint' LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` -s -w BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" +HTTPS_GIT := https://github.com/tendermint/tendermint.git +DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf all: check build test install .PHONY: all @@ -52,14 +54,23 @@ proto-gen: @sh scripts/protocgen.sh .PHONY: proto-gen +proto-gen-docker: + @echo "Generating Protobuf files" + @docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh +.PHONY: proto-gen-docker + proto-lint: - @buf check lint --error-format=json + @$(DOCKER_BUF) check lint --error-format=json .PHONY: proto-lint proto-check-breaking: - @buf check breaking --against-input '.git#branch=master' + @$(DOCKER_BUF) check breaking --against-input .git#branch=master .PHONY: proto-check-breaking +proto-check-breaking-ci: + @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master +.PHONY: proto-check-breaking-ci + ############################################################################### ### Build ABCI ### 
############################################################################### @@ -129,9 +140,10 @@ clean_certs: ### Formatting, linting, and vetting ### ############################################################################### -fmt: - @go fmt ./... -.PHONY: fmt +format: + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w -local github.com/tendermint/tendermint +.PHONY: format lint: @echo "--> Running linter" diff --git a/README.md b/README.md index 8caf846e3..94256ca6f 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Or [Blockchain](), for shor [![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) [![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://godoc.org/github.com/tendermint/tendermint) [![Go version](https://img.shields.io/badge/go-1.14-blue.svg)](https://github.com/moovweb/gvm) -[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org) +[![Discord](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/AzefAFd) [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) [![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) @@ -68,17 +68,16 @@ See the [install instructions](/docs/introduction/install.md) ## Contributing -Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions, -and the [contributing guidelines](CONTRIBUTING.md) when submitting code. 
+Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions. -Join the larger community on the [forum](https://forum.cosmos.network/) and the [chat](https://riot.im/app/#/room/#tendermint:matrix.org). +Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md) +and the [style guide](STYLE_GUIDE.md). -To learn more about the structure of the software, watch the [Developer -Sessions](/docs/DEV_SESSIONS.md) and read some [Architectural Decision -Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). +To get more active, Join the wider community at [Discord](https://discord.gg/AzefAFd) or jump onto the [Forum](https://forum.cosmos.network/). -Learn more by reading the code and comparing it to the -[specification](https://github.com/tendermint/spec). +Learn more by reading the code and the +[specifications](https://github.com/tendermint/spec) or watch the [Developer Sessions](/docs/DEV_SESSIONS.md) and read up on the +[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). ## Versioning @@ -137,7 +136,7 @@ hosted at: https://docs.tendermint.com/master/ ### Tools Benchmarking is provided by `tm-load-test`. -The code for `tm-load-test` can be found [here](https://github.com/interchainio/tm-load-test) this binary needs to be built separately. +The code for `tm-load-test` can be found [here](https://github.com/informalsystems/tm-load-test) this binary needs to be built separately. Additional documentation is found [here](/docs/tools). ### Sub-projects diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md new file mode 100644 index 000000000..6d0deda6d --- /dev/null +++ b/STYLE_GUIDE.md @@ -0,0 +1,159 @@ +# Go Coding Style Guide + +In order to keep our code looking good with lots of programmers working on it, it helps to have a "style guide", so all +the code generally looks quite similar. 
This doesn't mean there is only one "right way" to write code, or even that this +standard is better than your style. But if we agree to a number of stylistic practices, it makes it much easier to read +and modify new code. Please feel free to make suggestions if there's something you would like to add or modify. + +We expect all contributors to be familiar with [Effective Go](https://golang.org/doc/effective_go.html) +(and it's recommended reading for all Go programmers anyways). Additionally, we generally agree with the suggestions + in [Uber's style guide](https://github.com/uber-go/guide/blob/master/style.md) and use that as a starting point. + + +## Code Structure + +Perhaps more key for code readability than good commenting is having the right structure. As a rule of thumb, try to write +in a logical order of importance, taking a little time to think how to order and divide the code such that someone could +scroll down and understand the functionality of it just as well as you do. A loose example of such order would be: +* Constants, global and package-level variables +* Main Struct +* Options (only if they are seen as critical to the struct else they should be placed in another file) +* Initialization / Start and stop of the service +* Msgs/Events +* Public Functions (In order of most important) +* Private/helper functions +* Auxiliary structs and functions (can also be above private functions or in a separate file) + +## General + + * Use `gofmt` (or `goimport`) to format all code upon saving it. (If you use VIM, check out vim-go). + * Use a linter (see below) and generally try to keep the linter happy (where it makes sense). + * Think about documentation, and try to leave godoc comments, when it will help new developers. + * Every package should have a high level doc.go file to describe the purpose of that package, its main functions, and any other relevant information. + * `TODO` should not be used. If important enough, it should be recorded as an issue. 
+ * `BUG` / `FIXME` should be used sparingly to guide future developers on some of the vulnerabilities of the code. + * `XXX` can be used in work-in-progress (prefixed with "WIP:" on github) branches but they must be removed before approving a PR. + * Applications (e.g. clis/servers) *should* panic on unexpected unrecoverable errors and print a stack trace. + +## Comments + + * Use a space after comment delimiter (ex. `// your comment`). + * Many comments are not sentences. These should begin with a lower case letter and end without a period. + * Conversely, sentences in comments should be sentence-cased and end with a period. + +## Linters + +These must be applied to all (Go) repos. + + * [shellcheck](https://github.com/koalaman/shellcheck) + * [golangci-lint](https://github.com/golangci/golangci-lint) (covers all important linters) + - See the `.golangci.yml` file in each repo for linter configuration. + +## Various + + * Reserve "Save" and "Load" for long-running persistence operations. When parsing bytes, use "Encode" or "Decode". + * Maintain consistency across the codebase. + * Functions that return functions should have the suffix `Fn` + * Names should not [stutter](https://blog.golang.org/package-names). For example, a struct generally shouldn’t have + a field named after itself; e.g., this shouldn't occur: +``` golang +type middleware struct { + middleware Middleware +} +``` + * In comments, use "iff" to mean, "if and only if". + * Product names are capitalized, like "Tendermint", "Basecoin", "Protobuf", etc except in command lines: `tendermint --help` + * Acronyms are all capitalized, like "RPC", "gRPC", "API". "MyID", rather than "MyId". + * Prefer errors.New() instead of fmt.Errorf() unless you're actually using the format feature with arguments. + +## Importing Libraries + +Sometimes it's necessary to rename libraries to avoid naming collisions or ambiguity. 
+ + * Use [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) + * Separate imports into blocks - one for the standard lib, one for external libs and one for application libs. + * Here are some common library labels for consistency: + - dbm "github.com/tendermint/tm-db" + - tmcmd "github.com/tendermint/tendermint/cmd/tendermint/commands" + - tmcfg "github.com/tendermint/tendermint/config/tendermint" + - tmtypes "github.com/tendermint/tendermint/types" + * Never use anonymous imports (the `.`), for example, `tmlibs/common` or anything else. + * When importing a pkg from the `tendermint/libs` directory, prefix the pkg alias with tm. + - tmbits "github.com/tendermint/tendermint/libs/bits" + * tip: Use the `_` library import to import a library for initialization effects (side effects) + +## Dependencies + + * Dependencies should be pinned by a release tag, or specific commit, to avoid breaking `go get` when external dependencies are updated. + * Refer to the [contributing](CONTRIBUTING.md) document for more details + +## Testing + + * The first rule of testing is: we add tests to our code + * The second rule of testing is: we add tests to our code + * For Golang testing: + * Make use of table driven testing where possible and not-cumbersome + - [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) + * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require) + * When using mocks, it is recommended to use Testify [mock] (https://pkg.go.dev/github.com/stretchr/testify/mock + ) along with [Mockery](https://github.com/vektra/mockery) for autogeneration + +## Errors + + * Ensure that errors are concise, clear and traceable. + * Use stdlib errors package. + * For wrapping errors, use `fmt.Errorf()` with `%w`. 
+ * Panic is appropriate when an internal invariant of a system is broken, while all other cases (in particular, + incorrect or invalid usage) should return errors. + +## Config + + * Currently the TOML filetype is being used for config files + * A good practice is to store per-user config files under `~/.[yourAppName]/config.toml` + +## CLI + + * When implementing a CLI use [Cobra](https://github.com/spf13/cobra) and [Viper](https://github.com/spf13/viper). + * Helper messages for commands and flags must be all lowercase. + * Instead of using pointer flags (eg. `FlagSet().StringVar`) use Viper to retrieve flag values (eg. `viper.GetString`) + - The flag key used when setting and getting the flag should always be stored in a + variable taking the form `FlagXxx` or `flagXxx`. + - Flag short variable descriptions should always start with a lower case character as to remain consistent with + the description provided in the default `--help` flag. + +## Version + + * Every repo should have a version/version.go file that mimics the Tendermint Core repo + * We read the value of the constant version in our build scripts and hence it has to be a string + +## Non-Go Code + + * All non-Go code (`*.proto`, `Makefile`, `*.sh`), where there is no common + agreement on style, should be formatted according to + [EditorConfig](http://editorconfig.org/) config: + + ``` + # top-most EditorConfig file + root = true + + # Unix-style newlines with a newline ending every file + [*] + charset = utf-8 + end_of_line = lf + insert_final_newline = true + trim_trailing_whitespace = true + + [Makefile] + indent_style = tab + + [*.sh] + indent_style = tab + + [*.proto] + indent_style = space + indent_size = 2 + ``` + + Make sure the file above (`.editorconfig`) are in the root directory of your + repo and you have a [plugin for your + editor](http://editorconfig.org/#download) installed. 
diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index f856519f3..42f00231f 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -6,11 +6,12 @@ import ( "encoding/json" "fmt" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) var ( @@ -63,7 +64,8 @@ var _ types.Application = (*Application)(nil) type Application struct { types.BaseApplication - state State + state State + RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight) } func NewApplication() *Application { @@ -118,7 +120,12 @@ func (app *Application) Commit() types.ResponseCommit { app.state.AppHash = appHash app.state.Height++ saveState(app.state) - return types.ResponseCommit{Data: appHash} + + resp := types.ResponseCommit{Data: appHash} + if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks { + resp.RetainHeight = app.state.Height - app.RetainBlocks + 1 + } + return resp } // Returns an associated value or nil if missing. 
@@ -136,6 +143,7 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo resQuery.Index = -1 // TODO make Proof return index resQuery.Key = reqQuery.Data resQuery.Value = value + resQuery.Height = app.state.Height return } @@ -151,6 +159,7 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo resQuery.Log = "exists" } resQuery.Value = value + resQuery.Height = app.state.Height return resQuery } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index e8dee67d9..4d8c829ad 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -30,6 +30,11 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri // repeating tx doesn't raise error ar = app.DeliverTx(req) require.False(t, ar.IsErr(), ar) + // commit + app.Commit() + + info := app.Info(types.RequestInfo{}) + require.NotZero(t, info.LastBlockHeight) // make sure query is fine resQuery := app.Query(types.RequestQuery{ @@ -37,7 +42,9 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri Data: []byte(key), }) require.Equal(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) + require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine resQuery = app.Query(types.RequestQuery{ @@ -46,7 +53,9 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri Prove: true, }) require.EqualValues(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) + require.EqualValues(t, info.LastBlockHeight, resQuery.Height) } func TestKVStoreKV(t *testing.T) { @@ -300,6 +309,13 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) 
require.False(t, ar.IsErr(), ar) + // commit + _, err = app.CommitSync() + require.NoError(t, err) + + info, err := app.InfoSync(types.RequestInfo{}) + require.NoError(t, err) + require.NotZero(t, info.LastBlockHeight) // make sure query is fine resQuery, err := app.QuerySync(types.RequestQuery{ @@ -308,7 +324,9 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) }) require.Nil(t, err) require.Equal(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) + require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine resQuery, err = app.QuerySync(types.RequestQuery{ @@ -318,5 +336,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) }) require.Nil(t, err) require.Equal(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) + require.EqualValues(t, info.LastBlockHeight, resQuery.Height) } diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 0c5498bee..fffc617be 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -7,12 +7,13 @@ import ( "strconv" "strings" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" tmtypes "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 0cdd43df6..9e702b5ce 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -27,7 +27,7 @@ > query "abc" -> code: OK -> log: exists --> height: 0 +-> height: 2 -> key: abc -> key.hex: 616263 -> value: abc @@ -43,7 +43,7 @@ > query "def" 
-> code: OK -> log: exists --> height: 0 +-> height: 3 -> key: def -> key.hex: 646566 -> value: xyz diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index d8445f469..51ff4aedd 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1865,6 +1865,7 @@ func (m *ResponseEndBlock) GetEvents() []Event { type ResponseCommit struct { // reserve 1 Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1910,6 +1911,13 @@ func (m *ResponseCommit) GetData() []byte { return nil } +func (m *ResponseCommit) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight + } + return 0 +} + // ConsensusParams contains all consensus-relevant parameters // that can be adjusted by the abci app type ConsensusParams struct { @@ -2571,7 +2579,7 @@ func (m *PartSetHeader) GetHash() []byte { // Validator type Validator struct { Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - //PubKey pub_key = 2 [(gogoproto.nullable)=false]; + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2960,155 +2968,156 @@ func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa func init() { golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } var fileDescriptor_9f1eaa49c51fa1ac = []byte{ - // 2370 bytes of a gzipped FileDescriptorProto + // 2386 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0x4d, 0x90, 0x1b, 0x47, - 0x15, 0xde, 0xd1, 0x6a, 0x57, 0xd2, 0xd3, 0xee, 0x4a, 0x69, 0x3b, 0x89, 0x22, 0x92, 0x5d, 0xd7, - 0xf8, 
0x6f, 0x9d, 0x04, 0x6d, 0x58, 0x2a, 0x54, 0x8c, 0x5d, 0xa1, 0x56, 0x6b, 0x07, 0xa9, 0x62, - 0x3b, 0x9b, 0xb1, 0xbd, 0x18, 0xa8, 0xca, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x56, 0x9a, 0x99, 0xcc, - 0xb4, 0x64, 0x89, 0xe2, 0x4e, 0x51, 0xc5, 0x81, 0x0b, 0x55, 0x5c, 0xb8, 0x73, 0xe4, 0xc0, 0x21, - 0x47, 0x8e, 0x39, 0x70, 0xe0, 0xc0, 0xd9, 0xc0, 0xc2, 0x89, 0xca, 0x91, 0xa2, 0x38, 0x52, 0xfd, - 0xba, 0xe7, 0x4f, 0x2b, 0xad, 0xc6, 0xc1, 0x37, 0x2e, 0xd2, 0x74, 0xf7, 0x7b, 0xaf, 0xbb, 0x5f, - 0xbf, 0x7e, 0xdf, 0x7b, 0xaf, 0xe1, 0x35, 0xda, 0xe9, 0xda, 0x7b, 0x7c, 0xea, 0xb1, 0x40, 0xfe, - 0x36, 0x3c, 0xdf, 0xe5, 0x2e, 0x79, 0x95, 0x33, 0xc7, 0x62, 0xfe, 0xd0, 0x76, 0x78, 0x43, 0x90, - 0x34, 0x70, 0xb0, 0x7e, 0x8d, 0xf7, 0x6d, 0xdf, 0x32, 0x3d, 0xea, 0xf3, 0xe9, 0x1e, 0x52, 0xee, - 0xf5, 0xdc, 0x9e, 0x1b, 0x7f, 0x49, 0xf6, 0x7a, 0xbd, 0xeb, 0x4f, 0x3d, 0xee, 0xee, 0x0d, 0x99, - 0x7f, 0x32, 0x60, 0xea, 0x4f, 0x8d, 0x5d, 0x18, 0xd8, 0x9d, 0x60, 0xef, 0x64, 0x9c, 0x9c, 0xaf, - 0xbe, 0xd3, 0x73, 0xdd, 0xde, 0x80, 0x49, 0x99, 0x9d, 0xd1, 0xd3, 0x3d, 0x6e, 0x0f, 0x59, 0xc0, - 0xe9, 0xd0, 0x53, 0x04, 0xdb, 0xb3, 0x04, 0xd6, 0xc8, 0xa7, 0xdc, 0x76, 0x1d, 0x39, 0xae, 0xff, - 0x7b, 0x0d, 0x0a, 0x06, 0xfb, 0x7c, 0xc4, 0x02, 0x4e, 0x3e, 0x80, 0x3c, 0xeb, 0xf6, 0xdd, 0x5a, - 0xee, 0x92, 0xb6, 0x5b, 0xde, 0xd7, 0x1b, 0x73, 0xf7, 0xd2, 0x50, 0xd4, 0x77, 0xbb, 0x7d, 0xb7, - 0xb5, 0x62, 0x20, 0x07, 0xb9, 0x05, 0x6b, 0x4f, 0x07, 0xa3, 0xa0, 0x5f, 0x5b, 0x45, 0xd6, 0xcb, - 0xe7, 0xb3, 0x7e, 0x24, 0x48, 0x5b, 0x2b, 0x86, 0xe4, 0x11, 0xd3, 0xda, 0xce, 0x53, 0xb7, 0x96, - 0xcf, 0x32, 0x6d, 0xdb, 0x79, 0x8a, 0xd3, 0x0a, 0x0e, 0xd2, 0x02, 0x08, 0x18, 0x37, 0x5d, 0x4f, - 0x6c, 0xa8, 0xb6, 0x86, 0xfc, 0xd7, 0xcf, 0xe7, 0x7f, 0xc8, 0xf8, 0x27, 0x48, 0xde, 0x5a, 0x31, - 0x4a, 0x41, 0xd8, 0x10, 0x92, 0x6c, 0xc7, 0xe6, 0x66, 0xb7, 0x4f, 0x6d, 0xa7, 0xb6, 0x9e, 0x45, - 0x52, 0xdb, 0xb1, 0xf9, 0xa1, 0x20, 0x17, 0x92, 0xec, 0xb0, 0x21, 0x54, 0xf1, 0xf9, 0x88, 0xf9, - 0xd3, 0x5a, 0x21, 0x8b, 0x2a, 0x3e, 0x15, 
0xa4, 0x42, 0x15, 0xc8, 0x43, 0x3e, 0x86, 0x72, 0x87, - 0xf5, 0x6c, 0xc7, 0xec, 0x0c, 0xdc, 0xee, 0x49, 0xad, 0x88, 0x22, 0x76, 0xcf, 0x17, 0xd1, 0x14, - 0x0c, 0x4d, 0x41, 0xdf, 0x5a, 0x31, 0xa0, 0x13, 0xb5, 0x48, 0x13, 0x8a, 0xdd, 0x3e, 0xeb, 0x9e, - 0x98, 0x7c, 0x52, 0x2b, 0xa1, 0xa4, 0xab, 0xe7, 0x4b, 0x3a, 0x14, 0xd4, 0x8f, 0x26, 0xad, 0x15, - 0xa3, 0xd0, 0x95, 0x9f, 0x42, 0x2f, 0x16, 0x1b, 0xd8, 0x63, 0xe6, 0x0b, 0x29, 0x17, 0xb2, 0xe8, - 0xe5, 0x8e, 0xa4, 0x47, 0x39, 0x25, 0x2b, 0x6c, 0x90, 0xbb, 0x50, 0x62, 0x8e, 0xa5, 0x36, 0x56, - 0x46, 0x41, 0xd7, 0x96, 0x58, 0x98, 0x63, 0x85, 0xdb, 0x2a, 0x32, 0xf5, 0x4d, 0x3e, 0x84, 0xf5, - 0xae, 0x3b, 0x1c, 0xda, 0xbc, 0xb6, 0x81, 0x32, 0xae, 0x2c, 0xd9, 0x12, 0xd2, 0xb6, 0x56, 0x0c, - 0xc5, 0xd5, 0x2c, 0xc0, 0xda, 0x98, 0x0e, 0x46, 0x4c, 0xbf, 0x0e, 0xe5, 0x84, 0x25, 0x93, 0x1a, - 0x14, 0x86, 0x2c, 0x08, 0x68, 0x8f, 0xd5, 0xb4, 0x4b, 0xda, 0x6e, 0xc9, 0x08, 0x9b, 0xfa, 0x16, - 0x6c, 0x24, 0xed, 0x56, 0x1f, 0x46, 0x8c, 0xc2, 0x16, 0x05, 0xe3, 0x98, 0xf9, 0x81, 0x30, 0x40, - 0xc5, 0xa8, 0x9a, 0xe4, 0x32, 0x6c, 0xe2, 0x6e, 0xcd, 0x70, 0x5c, 0xdc, 0xab, 0xbc, 0xb1, 0x81, - 0x9d, 0xc7, 0x8a, 0x68, 0x07, 0xca, 0xde, 0xbe, 0x17, 0x91, 0xac, 0x22, 0x09, 0x78, 0xfb, 0x9e, - 0x22, 0xd0, 0xbf, 0x0b, 0xd5, 0x59, 0xd3, 0x25, 0x55, 0x58, 0x3d, 0x61, 0x53, 0x35, 0x9f, 0xf8, - 0x24, 0x17, 0xd5, 0xb6, 0x70, 0x8e, 0x92, 0xa1, 0xf6, 0xf8, 0xbb, 0x5c, 0xc4, 0x1c, 0x59, 0xab, - 0xb8, 0x6e, 0xc2, 0x49, 0x20, 0x77, 0x79, 0xbf, 0xde, 0x90, 0x0e, 0xa2, 0x11, 0x3a, 0x88, 0xc6, - 0xa3, 0xd0, 0x83, 0x34, 0x8b, 0x5f, 0x3e, 0xdf, 0x59, 0xf9, 0xe5, 0x5f, 0x76, 0x34, 0x03, 0x39, - 0xc8, 0x1b, 0xc2, 0xa0, 0xa8, 0xed, 0x98, 0xb6, 0xa5, 0xe6, 0x29, 0x60, 0xbb, 0x6d, 0x91, 0x4f, - 0xa1, 0xda, 0x75, 0x9d, 0x80, 0x39, 0xc1, 0x28, 0x10, 0x6e, 0x8e, 0x0e, 0x03, 0xe5, 0x0b, 0x16, - 0x1d, 0xf2, 0x61, 0x48, 0x7e, 0x84, 0xd4, 0x46, 0xa5, 0x9b, 0xee, 0x20, 0xf7, 0x00, 0xc6, 0x74, - 0x60, 0x5b, 0x94, 0xbb, 0x7e, 0x50, 0xcb, 0x5f, 0x5a, 0x3d, 0x47, 0xd8, 0x71, 
0x48, 0xf8, 0xd8, - 0xb3, 0x28, 0x67, 0xcd, 0xbc, 0x58, 0xb9, 0x91, 0xe0, 0x27, 0xd7, 0xa0, 0x42, 0x3d, 0xcf, 0x0c, - 0x38, 0xe5, 0xcc, 0xec, 0x4c, 0x39, 0x0b, 0xd0, 0x5f, 0x6c, 0x18, 0x9b, 0xd4, 0xf3, 0x1e, 0x8a, - 0xde, 0xa6, 0xe8, 0xd4, 0xad, 0xe8, 0xb4, 0xf1, 0x6a, 0x12, 0x02, 0x79, 0x8b, 0x72, 0x8a, 0xda, - 0xda, 0x30, 0xf0, 0x5b, 0xf4, 0x79, 0x94, 0xf7, 0x95, 0x0e, 0xf0, 0x9b, 0xbc, 0x06, 0xeb, 0x7d, - 0x66, 0xf7, 0xfa, 0x1c, 0xb7, 0xbd, 0x6a, 0xa8, 0x96, 0x38, 0x18, 0xcf, 0x77, 0xc7, 0x0c, 0xbd, - 0x5b, 0xd1, 0x90, 0x0d, 0xfd, 0x57, 0x39, 0x78, 0xe5, 0xcc, 0xf5, 0x15, 0x72, 0xfb, 0x34, 0xe8, - 0x87, 0x73, 0x89, 0x6f, 0x72, 0x4b, 0xc8, 0xa5, 0x16, 0xf3, 0x95, 0x57, 0x7e, 0x6b, 0x81, 0x06, - 0x5a, 0x48, 0xa4, 0x36, 0xae, 0x58, 0xc8, 0x63, 0xa8, 0x0e, 0x68, 0xc0, 0x4d, 0x69, 0xfb, 0x26, - 0x7a, 0xd9, 0xd5, 0x73, 0x3d, 0xc1, 0x3d, 0x1a, 0xde, 0x19, 0x61, 0xdc, 0x4a, 0xdc, 0xd6, 0x20, - 0xd5, 0x4b, 0x9e, 0xc0, 0xc5, 0xce, 0xf4, 0x27, 0xd4, 0xe1, 0xb6, 0xc3, 0xcc, 0x33, 0x67, 0xb4, - 0xb3, 0x40, 0xf4, 0xdd, 0xb1, 0x6d, 0x31, 0xa7, 0x1b, 0x1e, 0xce, 0x85, 0x48, 0x44, 0x74, 0x78, - 0x81, 0xfe, 0x04, 0xb6, 0xd2, 0xbe, 0x88, 0x6c, 0x41, 0x8e, 0x4f, 0x94, 0x46, 0x72, 0x7c, 0x42, - 0xbe, 0x03, 0x79, 0x21, 0x0e, 0xb5, 0xb1, 0xb5, 0x10, 0x2c, 0x14, 0xf7, 0xa3, 0xa9, 0xc7, 0x0c, - 0xa4, 0xd7, 0xf5, 0xe8, 0x26, 0x44, 0xfe, 0x69, 0x56, 0xb6, 0x7e, 0x03, 0x2a, 0x33, 0xae, 0x27, - 0x71, 0xac, 0x5a, 0xf2, 0x58, 0xf5, 0x0a, 0x6c, 0xa6, 0x3c, 0x8c, 0xfe, 0xc7, 0x75, 0x28, 0x1a, - 0x2c, 0xf0, 0x84, 0x11, 0x93, 0x16, 0x94, 0xd8, 0xa4, 0xcb, 0x24, 0x2c, 0x69, 0x4b, 0x9c, 0xb8, - 0xe4, 0xb9, 0x1b, 0xd2, 0x0b, 0xaf, 0x19, 0x31, 0x93, 0x9b, 0x29, 0x48, 0xbe, 0xbc, 0x4c, 0x48, - 0x12, 0x93, 0x6f, 0xa7, 0x31, 0xf9, 0xca, 0x12, 0xde, 0x19, 0x50, 0xbe, 0x99, 0x02, 0xe5, 0x65, - 0x13, 0xa7, 0x50, 0xb9, 0x3d, 0x07, 0x95, 0x97, 0x6d, 0x7f, 0x01, 0x2c, 0xb7, 0xe7, 0xc0, 0xf2, - 0xee, 0xd2, 0xb5, 0xcc, 0xc5, 0xe5, 0xdb, 0x69, 0x5c, 0x5e, 0xa6, 0x8e, 0x19, 0x60, 0xbe, 0x37, - 0x0f, 0x98, 0x6f, 
0x2c, 0x91, 0xb1, 0x10, 0x99, 0x0f, 0xcf, 0x20, 0xf3, 0xb5, 0x25, 0xa2, 0xe6, - 0x40, 0x73, 0x3b, 0x05, 0xcd, 0x90, 0x49, 0x37, 0x0b, 0xb0, 0xf9, 0xa3, 0xb3, 0xd8, 0x7c, 0x7d, - 0x99, 0xa9, 0xcd, 0x03, 0xe7, 0xef, 0xcd, 0x80, 0xf3, 0xd5, 0x65, 0xbb, 0x5a, 0x88, 0xce, 0x37, - 0x84, 0x7f, 0x9c, 0xb9, 0x19, 0xc2, 0x97, 0x32, 0xdf, 0x77, 0x7d, 0x05, 0x7c, 0xb2, 0xa1, 0xef, - 0x0a, 0x8f, 0x1d, 0xdb, 0xff, 0x39, 0x48, 0x8e, 0x97, 0x36, 0x61, 0xed, 0xfa, 0x17, 0x5a, 0xcc, - 0x8b, 0x9e, 0x2d, 0xe9, 0xed, 0x4b, 0xca, 0xdb, 0x27, 0x00, 0x3e, 0x97, 0x06, 0xf8, 0x1d, 0x28, - 0x0b, 0x4c, 0x99, 0xc1, 0x6e, 0xea, 0x85, 0xd8, 0x4d, 0xde, 0x86, 0x57, 0xd0, 0xff, 0xca, 0x30, - 0x40, 0x39, 0x92, 0x3c, 0x3a, 0x92, 0x8a, 0x18, 0x90, 0x1a, 0x94, 0x40, 0xf1, 0x4d, 0xb8, 0x90, - 0xa0, 0x15, 0x72, 0x11, 0x0b, 0x24, 0x48, 0x55, 0x23, 0xea, 0x03, 0xcf, 0x6b, 0xd1, 0xa0, 0xaf, - 0xdf, 0x8f, 0x15, 0x14, 0xc7, 0x05, 0x04, 0xf2, 0x5d, 0xd7, 0x92, 0xfb, 0xde, 0x34, 0xf0, 0x5b, - 0xc4, 0x0a, 0x03, 0xb7, 0x87, 0x8b, 0x2b, 0x19, 0xe2, 0x53, 0x50, 0x45, 0x57, 0xbb, 0x24, 0xef, - 0xac, 0xfe, 0x7b, 0x2d, 0x96, 0x17, 0x87, 0x0a, 0xf3, 0x50, 0x5d, 0x7b, 0x99, 0xa8, 0x9e, 0xfb, - 0xdf, 0x50, 0x5d, 0xff, 0x97, 0x16, 0x1f, 0x69, 0x84, 0xd7, 0x5f, 0x4f, 0x05, 0xc2, 0xba, 0x6c, - 0xc7, 0x62, 0x13, 0x54, 0xf9, 0xaa, 0x21, 0x1b, 0x61, 0xa8, 0xb5, 0x8e, 0xc7, 0x90, 0x0e, 0xb5, - 0x0a, 0xd8, 0x27, 0x1b, 0xe4, 0x7d, 0xc4, 0x79, 0xf7, 0xa9, 0x72, 0x0d, 0x29, 0x10, 0x94, 0x49, - 0x5d, 0x43, 0x65, 0x73, 0x47, 0x82, 0xcc, 0x90, 0xd4, 0x09, 0x7c, 0x29, 0xa5, 0xc2, 0x86, 0x37, - 0xa1, 0x24, 0x96, 0x1e, 0x78, 0xb4, 0xcb, 0xf0, 0x6e, 0x97, 0x8c, 0xb8, 0x43, 0xb7, 0x80, 0x9c, - 0xf5, 0x31, 0xe4, 0x01, 0xac, 0xb3, 0x31, 0x73, 0xb8, 0x38, 0x23, 0xa1, 0xd6, 0x37, 0x17, 0x02, - 0x31, 0x73, 0x78, 0xb3, 0x26, 0x94, 0xf9, 0xcf, 0xe7, 0x3b, 0x55, 0xc9, 0xf3, 0xae, 0x3b, 0xb4, - 0x39, 0x1b, 0x7a, 0x7c, 0x6a, 0x28, 0x29, 0xfa, 0xcf, 0x72, 0x02, 0x0f, 0x53, 0xfe, 0x67, 0xae, - 0x7a, 0xc3, 0x4b, 0x93, 0x4b, 0x84, 0x48, 0xd9, 0x54, 
0xfe, 0x16, 0x40, 0x8f, 0x06, 0xe6, 0x33, - 0xea, 0x70, 0x66, 0x29, 0xbd, 0x97, 0x7a, 0x34, 0xf8, 0x01, 0x76, 0x88, 0x78, 0x53, 0x0c, 0x8f, - 0x02, 0x66, 0xe1, 0x01, 0xac, 0x1a, 0x85, 0x1e, 0x0d, 0x1e, 0x07, 0xcc, 0x4a, 0xec, 0xb5, 0xf0, - 0x32, 0xf6, 0x9a, 0xd6, 0x77, 0x71, 0x56, 0xdf, 0x3f, 0xcf, 0xc5, 0xb7, 0x23, 0x0e, 0x1f, 0xfe, - 0x3f, 0x75, 0xf1, 0x1b, 0xcc, 0x29, 0xd2, 0x20, 0x40, 0x7e, 0x08, 0xaf, 0x44, 0xb7, 0xd2, 0x1c, - 0xe1, 0x6d, 0x0d, 0xad, 0xf0, 0xc5, 0x2e, 0x77, 0x75, 0x9c, 0xee, 0x0e, 0xc8, 0x67, 0xf0, 0xfa, - 0x8c, 0x0f, 0x8a, 0x26, 0xc8, 0xbd, 0x90, 0x2b, 0x7a, 0x35, 0xed, 0x8a, 0x42, 0xf9, 0xb1, 0xf6, - 0x56, 0x5f, 0xca, 0xad, 0xb9, 0x22, 0x42, 0xd8, 0x24, 0xbc, 0xcd, 0xb3, 0x09, 0xfd, 0xcf, 0x1a, - 0x54, 0x66, 0x16, 0x48, 0x3e, 0x80, 0x35, 0x89, 0xc0, 0xda, 0xb9, 0x85, 0x10, 0xd4, 0xb8, 0xda, - 0x93, 0x64, 0x20, 0x07, 0x50, 0x64, 0x2a, 0xba, 0x56, 0x4a, 0xb9, 0xba, 0x24, 0x08, 0x57, 0xfc, - 0x11, 0x1b, 0xb9, 0x03, 0xa5, 0x48, 0xf5, 0x4b, 0x32, 0xb7, 0xe8, 0xe4, 0x94, 0x90, 0x98, 0x51, - 0x3f, 0x84, 0x72, 0x62, 0x79, 0xe4, 0x1b, 0x50, 0x1a, 0xd2, 0x89, 0x4a, 0xb7, 0x64, 0x00, 0x5d, - 0x1c, 0xd2, 0x09, 0x66, 0x5a, 0xe4, 0x75, 0x28, 0x88, 0xc1, 0x1e, 0x95, 0x07, 0xb9, 0x6a, 0xac, - 0x0f, 0xe9, 0xe4, 0xfb, 0x34, 0xd0, 0x7f, 0xa1, 0xc1, 0x56, 0x7a, 0x9d, 0xe4, 0x1d, 0x20, 0x82, - 0x96, 0xf6, 0x98, 0xe9, 0x8c, 0x86, 0x12, 0x23, 0x43, 0x89, 0x95, 0x21, 0x9d, 0x1c, 0xf4, 0xd8, - 0x83, 0xd1, 0x10, 0xa7, 0x0e, 0xc8, 0x7d, 0xa8, 0x86, 0xc4, 0x61, 0xb1, 0x4b, 0x69, 0xe5, 0x8d, - 0x33, 0xc9, 0xee, 0x1d, 0x45, 0x20, 0x73, 0xdd, 0x5f, 0x8b, 0x5c, 0x77, 0x4b, 0xca, 0x0b, 0x47, - 0xf4, 0xf7, 0xa1, 0x32, 0xb3, 0x63, 0xa2, 0xc3, 0xa6, 0x37, 0xea, 0x98, 0x27, 0x6c, 0x6a, 0xa2, - 0x4a, 0xd0, 0xd4, 0x4b, 0x46, 0xd9, 0x1b, 0x75, 0x3e, 0x66, 0x53, 0x91, 0x75, 0x04, 0x7a, 0x17, - 0xb6, 0xd2, 0xc9, 0x94, 0x00, 0x0e, 0xdf, 0x1d, 0x39, 0x16, 0xae, 0x7b, 0xcd, 0x90, 0x0d, 0x72, - 0x0b, 0xd6, 0xc6, 0xae, 0xb4, 0xe6, 0xf3, 0xb2, 0xa7, 0x63, 0x97, 0xb3, 0x44, 0x4a, 0x26, 
0x79, - 0xf4, 0x00, 0xd6, 0xd0, 0x2e, 0x85, 0x8d, 0x61, 0x5a, 0xa4, 0x02, 0x17, 0xf1, 0x4d, 0x8e, 0x01, - 0x28, 0xe7, 0xbe, 0xdd, 0x19, 0xc5, 0xe2, 0x6b, 0x49, 0xf1, 0x03, 0xbb, 0x13, 0x34, 0x4e, 0xc6, - 0x8d, 0x23, 0x6a, 0xfb, 0xcd, 0x37, 0x95, 0x65, 0x5f, 0x8c, 0x79, 0x12, 0xd6, 0x9d, 0x90, 0xa4, - 0x7f, 0x95, 0x87, 0x75, 0x99, 0x6e, 0x92, 0x0f, 0xd3, 0xc5, 0x8f, 0xf2, 0xfe, 0xf6, 0xa2, 0xe5, - 0x4b, 0x2a, 0xb5, 0xfa, 0x28, 0x82, 0xba, 0x36, 0x5b, 0x51, 0x68, 0x96, 0x4f, 0x9f, 0xef, 0x14, - 0x30, 0xfa, 0x68, 0xdf, 0x89, 0xcb, 0x0b, 0x8b, 0xb2, 0xeb, 0xb0, 0x96, 0x91, 0x7f, 0xe1, 0x5a, - 0x46, 0x0b, 0x36, 0x13, 0xe1, 0x96, 0x6d, 0xa9, 0x3c, 0x65, 0xfb, 0xbc, 0x4b, 0xd7, 0xbe, 0xa3, - 0xd6, 0x5f, 0x8e, 0xc2, 0xb1, 0xb6, 0x45, 0x76, 0xd3, 0x49, 0x36, 0x46, 0x6d, 0x32, 0x5c, 0x48, - 0xe4, 0xcd, 0x22, 0x66, 0x13, 0xd7, 0x41, 0x5c, 0x7e, 0x49, 0x22, 0xa3, 0x87, 0xa2, 0xe8, 0xc0, - 0xc1, 0xeb, 0x50, 0x89, 0x03, 0x1b, 0x49, 0x52, 0x94, 0x52, 0xe2, 0x6e, 0x24, 0x7c, 0x0f, 0x2e, - 0x3a, 0x6c, 0xc2, 0xcd, 0x59, 0xea, 0x12, 0x52, 0x13, 0x31, 0x76, 0x9c, 0xe6, 0xb8, 0x0a, 0x5b, - 0xb1, 0x0b, 0x45, 0x5a, 0x90, 0xa5, 0x8f, 0xa8, 0x17, 0xc9, 0xde, 0x80, 0x62, 0x14, 0x76, 0x96, - 0x91, 0xa0, 0x40, 0x65, 0xb4, 0x19, 0x05, 0xb2, 0x3e, 0x0b, 0x46, 0x03, 0xae, 0x84, 0x6c, 0x20, - 0x0d, 0x06, 0xb2, 0x86, 0xec, 0x47, 0xda, 0xcb, 0xb0, 0x19, 0x7a, 0x15, 0x49, 0xb7, 0x89, 0x74, - 0x1b, 0x61, 0x27, 0x12, 0xdd, 0x80, 0xaa, 0xe7, 0xbb, 0x9e, 0x1b, 0x30, 0xdf, 0xa4, 0x96, 0xe5, - 0xb3, 0x20, 0xa8, 0x6d, 0x49, 0x79, 0x61, 0xff, 0x81, 0xec, 0xd6, 0xbf, 0x05, 0x85, 0x30, 0x9e, - 0xbe, 0x08, 0x6b, 0xcd, 0xc8, 0x43, 0xe6, 0x0d, 0xd9, 0x10, 0xf8, 0x7a, 0xe0, 0x79, 0xaa, 0xba, - 0x26, 0x3e, 0xf5, 0x01, 0x14, 0xd4, 0x81, 0xcd, 0xad, 0xa9, 0xdc, 0x87, 0x0d, 0x8f, 0xfa, 0x62, - 0x1b, 0xc9, 0xca, 0xca, 0xa2, 0x8c, 0xf0, 0x88, 0xfa, 0xfc, 0x21, 0xe3, 0xa9, 0x02, 0x4b, 0x19, - 0xf9, 0x65, 0x97, 0x7e, 0x13, 0x36, 0x53, 0x34, 0x62, 0x99, 0xdc, 0xe5, 0x74, 0x10, 0x5e, 0x74, - 0x6c, 0x44, 0x2b, 0xc9, 0xc5, 
0x2b, 0xd1, 0x6f, 0x41, 0x29, 0x3a, 0x2b, 0x91, 0x68, 0x84, 0xaa, - 0xd0, 0x94, 0xfa, 0x65, 0x13, 0x8b, 0x48, 0xee, 0x33, 0xe6, 0x2b, 0xeb, 0x97, 0x0d, 0x9d, 0x25, - 0x1c, 0x93, 0x44, 0x33, 0x72, 0x1b, 0x0a, 0xca, 0x31, 0xa9, 0xfb, 0xb8, 0xa8, 0x5c, 0x74, 0x84, - 0x9e, 0x2a, 0x2c, 0x17, 0x49, 0xbf, 0x15, 0x4f, 0x93, 0x4b, 0x4e, 0xf3, 0x53, 0x28, 0x86, 0xce, - 0x27, 0x8d, 0x12, 0x72, 0x86, 0x4b, 0xcb, 0x50, 0x42, 0x4d, 0x12, 0x33, 0x0a, 0x6b, 0x0a, 0xec, - 0x9e, 0xc3, 0x2c, 0x33, 0xbe, 0x82, 0x38, 0x67, 0xd1, 0xa8, 0xc8, 0x81, 0x7b, 0xe1, 0xfd, 0xd2, - 0xdf, 0x83, 0x75, 0xb9, 0xd6, 0xb9, 0x2e, 0x6e, 0x1e, 0xb4, 0xfe, 0x43, 0x83, 0x62, 0x08, 0x1f, - 0x73, 0x99, 0x52, 0x9b, 0xc8, 0x7d, 0xdd, 0x4d, 0xbc, 0x7c, 0x97, 0xf4, 0x2e, 0x10, 0xb4, 0x14, - 0x73, 0xec, 0x72, 0xdb, 0xe9, 0x99, 0xf2, 0x2c, 0x64, 0x24, 0x58, 0xc5, 0x91, 0x63, 0x1c, 0x38, - 0x12, 0xfd, 0x6f, 0x5f, 0x86, 0x72, 0xa2, 0xca, 0x45, 0x0a, 0xb0, 0xfa, 0x80, 0x3d, 0xab, 0xae, - 0x90, 0x32, 0x14, 0x0c, 0x86, 0x35, 0x82, 0xaa, 0xb6, 0xff, 0x55, 0x01, 0x2a, 0x07, 0xcd, 0xc3, - 0xf6, 0x81, 0xe7, 0x0d, 0xec, 0x2e, 0xe2, 0x19, 0xf9, 0x04, 0xf2, 0x98, 0x27, 0x67, 0x78, 0xdf, - 0xa9, 0x67, 0x29, 0x38, 0x11, 0x03, 0xd6, 0x30, 0x9d, 0x26, 0x59, 0x9e, 0x7d, 0xea, 0x99, 0xea, - 0x50, 0x62, 0x91, 0x68, 0x70, 0x19, 0x5e, 0x83, 0xea, 0x59, 0x8a, 0x53, 0xe4, 0x33, 0x28, 0xc5, - 0x79, 0x72, 0xd6, 0x37, 0xa2, 0x7a, 0xe6, 0xb2, 0x95, 0x90, 0x1f, 0x67, 0x06, 0x59, 0x5f, 0x48, - 0xea, 0x99, 0xeb, 0x35, 0xe4, 0x09, 0x14, 0xc2, 0x1c, 0x2c, 0xdb, 0x2b, 0x4e, 0x3d, 0x63, 0x49, - 0x49, 0x1c, 0x9f, 0x4c, 0x9d, 0xb3, 0x3c, 0x55, 0xd5, 0x33, 0xd5, 0xcd, 0xc8, 0x63, 0x58, 0x57, - 0xc1, 0x6f, 0xa6, 0xf7, 0x99, 0x7a, 0xb6, 0x42, 0x91, 0x50, 0x72, 0x5c, 0x9c, 0xc8, 0xfa, 0x3c, - 0x57, 0xcf, 0x5c, 0x30, 0x24, 0x14, 0x20, 0x91, 0x4f, 0x67, 0x7e, 0x77, 0xab, 0x67, 0x2f, 0x04, - 0x92, 0x1f, 0x43, 0x31, 0xca, 0x9a, 0x32, 0xbe, 0x7f, 0xd5, 0xb3, 0xd6, 0xe2, 0x9a, 0xed, 0xff, - 0xfc, 0x6d, 0x5b, 0xfb, 0xed, 0xe9, 0xb6, 0xf6, 0xc5, 0xe9, 0xb6, 
0xf6, 0xe5, 0xe9, 0xb6, 0xf6, - 0xa7, 0xd3, 0x6d, 0xed, 0xaf, 0xa7, 0xdb, 0xda, 0x1f, 0xfe, 0xbe, 0xad, 0xfd, 0xe8, 0x9d, 0x9e, - 0xcd, 0xfb, 0xa3, 0x4e, 0xa3, 0xeb, 0x0e, 0xf7, 0x62, 0x81, 0xc9, 0xcf, 0xf8, 0x51, 0xbb, 0xb3, - 0x8e, 0x0e, 0xeb, 0xdb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xce, 0x64, 0xb9, 0xe4, 0xe9, 0x1e, + 0x15, 0xde, 0xd1, 0x6a, 0x57, 0xd2, 0xd3, 0xfe, 0xc8, 0x6d, 0x27, 0x91, 0x85, 0xb3, 0xeb, 0x9a, + 0x8d, 0xed, 0x75, 0x12, 0xb4, 0x61, 0xa9, 0x50, 0x31, 0x76, 0x85, 0x5a, 0xad, 0x1d, 0xa4, 0x8a, + 0xed, 0x6c, 0xc6, 0xf6, 0x62, 0xa0, 0x2a, 0x53, 0x2d, 0x4d, 0x5b, 0x9a, 0x5a, 0x69, 0x66, 0x32, + 0xd3, 0x92, 0x25, 0x8a, 0x3b, 0x45, 0x15, 0x07, 0x2e, 0x54, 0x71, 0xe1, 0xce, 0x91, 0x03, 0x87, + 0x1c, 0x39, 0xe6, 0xc0, 0x81, 0x03, 0x67, 0x03, 0x0b, 0x27, 0x2a, 0x47, 0x8a, 0xe2, 0x48, 0xf5, + 0xeb, 0x9e, 0x3f, 0xad, 0xb4, 0x1a, 0x07, 0xdf, 0xb8, 0x48, 0xd3, 0x3d, 0xef, 0xbd, 0xee, 0x7e, + 0xfd, 0xde, 0xfb, 0xde, 0x7b, 0x03, 0xaf, 0xd3, 0x76, 0xc7, 0xde, 0xe3, 0x13, 0x8f, 0x05, 0xf2, + 0xb7, 0xee, 0xf9, 0x2e, 0x77, 0xc9, 0x6b, 0x9c, 0x39, 0x16, 0xf3, 0x07, 0xb6, 0xc3, 0xeb, 0x82, + 0xa4, 0x8e, 0x2f, 0x6b, 0xd7, 0x79, 0xcf, 0xf6, 0x2d, 0xd3, 0xa3, 0x3e, 0x9f, 0xec, 0x21, 0xe5, + 0x5e, 0xd7, 0xed, 0xba, 0xf1, 0x93, 0x64, 0xaf, 0xd5, 0x3a, 0xfe, 0xc4, 0xe3, 0xee, 0xde, 0x80, + 0xf9, 0x27, 0x7d, 0xa6, 0xfe, 0xd4, 0xbb, 0x8b, 0x7d, 0xbb, 0x1d, 0xec, 0x9d, 0x8c, 0x92, 0xeb, + 0xd5, 0xb6, 0xbb, 0xae, 0xdb, 0xed, 0x33, 0x29, 0xb3, 0x3d, 0x7c, 0xb6, 0xc7, 0xed, 0x01, 0x0b, + 0x38, 0x1d, 0x78, 0x8a, 0x60, 0x6b, 0x9a, 0xc0, 0x1a, 0xfa, 0x94, 0xdb, 0xae, 0x23, 0xdf, 0xeb, + 0xff, 0x5e, 0x81, 0x82, 0xc1, 0x3e, 0x1f, 0xb2, 0x80, 0x93, 0x0f, 0x20, 0xcf, 0x3a, 0x3d, 0xb7, + 0x9a, 0xbb, 0xaa, 0xed, 0x96, 0xf7, 0xf5, 0xfa, 0xcc, 0xb3, 0xd4, 0x15, 0xf5, 0xbd, 0x4e, 0xcf, + 0x6d, 0x2e, 0x19, 0xc8, 0x41, 0x6e, 0xc3, 0xca, 0xb3, 0xfe, 0x30, 0xe8, 0x55, 0x97, 0x91, 0x75, + 0xe7, 0x7c, 0xd6, 0x8f, 0x04, 0x69, 0x73, 0xc9, 0x90, 0x3c, 0x62, 0x59, 0xdb, 0x79, 0xe6, 0x56, + 0xf3, 
0x59, 0x96, 0x6d, 0x39, 0xcf, 0x70, 0x59, 0xc1, 0x41, 0x9a, 0x00, 0x01, 0xe3, 0xa6, 0xeb, + 0x89, 0x03, 0x55, 0x57, 0x90, 0xff, 0xc6, 0xf9, 0xfc, 0x8f, 0x18, 0xff, 0x04, 0xc9, 0x9b, 0x4b, + 0x46, 0x29, 0x08, 0x07, 0x42, 0x92, 0xed, 0xd8, 0xdc, 0xec, 0xf4, 0xa8, 0xed, 0x54, 0x57, 0xb3, + 0x48, 0x6a, 0x39, 0x36, 0x3f, 0x14, 0xe4, 0x42, 0x92, 0x1d, 0x0e, 0x84, 0x2a, 0x3e, 0x1f, 0x32, + 0x7f, 0x52, 0x2d, 0x64, 0x51, 0xc5, 0xa7, 0x82, 0x54, 0xa8, 0x02, 0x79, 0xc8, 0xc7, 0x50, 0x6e, + 0xb3, 0xae, 0xed, 0x98, 0xed, 0xbe, 0xdb, 0x39, 0xa9, 0x16, 0x51, 0xc4, 0xee, 0xf9, 0x22, 0x1a, + 0x82, 0xa1, 0x21, 0xe8, 0x9b, 0x4b, 0x06, 0xb4, 0xa3, 0x11, 0x69, 0x40, 0xb1, 0xd3, 0x63, 0x9d, + 0x13, 0x93, 0x8f, 0xab, 0x25, 0x94, 0x74, 0xed, 0x7c, 0x49, 0x87, 0x82, 0xfa, 0xf1, 0xb8, 0xb9, + 0x64, 0x14, 0x3a, 0xf2, 0x51, 0xe8, 0xc5, 0x62, 0x7d, 0x7b, 0xc4, 0x7c, 0x21, 0xe5, 0x62, 0x16, + 0xbd, 0xdc, 0x95, 0xf4, 0x28, 0xa7, 0x64, 0x85, 0x03, 0x72, 0x0f, 0x4a, 0xcc, 0xb1, 0xd4, 0xc1, + 0xca, 0x28, 0xe8, 0xfa, 0x02, 0x0b, 0x73, 0xac, 0xf0, 0x58, 0x45, 0xa6, 0x9e, 0xc9, 0x87, 0xb0, + 0xda, 0x71, 0x07, 0x03, 0x9b, 0x57, 0xd7, 0x50, 0xc6, 0x5b, 0x0b, 0x8e, 0x84, 0xb4, 0xcd, 0x25, + 0x43, 0x71, 0x35, 0x0a, 0xb0, 0x32, 0xa2, 0xfd, 0x21, 0xd3, 0x6f, 0x40, 0x39, 0x61, 0xc9, 0xa4, + 0x0a, 0x85, 0x01, 0x0b, 0x02, 0xda, 0x65, 0x55, 0xed, 0xaa, 0xb6, 0x5b, 0x32, 0xc2, 0xa1, 0xbe, + 0x01, 0x6b, 0x49, 0xbb, 0xd5, 0x07, 0x11, 0xa3, 0xb0, 0x45, 0xc1, 0x38, 0x62, 0x7e, 0x20, 0x0c, + 0x50, 0x31, 0xaa, 0x21, 0xd9, 0x81, 0x75, 0x3c, 0xad, 0x19, 0xbe, 0x17, 0x7e, 0x95, 0x37, 0xd6, + 0x70, 0xf2, 0x58, 0x11, 0x6d, 0x43, 0xd9, 0xdb, 0xf7, 0x22, 0x92, 0x65, 0x24, 0x01, 0x6f, 0xdf, + 0x53, 0x04, 0xfa, 0x77, 0xa1, 0x32, 0x6d, 0xba, 0xa4, 0x02, 0xcb, 0x27, 0x6c, 0xa2, 0xd6, 0x13, + 0x8f, 0xe4, 0x92, 0x3a, 0x16, 0xae, 0x51, 0x32, 0xd4, 0x19, 0x7f, 0x97, 0x8b, 0x98, 0x23, 0x6b, + 0x15, 0xee, 0x26, 0x82, 0x04, 0x72, 0x97, 0xf7, 0x6b, 0x75, 0x19, 0x20, 0xea, 0x61, 0x80, 0xa8, + 0x3f, 0x0e, 0x23, 0x48, 0xa3, 0xf8, 0xe5, 
0x8b, 0xed, 0xa5, 0x5f, 0xfe, 0x65, 0x5b, 0x33, 0x90, + 0x83, 0x5c, 0x16, 0x06, 0x45, 0x6d, 0xc7, 0xb4, 0x2d, 0xb5, 0x4e, 0x01, 0xc7, 0x2d, 0x8b, 0x7c, + 0x0a, 0x95, 0x8e, 0xeb, 0x04, 0xcc, 0x09, 0x86, 0x81, 0x08, 0x73, 0x74, 0x10, 0xa8, 0x58, 0x30, + 0xef, 0x92, 0x0f, 0x43, 0xf2, 0x23, 0xa4, 0x36, 0x36, 0x3b, 0xe9, 0x09, 0x72, 0x1f, 0x60, 0x44, + 0xfb, 0xb6, 0x45, 0xb9, 0xeb, 0x07, 0xd5, 0xfc, 0xd5, 0xe5, 0x73, 0x84, 0x1d, 0x87, 0x84, 0x4f, + 0x3c, 0x8b, 0x72, 0xd6, 0xc8, 0x8b, 0x9d, 0x1b, 0x09, 0x7e, 0x72, 0x1d, 0x36, 0xa9, 0xe7, 0x99, + 0x01, 0xa7, 0x9c, 0x99, 0xed, 0x09, 0x67, 0x01, 0xc6, 0x8b, 0x35, 0x63, 0x9d, 0x7a, 0xde, 0x23, + 0x31, 0xdb, 0x10, 0x93, 0xba, 0x15, 0xdd, 0x36, 0xba, 0x26, 0x21, 0x90, 0xb7, 0x28, 0xa7, 0xa8, + 0xad, 0x35, 0x03, 0x9f, 0xc5, 0x9c, 0x47, 0x79, 0x4f, 0xe9, 0x00, 0x9f, 0xc9, 0xeb, 0xb0, 0xda, + 0x63, 0x76, 0xb7, 0xc7, 0xf1, 0xd8, 0xcb, 0x86, 0x1a, 0x89, 0x8b, 0xf1, 0x7c, 0x77, 0xc4, 0x30, + 0xba, 0x15, 0x0d, 0x39, 0xd0, 0x7f, 0x95, 0x83, 0x0b, 0x67, 0xdc, 0x57, 0xc8, 0xed, 0xd1, 0xa0, + 0x17, 0xae, 0x25, 0x9e, 0xc9, 0x6d, 0x21, 0x97, 0x5a, 0xcc, 0x57, 0x51, 0xf9, 0xcd, 0x39, 0x1a, + 0x68, 0x22, 0x91, 0x3a, 0xb8, 0x62, 0x21, 0x4f, 0xa0, 0xd2, 0xa7, 0x01, 0x37, 0xa5, 0xed, 0x9b, + 0x18, 0x65, 0x97, 0xcf, 0x8d, 0x04, 0xf7, 0x69, 0xe8, 0x33, 0xc2, 0xb8, 0x95, 0xb8, 0x8d, 0x7e, + 0x6a, 0x96, 0x3c, 0x85, 0x4b, 0xed, 0xc9, 0x4f, 0xa8, 0xc3, 0x6d, 0x87, 0x99, 0x67, 0xee, 0x68, + 0x7b, 0x8e, 0xe8, 0x7b, 0x23, 0xdb, 0x62, 0x4e, 0x27, 0xbc, 0x9c, 0x8b, 0x91, 0x88, 0xe8, 0xf2, + 0x02, 0xfd, 0x29, 0x6c, 0xa4, 0x63, 0x11, 0xd9, 0x80, 0x1c, 0x1f, 0x2b, 0x8d, 0xe4, 0xf8, 0x98, + 0x7c, 0x07, 0xf2, 0x42, 0x1c, 0x6a, 0x63, 0x63, 0x2e, 0x58, 0x28, 0xee, 0xc7, 0x13, 0x8f, 0x19, + 0x48, 0xaf, 0xeb, 0x91, 0x27, 0x44, 0xf1, 0x69, 0x5a, 0xb6, 0x7e, 0x13, 0x36, 0xa7, 0x42, 0x4f, + 0xe2, 0x5a, 0xb5, 0xe4, 0xb5, 0xea, 0x9b, 0xb0, 0x9e, 0x8a, 0x30, 0xfa, 0x1f, 0x57, 0xa1, 0x68, + 0xb0, 0xc0, 0x13, 0x46, 0x4c, 0x9a, 0x50, 0x62, 0xe3, 0x0e, 0x93, 0xb0, 0xa4, 
0x2d, 0x08, 0xe2, + 0x92, 0xe7, 0x5e, 0x48, 0x2f, 0xa2, 0x66, 0xc4, 0x4c, 0x6e, 0xa5, 0x20, 0x79, 0x67, 0x91, 0x90, + 0x24, 0x26, 0xdf, 0x49, 0x63, 0xf2, 0x5b, 0x0b, 0x78, 0xa7, 0x40, 0xf9, 0x56, 0x0a, 0x94, 0x17, + 0x2d, 0x9c, 0x42, 0xe5, 0xd6, 0x0c, 0x54, 0x5e, 0x74, 0xfc, 0x39, 0xb0, 0xdc, 0x9a, 0x01, 0xcb, + 0xbb, 0x0b, 0xf7, 0x32, 0x13, 0x97, 0xef, 0xa4, 0x71, 0x79, 0x91, 0x3a, 0xa6, 0x80, 0xf9, 0xfe, + 0x2c, 0x60, 0xbe, 0xb9, 0x40, 0xc6, 0x5c, 0x64, 0x3e, 0x3c, 0x83, 0xcc, 0xd7, 0x17, 0x88, 0x9a, + 0x01, 0xcd, 0xad, 0x14, 0x34, 0x43, 0x26, 0xdd, 0xcc, 0xc1, 0xe6, 0x8f, 0xce, 0x62, 0xf3, 0x8d, + 0x45, 0xa6, 0x36, 0x0b, 0x9c, 0xbf, 0x37, 0x05, 0xce, 0xd7, 0x16, 0x9d, 0x6a, 0x2e, 0x3a, 0xdf, + 0x14, 0xf1, 0x71, 0xca, 0x33, 0x44, 0x2c, 0x65, 0xbe, 0xef, 0xfa, 0x0a, 0xf8, 0xe4, 0x40, 0xdf, + 0x15, 0x11, 0x3b, 0xb6, 0xff, 0x73, 0x90, 0x1c, 0x9d, 0x36, 0x61, 0xed, 0xfa, 0x17, 0x5a, 0xcc, + 0x8b, 0x91, 0x2d, 0x19, 0xed, 0x4b, 0x2a, 0xda, 0x27, 0x00, 0x3e, 0x97, 0x06, 0xf8, 0x6d, 0x28, + 0x0b, 0x4c, 0x99, 0xc2, 0x6e, 0xea, 0x85, 0xd8, 0x4d, 0xde, 0x86, 0x0b, 0x18, 0x7f, 0x65, 0x1a, + 0xa0, 0x02, 0x49, 0x1e, 0x03, 0xc9, 0xa6, 0x78, 0x21, 0x35, 0x28, 0x81, 0xe2, 0x9b, 0x70, 0x31, + 0x41, 0x2b, 0xe4, 0x22, 0x16, 0x48, 0x90, 0xaa, 0x44, 0xd4, 0x07, 0x9e, 0xd7, 0xa4, 0x41, 0x4f, + 0x7f, 0x10, 0x2b, 0x28, 0xce, 0x0b, 0x08, 0xe4, 0x3b, 0xae, 0x25, 0xcf, 0xbd, 0x6e, 0xe0, 0xb3, + 0xc8, 0x15, 0xfa, 0x6e, 0x17, 0x37, 0x57, 0x32, 0xc4, 0xa3, 0xa0, 0x8a, 0x5c, 0xbb, 0x24, 0x7d, + 0x56, 0xff, 0xbd, 0x16, 0xcb, 0x8b, 0x53, 0x85, 0x59, 0xa8, 0xae, 0xbd, 0x4a, 0x54, 0xcf, 0xfd, + 0x6f, 0xa8, 0xae, 0xff, 0x4b, 0x8b, 0xaf, 0x34, 0xc2, 0xeb, 0xaf, 0xa7, 0x02, 0x61, 0x5d, 0xb6, + 0x63, 0xb1, 0x31, 0xaa, 0x7c, 0xd9, 0x90, 0x83, 0x30, 0xd5, 0x5a, 0xc5, 0x6b, 0x48, 0xa7, 0x5a, + 0x05, 0x9c, 0x93, 0x03, 0xf2, 0x3e, 0xe2, 0xbc, 0xfb, 0x4c, 0x85, 0x86, 0x14, 0x08, 0xca, 0xa2, + 0xae, 0xae, 0xaa, 0xb9, 0x23, 0x41, 0x66, 0x48, 0xea, 0x04, 0xbe, 0x94, 0x52, 0x69, 0xc3, 0x15, + 0x28, 0x89, 0xad, 
0x07, 0x1e, 0xed, 0x30, 0xf4, 0xed, 0x92, 0x11, 0x4f, 0xe8, 0x16, 0x90, 0xb3, + 0x31, 0x86, 0x3c, 0x84, 0x55, 0x36, 0x62, 0x0e, 0x17, 0x77, 0x24, 0xd4, 0x7a, 0x65, 0x2e, 0x10, + 0x33, 0x87, 0x37, 0xaa, 0x42, 0x99, 0xff, 0x7c, 0xb1, 0x5d, 0x91, 0x3c, 0xef, 0xba, 0x03, 0x9b, + 0xb3, 0x81, 0xc7, 0x27, 0x86, 0x92, 0xa2, 0xff, 0x2c, 0x27, 0xf0, 0x30, 0x15, 0x7f, 0x66, 0xaa, + 0x37, 0x74, 0x9a, 0x5c, 0x22, 0x45, 0xca, 0xa6, 0xf2, 0x37, 0x01, 0xba, 0x34, 0x30, 0x9f, 0x53, + 0x87, 0x33, 0x4b, 0xe9, 0xbd, 0xd4, 0xa5, 0xc1, 0x0f, 0x70, 0x42, 0xe4, 0x9b, 0xe2, 0xf5, 0x30, + 0x60, 0x16, 0x5e, 0xc0, 0xb2, 0x51, 0xe8, 0xd2, 0xe0, 0x49, 0xc0, 0xac, 0xc4, 0x59, 0x0b, 0xaf, + 0xe2, 0xac, 0x69, 0x7d, 0x17, 0xa7, 0xf5, 0xfd, 0xf3, 0x5c, 0xec, 0x1d, 0x71, 0xfa, 0xf0, 0xff, + 0xa9, 0x8b, 0xdf, 0x60, 0x4d, 0x91, 0x06, 0x01, 0xf2, 0x43, 0xb8, 0x10, 0x79, 0xa5, 0x39, 0x44, + 0x6f, 0x0d, 0xad, 0xf0, 0xe5, 0x9c, 0xbb, 0x32, 0x4a, 0x4f, 0x07, 0xe4, 0x33, 0x78, 0x63, 0x2a, + 0x06, 0x45, 0x0b, 0xe4, 0x5e, 0x2a, 0x14, 0xbd, 0x96, 0x0e, 0x45, 0xa1, 0xfc, 0x58, 0x7b, 0xcb, + 0xaf, 0xc4, 0x6b, 0x5a, 0x22, 0x85, 0x4d, 0xc2, 0xdb, 0x4c, 0x9b, 0xd8, 0x81, 0x75, 0x9f, 0x71, + 0x51, 0x4b, 0xa5, 0xaa, 0x86, 0x35, 0x39, 0x29, 0x21, 0x41, 0xff, 0xb3, 0x06, 0x9b, 0x53, 0xa7, + 0x20, 0x1f, 0xc0, 0x8a, 0x84, 0x69, 0xed, 0xdc, 0x6e, 0x09, 0x5e, 0x8b, 0x3a, 0xb8, 0x64, 0x20, + 0x07, 0x50, 0x64, 0x2a, 0x05, 0x57, 0x9a, 0xbb, 0xb6, 0x20, 0x53, 0x57, 0xfc, 0x11, 0x1b, 0xb9, + 0x0b, 0xa5, 0xe8, 0x7e, 0x16, 0x94, 0x77, 0xd1, 0xf5, 0x2a, 0x21, 0x31, 0xa3, 0x7e, 0x08, 0xe5, + 0xc4, 0xf6, 0xc8, 0x37, 0xa0, 0x34, 0xa0, 0x63, 0x55, 0x93, 0xc9, 0x2c, 0xbb, 0x38, 0xa0, 0x63, + 0x2c, 0xc7, 0xc8, 0x1b, 0x50, 0x10, 0x2f, 0xbb, 0x54, 0xde, 0xf6, 0xb2, 0xb1, 0x3a, 0xa0, 0xe3, + 0xef, 0xd3, 0x40, 0xff, 0x85, 0x06, 0x1b, 0xe9, 0x7d, 0x92, 0x77, 0x80, 0x08, 0x5a, 0xda, 0x65, + 0xa6, 0x33, 0x1c, 0x48, 0x20, 0x0d, 0x25, 0x6e, 0x0e, 0xe8, 0xf8, 0xa0, 0xcb, 0x1e, 0x0e, 0x07, + 0xb8, 0x74, 0x40, 0x1e, 0x40, 0x25, 0x24, 0x0e, 0x3b, 
0x62, 0x4a, 0x2b, 0x97, 0xcf, 0x54, 0xc4, + 0x77, 0x15, 0x81, 0x2c, 0x88, 0x7f, 0x2d, 0x0a, 0xe2, 0x0d, 0x29, 0x2f, 0x7c, 0xa3, 0xbf, 0x0f, + 0x9b, 0x53, 0x27, 0x26, 0x3a, 0xac, 0x7b, 0xc3, 0xb6, 0x79, 0xc2, 0x26, 0x26, 0xaa, 0x04, 0xfd, + 0xa1, 0x64, 0x94, 0xbd, 0x61, 0xfb, 0x63, 0x36, 0x11, 0xa5, 0x49, 0xa0, 0x77, 0x60, 0x23, 0x5d, + 0x71, 0x09, 0x74, 0xf1, 0xdd, 0xa1, 0x63, 0xe1, 0xbe, 0x57, 0x0c, 0x39, 0x20, 0xb7, 0x61, 0x65, + 0xe4, 0x4a, 0x93, 0x3f, 0xaf, 0xc4, 0x3a, 0x76, 0x39, 0x4b, 0xd4, 0x6d, 0x92, 0x47, 0x0f, 0x60, + 0x05, 0x8d, 0x57, 0x18, 0x22, 0xd6, 0x4e, 0x2a, 0xbb, 0x11, 0xcf, 0xe4, 0x18, 0x80, 0x72, 0xee, + 0xdb, 0xed, 0x61, 0x2c, 0xbe, 0x9a, 0x14, 0xdf, 0xb7, 0xdb, 0x41, 0xfd, 0x64, 0x54, 0x3f, 0xa2, + 0xb6, 0xdf, 0xb8, 0xa2, 0xcc, 0xff, 0x52, 0xcc, 0x93, 0x70, 0x81, 0x84, 0x24, 0xfd, 0xab, 0x3c, + 0xac, 0xca, 0x9a, 0x94, 0x7c, 0x98, 0xee, 0x90, 0x94, 0xf7, 0xb7, 0xe6, 0x6d, 0x5f, 0x52, 0xa9, + 0xdd, 0x47, 0x69, 0xd6, 0xf5, 0xe9, 0xb6, 0x43, 0xa3, 0x7c, 0xfa, 0x62, 0xbb, 0x80, 0x29, 0x4a, + 0xeb, 0x6e, 0xdc, 0x83, 0x98, 0x57, 0x82, 0x87, 0x0d, 0x8f, 0xfc, 0x4b, 0x37, 0x3c, 0x9a, 0xb0, + 0x9e, 0xc8, 0xc9, 0x6c, 0x4b, 0x15, 0x33, 0x5b, 0xe7, 0x39, 0x5d, 0xeb, 0xae, 0xda, 0x7f, 0x39, + 0xca, 0xd9, 0x5a, 0x16, 0xd9, 0x4d, 0x57, 0xe2, 0x98, 0xda, 0xc9, 0x9c, 0x22, 0x51, 0x5c, 0x8b, + 0xc4, 0x4e, 0xb8, 0x83, 0x88, 0x10, 0x92, 0x44, 0xa6, 0x18, 0x45, 0x31, 0x81, 0x2f, 0x6f, 0xc0, + 0x66, 0x9c, 0xfd, 0x48, 0x92, 0xa2, 0x94, 0x12, 0x4f, 0x23, 0xe1, 0x7b, 0x70, 0xc9, 0x61, 0x63, + 0x6e, 0x4e, 0x53, 0x97, 0x90, 0x9a, 0x88, 0x77, 0xc7, 0x69, 0x8e, 0x6b, 0xb0, 0x11, 0xc7, 0x59, + 0xa4, 0x05, 0xd9, 0x1f, 0x89, 0x66, 0x91, 0xec, 0x32, 0x14, 0xa3, 0xdc, 0xb4, 0x8c, 0x04, 0x05, + 0x2a, 0x53, 0xd2, 0x28, 0xdb, 0xf5, 0x59, 0x30, 0xec, 0x73, 0x25, 0x64, 0x0d, 0x69, 0x30, 0xdb, + 0x35, 0xe4, 0x3c, 0xd2, 0xee, 0xc0, 0x7a, 0x18, 0x55, 0x24, 0xdd, 0x3a, 0xd2, 0xad, 0x85, 0x93, + 0x48, 0x74, 0x13, 0x2a, 0x9e, 0xef, 0x7a, 0x6e, 0xc0, 0x7c, 0x93, 0x5a, 0x96, 0xcf, 0x82, 
0xa0, + 0xba, 0x21, 0xe5, 0x85, 0xf3, 0x07, 0x72, 0x5a, 0xff, 0x16, 0x14, 0xc2, 0xa4, 0xfb, 0x12, 0xac, + 0x34, 0xa2, 0x08, 0x99, 0x37, 0xe4, 0x40, 0x80, 0xf0, 0x81, 0xe7, 0xa9, 0x16, 0x9c, 0x78, 0xd4, + 0xfb, 0x50, 0x50, 0x17, 0x36, 0xb3, 0xf1, 0xf2, 0x00, 0xd6, 0x3c, 0xea, 0x8b, 0x63, 0x24, 0xdb, + 0x2f, 0xf3, 0xca, 0xc6, 0x23, 0xea, 0xf3, 0x47, 0x8c, 0xa7, 0xba, 0x30, 0x65, 0xe4, 0x97, 0x53, + 0xfa, 0x2d, 0x58, 0x4f, 0xd1, 0x88, 0x6d, 0x72, 0x97, 0xd3, 0x7e, 0xe8, 0xe8, 0x38, 0x88, 0x76, + 0x92, 0x8b, 0x77, 0xa2, 0xdf, 0x86, 0x52, 0x74, 0x57, 0xa2, 0x1a, 0x09, 0x55, 0xa1, 0x29, 0xf5, + 0xcb, 0x21, 0x76, 0x9a, 0xdc, 0xe7, 0xcc, 0x57, 0xd6, 0x2f, 0x07, 0x3a, 0x4b, 0x04, 0x26, 0x09, + 0x79, 0xe4, 0x0e, 0x14, 0x54, 0x60, 0x52, 0xfe, 0x38, 0xaf, 0xa7, 0x74, 0x84, 0x91, 0x2a, 0xec, + 0x29, 0xc9, 0xb8, 0x15, 0x2f, 0x93, 0x4b, 0x2e, 0xf3, 0x53, 0x28, 0x86, 0xc1, 0x27, 0x8d, 0x12, + 0x72, 0x85, 0xab, 0x8b, 0x50, 0x42, 0x2d, 0x12, 0x33, 0x0a, 0x6b, 0x0a, 0xec, 0xae, 0xc3, 0x2c, + 0x33, 0x76, 0x41, 0x5c, 0xb3, 0x68, 0x6c, 0xca, 0x17, 0xf7, 0x43, 0xff, 0xd2, 0xdf, 0x83, 0x55, + 0xb9, 0xd7, 0x99, 0x21, 0x6e, 0x06, 0xfe, 0xea, 0xff, 0xd0, 0xa0, 0x18, 0xc2, 0xc7, 0x4c, 0xa6, + 0xd4, 0x21, 0x72, 0x5f, 0xf7, 0x10, 0xaf, 0x3e, 0x24, 0xbd, 0x0b, 0x04, 0x2d, 0xc5, 0x1c, 0xb9, + 0xdc, 0x76, 0xba, 0xa6, 0xbc, 0x0b, 0x99, 0x2e, 0x56, 0xf0, 0xcd, 0x31, 0xbe, 0x38, 0x12, 0xf3, + 0x6f, 0xef, 0x40, 0x39, 0xd1, 0x0a, 0x23, 0x05, 0x58, 0x7e, 0xc8, 0x9e, 0x57, 0x96, 0x48, 0x19, + 0x0a, 0x06, 0xc3, 0x46, 0x42, 0x45, 0xdb, 0xff, 0xaa, 0x00, 0x9b, 0x07, 0x8d, 0xc3, 0xd6, 0x81, + 0xe7, 0xf5, 0xed, 0x0e, 0xe2, 0x19, 0xf9, 0x04, 0xf2, 0x58, 0x4c, 0x67, 0xf8, 0x08, 0x54, 0xcb, + 0xd2, 0x95, 0x22, 0x06, 0xac, 0x60, 0xcd, 0x4d, 0xb2, 0x7c, 0x1b, 0xaa, 0x65, 0x6a, 0x56, 0x89, + 0x4d, 0xa2, 0xc1, 0x65, 0xf8, 0x64, 0x54, 0xcb, 0xd2, 0xc1, 0x22, 0x9f, 0x41, 0x29, 0x2e, 0xa6, + 0xb3, 0x7e, 0x48, 0xaa, 0x65, 0xee, 0x6d, 0x09, 0xf9, 0x71, 0xf9, 0x90, 0xf5, 0x33, 0x4a, 0x2d, + 0x73, 0x53, 0x87, 0x3c, 0x85, 
0x42, 0x58, 0xa8, 0x65, 0xfb, 0xd4, 0x53, 0xcb, 0xd8, 0x77, 0x12, + 0xd7, 0x27, 0xeb, 0xeb, 0x2c, 0xdf, 0xb3, 0x6a, 0x99, 0x9a, 0x6b, 0xe4, 0x09, 0xac, 0xaa, 0x0c, + 0x39, 0xd3, 0x47, 0x9c, 0x5a, 0xb6, 0x6e, 0x92, 0x50, 0x72, 0xdc, 0xc1, 0xc8, 0xfa, 0x0d, 0xaf, + 0x96, 0xb9, 0xab, 0x48, 0x28, 0x40, 0xa2, 0xe8, 0xce, 0xfc, 0x71, 0xae, 0x96, 0xbd, 0x5b, 0x48, + 0x7e, 0x0c, 0xc5, 0xa8, 0xb4, 0xca, 0xf8, 0x91, 0xac, 0x96, 0xb5, 0x61, 0xd7, 0x68, 0xfd, 0xe7, + 0x6f, 0x5b, 0xda, 0x6f, 0x4f, 0xb7, 0xb4, 0x2f, 0x4e, 0xb7, 0xb4, 0x2f, 0x4f, 0xb7, 0xb4, 0x3f, + 0x9d, 0x6e, 0x69, 0x7f, 0x3d, 0xdd, 0xd2, 0xfe, 0xf0, 0xf7, 0x2d, 0xed, 0x47, 0xef, 0x74, 0x6d, + 0xde, 0x1b, 0xb6, 0xeb, 0x1d, 0x77, 0xb0, 0x17, 0x0b, 0x4c, 0x3e, 0xc6, 0x5f, 0xbe, 0xdb, 0xab, + 0x18, 0xb0, 0xbe, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x66, 0x8a, 0xe9, 0x0e, 0x1f, 0x00, 0x00, } @@ -4512,6 +4521,9 @@ func (this *ResponseCommit) Equal(that interface{}) bool { if !bytes.Equal(this.Data, that1.Data) { return false } + if this.RetainHeight != that1.RetainHeight { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -7143,6 +7155,11 @@ func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x18 + } if len(m.Data) > 0 { i -= len(m.Data) copy(dAtA[i:], m.Data) @@ -8479,8 +8496,12 @@ func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { for i := 0; i < v30; i++ { this.Data[i] = byte(r.Intn(256)) } + this.RetainHeight = int64(r.Int63()) + if r.Intn(2) == 0 { + this.RetainHeight *= -1 + } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) } return this } @@ -9665,6 +9686,9 @@ func (m *ResponseCommit) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + 
if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -14046,6 +14070,25 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { m.Data = []byte{} } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + } + m.RetainHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetainHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/abci/types/types.proto b/abci/types/types.proto index 0d47ad9b3..351329de1 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -207,7 +207,8 @@ message ResponseEndBlock { message ResponseCommit { // reserve 1 - bytes data = 2; + bytes data = 2; + int64 retain_height = 3; } //---------------------------------------- diff --git a/blockchain/v0/codec.go b/blockchain/v0/codec.go index 4494f41aa..f023bbfa1 100644 --- a/blockchain/v0/codec.go +++ b/blockchain/v0/codec.go @@ -2,6 +2,7 @@ package v0 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/blockchain/v0/pool.go b/blockchain/v0/pool.go index 1931d7960..bd8165752 100644 --- a/blockchain/v0/pool.go +++ b/blockchain/v0/pool.go @@ -284,16 +284,17 @@ func (pool *BlockPool) MaxPeerHeight() int64 { return pool.maxPeerHeight } -// SetPeerHeight sets the peer's alleged blockchain height. -func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) { +// SetPeerRange sets the peer's alleged blockchain base and height. 
+func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) { pool.mtx.Lock() defer pool.mtx.Unlock() peer := pool.peers[peerID] if peer != nil { + peer.base = base peer.height = height } else { - peer = newBPPeer(pool, peerID, height) + peer = newBPPeer(pool, peerID, base, height) peer.setLogger(pool.Logger.With("peer", peerID)) pool.peers[peerID] = peer } @@ -346,9 +347,9 @@ func (pool *BlockPool) updateMaxPeerHeight() { pool.maxPeerHeight = max } -// Pick an available peer with at least the given minHeight. +// Pick an available peer with the given height available. // If no peers are available, returns nil. -func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer { +func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -360,7 +361,7 @@ func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer { if peer.numPending >= maxPendingRequestsPerPeer { continue } - if peer.height < minHeight { + if height < peer.base || height > peer.height { continue } peer.incrPending() @@ -432,6 +433,7 @@ type bpPeer struct { didTimeout bool numPending int32 height int64 + base int64 pool *BlockPool id p2p.ID recvMonitor *flow.Monitor @@ -441,10 +443,11 @@ type bpPeer struct { logger log.Logger } -func newBPPeer(pool *BlockPool, peerID p2p.ID, height int64) *bpPeer { +func newBPPeer(pool *BlockPool, peerID p2p.ID, base int64, height int64) *bpPeer { peer := &bpPeer{ pool: pool, id: peerID, + base: base, height: height, numPending: 0, logger: log.NewNopLogger(), diff --git a/blockchain/v0/pool_test.go b/blockchain/v0/pool_test.go index 783ff2526..9a3dd299c 100644 --- a/blockchain/v0/pool_test.go +++ b/blockchain/v0/pool_test.go @@ -20,6 +20,7 @@ func init() { type testPeer struct { id p2p.ID + base int64 height int64 inputChan chan inputData //make sure each peer's data is sequential } @@ -67,7 +68,11 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { for i := 0; 
i < numPeers; i++ { peerID := p2p.ID(tmrand.Str(12)) height := minHeight + tmrand.Int63n(maxHeight-minHeight) - peers[peerID] = testPeer{peerID, height, make(chan inputData, 10)} + base := minHeight + int64(i) + if base > height { + base = height + } + peers[peerID] = testPeer{peerID, base, height, make(chan inputData, 10)} } return peers } @@ -93,7 +98,7 @@ func TestBlockPoolBasic(t *testing.T) { // Introduce each peer. go func() { for _, peer := range peers { - pool.SetPeerHeight(peer.id, peer.height) + pool.SetPeerRange(peer.id, peer.base, peer.height) } }() @@ -148,7 +153,7 @@ func TestBlockPoolTimeout(t *testing.T) { // Introduce each peer. go func() { for _, peer := range peers { - pool.SetPeerHeight(peer.id, peer.height) + pool.SetPeerRange(peer.id, peer.base, peer.height) } }() @@ -192,7 +197,7 @@ func TestBlockPoolRemovePeer(t *testing.T) { for i := 0; i < 10; i++ { peerID := p2p.ID(fmt.Sprintf("%d", i+1)) height := int64(i + 1) - peers[peerID] = testPeer{peerID, height, make(chan inputData)} + peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)} } requestsCh := make(chan BlockRequest) errorsCh := make(chan peerError) @@ -205,7 +210,7 @@ func TestBlockPoolRemovePeer(t *testing.T) { // add peers for peerID, peer := range peers { - pool.SetPeerHeight(peerID, peer.height) + pool.SetPeerRange(peerID, peer.base, peer.height) } assert.EqualValues(t, 10, pool.MaxPeerHeight()) diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index d47e892c2..247222160 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -140,12 +140,15 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor by sending our state to peer. 
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + }) peer.Send(BlockchainChannel, msgBytes) // it's OK if send fails. will try later in poolRoutine // peer is added to the pool once we receive the first - // bcStatusResponseMessage from the peer and call pool.SetPeerHeight + // bcStatusResponseMessage from the peer and call pool.SetPeerRange } // RemovePeer implements Reactor by removing peer from the pool. @@ -155,8 +158,6 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { // respondToPeer loads a block and sends it to the requesting peer, // if we have it. Otherwise, we'll respond saying we don't have it. -// According to the Tendermint spec, if all nodes are honest, -// no node should be requesting for a block that's non-existent. func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage, src p2p.Peer) (queued bool) { @@ -196,11 +197,15 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) case *bcStatusRequestMessage: // Send peer our state. - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) - src.TrySend(BlockchainChannel, msgBytes) + src.TrySend(BlockchainChannel, cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + })) case *bcStatusResponseMessage: // Got a peer status. Unverified. 
- bcR.pool.SetPeerHeight(src.ID(), msg.Height) + bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height) + case *bcNoBlockResponseMessage: + bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) default: bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -338,7 +343,7 @@ FOR_LOOP: // TODO: same thing for app - but we would need a way to // get the hash without persisting the state var err error - state, err = bcR.blockExec.ApplyBlock(state, firstID, first) + state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first) if err != nil { // TODO This is bad, are we zombie? panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) @@ -360,9 +365,12 @@ FOR_LOOP: } } -// BroadcastStatusRequest broadcasts `BlockStore` height. +// BroadcastStatusRequest broadcasts `BlockStore` base and height. func (bcR *BlockchainReactor) BroadcastStatusRequest() error { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) bcR.Switch.Broadcast(BlockchainChannel, msgBytes) return nil } @@ -446,34 +454,48 @@ func (m *bcBlockResponseMessage) String() string { type bcStatusRequestMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. 
func (m *bcStatusRequestMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) } //------------------------------------- type bcStatusResponseMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. func (m *bcStatusResponseMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) } diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index 5a97d3aed..a31c9a141 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -7,10 +7,10 @@ import ( "time" "github.com/pkg/errors" - "github.com/tendermint/tendermint/store" - "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -18,9 +18,9 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) var config *cfg.Config @@ -112,7 +112,7 @@ func 
newBlockchainReactor( thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { panic(errors.Wrap(err, "error apply block")) } diff --git a/blockchain/v1/codec.go b/blockchain/v1/codec.go index 786584435..ce4f7dfab 100644 --- a/blockchain/v1/codec.go +++ b/blockchain/v1/codec.go @@ -2,6 +2,7 @@ package v1 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/blockchain/v1/peer.go b/blockchain/v1/peer.go index 02b1b4fc1..ad26585b3 100644 --- a/blockchain/v1/peer.go +++ b/blockchain/v1/peer.go @@ -27,6 +27,7 @@ type BpPeer struct { logger log.Logger ID p2p.ID + Base int64 // the peer reported base Height int64 // the peer reported height NumPendingBlockRequests int // number of requests still waiting for block responses blocks map[int64]*types.Block // blocks received or expected to be received from this peer @@ -38,14 +39,15 @@ type BpPeer struct { } // NewBpPeer creates a new peer. 
-func NewBpPeer( - peerID p2p.ID, height int64, onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer { +func NewBpPeer(peerID p2p.ID, base int64, height int64, + onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer { if params == nil { params = BpPeerDefaultParams() } return &BpPeer{ ID: peerID, + Base: base, Height: height, blocks: make(map[int64]*types.Block, maxRequestsPerPeer), logger: log.NewNopLogger(), diff --git a/blockchain/v1/peer_test.go b/blockchain/v1/peer_test.go index aac03db7e..0e7a73473 100644 --- a/blockchain/v1/peer_test.go +++ b/blockchain/v1/peer_test.go @@ -16,7 +16,7 @@ import ( func TestPeerMonitor(t *testing.T) { peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, nil) peer.SetLogger(log.TestingLogger()) @@ -35,7 +35,7 @@ func TestPeerResetBlockResponseTimer(t *testing.T) { params := &BpPeerParams{timeout: 2 * time.Millisecond} peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) { peerTestMtx.Lock() defer peerTestMtx.Unlock() @@ -75,7 +75,7 @@ func TestPeerRequestSent(t *testing.T) { params := &BpPeerParams{timeout: 2 * time.Millisecond} peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, params) @@ -94,7 +94,7 @@ func TestPeerRequestSent(t *testing.T) { func TestPeerGetAndRemoveBlock(t *testing.T) { peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 100, + p2p.ID(tmrand.Str(12)), 0, 100, func(err error, _ p2p.ID) {}, nil) @@ -142,7 +142,7 @@ func TestPeerGetAndRemoveBlock(t *testing.T) { func TestPeerAddBlock(t *testing.T) { peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 100, + p2p.ID(tmrand.Str(12)), 0, 100, func(err error, _ p2p.ID) {}, nil) @@ -189,7 +189,7 @@ func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) { ) peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) { 
peerTestMtx.Lock() defer peerTestMtx.Unlock() @@ -215,7 +215,7 @@ func TestPeerCheckRate(t *testing.T) { minRecvRate: int64(100), // 100 bytes/sec exponential moving average } peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, params) peer.SetLogger(log.TestingLogger()) @@ -249,7 +249,7 @@ func TestPeerCleanup(t *testing.T) { params := &BpPeerParams{timeout: 2 * time.Millisecond} peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, params) peer.SetLogger(log.TestingLogger()) diff --git a/blockchain/v1/pool.go b/blockchain/v1/pool.go index be2edbc21..27e0f3a04 100644 --- a/blockchain/v1/pool.go +++ b/blockchain/v1/pool.go @@ -66,9 +66,9 @@ func (pool *BlockPool) updateMaxPeerHeight() { pool.MaxPeerHeight = newMax } -// UpdatePeer adds a new peer or updates an existing peer with a new height. +// UpdatePeer adds a new peer or updates an existing peer with a new base and height. // If a peer is short it is not added. -func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error { +func (pool *BlockPool) UpdatePeer(peerID p2p.ID, base int64, height int64) error { peer := pool.peers[peerID] @@ -79,10 +79,10 @@ func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error { return errPeerTooShort } // Add new peer. - peer = NewBpPeer(peerID, height, pool.toBcR.sendPeerError, nil) + peer = NewBpPeer(peerID, base, height, pool.toBcR.sendPeerError, nil) peer.SetLogger(pool.logger.With("peer", peerID)) pool.peers[peerID] = peer - pool.logger.Info("added peer", "peerID", peerID, "height", height, "num_peers", len(pool.peers)) + pool.logger.Info("added peer", "peerID", peerID, "base", base, "height", height, "num_peers", len(pool.peers)) } else { // Check if peer is lowering its height. This is not allowed. 
if height < peer.Height { @@ -90,6 +90,7 @@ func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error { return errPeerLowersItsHeight } // Update existing peer. + peer.Base = base peer.Height = height } @@ -213,7 +214,7 @@ func (pool *BlockPool) sendRequest(height int64) bool { if peer.NumPendingBlockRequests >= maxRequestsPerPeer { continue } - if peer.Height < height { + if peer.Base > height || peer.Height < height { continue } diff --git a/blockchain/v1/pool_test.go b/blockchain/v1/pool_test.go index e612eb43e..31b9d09f7 100644 --- a/blockchain/v1/pool_test.go +++ b/blockchain/v1/pool_test.go @@ -13,6 +13,7 @@ import ( type testPeer struct { id p2p.ID + base int64 height int64 } @@ -70,7 +71,7 @@ func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64] if p.Height > maxH { maxH = p.Height } - bPool.peers[p.ID] = NewBpPeer(p.ID, p.Height, bcr.sendPeerError, nil) + bPool.peers[p.ID] = NewBpPeer(p.ID, p.Base, p.Height, bcr.sendPeerError, nil) bPool.peers[p.ID].SetLogger(bcr.logger) } @@ -93,6 +94,7 @@ func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2 assert.NotNil(t, peer2) assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests) assert.Equal(t, peer1.Height, peer2.Height) + assert.Equal(t, peer1.Base, peer2.Base) assert.Equal(t, len(peer1.blocks), len(peer2.blocks)) for h, block1 := range peer1.blocks { block2 := peer2.blocks[h] @@ -123,26 +125,32 @@ func TestBlockPoolUpdatePeer(t *testing.T) { { name: "add a first short peer", pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), - args: testPeer{"P1", 50}, + args: testPeer{"P1", 0, 50}, errWanted: errPeerTooShort, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), }, { name: "add a first good peer", pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), - args: testPeer{"P1", 101}, + args: testPeer{"P1", 0, 101}, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", 
Height: 101}}, map[int64]tPBlocks{}), }, + { + name: "add a first good peer with base", + pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + args: testPeer{"P1", 10, 101}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Base: 10, Height: 101}}, map[int64]tPBlocks{}), + }, { name: "increase the height of P1 from 120 to 123", pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), - args: testPeer{"P1", 123}, + args: testPeer{"P1", 0, 123}, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}), }, { name: "decrease the height of P1 from 120 to 110", pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), - args: testPeer{"P1", 110}, + args: testPeer{"P1", 0, 110}, errWanted: errPeerLowersItsHeight, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), }, @@ -151,7 +159,7 @@ func TestBlockPoolUpdatePeer(t *testing.T) { pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}}, map[int64]tPBlocks{ 100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}), - args: testPeer{"P1", 102}, + args: testPeer{"P1", 0, 102}, errWanted: errPeerLowersItsHeight, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), @@ -162,7 +170,7 @@ func TestBlockPoolUpdatePeer(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { pool := tt.pool - err := pool.UpdatePeer(tt.args.id, tt.args.height) + err := pool.UpdatePeer(tt.args.id, tt.args.base, tt.args.height) assert.Equal(t, tt.errWanted, err) assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks) assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers) @@ -300,20 +308,34 @@ func TestBlockPoolSendRequestBatch(t *testing.T) { testBcR := newTestBcR() tests := []struct { - name string - pool *BlockPool - maxRequestsPerPeer int - expRequests map[int64]bool - expPeerResults []testPeerResult - expnumPendingBlockRequests int + name string 
+ pool *BlockPool + maxRequestsPerPeer int + expRequests map[int64]bool + expRequestsSent int + expPeerResults []testPeerResult }{ { - name: "one peer - send up to maxRequestsPerPeer block requests", - pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), - maxRequestsPerPeer: 2, - expRequests: map[int64]bool{10: true, 11: true}, - expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}}, - expnumPendingBlockRequests: 2, + name: "one peer - send up to maxRequestsPerPeer block requests", + pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), + maxRequestsPerPeer: 2, + expRequests: map[int64]bool{10: true, 11: true}, + expRequestsSent: 2, + expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}}, + }, + { + name: "multiple peers - stops at gap between height and base", + pool: makeBlockPool(testBcR, 10, []BpPeer{ + {ID: "P1", Base: 1, Height: 12}, + {ID: "P2", Base: 15, Height: 100}, + }, map[int64]tPBlocks{}), + maxRequestsPerPeer: 10, + expRequests: map[int64]bool{10: true, 11: true, 12: true}, + expRequestsSent: 3, + expPeerResults: []testPeerResult{ + {id: "P1", numPendingBlockRequests: 3}, + {id: "P2", numPendingBlockRequests: 0}, + }, }, { name: "n peers - send n*maxRequestsPerPeer block requests", @@ -324,10 +346,10 @@ func TestBlockPoolSendRequestBatch(t *testing.T) { map[int64]tPBlocks{}), maxRequestsPerPeer: 2, expRequests: map[int64]bool{10: true, 11: true}, + expRequestsSent: 4, expPeerResults: []testPeerResult{ {id: "P1", numPendingBlockRequests: 2}, {id: "P2", numPendingBlockRequests: 2}}, - expnumPendingBlockRequests: 4, }, } @@ -339,15 +361,13 @@ func TestBlockPoolSendRequestBatch(t *testing.T) { var pool = tt.pool maxRequestsPerPeer = tt.maxRequestsPerPeer pool.MakeNextRequests(10) - assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers)) + assert.Equal(t, tt.expRequestsSent, testResults.numRequestsSent) for _, tPeer := 
range tt.expPeerResults { var peer = pool.peers[tPeer.id] assert.NotNil(t, peer) assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests) } - assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers)) - }) } } diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go index 1aba26b35..28a314b8a 100644 --- a/blockchain/v1/reactor.go +++ b/blockchain/v1/reactor.go @@ -7,6 +7,7 @@ import ( "time" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/behaviour" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" @@ -168,7 +169,10 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor by sending our state to peer. func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) peer.Send(BlockchainChannel, msgBytes) // it's OK if send fails. 
will try later in poolRoutine @@ -195,7 +199,10 @@ func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcBlockRequestMessage, } func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessage, src p2p.Peer) (queued bool) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) return src.TrySend(BlockchainChannel, msgBytes) } @@ -429,7 +436,7 @@ func (bcR *BlockchainReactor) processBlock() error { bcR.store.SaveBlock(first, firstParts, second.LastCommit) - bcR.state, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first) + bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first) if err != nil { panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) } @@ -440,7 +447,10 @@ func (bcR *BlockchainReactor) processBlock() error { // Implements bcRNotifier // sendStatusRequest broadcasts `BlockStore` height. func (bcR *BlockchainReactor) sendStatusRequest() { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) bcR.Switch.Broadcast(BlockchainChannel, msgBytes) } @@ -589,6 +599,7 @@ func (m *bcBlockResponseMessage) String() string { type bcStatusRequestMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. 
@@ -596,17 +607,24 @@ func (m *bcStatusRequestMessage) ValidateBasic() error { if m.Height < 0 { return errors.New("negative Height") } + if m.Base < 0 { + return errors.New("negative Base") + } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) } //------------------------------------- type bcStatusResponseMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. @@ -614,9 +632,15 @@ func (m *bcStatusResponseMessage) ValidateBasic() error { if m.Height < 0 { return errors.New("negative Height") } + if m.Base < 0 { + return errors.New("negative Base") + } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) } diff --git a/blockchain/v1/reactor_fsm.go b/blockchain/v1/reactor_fsm.go index 8d3a363ae..0f65f9d66 100644 --- a/blockchain/v1/reactor_fsm.go +++ b/blockchain/v1/reactor_fsm.go @@ -58,6 +58,7 @@ func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM { type bReactorEventData struct { peerID p2p.ID err error // for peer error: timeout, slow; for processed block event if error occurred + base int64 // for status response height int64 // for status response; for processed block event block *types.Block // for block response stateName string // for state timeout events @@ -89,7 +90,7 @@ func (msg *bcReactorMessage) String() string { case startFSMEv: dataStr = "" case statusResponseEv: - dataStr = fmt.Sprintf("peer=%v height=%v", msg.data.peerID, msg.data.height) + dataStr = fmt.Sprintf("peer=%v base=%v height=%v", 
msg.data.peerID, msg.data.base, msg.data.height) case blockResponseEv: dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v", msg.data.peerID, msg.data.block.Height, msg.data.length) @@ -213,7 +214,7 @@ func init() { return finished, errNoTallerPeer case statusResponseEv: - if err := fsm.pool.UpdatePeer(data.peerID, data.height); err != nil { + if err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height); err != nil { if fsm.pool.NumPeers() == 0 { return waitForPeer, err } @@ -246,7 +247,7 @@ func init() { switch ev { case statusResponseEv: - err := fsm.pool.UpdatePeer(data.peerID, data.height) + err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height) if fsm.pool.NumPeers() == 0 { return waitForPeer, err } diff --git a/blockchain/v1/reactor_fsm_test.go b/blockchain/v1/reactor_fsm_test.go index f51defb51..5980ceb08 100644 --- a/blockchain/v1/reactor_fsm_test.go +++ b/blockchain/v1/reactor_fsm_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index deb73ad6d..e0b3472bf 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -10,6 +10,10 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -20,7 +24,6 @@ import ( "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) var config *cfg.Config @@ -46,15 +49,19 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G } func makeVote( + t *testing.T, 
header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote { - addr := privVal.GetPubKey().Address() - idx, _ := valset.GetByAddress(addr) + + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + + valIdx, _ := valset.GetByAddress(pubKey.Address()) vote := &types.Vote{ - ValidatorAddress: addr, - ValidatorIndex: idx, + ValidatorAddress: pubKey.Address(), + ValidatorIndex: valIdx, Height: header.Height, Round: 1, Timestamp: tmtime.Now(), @@ -73,6 +80,7 @@ type BlockchainReactorPair struct { } func newBlockchainReactor( + t *testing.T, logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, @@ -114,7 +122,7 @@ func newBlockchainReactor( lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]) + vote := makeVote(t, &lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]) lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) } @@ -123,7 +131,7 @@ func newBlockchainReactor( thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { panic(errors.Wrap(err, "error apply block")) } @@ -138,6 +146,7 @@ func newBlockchainReactor( } func newBlockchainReactorPair( + t *testing.T, logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, @@ -147,7 +156,7 @@ func newBlockchainReactorPair( consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor) return BlockchainReactorPair{ - newBlockchainReactor(logger, genDoc, privVals, maxBlockHeight), + newBlockchainReactor(t, logger, genDoc, privVals, 
maxBlockHeight), consensusReactor} } @@ -174,8 +183,8 @@ func TestFastSyncNoBlockResponse(t *testing.T) { reactorPairs := make([]BlockchainReactorPair, 2) logger := log.TestingLogger() - reactorPairs[0] = newBlockchainReactorPair(logger, genDoc, privVals, maxBlockHeight) - reactorPairs[1] = newBlockchainReactorPair(logger, genDoc, privVals, 0) + reactorPairs[0] = newBlockchainReactorPair(t, logger, genDoc, privVals, maxBlockHeight) + reactorPairs[1] = newBlockchainReactorPair(t, logger, genDoc, privVals, 0) p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch { s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR) @@ -239,7 +248,7 @@ func TestFastSyncBadBlockStopsPeer(t *testing.T) { defer os.RemoveAll(config.RootDir) genDoc, privVals := randGenesisDoc(1, false, 30) - otherChain := newBlockchainReactorPair(log.TestingLogger(), genDoc, privVals, maxBlockHeight) + otherChain := newBlockchainReactorPair(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight) defer func() { _ = otherChain.bcR.Stop() _ = otherChain.conR.Stop() @@ -254,7 +263,7 @@ func TestFastSyncBadBlockStopsPeer(t *testing.T) { if i == 0 { height = maxBlockHeight } - reactorPairs[i] = newBlockchainReactorPair(logger[i], genDoc, privVals, height) + reactorPairs[i] = newBlockchainReactorPair(t, logger[i], genDoc, privVals, height) } switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch { @@ -296,7 +305,7 @@ outerFor: reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store lastLogger := log.TestingLogger() - lastReactorPair := newBlockchainReactorPair(lastLogger, genDoc, privVals, 0) + lastReactorPair := newBlockchainReactorPair(t, lastLogger, genDoc, privVals, 0) reactorPairs = append(reactorPairs, lastReactorPair) switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch { diff --git a/blockchain/v2/codec.go b/blockchain/v2/codec.go index f970d115f..4e92846c4 100644 --- 
a/blockchain/v2/codec.go +++ b/blockchain/v2/codec.go @@ -2,6 +2,7 @@ package v2 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/blockchain/v2/io.go b/blockchain/v2/io.go index 3db48c8c0..fde446f6e 100644 --- a/blockchain/v2/io.go +++ b/blockchain/v2/io.go @@ -14,7 +14,7 @@ type iIO interface { sendBlockNotFound(height int64, peerID p2p.ID) error sendStatusResponse(height int64, peerID p2p.ID) error - broadcastStatusRequest(height int64) + broadcastStatusRequest(base int64, height int64) trySwitchToConsensus(state state.State, blocksSynced int) } @@ -104,8 +104,11 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, blocksSynced int) { } } -func (sio *switchIO) broadcastStatusRequest(height int64) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{height}) +func (sio *switchIO) broadcastStatusRequest(base int64, height int64) { + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ + Base: base, + Height: height, + }) // XXX: maybe we should use an io specific peer list here sio.sw.Broadcast(BlockchainChannel, msgBytes) } diff --git a/blockchain/v2/processor_context.go b/blockchain/v2/processor_context.go index 7e96a3a69..2e8142adc 100644 --- a/blockchain/v2/processor_context.go +++ b/blockchain/v2/processor_context.go @@ -29,7 +29,7 @@ func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContex } func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { - newState, err := pc.applier.ApplyBlock(pc.state, blockID, block) + newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block) pc.state = newState return err } diff --git a/blockchain/v2/processor_test.go b/blockchain/v2/processor_test.go index fc35c4c72..6bc36b2d3 100644 --- a/blockchain/v2/processor_test.go +++ b/blockchain/v2/processor_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/p2p" tmState 
"github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" diff --git a/blockchain/v2/reactor.go b/blockchain/v2/reactor.go index 767e59819..88ec6268d 100644 --- a/blockchain/v2/reactor.go +++ b/blockchain/v2/reactor.go @@ -7,6 +7,7 @@ import ( "time" "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/behaviour" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" @@ -71,41 +72,56 @@ func (m *bcBlockResponseMessage) String() string { type bcStatusRequestMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. func (m *bcStatusRequestMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) } //------------------------------------- type bcStatusResponseMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. 
func (m *bcStatusResponseMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) } type blockStore interface { LoadBlock(height int64) *types.Block SaveBlock(*types.Block, *types.PartSet, *types.Commit) + Base() int64 Height() int64 } @@ -135,7 +151,7 @@ type blockVerifier interface { //nolint:deadcode type blockApplier interface { - ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) + ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error) } // XXX: unify naming in this package around tmState @@ -265,6 +281,7 @@ type bcStatusResponse struct { priorityNormal time time.Time peerID p2p.ID + base int64 height int64 } @@ -336,7 +353,7 @@ func (r *BlockchainReactor) demux() { case <-doProcessBlockCh: r.processor.send(rProcessBlock{}) case <-doStatusCh: - r.io.broadcastStatusRequest(r.SyncHeight()) + r.io.broadcastStatusRequest(r.store.Base(), r.SyncHeight()) // Events from peers case event := <-r.events: @@ -482,7 +499,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } case *bcStatusResponseMessage: - r.events <- bcStatusResponse{peerID: src.ID(), height: msg.Height} + r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height} case *bcBlockResponseMessage: r.events <- bcBlockResponse{ diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index ad091f582..108f6f500 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -10,6 +10,8 @@ import ( "github.com/pkg/errors" 
"github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/behaviour" cfg "github.com/tendermint/tendermint/config" @@ -23,7 +25,6 @@ import ( "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) type mockPeer struct { @@ -76,9 +77,9 @@ type mockBlockApplier struct { } // XXX: Add whitelist/blacklist? -func (mba *mockBlockApplier) ApplyBlock(state sm.State, blockID types.BlockID, block *types.Block) (sm.State, error) { +func (mba *mockBlockApplier) ApplyBlock(state sm.State, blockID types.BlockID, block *types.Block) (sm.State, int64, error) { state.LastBlockHeight++ - return state, nil + return state, 0, nil } type mockSwitchIo struct { @@ -126,7 +127,7 @@ func (sio *mockSwitchIo) hasSwitchedToConsensus() bool { return sio.switchedToConsensus } -func (sio *mockSwitchIo) broadcastStatusRequest(height int64) { +func (sio *mockSwitchIo) broadcastStatusRequest(base int64, height int64) { } type testReactorParams struct { @@ -510,7 +511,7 @@ func newReactorStore( thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { panic(errors.Wrap(err, "error apply block")) } diff --git a/blockchain/v2/routine.go b/blockchain/v2/routine.go index 1a883c3c4..ff12bfebc 100644 --- a/blockchain/v2/routine.go +++ b/blockchain/v2/routine.go @@ -5,6 +5,7 @@ import ( "sync/atomic" "github.com/Workiva/go-datastructures/queue" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/blockchain/v2/scheduler.go b/blockchain/v2/scheduler.go index 3cf0b2468..803955b22 100644 --- a/blockchain/v2/scheduler.go +++ b/blockchain/v2/scheduler.go @@ 
-111,20 +111,22 @@ type scPeer struct { // updated to Removed when peer is removed state peerState + base int64 // updated when statusResponse is received height int64 // updated when statusResponse is received lastTouched time.Time lastRate int64 // last receive rate in bytes } func (p scPeer) String() string { - return fmt.Sprintf("{state %v, height %d, lastTouched %v, lastRate %d, id %v}", - p.state, p.height, p.lastTouched, p.lastRate, p.peerID) + return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}", + p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID) } func newScPeer(peerID p2p.ID) *scPeer { return &scPeer{ peerID: peerID, state: peerStateNew, + base: -1, height: -1, lastTouched: time.Time{}, } @@ -280,7 +282,7 @@ func (sc *scheduler) addNewBlocks() { } } -func (sc *scheduler) setPeerHeight(peerID p2p.ID, height int64) error { +func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error { peer, ok := sc.peers[peerID] if !ok { return fmt.Errorf("cannot find peer %s", peerID) @@ -295,6 +297,11 @@ func (sc *scheduler) setPeerHeight(peerID p2p.ID, height int64) error { return fmt.Errorf("cannot move peer height lower. 
from %d to %d", peer.height, height) } + if base > height { + return fmt.Errorf("cannot set peer base higher than its height") + } + + peer.base = base peer.height = height peer.state = peerStateReady @@ -312,13 +319,13 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState { } } -func (sc *scheduler) getPeersAtHeightOrAbove(height int64) []p2p.ID { +func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID { peers := make([]p2p.ID, 0) for _, peer := range sc.peers { if peer.state != peerStateReady { continue } - if peer.height >= height { + if peer.base <= height && peer.height >= height { peers = append(peers, peer.peerID) } } @@ -395,6 +402,11 @@ func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) er height, peerID, peer.height) } + if height < peer.base { + return fmt.Errorf("cannot request height %d for peer %s with base %d", + height, peerID, peer.base) + } + sc.setStateAtHeight(height, blockStatePending) sc.pendingBlocks[height] = peerID sc.pendingTime[height] = time @@ -463,7 +475,7 @@ func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 { } func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) { - peers := sc.getPeersAtHeightOrAbove(height) + peers := sc.getPeersWithHeight(height) if len(peers) == 0 { return "", fmt.Errorf("cannot find peer for height %d", height) } @@ -535,8 +547,8 @@ func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, erro _ = sc.removePeer(event.peerID) return scPeerError{peerID: event.peerID, - reason: fmt.Errorf("peer %v with height %d claims no block for %d", - event.peerID, peer.height, event.height)}, nil + reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d", + event.peerID, peer.base, peer.height, event.height)}, nil } func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) { @@ -653,7 +665,7 @@ func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) { } func (sc *scheduler) 
handleStatusResponse(event bcStatusResponse) (Event, error) { - err := sc.setPeerHeight(event.peerID, event.height) + err := sc.setPeerRange(event.peerID, event.base, event.height) if err != nil { return scPeerError{peerID: event.peerID, reason: err}, nil } diff --git a/blockchain/v2/scheduler_test.go b/blockchain/v2/scheduler_test.go index 445ba51a7..4ec81e123 100644 --- a/blockchain/v2/scheduler_test.go +++ b/blockchain/v2/scheduler_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -144,8 +145,8 @@ func TestScMaxHeights(t *testing.T) { sc: scheduler{ height: 1, peers: map[p2p.ID]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: -1, state: peerStateNew}}, + "P1": {base: -1, height: -1, state: peerStateNew}, + "P2": {base: -1, height: -1, state: peerStateNew}}, }, wantMax: 0, }, @@ -193,15 +194,15 @@ func TestScAddPeer(t *testing.T) { name: "add first peer", fields: scTestParams{}, args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, + wantFields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, }, { name: "add second peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, + fields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, args: args{peerID: "P2"}, wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: -1, state: peerStateNew}}}, + "P1": {base: -1, height: -1, state: peerStateNew}, + "P2": {base: -1, height: -1, state: peerStateNew}}}, }, { name: "attempt to add duplicate peer", @@ -500,10 +501,11 @@ func TestScRemovePeer(t *testing.T) { } } -func TestScSetPeerHeight(t *testing.T) { +func TestScSetPeerRange(t *testing.T) { type args struct { peerID p2p.ID + base int64 height 
int64 } tests := []struct { @@ -575,13 +577,37 @@ func TestScSetPeerHeight(t *testing.T) { peers: map[string]*scPeer{"P2": {height: 10000000000, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}}, }, + { + name: "add peer with base > height should error", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}}, + args: args{peerID: "P1", base: 6, height: 5}, + wantFields: scTestParams{ + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}}, + wantErr: true, + }, + { + name: "add peer with base == height is fine", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateNew}}, + targetPending: 4, + }, + args: args{peerID: "P1", base: 6, height: 6}, + wantFields: scTestParams{ + targetPending: 4, + peers: map[string]*scPeer{"P1": {base: 6, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}}, + }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { sc := newTestScheduler(tt.fields) - if err := sc.setPeerHeight(tt.args.peerID, tt.args.height); (err != nil) != tt.wantErr { + err := sc.setPeerRange(tt.args.peerID, tt.args.base, tt.args.height) + if (err != nil) != tt.wantErr { t.Errorf("setPeerHeight() wantErr %v, error = %v", tt.wantErr, err) } wantSc := newTestScheduler(tt.wantFields) @@ -590,7 +616,7 @@ func TestScSetPeerHeight(t *testing.T) { } } -func TestScGetPeersAtHeight(t *testing.T) { +func TestScGetPeersWithHeight(t *testing.T) { type args struct { height int64 @@ -647,6 +673,26 @@ func TestScGetPeersAtHeight(t *testing.T) { args: args{height: 4}, wantResult: []p2p.ID{"P1"}, }, + { + name: "one Ready higher peer at base", + fields: scTestParams{ + targetPending: 4, + peers: map[string]*scPeer{"P1": {base: 4, height: 20, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}, + }, + args: args{height: 4}, + wantResult: []p2p.ID{"P1"}, + }, + { + name: "one Ready higher peer with higher 
base", + fields: scTestParams{ + targetPending: 4, + peers: map[string]*scPeer{"P1": {base: 10, height: 20, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}, + }, + args: args{height: 4}, + wantResult: []p2p.ID{}, + }, { name: "multiple mixed peers", fields: scTestParams{ @@ -668,9 +714,9 @@ func TestScGetPeersAtHeight(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { sc := newTestScheduler(tt.fields) - // getPeersAtHeight should not mutate the scheduler + // getPeersWithHeight should not mutate the scheduler wantSc := sc - res := sc.getPeersAtHeightOrAbove(tt.args.height) + res := sc.getPeersWithHeight(tt.args.height) sort.Sort(PeerByID(res)) assert.Equal(t, tt.wantResult, res) assert.Equal(t, wantSc, sc) @@ -694,7 +740,7 @@ func TestScMarkPending(t *testing.T) { wantErr bool }{ { - name: "attempt mark pending an unknown block", + name: "attempt mark pending an unknown block above height", fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}}, @@ -704,6 +750,17 @@ func TestScMarkPending(t *testing.T) { allB: []int64{1, 2}}, wantErr: true, }, + { + name: "attempt mark pending an unknown block below base", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4, 5, 6}}, + args: args{peerID: "P1", height: 3, tm: now}, + wantFields: scTestParams{ + peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4, 5, 6}}, + wantErr: true, + }, { name: "attempt mark pending from non existing peer", fields: scTestParams{ @@ -1201,6 +1258,16 @@ func TestScSelectPeer(t *testing.T) { args: args{height: 4}, wantResult: "P1", }, + { + name: "one Ready higher peer with higher base", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4, 5, 6}, + }, + args: args{height: 3}, + wantResult: "", + wantError: true, + }, { 
name: "many Ready higher peers with different number of pending requests", fields: scTestParams{ @@ -1989,7 +2056,7 @@ func TestScHandle(t *testing.T) { args: args{event: bcAddNewPeer{peerID: "P1"}}, wantEvent: noOpEvent{}, wantSc: &scTestParams{startTime: now, peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}}, height: 1}, + "P1": {base: -1, height: -1, state: peerStateNew}}, height: 1}, }, { // set height of P1 args: args{event: bcStatusResponse{peerID: "P1", time: tick[0], height: 3}}, diff --git a/buf.yaml b/buf.yaml index 1b97487f4..7349a3516 100644 --- a/buf.yaml +++ b/buf.yaml @@ -13,4 +13,3 @@ lint: breaking: use: - FILE - - PACKAGE diff --git a/cmd/tendermint/commands/codec.go b/cmd/tendermint/commands/codec.go index 717f2d21e..041b9e9ce 100644 --- a/cmd/tendermint/commands/codec.go +++ b/cmd/tendermint/commands/codec.go @@ -2,6 +2,7 @@ package commands import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index 80ca15c6b..a21d8217e 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -10,9 +10,10 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" - rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) var dumpCmd = &cobra.Command{ @@ -58,7 +59,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { } } - rpc, err := rpcclient.NewHTTP(nodeRPCAddr, "/websocket") + rpc, err := rpchttp.New(nodeRPCAddr, "/websocket") if err != nil { return errors.Wrap(err, "failed to create new http client") } @@ -78,7 +79,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { return nil } -func dumpDebugData(outDir string, conf *cfg.Config, rpc 
*rpcclient.HTTP) { +func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") @@ -126,7 +127,7 @@ func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpcclient.HTTP) { } } - outFile := filepath.Join(outDir, fmt.Sprintf("%s.zip", start.Format(time.Stamp))) + outFile := filepath.Join(outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339))) if err := zipDir(tmpDir, outFile); err != nil { logger.Error("failed to create and compress archive", "file", outFile, "error", err) } diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index 52defc69c..40e298c72 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -16,7 +16,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" - rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) var killCmd = &cobra.Command{ @@ -44,7 +44,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { return errors.New("invalid output file") } - rpc, err := rpcclient.NewHTTP(nodeRPCAddr, "/websocket") + rpc, err := rpchttp.New(nodeRPCAddr, "/websocket") if err != nil { return errors.Wrap(err, "failed to create new http client") } @@ -124,7 +124,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the Tendermint process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. 
- p, err := os.FindProcess(os.Getpid()) + p, err := os.FindProcess(int(pid)) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill Tendermint process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index b392d23d7..9e5e36a87 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -9,13 +9,14 @@ import ( "path/filepath" "github.com/pkg/errors" + cfg "github.com/tendermint/tendermint/config" - rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) // dumpStatus gets node status state dump from the Tendermint RPC and writes it // to file. It returns an error upon failure. -func dumpStatus(rpc *rpcclient.HTTP, dir, filename string) error { +func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { status, err := rpc.Status() if err != nil { return errors.Wrap(err, "failed to get node status") @@ -26,7 +27,7 @@ func dumpStatus(rpc *rpcclient.HTTP, dir, filename string) error { // dumpNetInfo gets network information state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. -func dumpNetInfo(rpc *rpcclient.HTTP, dir, filename string) error { +func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { netInfo, err := rpc.NetInfo() if err != nil { return errors.Wrap(err, "failed to get node network information") @@ -37,7 +38,7 @@ func dumpNetInfo(rpc *rpcclient.HTTP, dir, filename string) error { // dumpConsensusState gets consensus state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. 
-func dumpConsensusState(rpc *rpcclient.HTTP, dir, filename string) error { +func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { consDump, err := rpc.DumpConsensusState() if err != nil { return errors.Wrap(err, "failed to get node consensus dump") diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index eedf6f2b5..1ece45132 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -3,7 +3,9 @@ package commands import ( "fmt" + "github.com/pkg/errors" "github.com/spf13/cobra" + cfg "github.com/tendermint/tendermint/config" tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -60,10 +62,13 @@ func initFilesWithConfig(config *cfg.Config) error { GenesisTime: tmtime.Now(), ConsensusParams: types.DefaultConsensusParams(), } - key := pv.GetPubKey() + pubKey, err := pv.GetPubKey() + if err != nil { + return errors.Wrap(err, "can't get pubkey") + } genDoc.Validators = []types.GenesisValidator{{ - Address: key.Address(), - PubKey: key, + Address: pubKey.Address(), + PubKey: pubKey, Power: 10, }} diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index 8477b46ed..e28daa6d8 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -2,59 +2,74 @@ package commands import ( "net/http" + "os" "strings" "time" "github.com/pkg/errors" "github.com/spf13/cobra" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" lite "github.com/tendermint/tendermint/lite2" - "github.com/tendermint/tendermint/lite2/provider" - httpp "github.com/tendermint/tendermint/lite2/provider/http" lproxy "github.com/tendermint/tendermint/lite2/proxy" lrpc "github.com/tendermint/tendermint/lite2/rpc" dbs "github.com/tendermint/tendermint/lite2/store/db" - rpcclient 
"github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpcserver "github.com/tendermint/tendermint/rpc/lib/server" ) // LiteCmd represents the base command when called without any subcommands var LiteCmd = &cobra.Command{ - Use: "lite", + Use: "lite [chainID]", Short: "Run a light client proxy server, verifying Tendermint rpc", Long: `Run a light client proxy server, verifying Tendermint rpc. All calls that can be tracked back to a block header by a proof will be verified before passing them back to the caller. Other than -that, it will present the same interface as a full Tendermint node.`, - RunE: runProxy, - SilenceUsage: true, +that, it will present the same interface as a full Tendermint node. + +Example: + +start a fresh instance: + +lite cosmoshub-3 -p 52.57.29.196:26657 -w public-seed-node.cosmoshub.certus.one:26657 + --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD + +continue from latest state: + +lite cosmoshub-3 -p 52.57.29.196:26657 -w public-seed-node.cosmoshub.certus.one:26657 +`, + RunE: runProxy, + Args: cobra.ExactArgs(1), + Example: `lite cosmoshub-3 -p 52.57.29.196:26657 -w public-seed-node.cosmoshub.certus.one:26657 + --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD`, } var ( listenAddr string primaryAddr string + witnessAddrsJoined string chainID string home string - witnessesAddrs string maxOpenConnections int trustingPeriod time.Duration trustedHeight int64 trustedHash []byte + + verbose bool ) func init() { LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "Serve the proxy on the given address") - LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID") - LiteCmd.Flags().StringVar(&primaryAddr, "primary", "tcp://localhost:26657", + LiteCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "", "Connect to a Tendermint node at this address") - 
LiteCmd.Flags().StringVar(&witnessesAddrs, "witnesses", "", + LiteCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", "Tendermint nodes to cross-check the primary node, comma-separated") LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") LiteCmd.Flags().IntVar( @@ -64,65 +79,77 @@ func init() { "Maximum number of simultaneous connections (including WebSocket).") LiteCmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour, "Trusting period. Should be significantly less than the unbonding period") - LiteCmd.Flags().Int64Var(&trustedHeight, "trusted-height", 1, "Trusted header's height") - LiteCmd.Flags().BytesHexVar(&trustedHash, "trusted-hash", []byte{}, "Trusted header's hash") + LiteCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height") + LiteCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash") + LiteCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output") } func runProxy(cmd *cobra.Command, args []string) error { - liteLogger := logger.With("module", "lite") + // Initialise logger. 
+ logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + var option log.Option + if verbose { + option, _ = log.AllowLevel("debug") + } else { + option, _ = log.AllowLevel("info") + } + logger = log.NewFilter(logger, option) - logger.Info("Connecting to the primary node...") - rpcClient, err := rpcclient.NewHTTP(chainID, primaryAddr) + chainID = args[0] + logger.Info("Creating client...", "chainID", chainID) + + witnessesAddrs := strings.Split(witnessAddrsJoined, ",") + + db, err := dbm.NewGoLevelDB("lite-client-db", home) + if err != nil { + return errors.Wrap(err, "new goleveldb") + } + + var c *lite.Client + if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation + c, err = lite.NewHTTPClient( + chainID, + lite.TrustOptions{ + Period: trustingPeriod, + Height: trustedHeight, + Hash: trustedHash, + }, + primaryAddr, + witnessesAddrs, + dbs.New(db, chainID), + lite.Logger(logger), + ) + } else { // continue from latest state + c, err = lite.NewHTTPClientFromTrustedStore( + chainID, + trustingPeriod, + primaryAddr, + witnessesAddrs, + dbs.New(db, chainID), + lite.Logger(logger), + ) + } + if err != nil { + return err + } + + rpcClient, err := rpchttp.New(primaryAddr, "/websocket") if err != nil { return errors.Wrapf(err, "http client for %s", primaryAddr) } - primary := httpp.NewWithClient(chainID, rpcClient) - - logger.Info("Connecting to the witness nodes...") - addrs := strings.Split(witnessesAddrs, ",") - witnesses := make([]provider.Provider, len(addrs)) - for i, addr := range addrs { - p, err := httpp.New(chainID, addr) - if err != nil { - return errors.Wrapf(err, "http provider for %s", addr) - } - witnesses[i] = p - } - - logger.Info("Creating client...") - db, err := dbm.NewGoLevelDB("lite-client-db", home) - if err != nil { - return err - } - c, err := lite.NewClient( - chainID, - lite.TrustOptions{ - Period: trustingPeriod, - Height: trustedHeight, - Hash: trustedHash, - }, - primary, - witnesses, - dbs.New(db, chainID), - 
lite.Logger(liteLogger), - ) - if err != nil { - return err - } - p := lproxy.Proxy{ Addr: listenAddr, Config: &rpcserver.Config{MaxOpenConnections: maxOpenConnections}, Codec: amino.NewCodec(), Client: lrpc.NewClient(rpcClient, c), - Logger: liteLogger, + Logger: logger, } // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(liteLogger, func() { + tmos.TrapSignal(logger, func() { p.Listener.Close() }) - logger.Info("Starting proxy...") + logger.Info("Starting proxy...", "laddr", listenAddr) if err := p.ListenAndServe(); err != http.ErrServerClosed { // Error starting or closing listener: logger.Error("proxy ListenAndServe", "err", err) diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 6d9f575d0..628a0d173 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -78,7 +78,7 @@ func AddNodeFlags(cmd *cobra.Command) { "Set this to false to only produce blocks when there are txs or when the AppHash changes") cmd.Flags().String( "consensus.create_empty_blocks_interval", - string(config.Consensus.CreateEmptyBlocksInterval), + config.Consensus.CreateEmptyBlocksInterval.String(), "The possible interval between empty blocks") // db flags @@ -99,11 +99,21 @@ func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command { Use: "node", Short: "Run the tendermint node", RunE: func(cmd *cobra.Command, args []string) error { + if err := checkGenesisHash(config); err != nil { + return err + } + n, err := nodeProvider(config, logger) if err != nil { - return fmt.Errorf("failed to create node: %v", err) + return fmt.Errorf("failed to create node: %w", err) } + if err := n.Start(); err != nil { + return fmt.Errorf("failed to start node: %w", err) + } + + logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo()) + // Stop upon receiving SIGTERM or CTRL-C. 
tmos.TrapSignal(logger, func() { if n.IsRunning() { @@ -111,15 +121,6 @@ func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command { } }) - if err := checkGenesisHash(config); err != nil { - return err - } - - if err := n.Start(); err != nil { - return fmt.Errorf("failed to start node: %v", err) - } - logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo()) - // Run forever. select {} }, diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index b0c673373..4b885a5c3 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -24,7 +24,13 @@ func showValidator(cmd *cobra.Command, args []string) error { } pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile()) - bz, err := cdc.MarshalJSON(pv.GetPubKey()) + + pubKey, err := pv.GetPubKey() + if err != nil { + return errors.Wrap(err, "can't get pubkey") + } + + bz, err := cdc.MarshalJSON(pubKey) if err != nil { return errors.Wrap(err, "failed to marshal private validator pubkey") } diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index ddf320622..54bb1363e 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -137,11 +138,15 @@ func testnetFiles(cmd *cobra.Command, args []string) error { pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey) pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState) - pv := privval.LoadFilePV(pvKeyFile, pvStateFile) + + pubKey, err := pv.GetPubKey() + if err != nil { + return errors.Wrap(err, "can't get pubkey") + } genVals[i] = types.GenesisValidator{ - Address: pv.GetPubKey().Address(), - PubKey: pv.GetPubKey(), + Address: pubKey.Address(), + PubKey: pubKey, Power: 1, Name: nodeDirName, } diff --git a/config/config_test.go 
b/config/config_test.go index 6da032d07..c83f1c3f5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -134,27 +134,44 @@ func TestFastSyncConfigValidateBasic(t *testing.T) { assert.Error(t, cfg.ValidateBasic()) } -func TestConsensusConfigValidateBasic(t *testing.T) { - cfg := TestConsensusConfig() - assert.NoError(t, cfg.ValidateBasic()) - - fieldsToTest := []string{ - "TimeoutPropose", - "TimeoutProposeDelta", - "TimeoutPrevote", - "TimeoutPrevoteDelta", - "TimeoutPrecommit", - "TimeoutPrecommitDelta", - "TimeoutCommit", - "CreateEmptyBlocksInterval", - "PeerGossipSleepDuration", - "PeerQueryMaj23SleepDuration", +func TestConsensusConfig_ValidateBasic(t *testing.T) { + // nolint: lll + testcases := map[string]struct { + modify func(*ConsensusConfig) + expectErr bool + }{ + "TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false}, + "TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true}, + "TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false}, + "TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true}, + "TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false}, + "TimeoutPrevote negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true}, + "TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false}, + "TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true}, + "TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false}, + "TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true}, + "TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false}, + "TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true}, + "TimeoutCommit": {func(c *ConsensusConfig) { 
c.TimeoutCommit = time.Second }, false}, + "TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true}, + "PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false}, + "PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true}, + "PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false}, + "PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true}, } + for desc, tc := range testcases { + tc := tc // appease linter + t.Run(desc, func(t *testing.T) { + cfg := DefaultConsensusConfig() + tc.modify(cfg) - for _, fieldName := range fieldsToTest { - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + err := cfg.ValidateBasic() + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) } } diff --git a/config/toml.go b/config/toml.go index 609f1487a..3fe4d1aac 100644 --- a/config/toml.go +++ b/config/toml.go @@ -321,6 +321,7 @@ max_tx_bytes = {{ .Mempool.MaxTxBytes }} # Fast Sync version to use: # 1) "v0" (default) - the legacy fast sync implementation # 2) "v1" - refactor of v0 version for better testability +# 3) "v2" - refactor of v1 version for better usability version = "{{ .FastSync.Version }}" ##### consensus configuration options ##### diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 5f3fff0cd..17d499bb7 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" diff --git a/consensus/codec.go b/consensus/codec.go index 1c5bf93df..ae7dbaab2 100644 --- 
a/consensus/codec.go +++ b/consensus/codec.go @@ -2,6 +2,7 @@ package consensus import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/consensus/common_test.go b/consensus/common_test.go index 0403ccaf3..f8055f01c 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -13,9 +13,13 @@ import ( "time" "github.com/go-kit/kit/log/term" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" "path" + dbm "github.com/tendermint/tm-db" + abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" @@ -33,7 +37,6 @@ import ( "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) const ( @@ -82,17 +85,23 @@ func (vs *validatorStub) signVote( voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { - addr := vs.PrivValidator.GetPubKey().Address() + + pubKey, err := vs.PrivValidator.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } + vote := &types.Vote{ ValidatorIndex: vs.Index, - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), Height: vs.Height, Round: vs.Round, Timestamp: tmtime.Now(), Type: voteType, BlockID: types.BlockID{Hash: hash, PartsHeader: header}, } - err := vs.PrivValidator.SignVote(config.ChainID(), vote) + + err = vs.PrivValidator.SignVote(config.ChainID(), vote) return vote, err } @@ -136,7 +145,15 @@ func (vss ValidatorStubsByAddress) Len() int { } func (vss ValidatorStubsByAddress) Less(i, j int) bool { - return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1 + vssi, err := vss[i].GetPubKey() + if err != nil { + panic(err) + } + vssj, err := vss[j].GetPubKey() + if err != nil { + panic(err) + } + return bytes.Compare(vssi.Address(), 
vssj.Address()) == -1 } func (vss ValidatorStubsByAddress) Swap(i, j int) { @@ -199,7 +216,9 @@ func signAddVotes( func validatePrevote(t *testing.T, cs *State, round int, privVal *validatorStub, blockHash []byte) { prevotes := cs.Votes.Prevotes(round) - address := privVal.GetPubKey().Address() + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + address := pubKey.Address() var vote *types.Vote if vote = prevotes.GetByAddress(address); vote == nil { panic("Failed to find prevote from validator") @@ -217,7 +236,9 @@ func validatePrevote(t *testing.T, cs *State, round int, privVal *validatorStub, func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { votes := cs.LastCommit - address := privVal.GetPubKey().Address() + pv, err := privVal.GetPubKey() + require.NoError(t, err) + address := pv.Address() var vote *types.Vote if vote = votes.GetByAddress(address); vote == nil { panic("Failed to find precommit from validator") @@ -237,7 +258,9 @@ func validatePrecommit( lockedBlockHash []byte, ) { precommits := cs.Votes.Precommits(thisRound) - address := privVal.GetPubKey().Address() + pv, err := privVal.GetPubKey() + require.NoError(t, err) + address := pv.Address() var vote *types.Vote if vote = precommits.GetByAddress(address); vote == nil { panic("Failed to find precommit from validator") diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 2ade0c067..3034b011a 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -9,12 +9,13 @@ import ( "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" mempl "github.com/tendermint/tendermint/mempool" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // for testing diff --git a/consensus/reactor.go b/consensus/reactor.go index 0f2dad743..c8c344ac8 
100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -9,6 +9,7 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" @@ -57,7 +58,7 @@ func NewReactor(consensusState *State, fastSync bool, options ...ReactorOption) metrics: NopMetrics(), } conR.updateFastSyncingMetric() - conR.BaseReactor = *p2p.NewBaseReactor("Reactor", conR) + conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) for _, option := range options { option(conR) @@ -493,8 +494,8 @@ OUTER_LOOP: } } - // If the peer is on a previous height, help catch up. - if (0 < prs.Height) && (prs.Height < rs.Height) { + // If the peer is on a previous height that we have, help catch up. + if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) { heightLogger := logger.With("height", prs.Height) // if we never received the commit message from the peer, the block parts wont be initialized @@ -502,7 +503,7 @@ OUTER_LOOP: blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { heightLogger.Error("Failed to load block meta", - "blockstoreHeight", conR.conS.blockStore.Height()) + "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) time.Sleep(conR.conS.config.PeerGossipSleepDuration) } else { ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader) @@ -566,8 +567,8 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt // Ensure that the peer's PartSetHeader is correct blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { - logger.Error("Failed to load block meta", - "ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height()) + logger.Error("Failed to load block meta", "ourHeight", rs.Height, + "blockstoreBase", conR.conS.blockStore.Base(), 
"blockstoreHeight", conR.conS.blockStore.Height()) time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) { @@ -802,15 +803,17 @@ OUTER_LOOP: // Maybe send Height/CatchupCommitRound/CatchupCommit. { prs := ps.GetRoundState() - if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() { - commit := conR.conS.LoadCommit(prs.Height) - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ - Height: prs.Height, - Round: commit.Round, - Type: types.PrecommitType, - BlockID: commit.BlockID, - })) - time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && + prs.Height >= conR.conS.blockStore.Base() { + if commit := conR.conS.LoadCommit(prs.Height); commit != nil { + peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ + Height: prs.Height, + Round: commit.Round, + Type: types.PrecommitType, + BlockID: commit.BlockID, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } } } diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index a18b04020..e9111bc65 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" @@ -29,7 +31,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) //---------------------------------------------- @@ -154,8 +155,9 @@ func TestReactorWithEvidence(t *testing.T) { // mock the evidence pool // everyone includes evidence of another 
double signing vIdx := (i + 1) % nValidators - addr := privVals[vIdx].GetPubKey().Address() - evpool := newMockEvidencePool(addr) + pubKey, err := privVals[vIdx].GetPubKey() + require.NoError(t, err) + evpool := newMockEvidencePool(pubKey.Address()) // Make State blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) @@ -331,7 +333,9 @@ func TestReactorVotingPowerChange(t *testing.T) { // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < nVals; i++ { - addr := css[i].privValidator.GetPubKey().Address() + pubKey, err := css[i].privValidator.GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() activeVals[string(addr)] = struct{}{} } @@ -343,7 +347,8 @@ func TestReactorVotingPowerChange(t *testing.T) { //--------------------------------------------------------------------------- logger.Debug("---------------------------- Testing changing the voting power of one validator a few times") - val1PubKey := css[0].privValidator.GetPubKey() + val1PubKey, err := css[0].privValidator.GetPubKey() + require.NoError(t, err) val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey) updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() @@ -410,8 +415,9 @@ func TestReactorValidatorSetChanges(t *testing.T) { // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < nVals; i++ { - addr := css[i].privValidator.GetPubKey().Address() - activeVals[string(addr)] = struct{}{} + pubKey, err := css[i].privValidator.GetPubKey() + require.NoError(t, err) + activeVals[string(pubKey.Address())] = struct{}{} } // wait till everyone makes block 1 @@ -422,7 +428,8 @@ func TestReactorValidatorSetChanges(t *testing.T) { //--------------------------------------------------------------------------- logger.Info("---------------------------- Testing adding one validator") - newValidatorPubKey1 := 
css[nVals].privValidator.GetPubKey() + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) @@ -449,7 +456,8 @@ func TestReactorValidatorSetChanges(t *testing.T) { //--------------------------------------------------------------------------- logger.Info("---------------------------- Testing changing the voting power of one validator") - updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() @@ -469,11 +477,13 @@ func TestReactorValidatorSetChanges(t *testing.T) { //--------------------------------------------------------------------------- logger.Info("---------------------------- Testing adding two validators at once") - newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() + newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() + require.NoError(t, err) newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() + require.NoError(t, err) newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) diff --git a/consensus/replay.go b/consensus/replay.go index 1453849cc..3d9d6614b 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -288,6 +288,7 @@ func (h *Handshaker) ReplayBlocks( appBlockHeight int64, proxyApp proxy.AppConns, ) ([]byte, error) { + storeBlockBase := 
h.store.Base() storeBlockHeight := h.store.Height() stateBlockHeight := state.LastBlockHeight h.logger.Info( @@ -341,12 +342,16 @@ func (h *Handshaker) ReplayBlocks( } } - // First handle edge cases and constraints on the storeBlockHeight. + // First handle edge cases and constraints on the storeBlockHeight and storeBlockBase. switch { case storeBlockHeight == 0: assertAppHashEqualsOneFromState(appHash, state) return appHash, nil + case appBlockHeight < storeBlockBase-1: + // the app is too far behind truncated store (can be 1 behind since we replay the next) + return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} + case storeBlockHeight < appBlockHeight: // the app should never be ahead of the store (but this is under app's control) return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight} @@ -472,7 +477,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap blockExec.SetEventBus(h.eventBus) var err error - state, err = blockExec.ApplyBlock(state, meta.BlockID, block) + state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block) if err != nil { return sm.State{}, err } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 18931c2aa..15813f4f4 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -17,6 +17,8 @@ import ( "sort" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" @@ -30,7 +32,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) func TestMain(m *testing.M) { @@ -306,7 +307,8 @@ var ( // 0 - all synced up // 1 - saved block but app and state are behind // 2 - save block and committed but state is behind -var modes = []uint{0, 1, 2} +// 3 
- save block and committed with truncated block store and state behind +var modes = []uint{0, 1, 2, 3} // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay func TestSimulateValidatorsChange(t *testing.T) { @@ -344,10 +346,11 @@ func TestSimulateValidatorsChange(t *testing.T) { //height 2 height++ incrementHeight(vss...) - newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{}) assert.Nil(t, err) propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2) propBlockParts := propBlock.MakePartSet(partSize) @@ -369,7 +372,8 @@ func TestSimulateValidatorsChange(t *testing.T) { //height 3 height++ incrementHeight(vss...) - updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{}) @@ -394,12 +398,14 @@ func TestSimulateValidatorsChange(t *testing.T) { //height 4 height++ incrementHeight(vss...) 
- newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() + newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() + require.NoError(t, err) newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{}) assert.Nil(t, err) - newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() + require.NoError(t, err) newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempl.TxInfo{}) @@ -412,7 +418,13 @@ func TestSimulateValidatorsChange(t *testing.T) { sort.Sort(ValidatorStubsByAddress(newVss)) selfIndex := 0 for i, vs := range newVss { - if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + vsPubKey, err := vs.GetPubKey() + require.NoError(t, err) + + css0PubKey, err := css[0].privValidator.GetPubKey() + require.NoError(t, err) + + if vsPubKey.Equals(css0PubKey) { selfIndex = i break } @@ -469,7 +481,13 @@ func TestSimulateValidatorsChange(t *testing.T) { copy(newVss, vss[:nVals+3]) sort.Sort(ValidatorStubsByAddress(newVss)) for i, vs := range newVss { - if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + vsKeyKey, err := vs.GetPubKey() + require.NoError(t, err) + + css0PubKey, err := css[0].privValidator.GetPubKey() + require.NoError(t, err) + + if vsKeyKey.Equals(css0PubKey) { selfIndex = i break } @@ -514,10 +532,10 @@ func TestHandshakeReplayAll(t *testing.T) { // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { for _, m := range modes { - testHandshakeReplay(t, config, 1, m, false) + testHandshakeReplay(t, config, 2, m, false) } for _, m := range modes { - testHandshakeReplay(t, config, 1, m, true) + testHandshakeReplay(t, config, 2, m, true) } } 
@@ -620,7 +638,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin stateDB = dbm.NewMemDB() genisisState = sim.GenesisState config = sim.Config - chain = sim.Chain + chain = append([]*types.Block{}, sim.Chain...) // copy chain commits = sim.Commits store = newMockBlockStore(config, genisisState.ConsensusParams) } else { //test single node @@ -642,7 +660,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin chain, commits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) - stateDB, genisisState, store = stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + stateDB, genisisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) } store.chain = chain store.commits = commits @@ -666,6 +686,15 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin buildAppStateFromChain(proxyApp, stateDB1, genisisState, chain, nBlocks, mode) } + // Prune block store if requested + expectError := false + if mode == 3 { + pruned, err := store.PruneBlocks(2) + require.NoError(t, err) + require.EqualValues(t, 1, pruned) + expectError = int64(nBlocks) < 2 + } + // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) handshaker := NewHandshaker(stateDB, state, store, genDoc) @@ -674,7 +703,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin t.Fatalf("Error starting proxy app connections: %v", err) } defer proxyApp.Stop() - if err := handshaker.Handshake(proxyApp); err != nil { + err := handshaker.Handshake(proxyApp) + if expectError { + require.Error(t, err) + return + } else if err != nil { t.Fatalf("Error on abci handshake: %v", err) } @@ -709,7 +742,7 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), 
proxyApp.Consensus(), mempool, evpool) blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()} - newState, err := blockExec.ApplyBlock(st, blkID, blk) + newState, _, err := blockExec.ApplyBlock(st, blkID, blk) if err != nil { panic(err) } @@ -739,17 +772,19 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, block := chain[i] state = applyBlock(stateDB, state, block, proxyApp) } - case 1, 2: + case 1, 2, 3: for i := 0; i < nBlocks-1; i++ { block := chain[i] state = applyBlock(stateDB, state, block, proxyApp) } - if mode == 2 { + if mode == 2 || mode == 3 { // update the kvstore height and apphash // as if we ran commit but not state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp) } + default: + panic(fmt.Sprintf("unknown mode %v", mode)) } } @@ -787,7 +822,7 @@ func buildTMStateFromChain( state = applyBlock(stateDB, state, block, proxyApp) } - case 1, 2: + case 1, 2, 3: // sync up to the penultimate as if we stored the block. 
// whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { @@ -797,6 +832,8 @@ func buildTMStateFromChain( // apply the final block to a state copy so we can // get the right next appHash but keep the state back applyBlock(stateDB, state, chain[len(chain)-1], proxyApp) + default: + panic(fmt.Sprintf("unknown mode %v", mode)) } return state @@ -811,7 +848,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { t.Cleanup(func() { os.RemoveAll(config.RootDir) }) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) const appVersion = 0x0 - stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), appVersion) + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + stateDB, state, store := stateAndStore(config, pubKey, appVersion) genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks @@ -1054,14 +1093,17 @@ type mockBlockStore struct { params types.ConsensusParams chain []*types.Block commits []*types.Commit + base int64 } // TODO: NewBlockStore(db.NewMemDB) ... 
func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{config, params, nil, nil} + return &mockBlockStore{config, params, nil, nil, 0} } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } +func (bs *mockBlockStore) Base() int64 { return bs.base } +func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] @@ -1083,6 +1125,17 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.commits[height-1] } +func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { + pruned := uint64(0) + for i := int64(0); i < height-1; i++ { + bs.chain[i] = nil + bs.commits[i] = nil + pruned++ + } + bs.base = height + return pruned, nil +} + //--------------------------------------- // Test handshake/init chain @@ -1095,7 +1148,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) { config := ResetConfig("handshake_test_") t.Cleanup(func() { os.RemoveAll(config.RootDir) }) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) - stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0) + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + stateDB, state, store := stateAndStore(config, pubKey, 0x0) oldValAddr := state.Validators.Validators[0].Address diff --git a/consensus/state.go b/consensus/state.go index cd25aa32f..b58bb3050 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -490,6 +490,10 @@ func (cs *State) reconstructLastCommit(state sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) + if seenCommit == nil { + panic(fmt.Sprintf("Failed to reconstruct LastCommit: seen commit for height %v not found", + state.LastBlockHeight)) + } 
lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) if !lastPrecommits.HasTwoThirdsMajority() { panic("Failed to reconstruct LastCommit: Does not have +2/3 maj") @@ -878,6 +882,9 @@ func (cs *State) needProofBlock(height int64) bool { } lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + if lastBlockMeta == nil { + panic(fmt.Sprintf("needProofBlock: last block meta for height %d not found", height-1)) + } return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) } @@ -921,19 +928,27 @@ func (cs *State) enterPropose(height int64, round int) { logger.Debug("This node is not a validator") return } + logger.Debug("This node is a validator") + + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // If this node is a validator & proposer in the current round, it will + // miss the opportunity to create a block. + logger.Error("Error on retrival of pubkey", "err", err) + return + } + address := pubKey.Address() // if not a validator, we're done - address := cs.privValidator.GetPubKey().Address() if !cs.Validators.HasAddress(address) { logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators) return } - logger.Debug("This node is a validator") if cs.isProposer(address) { logger.Info("enterPropose: Our turn to propose", "proposer", - cs.Validators.GetProposer().Address, + address, "privValidator", cs.privValidator) cs.decideProposal(height, round) @@ -961,7 +976,7 @@ func (cs *State) defaultDecideProposal(height int64, round int) { } else { // Create a new proposal block from state/txs from the mempool. block, blockParts = cs.createProposalBlock() - if block == nil { // on error + if block == nil { return } } @@ -1004,11 +1019,13 @@ func (cs *State) isProposalComplete() bool { } -// Create the next block to propose and return it. -// We really only need to return the parts, but the block -// is returned for convenience so we can log the proposal block. 
-// Returns nil block upon error. +// Create the next block to propose and return it. Returns nil block upon error. +// +// We really only need to return the parts, but the block is returned for +// convenience so we can log the proposal block. +// // NOTE: keep it side-effect free for clarity. +// CONTRACT: cs.privValidator is not nil. func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { var commit *types.Commit switch { @@ -1019,13 +1036,23 @@ func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.Pa case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = cs.LastCommit.MakeCommit() - default: - // This shouldn't happen. - cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.") + default: // This shouldn't happen. + cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return } - proposerAddr := cs.privValidator.GetPubKey().Address() + if cs.privValidator == nil { + panic("entered createProposalBlock with privValidator being nil") + } + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // If this node is a validator & proposer in the current round, it will + // miss the opportunity to create a block. + cs.Logger.Error("Error on retrival of pubkey", "err", err) + return + } + proposerAddr := pubKey.Address() + return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr) } @@ -1428,7 +1455,8 @@ func (cs *State) finalizeCommit(height int64) { // Execute and commit the block, update and save the state, and update the mempool. // NOTE The block.AppHash wont reflect these txs until the next block. 
var err error - stateCopy, err = cs.blockExec.ApplyBlock( + var retainHeight int64 + stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( stateCopy, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, block) @@ -1443,6 +1471,16 @@ func (cs *State) finalizeCommit(height int64) { fail.Fail() // XXX + // Prune old heights, if requested by ABCI app. + if retainHeight > 0 { + pruned, err := cs.pruneBlocks(retainHeight) + if err != nil { + cs.Logger.Error("Failed to prune blocks", "retainHeight", retainHeight, "err", err) + } else { + cs.Logger.Info("Pruned blocks", "pruned", pruned, "retainHeight", retainHeight) + } + } + // must be called before we update state cs.recordMetrics(height, block) @@ -1461,6 +1499,22 @@ func (cs *State) finalizeCommit(height int64) { // * cs.StartTime is set to when we will start round0. } +func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) { + base := cs.blockStore.Base() + if retainHeight <= base { + return 0, nil + } + pruned, err := cs.blockStore.PruneBlocks(retainHeight) + if err != nil { + return 0, fmt.Errorf("failed to prune block store: %w", err) + } + err = sm.PruneStates(cs.blockExec.DB(), base, retainHeight) + if err != nil { + return 0, fmt.Errorf("failed to prune state database: %w", err) + } + return pruned, nil +} + func (cs *State) recordMetrics(height int64, block *types.Block) { cs.metrics.Validators.Set(float64(cs.Validators.Size())) cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower())) @@ -1491,15 +1545,24 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { missingValidatorsPower += val.VotingPower } - if cs.privValidator != nil && bytes.Equal(val.Address, cs.privValidator.GetPubKey().Address()) { - label := []string{ - "validator_address", val.Address.String(), + if cs.privValidator != nil { + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // Metrics won't be updated, but it's not critical. 
+ cs.Logger.Error("Error on retrival of pubkey", "err", err) + continue } - cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower)) - if commitSig.ForBlock() { - cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height)) - } else { - cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) + + if bytes.Equal(val.Address, pubKey.Address()) { + label := []string{ + "validator_address", val.Address.String(), + } + cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower)) + if commitSig.ForBlock() { + cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height)) + } else { + cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) + } } } } @@ -1518,9 +1581,11 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { if height > 1 { lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) - cs.metrics.BlockIntervalSeconds.Set( - block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), - ) + if lastBlockMeta != nil { + cs.metrics.BlockIntervalSeconds.Set( + block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), + ) + } } cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) @@ -1648,8 +1713,12 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { if err == ErrVoteHeightMismatch { return added, err } else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { - addr := cs.privValidator.GetPubKey().Address() - if bytes.Equal(vote.ValidatorAddress, addr) { + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + return false, errors.Wrap(err, "can't get pubkey") + } + + if bytes.Equal(vote.ValidatorAddress, pubKey.Address()) { cs.Logger.Error( "Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", @@ -1838,6 +1907,7 @@ func (cs *State) addVote( return added, err } +// CONTRACT: cs.privValidator is not nil. 
func (cs *State) signVote( msgType types.SignedMsgType, hash []byte, @@ -1847,19 +1917,24 @@ func (cs *State) signVote( // and the privValidator will refuse to sign anything. cs.wal.FlushAndSync() - addr := cs.privValidator.GetPubKey().Address() - valIndex, _ := cs.Validators.GetByAddress(addr) + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } + addr := pubKey.Address() + valIdx, _ := cs.Validators.GetByAddress(addr) vote := &types.Vote{ ValidatorAddress: addr, - ValidatorIndex: valIndex, + ValidatorIndex: valIdx, Height: cs.Height, Round: cs.Round, Timestamp: cs.voteTime(), Type: msgType, BlockID: types.BlockID{Hash: hash, PartsHeader: header}, } - err := cs.privValidator.SignVote(cs.state.ChainID, vote) + + err = cs.privValidator.SignVote(cs.state.ChainID, vote) return vote, err } @@ -1884,10 +1959,23 @@ func (cs *State) voteTime() time.Time { // sign the vote and publish on internalMsgQueue func (cs *State) signAddVote(msgType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { - // if we don't have a key or we're not in the validator set, do nothing - if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetPubKey().Address()) { + if cs.privValidator == nil { // the node does not have a key return nil } + + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // Vote won't be signed, but it's not critical. + cs.Logger.Error("Error on retrival of pubkey", "err", err) + return nil + } + + // If the node not in the validator set, do nothing. 
+ if !cs.Validators.HasAddress(pubKey.Address()) { + return nil + } + + // TODO: pass pubKey to signVote vote, err := cs.signVote(msgType, hash, header) if err == nil { cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""}) diff --git a/consensus/state_test.go b/consensus/state_test.go index e8df64981..3de5a37d1 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -65,7 +65,9 @@ func TestStateProposerSelection0(t *testing.T) { // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() - address := cs1.privValidator.GetPubKey().Address() + pv, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + address := pv.Address() if !bytes.Equal(prop.Address, address) { t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) } @@ -80,7 +82,9 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewRound(newRoundCh, height+1, 0) prop = cs1.GetRoundState().Validators.GetProposer() - addr := vss[1].GetPubKey().Address() + pv1, err := vss[1].GetPubKey() + require.NoError(t, err) + addr := pv1.Address() if !bytes.Equal(prop.Address, addr) { panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address)) } @@ -104,7 +108,9 @@ func TestStateProposerSelection2(t *testing.T) { // everyone just votes nil. 
we get a new proposer each round for i := 0; i < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - addr := vss[(i+round)%len(vss)].GetPubKey().Address() + pvk, err := vss[(i+round)%len(vss)].GetPubKey() + require.NoError(t, err) + addr := pvk.Address() correctProposer := addr if !bytes.Equal(prop.Address, correctProposer) { panic(fmt.Sprintf( @@ -516,7 +522,9 @@ func TestStateLockPOLRelock(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) @@ -608,7 +616,9 @@ func TestStateLockPOLUnlock(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // everything done from perspective of cs1 @@ -700,7 +710,9 @@ func TestStateLockPOLSafety1(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round and wait for propose and prevote @@ -817,7 +829,9 @@ func TestStateLockPOLSafety2(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, 
types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // the block for R0: gets polkad but we miss it @@ -909,7 +923,9 @@ func TestProposeValidBlock(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round and wait for propose and prevote @@ -996,7 +1012,9 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round and wait for propose and prevote @@ -1056,7 +1074,9 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -1127,7 +1147,9 @@ func TestWaitingTimeoutProposeOnNewRound(t 
*testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round @@ -1161,7 +1183,9 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round @@ -1195,7 +1219,9 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round in which PO is not proposer @@ -1310,7 +1336,9 @@ func TestStartNextHeightCorrectly(t *testing.T) { newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round and wait for propose and prevote @@ -1365,7 +1393,9 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) 
+ addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round and wait for propose and prevote @@ -1502,7 +1532,9 @@ func TestStateHalt1(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round and wait for propose and prevote diff --git a/consensus/types/codec.go b/consensus/types/codec.go index e8a05b355..69ac8c4a5 100644 --- a/consensus/types/codec.go +++ b/consensus/types/codec.go @@ -2,6 +2,7 @@ package types import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 93c73f1a1..654880d27 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -54,9 +54,13 @@ func TestPeerCatchupRounds(t *testing.T) { func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote { privVal := privVals[valIndex] - addr := privVal.GetPubKey().Address() + pubKey, err := privVal.GetPubKey() + if err != nil { + panic(err) + } + vote := &types.Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, Height: height, Round: round, @@ -65,7 +69,7 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali BlockID: types.BlockID{Hash: []byte("fakehash"), PartsHeader: types.PartSetHeader{}}, } chainID := config.ChainID() - err := privVal.SignVote(chainID, vote) + err = privVal.SignVote(chainID, vote) if err != nil { panic(fmt.Sprintf("Error signing vote: %v", err)) } diff --git a/consensus/types/round_state_test.go 
b/consensus/types/round_state_test.go index f5f5f72c0..131158f0e 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -4,6 +4,7 @@ import ( "testing" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" diff --git a/consensus/wal.go b/consensus/wal.go index 989a5dc29..7b09ffa2d 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + auto "github.com/tendermint/tendermint/libs/autofile" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 244edd536..422c3f73b 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -11,6 +11,8 @@ import ( "github.com/pkg/errors" + db "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -21,7 +23,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) // WALGenerateNBlocks generates a consensus WAL. 
It does this by spinning up a diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go index 503050274..6fe2c0946 100644 --- a/crypto/ed25519/ed25519_test.go +++ b/crypto/ed25519/ed25519_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" ) diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go index b63eb738d..f7a2dde77 100644 --- a/crypto/encoding/amino/amino.go +++ b/crypto/encoding/amino/amino.go @@ -4,6 +4,7 @@ import ( "reflect" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/multisig" diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go index 67a7566dd..edc54292f 100644 --- a/crypto/encoding/amino/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/multisig" diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go index 36434f67f..840bebd51 100644 --- a/crypto/merkle/simple_map.go +++ b/crypto/merkle/simple_map.go @@ -4,6 +4,7 @@ import ( "bytes" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/kv" ) diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index 660bf236f..44b97f606 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" ) diff --git a/crypto/multisig/bitarray/compact_bit_array_test.go 
b/crypto/multisig/bitarray/compact_bit_array_test.go index ba0949178..f086dc877 100644 --- a/crypto/multisig/bitarray/compact_bit_array_test.go +++ b/crypto/multisig/bitarray/compact_bit_array_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" ) diff --git a/crypto/multisig/codec.go b/crypto/multisig/codec.go index 3a5869398..cc1e12f92 100644 --- a/crypto/multisig/codec.go +++ b/crypto/multisig/codec.go @@ -2,6 +2,7 @@ package multisig import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 26dcead59..5338d10a5 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -12,6 +12,7 @@ import ( "golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" ) diff --git a/crypto/secp256k1/secp256k1_cgo_test.go b/crypto/secp256k1/secp256k1_cgo_test.go index edb207b53..96b026bc9 100644 --- a/crypto/secp256k1/secp256k1_cgo_test.go +++ b/crypto/secp256k1/secp256k1_cgo_test.go @@ -3,9 +3,10 @@ package secp256k1 import ( - "github.com/magiconair/properties/assert" "testing" + "github.com/magiconair/properties/assert" + "github.com/stretchr/testify/require" ) diff --git a/crypto/sr25519/codec.go b/crypto/sr25519/codec.go index c3e6bd646..f33b616f9 100644 --- a/crypto/sr25519/codec.go +++ b/crypto/sr25519/codec.go @@ -2,6 +2,7 @@ package sr25519 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" ) diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go index 89a779801..57fd0faa5 100644 --- a/crypto/tmhash/hash_test.go +++ b/crypto/tmhash/hash_test.go @@ -5,6 +5,7 
@@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/tmhash" ) diff --git a/docs/.vuepress/styles/index.styl b/docs/.vuepress/styles/index.styl index 0b40a6f9b..0ca835191 100644 --- a/docs/.vuepress/styles/index.styl +++ b/docs/.vuepress/styles/index.styl @@ -1,3 +1,3 @@ :root - --accent-color #00BB00 + --accent-color #018A01 --background #222222 \ No newline at end of file diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index 0da81aa21..ae16c18c9 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -2,8 +2,7 @@ The documentation for Tendermint Core is hosted at: -- https://docs.tendermint.com/master/ and -- https://tendermint-staging.interblock.io/docs/ +- https://docs.tendermint.com/master/ built from the files in this (`/docs`) directory for [master](https://github.com/tendermint/tendermint/tree/master/docs) respectively. @@ -78,9 +77,13 @@ Install the theme and all dependencies. npm run serve ``` + + Run `pre` and `post` hooks and start a hot-reloading web-server. See output of this command for the URL (it is often https://localhost:8080). -To build documentation as a static website run `npm run build`. You will find the website in `.vuepress/dist` directory. + + +To build documentation as a static website run `npm run build`. You will find the website in `.vuepress/dist` directory. ## Search diff --git a/docs/README.md b/docs/README.md index b1b876045..1b7d691e3 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,7 +14,7 @@ Tendermint?](introduction/what-is-tendermint.md). To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). -To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci/). +To learn about application development on Tendermint, see the [Application Blockchain Interface](https://github.com/tendermint/spec/tree/master/spec/abci). 
For more details on using Tendermint, see the respective documentation for [Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index a72dc220f..ec8b0abf3 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -142,7 +142,7 @@ response. The server may be generic for a particular language, and we provide a [reference implementation in Golang](https://github.com/tendermint/tendermint/tree/master/abci/server). See the -[list of other ABCI implementations](./ecosystem.md) for servers in +[list of other ABCI implementations](https://github.com/tendermint/awesome#ecosystem) for servers in other languages. The handler is specific to the application, and may be arbitrary, so @@ -192,7 +192,7 @@ Try running these commands: > query "abc" -> code: OK -> log: exists --> height: 0 +-> height: 2 -> value: abc -> value.hex: 616263 @@ -206,7 +206,7 @@ Try running these commands: > query "def" -> code: OK -> log: exists --> height: 0 +-> height: 3 -> value: xyz -> value.hex: 78797A ``` diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md index 44a2ea623..9c1acc289 100644 --- a/docs/app-dev/app-development.md +++ b/docs/app-dev/app-development.md @@ -7,7 +7,7 @@ order: 4 ## XXX This page is undergoing deprecation. All content is being moved to the new [home -of the ABCI specification](../spec/abci/README.md). +of the ABCI specification](https://github.com/tendermint/spec/tree/master/spec/abci). ## ABCI Design diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index 4afca5775..3e3fcd551 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -14,7 +14,7 @@ type, only the key-value pairs defined in `EndBlock` are used. 
Each event contains a type and a list of attributes, which are key-value pairs denoting something about what happened during the method's execution. For more -details on `Events`, see the [ABCI](../spec/abci/abci.md) documentation. +details on `Events`, see the [ABCI](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#events) documentation. An Event has a composite key associated with it. A `compositeKey` is constructed by its type and key separated by a dot. For example: diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md index 5f5cc8921..6e4f0d207 100644 --- a/docs/app-dev/subscribing-to-events-via-websocket.md +++ b/docs/app-dev/subscribing-to-events-via-websocket.md @@ -36,7 +36,7 @@ transactions](./indexing-transactions.md) for details. When validator set changes, ValidatorSetUpdates event is published. The event carries a list of pubkey/power pairs. The list is the same Tendermint receives from ABCI application (see [EndBlock -section](../spec/abci/abci.md#endblock) in +section](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#endblock) in the ABCI spec). Response: diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 9b3c2f661..9f754fd37 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -37,7 +37,7 @@ Note the context/background should be written in the present tense. - [ADR-006-Trust-Metric](./adr-006-trust-metric.md) - [ADR-007-Trust-Metric-Usage](./adr-007-trust-metric-usage.md) - [ADR-008-Priv-Validator](./adr-008-priv-validator.md) -- [ADR-009-ABCI-Design](./adr-009-abci-design.md) +- [ADR-009-ABCI-Design](./adr-009-ABCI-design.md) - [ADR-010-Crypto-Changes](./adr-010-crypto-changes.md) - [ADR-011-Monitoring](./adr-011-monitoring.md) - [ADR-012-Peer-Transport](./adr-012-peer-transport.md) @@ -46,7 +46,7 @@ Note the context/background should be written in the present tense.
- [ADR-015-Crypto-Encoding](./adr-015-crypto-encoding.md) - [ADR-016-Protocol-Versions](./adr-016-protocol-versions.md) - [ADR-017-Chain-Versions](./adr-017-chain-versions.md) -- [ADR-018-ABCI-Validators](./adr-018-abci-validators.md) +- [ADR-018-ABCI-Validators](./adr-018-ABCI-Validators.md) - [ADR-019-Multisigs](./adr-019-multisigs.md) - [ADR-020-Block-Size](./adr-020-block-size.md) - [ADR-021-ABCI-Events](./adr-021-abci-events.md) @@ -67,5 +67,8 @@ Note the context/background should be written in the present tense. - [ADR-044-Lite-Client-With-Weak-Subjectivity](./adr-044-lite-client-with-weak-subjectivity.md) - [ADR-045-ABCI-Evidence](./adr-045-abci-evidence.md) - [ADR-046-Light-Client-Implementation](./adr-046-light-client-implementation.md) +- [ADR-047-Handling-Evidence-From-Light-Client](./adr-047-handling-evidence-from-light-client.md) - [ADR-051-Double-Signing-Risk-Reduction](./adr-051-double-signing-risk-reduction.md) - [ADR-052-Tendermint-Mode](./adr-052-tendermint-mode.md) +- [ADR-053-State-Sync-Prototype](./adr-053-state-sync-prototype.md) +- [ADR-054-crypto-encoding-2](./adr-054-crypto-encoding-2.md) diff --git a/docs/architecture/adr-002-event-subscription.md b/docs/architecture/adr-002-event-subscription.md index a73d584ab..e5c98637f 100644 --- a/docs/architecture/adr-002-event-subscription.md +++ b/docs/architecture/adr-002-event-subscription.md @@ -65,7 +65,6 @@ For historic queries we will need a indexing storage (Postgres, SQLite, ...). 
### Issues -- https://github.com/tendermint/basecoin/issues/91 - https://github.com/tendermint/tendermint/issues/376 - https://github.com/tendermint/tendermint/issues/287 - https://github.com/tendermint/tendermint/issues/525 (related) diff --git a/docs/architecture/adr-011-monitoring.md b/docs/architecture/adr-011-monitoring.md index 8f2d009dd..4c79507a1 100644 --- a/docs/architecture/adr-011-monitoring.md +++ b/docs/architecture/adr-011-monitoring.md @@ -19,7 +19,7 @@ A few solutions were considered: b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus c) [telegraf](https://github.com/influxdata/telegraf) d) new service, which will listen to events emitted by pubsub and report metrics -2. [OpenCensus](https://opencensus.io/go/index.html) +2. [OpenCensus](https://opencensus.io/introduction/) ### 1. Prometheus diff --git a/docs/architecture/adr-018-ABCI-Validators.md b/docs/architecture/adr-018-ABCI-Validators.md index f40efca15..f5ffdccaa 100644 --- a/docs/architecture/adr-018-ABCI-Validators.md +++ b/docs/architecture/adr-018-ABCI-Validators.md @@ -32,7 +32,7 @@ message ValidatorUpdate { } ``` -As noted in ADR-009[https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-009-ABCI-design.md], +As noted in [ADR-009](adr-009-ABCI-design.md), the `Validator` does not contain a pubkey because quantum public keys are quite large and it would be wasteful to send them all over ABCI with every block. 
Thus, applications that want to take advantage of the information in BeginBlock diff --git a/docs/architecture/adr-042-state-sync.md b/docs/architecture/adr-042-state-sync.md index d525a4974..89d95f2e4 100644 --- a/docs/architecture/adr-042-state-sync.md +++ b/docs/architecture/adr-042-state-sync.md @@ -34,7 +34,7 @@ across different criteria: ### Implementation Question * What is the format of a snapshot - * Complete snapshot + * Complete snapshot * Ordered IAVL key ranges * Compressed individually chunks which can be validated * How is data validated @@ -58,7 +58,7 @@ request time. This solution would create an auxiliary data structure optimized for batch read/writes. Additionally the propsosals tend to vary on how they provide safety -properties. +properties. **LightClient** Where a client can aquire the merkle root from the block headers synchronized from a trusted validator set. Subsets of the application state, @@ -70,7 +70,7 @@ downloaded and compared against versions provided by a majority of peers. #### Lazy StateSync -An [initial specification](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) was published by Alexis Sellier. +An initial specification was published by Alexis Sellier. In this design, the state has a given `size` of primitive elements (like keys or nodes), each element is assigned a number from 0 to `size-1`, and chunks consists of a range of such elements. Ackratos raised @@ -104,7 +104,7 @@ chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a manifest file which co-ordinates the state-sync. Obtaining a correct manifest file seems to require an honest majority of peers. This means you may not find out the state is incorrect until you download the whole thing and compare it -with a verified block header. +with a verified block header. 
A similar solution was implemented by Binance in [#3594](https://github.com/tendermint/tendermint/pull/3594) @@ -229,7 +229,6 @@ Proposed ## References [sync: Sync current state without full replay for Applications](https://github.com/tendermint/tendermint/issues/828) - original issue -[tendermint state sync proposal](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) - Cloudhead proposal [tendermint state sync proposal 2](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit) - ackratos proposal [proposal 2 implementation](https://github.com/tendermint/tendermint/pull/3243) - ackratos implementation [WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal diff --git a/docs/architecture/adr-043-blockchain-riri-org.md b/docs/architecture/adr-043-blockchain-riri-org.md index 6bb018f51..303def716 100644 --- a/docs/architecture/adr-043-blockchain-riri-org.md +++ b/docs/architecture/adr-043-blockchain-riri-org.md @@ -1,25 +1,26 @@ # ADR 043: Blockhchain Reactor Riri-Org ## Changelog -* 18-06-2019: Initial draft -* 08-07-2019: Reviewed -* 29-11-2019: Implemented -* 14-02-2020: Updated with the implementation details + +- 18-06-2019: Initial draft +- 08-07-2019: Reviewed +- 29-11-2019: Implemented +- 14-02-2020: Updated with the implementation details ## Context -The blockchain reactor is responsible for two high level processes:sending/receiving blocks from peers and FastSync-ing blocks to catch upnode who is far behind. The goal of [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) was to refactor these two processes by separating business logic currently wrapped up in go-channels into pure `handle*` functions. While the ADR specified what the final form of the reactor might look like it lacked guidance on intermediary steps to get there. 
-The following diagram illustrates the state of the [blockchain-reorg](https://github.com/tendermint/tendermint/pull/35610) reactor which will be referred to as `v1`. +The blockchain reactor is responsible for two high level processes:sending/receiving blocks from peers and FastSync-ing blocks to catch upnode who is far behind. The goal of [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) was to refactor these two processes by separating business logic currently wrapped up in go-channels into pure `handle*` functions. While the ADR specified what the final form of the reactor might look like it lacked guidance on intermediary steps to get there. +The following diagram illustrates the state of the [blockchain-reorg](https://github.com/tendermint/tendermint/pull/3561) reactor which will be referred to as `v1`. ![v1 Blockchain Reactor Architecture Diagram](https://github.com/tendermint/tendermint/blob/f9e556481654a24aeb689bdadaf5eab3ccd66829/docs/architecture/img/blockchain-reactor-v1.png) While `v1` of the blockchain reactor has shown significant improvements in terms of simplifying the concurrency model, the current PR has run into few roadblocks. -* The current PR large and difficult to review. -* Block gossiping and fast sync processes are highly coupled to the shared `Pool` data structure. -* Peer communication is spread over multiple components creating complex dependency graph which must be mocked out during testing. -* Timeouts modeled as stateful tickers introduce non-determinism in tests +- The current PR large and difficult to review. +- Block gossiping and fast sync processes are highly coupled to the shared `Pool` data structure. +- Peer communication is spread over multiple components creating complex dependency graph which must be mocked out during testing. 
+- Timeouts modeled as stateful tickers introduce non-determinism in tests This ADR is meant to specify the missing components and control necessary to achieve [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md). @@ -32,7 +33,7 @@ Diagram](https://github.com/tendermint/tendermint/blob/584e67ac3fac220c5c3e0652e ### Fast Sync Related Communication Channels -The diagram below shows the fast sync routines and the types of channels and queues used to communicate with each other. +The diagram below shows the fast sync routines and the types of channels and queues used to communicate with each other. In addition the per reactor channels used by the sendRoutine to send messages over the Peer MConnection are shown. ![v2 Blockchain Channels and Queues @@ -42,7 +43,6 @@ Diagram](https://github.com/tendermint/tendermint/blob/5cf570690f989646fb3b615b7 The reactor will include a demultiplexing routine which will send each message to each sub routine for independent processing. Each sub routine will then select the messages it's interested in and call the handle specific function specified in [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md). The demuxRoutine acts as "pacemaker" setting the time in which events are expected to be handled. - ```go func demuxRoutine(msgs, scheduleMsgs, processorMsgs, ioMsgs) { timer := time.NewTicker(interval) @@ -143,6 +143,7 @@ func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { ``` ## IO handling + An io handling routine within the reactor will isolate peer communication. Message going through the ioRoutine will usually be one way, using `p2p` APIs. In the case in which the `p2p` API such as `trySend` return errors, the ioRoutine can funnel those message back to the demuxRoutine for distribution to the other routines. 
For instance errors from the ioRoutine can be consumed by the scheduler to inform better peer selection implementations. ```go @@ -169,6 +170,7 @@ func (r *BlockchainReacor) ioRoutine(ioMesgs chan Message, outMsgs chan Message) } ``` + ### Processor Internals The processor is responsible for ordering, verifying and executing blocks. The Processor will maintain an internal cursor `height` refering to the last processed block. As a set of blocks arrive unordered, the Processor will check if it has `height+1` necessary to process the next block. The processor also maintains the map `blockPeers` of peers to height, to keep track of which peer provided the block at `height`. `blockPeers` can be used in`handleRemovePeer(...)` to reschedule all unprocessed blocks provided by a peer who has errored. @@ -232,10 +234,10 @@ func handleTimeCheckEv(time) { The Schedule maintains the internal state used for scheduling blockRequestMessages based on some scheduling algorithm. The schedule needs to maintain state on: -* The state `blockState` of every block seem up to height of maxHeight -* The set of peers and their peer state `peerState` -* which peers have which blocks -* which blocks have been requested from which peers +- The state `blockState` of every block seem up to height of maxHeight +- The set of peers and their peer state `peerState` +- which peers have which blocks +- which blocks have been requested from which peers ```go type blockState int @@ -256,7 +258,7 @@ type schedule { // a map of peerID to schedule specific peer struct `scPeer` peers map[p2p.ID]scPeer - + // a map of heights to the peer we are waiting for a response from pending map[height]scPeer @@ -309,6 +311,7 @@ type scPeer struct { ``` # Scheduler + The scheduler is configured to maintain a target `n` of in flight messages and will use feedback from `_blockResponseMessage`, `_statusResponseMessage` and `_peerError` produce an optimal assignment @@ -342,7 +345,7 @@ func handleTimeCheckEv(time) { events = 
[] for peerID := range schedule.peersNotTouchedSince(time) { - pending = schedule.pendingFrom(peerID) + pending = schedule.pendingFrom(peerID) schedule.setPeerState(peerID, timedout) schedule.resetBlocks(pending) events = append(events, peerTimeout{peerID}) @@ -355,6 +358,7 @@ func handleTimeCheckEv(time) { ``` ## Peer + The Peer Stores per peer state based on messages received by the scheduler. ```go @@ -376,19 +380,19 @@ type Peer struct { This design is under active development. The Implementation has been staged in the following PRs: -* [Routine](https://github.com/tendermint/tendermint/pull/3878) -* [Processor](https://github.com/tendermint/tendermint/pull/4012) -* [Scheduler](https://github.com/tendermint/tendermint/pull/4043) -* [Reactor](https://github.com/tendermint/tendermint/pull/4067) +- [Routine](https://github.com/tendermint/tendermint/pull/3878) +- [Processor](https://github.com/tendermint/tendermint/pull/4012) +- [Scheduler](https://github.com/tendermint/tendermint/pull/4043) +- [Reactor](https://github.com/tendermint/tendermint/pull/4067) ## Consequences ### Positive -* Test become deterministic -* Simulation becomes a-termporal: no need wait for a wall-time timeout -* Peer Selection can be independently tested/simulated -* Develop a general approach to refactoring reactors +- Tests become deterministic +- Simulation becomes atemporal: no need to wait for a wall-time timeout +- Peer Selection can be independently tested/simulated +- Develop a general approach to refactoring reactors ### Negative @@ -396,11 +400,11 @@ staged in the following PRs: ### Implementation Path -* Implement the scheduler, test the scheduler, review the rescheduler -* Implement the processor, test the processor, review the processor -* Implement the demuxer, write integration test, review integration tests +- Implement the scheduler, test the scheduler, review the rescheduler +- Implement the processor, test the processor, review the processor +- Implement the demuxer, write 
integration test, review integration tests ## References -* [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md): The original blockchain reactor re-org proposal -* [Blockchain re-org](https://github.com/tendermint/tendermint/pull/3561): The current blockchain reactor re-org implementation (v1) +- [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md): The original blockchain reactor re-org proposal +- [Blockchain re-org](https://github.com/tendermint/tendermint/pull/3561): The current blockchain reactor re-org implementation (v1) diff --git a/docs/architecture/adr-046-light-client-implementation.md b/docs/architecture/adr-046-light-client-implementation.md index 6058da8f8..7620409a0 100644 --- a/docs/architecture/adr-046-light-client-implementation.md +++ b/docs/architecture/adr-046-light-client-implementation.md @@ -4,28 +4,22 @@ * 13-02-2020: Initial draft * 26-02-2020: Cross-checking the first header * 28-02-2020: Bisection algorithm details +* 31-03-2020: Verify signature got changed ## Context A `Client` struct represents a light client, connected to a single blockchain. -As soon as it's started (via `Start`), it tries to update to the latest header -(using bisection algorithm by default). -Cleaning routine is also started to remove headers outside of trusting period. -NOTE: since it's periodic, we still need to check header is not expired in -`TrustedHeader`, `TrustedValidatorSet` methods (and others which are using the -latest trusted header). - -The user has an option to manually verify headers using `VerifyHeader` and -`VerifyHeaderAtHeight` methods. To avoid races, `UpdatePeriod(0)` needs to be -passed when initializing the light client (it turns off the auto update). +The user has an option to verify headers using `VerifyHeader` or +`VerifyHeaderAtHeight` or `Update` methods. 
The latter method downloads the +latest header from primary and compares it with the currently trusted one. ```go type Client interface { - // start and stop updating & cleaning goroutines - Start() error - Stop() - Cleanup() error + // verify new headers + VerifyHeaderAtHeight(height int64, now time.Time) (*types.SignedHeader, error) + VerifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error + Update(now time.Time) (*types.SignedHeader, error) // get trusted headers & validators TrustedHeader(height int64) (*types.SignedHeader, error) @@ -38,9 +32,7 @@ type Client interface { Primary() provider.Provider Witnesses() []provider.Provider - // verify new headers - VerifyHeaderAtHeight(height int64, now time.Time) (*types.SignedHeader, error) - VerifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error + Cleanup() error } ``` @@ -69,8 +61,9 @@ also cross-checked with witnesses for additional security. Due to bisection algorithm nature, some headers might be skipped. If the light client does not have a header for height `X` and `VerifyHeaderAtHeight(X)` or -`VerifyHeader(H#X)` methods are called, it will perform a backwards -verification from the latest header back to the header at height `X`. +`VerifyHeader(H#X)` methods are called, these will perform either a) backwards +verification from the latest header back to the header at height `X` or b) +bisection verification from the first stored header to the header at height `X`. `TrustedHeader`, `TrustedValidatorSet` only communicate with the trusted store. If some header is not there, an error will be returned indicating that @@ -108,6 +101,10 @@ type Store interface { FirstSignedHeaderHeight() (int64, error) SignedHeaderAfter(height int64) (*types.SignedHeader, error) + + Prune(size uint16) error + + Size() uint16 } ``` @@ -118,12 +115,13 @@ database, used in Tendermint). 
In the future, remote adapters are possible ```go func Verify( chainID string, - h1 *types.SignedHeader, - h1NextVals *types.ValidatorSet, - h2 *types.SignedHeader, - h2Vals *types.ValidatorSet, + trustedHeader *types.SignedHeader, // height=X + trustedVals *types.ValidatorSet, // height=X or height=X+1 + untrustedHeader *types.SignedHeader, // height=Y + untrustedVals *types.ValidatorSet, // height=Y trustingPeriod time.Duration, now time.Time, + maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { ``` @@ -132,6 +130,9 @@ cases of adjacent and non-adjacent headers. In the former case, it compares the hashes directly (2/3+ signed transition). Otherwise, it verifies 1/3+ (`trustLevel`) of trusted validators are still present in new validators. +While `Verify` function is certainly handy, `VerifyAdjacent` and +`VerifyNonAdjacent` should be used most often to avoid logic errors. + ### Bisection algorithm details Non-recursive bisection algorithm was implemented despite the spec containing diff --git a/docs/architecture/adr-047-handling-evidence-from-light-client.md b/docs/architecture/adr-047-handling-evidence-from-light-client.md new file mode 100644 index 000000000..8b3a850ba --- /dev/null +++ b/docs/architecture/adr-047-handling-evidence-from-light-client.md @@ -0,0 +1,186 @@ +# ADR 047: Handling evidence from light client + +## Changelog +* 18-02-2020: Initial draft +* 24-02-2020: Second version + +## Context + +If the light client is under attack, either directly -> lunatic/phantom +validators (light fork) or indirectly -> full fork, it's supposed to halt and +send evidence of misbehavior to a correct full node. Upon receiving an +evidence, the full node should punish malicious validators (if possible). 
+ +## Decision + +When a light client sees two conflicting headers (`H1.Hash() != H2.Hash()`, +`H1.Height == H2.Height`), both having 1/3+ of the voting power of the +currently trusted validator set, it will submit a `ConflictingHeadersEvidence` +to all full nodes it's connected to. Evidence needs to be submitted to all full +nodes since there's no way to determine which full node is correct (honest). + +```go +type ConflictingHeadersEvidence struct { + H1 types.SignedHeader + H2 types.SignedHeader +} +``` + +When a full node receives the `ConflictingHeadersEvidence` evidence, it should +a) validate it b) figure out if malicious behaviour is obvious (immediately +slashable) or the fork accountability protocol needs to be started. + +### Validating headers + +Check both headers are valid (`ValidateBasic`), have the same height, and +signed by 1/3+ of the validator set that the full node had at height +`H1.Height-1`. + +- Q: What if light client validator set is not equal to full node's validator + set (i.e. from full node's point of view both headers are not properly signed; + this includes the case where none of the two headers were committed on the + main chain) + + Reject the evidence. It means light client is following a fork, but, hey, at + least it will halt. + +- Q: Don't we want to punish validators who signed something else even if they + have less or equal than 1/3? + + No consensus so far. Ethan said no, Zarko said yes. + https://github.com/tendermint/spec/pull/71#discussion_r374210533 + +### Figuring out if malicious behaviour is immediately slashable + +Let's say H1 was committed from this full node's perspective (see Appendix A). +Intersect validator sets of H1 and H2. + +* if there are signers(H2) that are not part of validators(H1), they misbehaved as +they are signing protocol messages in heights they are not validators => +immediately slashable (#F4). 
+ +* if `H1.Round == H2.Round`, and some signers signed different precommit +messages in both commits, then it is an equivocation misbehavior => immediately +slashable (#F1). + +* if `H1.Round != H2.Round` we need to run full detection procedure => not +immediately slashable. + +* if `ValidatorsHash`, `NextValidatorsHash`, `ConsensusHash`, +`AppHash`, and `LastResultsHash` in H2 are different (incorrect application +state transition), then it is a lunatic misbehavior => immediately slashable (#F5). + +If evidence is not immediately slashable, fork accountability needs to invoked +(ADR does not yet exist). + +It's unclear if we should further break up `ConflictingHeadersEvidence` or +gossip and commit it directly. See +https://github.com/tendermint/tendermint/issues/4182#issuecomment-590339233 + +If we'd go without breaking evidence, all we'll need to do is to strip the +committed header from `ConflictingHeadersEvidence` (H1) and leave only the +uncommitted header (H2): + +```go +type ConflictingHeaderEvidence struct { + H types.SignedHeader +} +``` + +If we'd go with breaking evidence, here are the types we'll need: + +### F1. Equivocation + +Existing `DuplicateVoteEvidence` needs to be created and gossiped. + +### F4. Phantom validators + +A new type of evidence needs to be created: + +```go +type PhantomValidatorEvidence struct { + PubKey crypto.PubKey + Vote types.Vote +} +``` + +It contains a validator's public key and a vote for a block, where this +validator is not part of the validator set. + +### F5. Lunatic validator + +```go +type LunaticValidatorEvidence struct { + Header types.Header + Vote types.Vote +} +``` + +To punish this attack, we need support for a new Evidence type - +`LunaticValidatorEvidence`. This type includes a vote and a header. The header +must contain fields that are invalid with respect to the previous block, and a +vote for that header by a validator that was in a validator set within the +unbonding period. 
While the attack is only possible if +1/3 of some validator +set colludes, the evidence should be verifiable independently for each +individual validator. This means the total evidence can be split into one piece +of evidence per attacking validator and gossiped to nodes to be verified one +piece at a time, reducing the DoS attack surface at the peer layer. + +Note it is not sufficient to simply compare this header with that committed for +the corresponding height, as an honest node may vote for a header that is not +ultimately committed. Certain fields may also be variable, for instance the +`LastCommitHash` and the `Time` may depend on which votes the proposer includes. +Thus, the header must be explicitly checked for invalid data. + +For the attack to succeed, VC must sign a header that changes the validator set +to consist of something they control. Without doing this, they can not +otherwise attack the light client, since the client verifies commits according +to validator sets. Thus, it should be sufficient to check only that +`ValidatorsHash` and `NextValidatorsHash` are correct with respect to the +header that was committed at the corresponding height. + +That said, if the attack is conducted by +2/3 of the validator set, they don't +need to make an invalid change to the validator set, since they already control +it. Instead they would make invalid changes to the `AppHash`, or possibly other +fields. In order to punish them, then, we would have to check all header +fields. + +Note some header fields require the block itself to verify, which the light +client, by definition, does not possess, so it may not be possible to check +these fields. For now, then, `LunaticValidatorEvidence` must be checked against +all header fields which are a function of the application at previous blocks. +This includes `ValidatorsHash`, `NextValidatorsHash`, `ConsensusHash`, +`AppHash`, and `LastResultsHash`. 
These should all match what's in the header +for the block that was actually committed at the corresponding height, and +should thus be easy to check. + +## Status + +Proposed. + +## Consequences + +### Positive + +* Tendermint will be able to detect & punish new types of misbehavior +* light clients connected to multiple full nodes can help full nodes notice a + fork faster + +### Negative + +* Accepting `ConflictingHeadersEvidence` from light clients opens up a DDOS +attack vector (the same is true for any RPC endpoint open to the public; remember that +RPC is not open by default). + +### Neutral + +## References + +* [Fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md) + +## Appendix A + +If there is an actual fork (full fork), a full node may follow either one or +another branch. So both H1 or H2 can be considered committed depending on which +branch the full node is following. It's supposed to halt if it notices an +actual fork, but there's a small chance it doesn't. diff --git a/docs/architecture/adr-053-state-sync-prototype.md b/docs/architecture/adr-053-state-sync-prototype.md index 79e39b24d..2848f9dd4 100644 --- a/docs/architecture/adr-053-state-sync-prototype.md +++ b/docs/architecture/adr-053-state-sync-prototype.md @@ -14,6 +14,14 @@ This ADR outlines the plan for an initial state sync prototype, and is subject t * Added experimental prototype info. * Added open questions and implementation plan. 
+ ## Context State sync will allow a new node to receive a snapshot of the application state without downloading blocks or going through consensus. This bootstraps the node significantly faster than the current fast sync system, which replays all historical blocks. @@ -36,28 +44,18 @@ This describes the snapshot/restore process seen from Tendermint. The interface ### Snapshot Data Structure -A node can have multiple snapshots taken at various heights. Snapshots can be taken in different application-specified formats (e.g. MessagePack as format `1` and Protobuf as format `2`, or similarly for schema versioning). Each snapshot consists of multiple chunks containing the actual state data, allowing parallel downloads and reduced memory usage. +A node can have multiple snapshots taken at various heights. Snapshots can be taken in different application-specified formats (e.g. MessagePack as format `1` and Protobuf as format `2`, or similarly for schema versioning). Each snapshot consists of multiple chunks containing the actual state data, for parallel downloads and reduced memory usage. 
```proto message Snapshot { - uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - uint32 chunks = 3; // The number of chunks in the snapshot - bytes metadata = 4; // Arbitrary application metadata -} - -message SnapshotChunk { - uint64 height = 1; // The height of the corresponding snapshot - uint32 format = 2; // The application-specific snapshot format - uint32 chunk = 3; // The chunk index (one-based) - bytes data = 4; // Serialized application state in an arbitrary format - bytes checksum = 5; // SHA-1 checksum of data + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + repeated bytes chunk_hashes = 3; // SHA-256 checksums of all chunks, in order + bytes metadata = 4; // Arbitrary application metadata } ``` -Chunk verification data must be encoded along with the state data in the `data` field. - -Chunk `data` cannot be larger than 64 MB, and snapshot `metadata` cannot be larger than 64 KB. +Chunks are exchanged simply as `bytes`, and cannot be larger than 16 MB. `Snapshot` messages should be less than 4 MB. 
### ABCI Interface @@ -72,41 +70,43 @@ message ResponseListSnapshots { // Offers a snapshot to the application message RequestOfferSnapshot { Snapshot snapshot = 1; - bytes app_hash = 2; + bytes app_hash = 2; } message ResponseOfferSnapshot { - bool accepted = 1; - Reason reason = 2; // Reason why snapshot was rejected - enum Reason { - unknown = 0; // Unknown or generic reason - invalid_height = 1; // Height is rejected: avoid this height - invalid_format = 2; // Format is rejected: avoid this format + bool accepted = 1; + Reason reason = 2; + + enum Reason { // Reason why snapshot was rejected + unknown = 0; // Unknown or generic reason + invalid_height = 1; // Height is rejected: avoid this height + invalid_format = 2; // Format is rejected: avoid this format } } -// Fetches a snapshot chunk -message RequestGetSnapshotChunk { +// Loads a snapshot chunk +message RequestLoadSnapshotChunk { uint64 height = 1; uint32 format = 2; - uint32 chunk = 3; + uint32 chunk = 3; // Zero-indexed } -message ResponseGetSnapshotChunk { - SnapshotChunk chunk = 1; +message ResponseLoadSnapshotChunk { + bytes chunk = 1; } // Applies a snapshot chunk message RequestApplySnapshotChunk { - SnapshotChunk chunk = 1; + bytes chunk = 1; } message ResponseApplySnapshotChunk { - bool applied = 1; - Reason reason = 2; // Reason why chunk failed - enum Reason { - unknown = 0; // Unknown or generic reason - verify_failed = 1; // Chunk verification failed + bool applied = 1; + Reason reason = 2; // Reason why chunk failed + + enum Reason { // Reason why chunk failed + unknown = 0; // Unknown or generic reason + verify_failed = 1; // Snapshot verification failed } } ``` @@ -139,19 +139,19 @@ When starting an empty node with state sync and fast sync enabled, snapshots are 3. The node contacts a set of full nodes, and verifies the trusted block header using the given hash via the light client. -4. The node requests available snapshots via `RequestListSnapshots`. 
Snapshots with `metadata` greater than 64 KB are rejected. +4. The node requests available snapshots via P2P from peers, via `RequestListSnapshots`. Peers will return the 10 most recent snapshots, one message per snapshot. -5. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions: +5. The node aggregates snapshots from multiple peers, ordered by height and format (in reverse). If there are `chunk_hashes` mismatches between different snapshots, the one hosted by the largest amount of peers is chosen. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions: * The snapshot height's block is considered trustworthy by the light client (i.e. snapshot height is greater than trusted header and within unbonding period of the latest trustworthy block). - * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOffsetSnapshot` call (via `invalid_height` or `invalid_format`). + * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOfferSnapshot` call (via `invalid_height` or `invalid_format`). * The application accepts the `RequestOfferSnapshot` call. -6. The node downloads chunks in parallel from multiple peers via `RequestGetSnapshotChunk`, and both the sender and receiver verifies their checksums. Chunks with `data` greater than 64 MB are rejected. +6. The node downloads chunks in parallel from multiple peers, via `RequestLoadSnapshotChunk`, and both the sender and receiver verifies their checksums. Chunk messages cannot exceed 16 MB. -7. The node passes chunks sequentially to the app via `RequestApplySnapshotChunk`, along with the chain's app hash at the snapshot height for verification. If the chunk is rejected the node should retry it. If it was rejected with `verify_failed`, it should be refetched from a different source. 
If an internal error occurred, `ResponseException` should be returned and state sync should be aborted. +7. The node passes chunks sequentially to the app via `RequestApplySnapshotChunk`. 8. Once all chunks have been applied, the node compares the app hash to the chain app hash, and if they do not match it either errors or discards the state and starts over. @@ -167,7 +167,7 @@ This describes the snapshot process seen from Gaia, using format version `1`. Th In the initial version there is no snapshot metadata, so it is set to an empty byte buffer. -Once all chunks have been successfully built, snapshot metadata should be serialized and stored in the file system as e.g. `snapshots///metadata`, and served via `RequestListSnapshots`. +Once all chunks have been successfully built, snapshot metadata should be stored in a database and served via `RequestListSnapshots`. ### Snapshot Chunk Format @@ -181,7 +181,7 @@ For the initial prototype, each chunk consists of a complete dump of all node da For a production version, it should be sufficient to store key/value/version for all nodes (leaf and inner) in insertion order, chunked in some appropriate way. If per-chunk verification is required, the chunk must also contain enough information to reconstruct the Merkle proofs all the way up to the root of the multistore, e.g. by storing a complete subtree's key/value/version data plus Merkle hashes of all other branches up to the multistore root. The exact approach will depend on tradeoffs between size, time, and verification. IAVL RangeProofs are not recommended, since these include redundant data such as proofs for intermediate and leaf nodes that can be derived from the above data. -Chunks should be built greedily by collecting node data up to some size limit (e.g. 32 MB) and serializing it. Chunk data is stored in the file system as `snapshots////data`, along with a SHA-1 checksum in `snapshots////checksum`, and served via `RequestGetSnapshotChunk`. 
+Chunks should be built greedily by collecting node data up to some size limit (e.g. 10 MB) and serializing it. Chunk data is stored in the file system as `snapshots///`, and a SHA-256 checksum is stored along with the snapshot metadata. ### Snapshot Scheduling @@ -223,12 +223,6 @@ To stop the testnet, run: $ ./tools/stop.sh ``` -## Open Questions - -* Should we have a simpler scheme for discovering snapshots? E.g. announce supported formats, and have peer supply latest available snapshot. - - Downsides: app has to announce supported formats, having a single snapshot per peer may make fewer peers available for chosen snapshot. - ## Resolved Questions * Is it OK for state-synced nodes to not have historical blocks nor historical IAVL versions? @@ -309,6 +303,8 @@ $ ./tools/stop.sh * **Tendermint:** node should go back to fast-syncing when lagging significantly [#129](https://github.com/tendermint/tendermint/issues/129) +* **Tendermint:** backfill historical blocks [#4629](https://github.com/tendermint/tendermint/issues/4629) + ## Status Accepted diff --git a/docs/architecture/adr-054-crypto-encoding-2.md b/docs/architecture/adr-054-crypto-encoding-2.md new file mode 100644 index 000000000..9ec05f229 --- /dev/null +++ b/docs/architecture/adr-054-crypto-encoding-2.md @@ -0,0 +1,83 @@ +# ADR 054: Crypto encoding (part 2) + +## Changelog + +\*2020-2-27: Created + +## Context + +Amino has been a pain point of many users in the ecosystem. While Tendermint does not suffer greatly from the performance degradation introduced by amino, we are making an effort in moving the encoding format to a widely adopted format, [Protocol Buffers](https://developers.google.com/protocol-buffers). With this migration a new standard is needed for the encoding of keys. This will cause ecosystem wide breaking changes. + +Currently amino encodes keys as ` `. 
+ +## Decision + +When using the `oneof` protobuf type there are many times where one will have to manually switch over the possible messages and then pass them to the interface which is needed. By transitioning from a fixed size byte array (`[size]byte`) to byte slice's (`[]byte`) then this would enable the usage of the [cosmos-proto's](hhttps://github.com/regen-network/cosmos-proto#interface_type) interface type, which will generate these switch statements. + +The approach that will be taken to minimize headaches for users is one where all encoding of keys will shift to protobuf and where amino encoding is relied on, there will be custom marshal and unmarshal functions. + +Protobuf messages: + +```proto +message PubKey { + option (cosmos_proto.interface_type) = "*github.com/tendermint/tendermint/crypto.PubKey"; + oneof key { + bytes ed25519 = 1 + [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/ed25519.PubKey"]; + bytes secp256k1 = 2 + [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/secp256k1.PubKey"]; + bytes sr25519 = 3 + [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/sr25519.PubKey"]; + PubKeyMultiSigThreshold multisig = 4 + [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/multisig.PubKeyMultisigThreshold"];; + } + +message PrivKey { + option (cosmos_proto.interface_type) = "github.com/tendermint/tendermint/crypto.PrivKey"; + oneof sum { + bytes ed25519 = 1 + [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/ed25519.PrivKey"]; + bytes secp256k1 = 2 + [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/secp256k1.PrivKey"]; + bytes sr25519 = 3 + [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/sr25519.PrivKey"];; + } +} +``` + +> Note: The places where backwards compatibility is needed is still unclear. 
+ +All modules currently do not rely on amino encoded bytes and keys are not amino encoded for genesis, therefore a hardfork upgrade is what will be needed to adopt these changes. + +This work will be broken out into a few PRs, this work will be merged into a proto-breakage branch, all PRs will be reviewed prior to being merged: + +1. Encoding of keys to protobuf and protobuf messages +2. Move Tendermint types to protobuf, mainly the ones that are being encoded. +3. Go one by one through the reactors and transition amino encoded messages to protobuf. +4. Test with cosmos-sdk and/or testnets repo. + +## Status + +Proposed + +## Consequences + +- Move keys to protobuf encoding, where backwards compatibility is needed, amino marshal and unmarshal functions will be used. + +### Positive + +- Protocol Buffer encoding will not change going forward. +- Removing amino overhead from keys will help with the KSM. +- Have a large ecosystem of supported languages. + +### Negative + +- Hardfork is required to integrate this into running chains. + +### Neutral + +## References + +> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! + +- {reference link} diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md index 28a5ecfbb..759fc6d72 100644 --- a/docs/architecture/adr-template.md +++ b/docs/architecture/adr-template.md @@ -1,17 +1,19 @@ # ADR {ADR-NUMBER}: {TITLE} ## Changelog -* {date}: {changelog} + +- {date}: {changelog} ## Context -> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. +> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. 
+ ## Decision > This section explains all of the details of the proposed solution, including implementation details. -It should also describe affects / corollary items that may need to be changed as a part of this. -If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. -(e.g. the optimal split of things to do between separate PR's) +> It should also describe affects / corollary items that may need to be changed as a part of this. +> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. +> (e.g. the optimal split of things to do between separate PR's) ## Status @@ -31,6 +33,6 @@ If the proposed change will be large, please also indicate a way to do the chang ## References -> Are there any relevant PR comments, issues that led up to this, or articles referrenced for why we made the given design choice? If so link them here! +> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! 
-* {reference link} +- {reference link} diff --git a/docs/imgs/light_client_bisection_alg.png b/docs/imgs/light_client_bisection_alg.png new file mode 100644 index 000000000..2a12c7542 Binary files /dev/null and b/docs/imgs/light_client_bisection_alg.png differ diff --git a/docs/package-lock.json b/docs/package-lock.json index a5e8c0880..fd136c226 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -12,18 +12,35 @@ "@babel/highlight": "^7.8.3" } }, + "@babel/compat-data": { + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.8.6.tgz", + "integrity": "sha512-CurCIKPTkS25Mb8mz267vU95vy+TyUpnctEX2lV33xWNmHAfjruztgiPBbXZRh3xZZy1CYvGx6XfxyTVS+sk7Q==", + "requires": { + "browserslist": "^4.8.5", + "invariant": "^2.2.4", + "semver": "^5.5.0" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + } + } + }, "@babel/core": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.8.4.tgz", - "integrity": "sha512-0LiLrB2PwrVI+a2/IEskBopDYSd8BCb3rOvH7D5tzoWd696TBEduBvuLVm4Nx6rltrLZqvI3MCalB2K2aVzQjA==", + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.8.7.tgz", + "integrity": "sha512-rBlqF3Yko9cynC5CCFy6+K/w2N+Sq/ff2BPy+Krp7rHlABIr5epbA7OxVeKoMHB39LZOp1UY5SuLjy6uWi35yA==", "requires": { "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.8.4", + "@babel/generator": "^7.8.7", "@babel/helpers": "^7.8.4", - "@babel/parser": "^7.8.4", - "@babel/template": "^7.8.3", - "@babel/traverse": "^7.8.4", - "@babel/types": "^7.8.3", + "@babel/parser": "^7.8.7", + "@babel/template": "^7.8.6", + "@babel/traverse": "^7.8.6", + "@babel/types": "^7.8.7", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.1", @@ -68,11 +85,11 @@ } }, "@babel/generator": 
{ - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.8.4.tgz", - "integrity": "sha512-PwhclGdRpNAf3IxZb0YVuITPZmmrXz9zf6fH8lT4XbrmfQKr6ryBzhv593P5C6poJRciFCL/eHGW2NuGrgEyxA==", + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.8.7.tgz", + "integrity": "sha512-DQwjiKJqH4C3qGiyQCAExJHoZssn49JTMJgZ8SANGgVFdkupcUhLOdkAeoC6kmHZCPfoDG5M0b6cFlSN5wW7Ew==", "requires": { - "@babel/types": "^7.8.3", + "@babel/types": "^7.8.7", "jsesc": "^2.5.1", "lodash": "^4.17.13", "source-map": "^0.5.0" @@ -103,33 +120,53 @@ } }, "@babel/helper-call-delegate": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-call-delegate/-/helper-call-delegate-7.8.3.tgz", - "integrity": "sha512-6Q05px0Eb+N4/GTyKPPvnkig7Lylw+QzihMpws9iiZQv7ZImf84ZsZpQH7QoWN4n4tm81SnSzPgHw2qtO0Zf3A==", + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/helper-call-delegate/-/helper-call-delegate-7.8.7.tgz", + "integrity": "sha512-doAA5LAKhsFCR0LAFIf+r2RSMmC+m8f/oQ+URnUET/rWeEzC0yTRmAGyWkD4sSu3xwbS7MYQ2u+xlt1V5R56KQ==", "requires": { "@babel/helper-hoist-variables": "^7.8.3", "@babel/traverse": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/types": "^7.8.7" + } + }, + "@babel/helper-compilation-targets": { + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.8.7.tgz", + "integrity": "sha512-4mWm8DCK2LugIS+p1yArqvG1Pf162upsIsjE7cNBjez+NjliQpVhj20obE520nao0o14DaTnFJv+Fw5a0JpoUw==", + "requires": { + "@babel/compat-data": "^7.8.6", + "browserslist": "^4.9.1", + "invariant": "^2.2.4", + "levenary": "^1.1.1", + "semver": "^5.5.0" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + } } }, 
"@babel/helper-create-class-features-plugin": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.8.3.tgz", - "integrity": "sha512-qmp4pD7zeTxsv0JNecSBsEmG1ei2MqwJq4YQcK3ZWm/0t07QstWfvuV/vm3Qt5xNMFETn2SZqpMx2MQzbtq+KA==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.8.6.tgz", + "integrity": "sha512-klTBDdsr+VFFqaDHm5rR69OpEQtO2Qv8ECxHS1mNhJJvaHArR6a1xTf5K/eZW7eZpJbhCx3NW1Yt/sKsLXLblg==", "requires": { "@babel/helper-function-name": "^7.8.3", "@babel/helper-member-expression-to-functions": "^7.8.3", "@babel/helper-optimise-call-expression": "^7.8.3", "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.6", "@babel/helper-split-export-declaration": "^7.8.3" } }, "@babel/helper-create-regexp-features-plugin": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.8.3.tgz", - "integrity": "sha512-Gcsm1OHCUr9o9TcJln57xhWHtdXbA2pgQ58S0Lxlks0WMGNXuki4+GLfX0p+L2ZkINUGZvfkz8rzoqJQSthI+Q==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.8.6.tgz", + "integrity": "sha512-bPyujWfsHhV/ztUkwGHz/RPV1T1TDEsSZDsN42JPehndA+p1KKTh3npvTadux0ZhCrytx9tvjpWNowKby3tM6A==", "requires": { + "@babel/helper-annotate-as-pure": "^7.8.3", "@babel/helper-regex": "^7.8.3", "regexpu-core": "^4.6.0" } @@ -196,15 +233,16 @@ } }, "@babel/helper-module-transforms": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.8.3.tgz", - "integrity": "sha512-C7NG6B7vfBa/pwCOshpMbOYUmrYQDfCpVL/JCRu0ek8B5p8kue1+BCXpg2vOYs7w5ACB9GTOBYQ5U6NwrMg+3Q==", + "version": "7.8.6", + 
"resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.8.6.tgz", + "integrity": "sha512-RDnGJSR5EFBJjG3deY0NiL0K9TO8SXxS9n/MPsbPK/s9LbQymuLNtlzvDiNS7IpecuL45cMeLVkA+HfmlrnkRg==", "requires": { "@babel/helper-module-imports": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.6", "@babel/helper-simple-access": "^7.8.3", "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/types": "^7.8.3", + "@babel/template": "^7.8.6", + "@babel/types": "^7.8.6", "lodash": "^4.17.13" } }, @@ -242,14 +280,14 @@ } }, "@babel/helper-replace-supers": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.8.3.tgz", - "integrity": "sha512-xOUssL6ho41U81etpLoT2RTdvdus4VfHamCuAm4AHxGr+0it5fnwoVdwUJ7GFEqCsQYzJUhcbsN9wB9apcYKFA==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.8.6.tgz", + "integrity": "sha512-PeMArdA4Sv/Wf4zXwBKPqVj7n9UF/xg6slNRtZW84FM7JpE1CbG8B612FyM4cxrf4fMAMGO0kR7voy1ForHHFA==", "requires": { "@babel/helper-member-expression-to-functions": "^7.8.3", "@babel/helper-optimise-call-expression": "^7.8.3", - "@babel/traverse": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/traverse": "^7.8.6", + "@babel/types": "^7.8.6" } }, "@babel/helper-simple-access": { @@ -301,9 +339,9 @@ } }, "@babel/parser": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.8.4.tgz", - "integrity": "sha512-0fKu/QqildpXmPVaRBoXOlyBb3MC+J0A66x97qEfLOMkn3u6nfY5esWogQwi/K0BjASYy4DbnsEWnpNL6qT5Mw==" + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.8.7.tgz", + "integrity": "sha512-9JWls8WilDXFGxs0phaXAZgpxTZhSk/yOYH2hTHC0X1yC7Z78IJfvR1vJ+rmJKq3I35td2XzXzN6ZLYlna+r/A==" }, "@babel/plugin-proposal-async-generator-functions": { "version": "7.8.3", @@ -334,6 +372,15 @@ "@babel/plugin-syntax-decorators": "^7.8.3" } 
}, + "@babel/plugin-proposal-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.8.3.tgz", + "integrity": "sha512-NyaBbyLFXFLT9FP+zk0kYlUlA8XtCUbehs67F0nnEg7KICgMc2mNkIeu9TYhKzyXMkrapZFwAhXLdnt4IYHy1w==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.0" + } + }, "@babel/plugin-proposal-json-strings": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.8.3.tgz", @@ -343,6 +390,15 @@ "@babel/plugin-syntax-json-strings": "^7.8.0" } }, + "@babel/plugin-proposal-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-TS9MlfzXpXKt6YYomudb/KU7nQI6/xnapG6in1uZxoxDghuSMZsPb6D2fyUwNYSAp4l1iR7QtFOjkqcRYcUsfw==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0" + } + }, "@babel/plugin-proposal-object-rest-spread": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.8.3.tgz", @@ -361,6 +417,15 @@ "@babel/plugin-syntax-optional-catch-binding": "^7.8.0" } }, + "@babel/plugin-proposal-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.8.3.tgz", + "integrity": "sha512-QIoIR9abkVn+seDE3OjA08jWcs3eZ9+wJCKSRgo3WdEU2csFYgdScb+8qHB3+WXsGJD55u+5hWCISI7ejXS+kg==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.0" + } + }, "@babel/plugin-proposal-unicode-property-regex": { "version": "7.8.3", "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.8.3.tgz", @@ -410,6 +475,14 @@ "@babel/helper-plugin-utils": "^7.8.3" } }, + "@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, "@babel/plugin-syntax-object-rest-spread": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", @@ -426,6 +499,22 @@ "@babel/helper-plugin-utils": "^7.8.0" } }, + "@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-top-level-await": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.8.3.tgz", + "integrity": "sha512-kwj1j9lL/6Wd0hROD3b/OZZ7MSrZLqqn9RAZ5+cYYsflQ9HZBIKCUkr3+uL1MEJ1NePiUbf98jjiMQSv0NMR9g==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, "@babel/plugin-transform-arrow-functions": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.8.3.tgz", @@ -462,16 +551,16 @@ } }, "@babel/plugin-transform-classes": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.8.3.tgz", - 
"integrity": "sha512-SjT0cwFJ+7Rbr1vQsvphAHwUHvSUPmMjMU/0P59G8U2HLFqSa082JO7zkbDNWs9kH/IUqpHI6xWNesGf8haF1w==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.8.6.tgz", + "integrity": "sha512-k9r8qRay/R6v5aWZkrEclEhKO6mc1CCQr2dLsVHBmOQiMpN6I2bpjX3vgnldUWeEI1GHVNByULVxZ4BdP4Hmdg==", "requires": { "@babel/helper-annotate-as-pure": "^7.8.3", "@babel/helper-define-map": "^7.8.3", "@babel/helper-function-name": "^7.8.3", "@babel/helper-optimise-call-expression": "^7.8.3", "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.6", "@babel/helper-split-export-declaration": "^7.8.3", "globals": "^11.1.0" } @@ -519,9 +608,9 @@ } }, "@babel/plugin-transform-for-of": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.8.4.tgz", - "integrity": "sha512-iAXNlOWvcYUYoV8YIxwS7TxGRJcxyl8eQCfT+A5j8sKUzRFvJdcyjp97jL2IghWSRDaL2PU2O2tX8Cu9dTBq5A==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.8.6.tgz", + "integrity": "sha512-M0pw4/1/KI5WAxPsdcUL/w2LJ7o89YHN3yLkzNjg7Yl15GlVGgzHyCU+FMeAxevHGsLVmUqbirlUIKTafPmzdw==", "requires": { "@babel/helper-plugin-utils": "^7.8.3" } @@ -543,6 +632,14 @@ "@babel/helper-plugin-utils": "^7.8.3" } }, + "@babel/plugin-transform-member-expression-literals": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.8.3.tgz", + "integrity": "sha512-3Wk2EXhnw+rP+IDkK6BdtPKsUE5IeZ6QOGrPYvw52NwBStw9V1ZVzxgK6fSKSxqUvH9eQPR3tm3cOq79HlsKYA==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, "@babel/plugin-transform-modules-amd": { "version": "7.8.3", "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.8.3.tgz", @@ -610,21 +707,37 @@ } }, "@babel/plugin-transform-parameters": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.8.4.tgz", - "integrity": "sha512-IsS3oTxeTsZlE5KqzTbcC2sV0P9pXdec53SU+Yxv7o/6dvGM5AkTotQKhoSffhNgZ/dftsSiOoxy7evCYJXzVA==", + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.8.7.tgz", + "integrity": "sha512-brYWaEPTRimOctz2NDA3jnBbDi7SVN2T4wYuu0aqSzxC3nozFZngGaw29CJ9ZPweB7k+iFmZuoG3IVPIcXmD2g==", "requires": { - "@babel/helper-call-delegate": "^7.8.3", + "@babel/helper-call-delegate": "^7.8.7", "@babel/helper-get-function-arity": "^7.8.3", "@babel/helper-plugin-utils": "^7.8.3" } }, - "@babel/plugin-transform-regenerator": { + "@babel/plugin-transform-property-literals": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.3.tgz", - "integrity": "sha512-qt/kcur/FxrQrzFR432FGZznkVAjiyFtCOANjkAKwCbt465L6ZCiUQh2oMYGU3Wo8LRFJxNDFwWn106S5wVUNA==", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.8.3.tgz", + "integrity": "sha512-uGiiXAZMqEoQhRWMK17VospMZh5sXWg+dlh2soffpkAl96KAm+WZuJfa6lcELotSRmooLqg0MWdH6UUq85nmmg==", "requires": { - "regenerator-transform": "^0.14.0" + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-regenerator": { + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.7.tgz", + "integrity": "sha512-TIg+gAl4Z0a3WmD3mbYSk+J9ZUH6n/Yc57rtKRnlA/7rcCvpekHXe0CMZHP1gYp7/KLe9GHTuIba0vXmls6drA==", + "requires": { + "regenerator-transform": "^0.14.2" + } + }, + "@babel/plugin-transform-reserved-words": { + "version": "7.8.3", 
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.8.3.tgz", + "integrity": "sha512-mwMxcycN3omKFDjDQUl+8zyMsBfjRFr0Zn/64I41pmjv4NJuqcYlEtezwYtw9TFd9WR1vN5kiM+O0gMZzO6L0A==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" } }, "@babel/plugin-transform-runtime": { @@ -697,53 +810,67 @@ } }, "@babel/preset-env": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.3.4.tgz", - "integrity": "sha512-2mwqfYMK8weA0g0uBKOt4FE3iEodiHy9/CW0b+nWXcbL+pGzLx8ESYc+j9IIxr6LTDHWKgPm71i9smo02bw+gA==", + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.8.7.tgz", + "integrity": "sha512-BYftCVOdAYJk5ASsznKAUl53EMhfBbr8CJ1X+AJLfGPscQkwJFiaV/Wn9DPH/7fzm2v6iRYJKYHSqyynTGw0nw==", "requires": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-async-generator-functions": "^7.2.0", - "@babel/plugin-proposal-json-strings": "^7.2.0", - "@babel/plugin-proposal-object-rest-spread": "^7.3.4", - "@babel/plugin-proposal-optional-catch-binding": "^7.2.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.2.0", - "@babel/plugin-syntax-async-generators": "^7.2.0", - "@babel/plugin-syntax-json-strings": "^7.2.0", - "@babel/plugin-syntax-object-rest-spread": "^7.2.0", - "@babel/plugin-syntax-optional-catch-binding": "^7.2.0", - "@babel/plugin-transform-arrow-functions": "^7.2.0", - "@babel/plugin-transform-async-to-generator": "^7.3.4", - "@babel/plugin-transform-block-scoped-functions": "^7.2.0", - "@babel/plugin-transform-block-scoping": "^7.3.4", - "@babel/plugin-transform-classes": "^7.3.4", - "@babel/plugin-transform-computed-properties": "^7.2.0", - "@babel/plugin-transform-destructuring": "^7.2.0", - "@babel/plugin-transform-dotall-regex": "^7.2.0", - "@babel/plugin-transform-duplicate-keys": "^7.2.0", - "@babel/plugin-transform-exponentiation-operator": "^7.2.0", - 
"@babel/plugin-transform-for-of": "^7.2.0", - "@babel/plugin-transform-function-name": "^7.2.0", - "@babel/plugin-transform-literals": "^7.2.0", - "@babel/plugin-transform-modules-amd": "^7.2.0", - "@babel/plugin-transform-modules-commonjs": "^7.2.0", - "@babel/plugin-transform-modules-systemjs": "^7.3.4", - "@babel/plugin-transform-modules-umd": "^7.2.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.3.0", - "@babel/plugin-transform-new-target": "^7.0.0", - "@babel/plugin-transform-object-super": "^7.2.0", - "@babel/plugin-transform-parameters": "^7.2.0", - "@babel/plugin-transform-regenerator": "^7.3.4", - "@babel/plugin-transform-shorthand-properties": "^7.2.0", - "@babel/plugin-transform-spread": "^7.2.0", - "@babel/plugin-transform-sticky-regex": "^7.2.0", - "@babel/plugin-transform-template-literals": "^7.2.0", - "@babel/plugin-transform-typeof-symbol": "^7.2.0", - "@babel/plugin-transform-unicode-regex": "^7.2.0", - "browserslist": "^4.3.4", + "@babel/compat-data": "^7.8.6", + "@babel/helper-compilation-targets": "^7.8.7", + "@babel/helper-module-imports": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-proposal-async-generator-functions": "^7.8.3", + "@babel/plugin-proposal-dynamic-import": "^7.8.3", + "@babel/plugin-proposal-json-strings": "^7.8.3", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-proposal-object-rest-spread": "^7.8.3", + "@babel/plugin-proposal-optional-catch-binding": "^7.8.3", + "@babel/plugin-proposal-optional-chaining": "^7.8.3", + "@babel/plugin-proposal-unicode-property-regex": "^7.8.3", + "@babel/plugin-syntax-async-generators": "^7.8.0", + "@babel/plugin-syntax-dynamic-import": "^7.8.0", + "@babel/plugin-syntax-json-strings": "^7.8.0", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0", + "@babel/plugin-syntax-object-rest-spread": "^7.8.0", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.0", + "@babel/plugin-syntax-optional-chaining": 
"^7.8.0", + "@babel/plugin-syntax-top-level-await": "^7.8.3", + "@babel/plugin-transform-arrow-functions": "^7.8.3", + "@babel/plugin-transform-async-to-generator": "^7.8.3", + "@babel/plugin-transform-block-scoped-functions": "^7.8.3", + "@babel/plugin-transform-block-scoping": "^7.8.3", + "@babel/plugin-transform-classes": "^7.8.6", + "@babel/plugin-transform-computed-properties": "^7.8.3", + "@babel/plugin-transform-destructuring": "^7.8.3", + "@babel/plugin-transform-dotall-regex": "^7.8.3", + "@babel/plugin-transform-duplicate-keys": "^7.8.3", + "@babel/plugin-transform-exponentiation-operator": "^7.8.3", + "@babel/plugin-transform-for-of": "^7.8.6", + "@babel/plugin-transform-function-name": "^7.8.3", + "@babel/plugin-transform-literals": "^7.8.3", + "@babel/plugin-transform-member-expression-literals": "^7.8.3", + "@babel/plugin-transform-modules-amd": "^7.8.3", + "@babel/plugin-transform-modules-commonjs": "^7.8.3", + "@babel/plugin-transform-modules-systemjs": "^7.8.3", + "@babel/plugin-transform-modules-umd": "^7.8.3", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.8.3", + "@babel/plugin-transform-new-target": "^7.8.3", + "@babel/plugin-transform-object-super": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.8.7", + "@babel/plugin-transform-property-literals": "^7.8.3", + "@babel/plugin-transform-regenerator": "^7.8.7", + "@babel/plugin-transform-reserved-words": "^7.8.3", + "@babel/plugin-transform-shorthand-properties": "^7.8.3", + "@babel/plugin-transform-spread": "^7.8.3", + "@babel/plugin-transform-sticky-regex": "^7.8.3", + "@babel/plugin-transform-template-literals": "^7.8.3", + "@babel/plugin-transform-typeof-symbol": "^7.8.4", + "@babel/plugin-transform-unicode-regex": "^7.8.3", + "@babel/types": "^7.8.7", + "browserslist": "^4.8.5", + "core-js-compat": "^3.6.2", "invariant": "^2.2.2", - "js-levenshtein": "^1.1.3", - "semver": "^5.3.0" + "levenary": "^1.1.1", + "semver": "^5.5.0" }, "dependencies": { "semver": { @@ -754,57 
+881,41 @@ } }, "@babel/runtime": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.8.4.tgz", - "integrity": "sha512-neAp3zt80trRVBI1x0azq6c57aNBqYZH8KhMm3TaB7wEI5Q4A2SHfBHE8w9gOhI/lrqxtEbXZgQIrHP+wvSGwQ==", + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.8.7.tgz", + "integrity": "sha512-+AATMUFppJDw6aiR5NVPHqIQBlV/Pj8wY/EZH+lmvRdUo9xBaz/rF3alAwFJQavvKfeOlPE7oaaDHVbcySbCsg==", "requires": { - "regenerator-runtime": "^0.13.2" + "regenerator-runtime": "^0.13.4" }, "dependencies": { "regenerator-runtime": { - "version": "0.13.3", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz", - "integrity": "sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw==" - } - } - }, - "@babel/runtime-corejs2": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs2/-/runtime-corejs2-7.8.4.tgz", - "integrity": "sha512-7jU2FgNqNHX6yTuU/Dr/vH5/O8eVL9U85MG5aDw1LzGfCvvhXC1shdXfVzCQDsoY967yrAKeLujRv7l8BU+dZA==", - "requires": { - "core-js": "^2.6.5", - "regenerator-runtime": "^0.13.2" - }, - "dependencies": { - "regenerator-runtime": { - "version": "0.13.3", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz", - "integrity": "sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw==" + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.4.tgz", + "integrity": "sha512-plpwicqEzfEyTQohIKktWigcLzmNStMGwbOUbykx51/29Z3JOGYldaaNGK7ngNXV+UcoqvIMmloZ48Sr74sd+g==" } } }, "@babel/template": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.3.tgz", - "integrity": "sha512-04m87AcQgAFdvuoyiQ2kgELr2tV8B4fP/xJAVUL3Yb3bkNdMedD3d0rlSQr3PegP0cms3eHjl1F7PWlvWbU8FQ==", + "version": "7.8.6", + "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.8.6.tgz", + "integrity": "sha512-zbMsPMy/v0PWFZEhQJ66bqjhH+z0JgMoBWuikXybgG3Gkd/3t5oQ1Rw2WQhnSrsOmsKXnZOx15tkC4qON/+JPg==", "requires": { "@babel/code-frame": "^7.8.3", - "@babel/parser": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/parser": "^7.8.6", + "@babel/types": "^7.8.6" } }, "@babel/traverse": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.8.4.tgz", - "integrity": "sha512-NGLJPZwnVEyBPLI+bl9y9aSnxMhsKz42so7ApAv9D+b4vAFPpY013FTS9LdKxcABoIYFU52HcYga1pPlx454mg==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.8.6.tgz", + "integrity": "sha512-2B8l0db/DPi8iinITKuo7cbPznLCEk0kCxDoB9/N6gGNg/gxOXiR/IcymAFPiBwk5w6TtQ27w4wpElgp9btR9A==", "requires": { "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.8.4", + "@babel/generator": "^7.8.6", "@babel/helper-function-name": "^7.8.3", "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/parser": "^7.8.4", - "@babel/types": "^7.8.3", + "@babel/parser": "^7.8.6", + "@babel/types": "^7.8.6", "debug": "^4.1.0", "globals": "^11.1.0", "lodash": "^4.17.13" @@ -826,9 +937,9 @@ } }, "@babel/types": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.8.3.tgz", - "integrity": "sha512-jBD+G8+LWpMBBWvVcdr4QysjUE4mU/syrhN17o1u3gx0/WzJB1kwiVZAXRtWbsIPOwW8pF/YJV5+nmetPzepXg==", + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.8.7.tgz", + "integrity": "sha512-k2TreEHxFA4CjGkL+GYjRyx35W0Mr7DP5+9q6WMkyKXB+904bYmG40syjMFV0oLlhhFCwWl0vA0DyzTDkwAiJw==", "requires": { "esutils": "^2.0.2", "lodash": "^4.17.13", @@ -843,10 +954,13 @@ } }, "@cosmos-ui/vue": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.5.21.tgz", - "integrity": "sha512-Y60AMxFKgHrgE/EHxnGKaTcYUN1nJa5m3SylhsCe/d0AvzF9RSYGSPwVgDxmW4KiufBKXkv4PmiNG9WDNWwdxw==", + "version": "0.10.0", + "resolved": 
"https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.10.0.tgz", + "integrity": "sha512-dQySi+cjICuEl7OnFHMTY1ZKiVQ/dkZ22oiVFQRzfrhRLYa3HgEgM22EMBSzmhGVg6J52yS18OWpIruftWIW1Q==", "requires": { + "clipboard-copy": "^3.1.0", + "js-base64": "^2.5.2", + "prismjs": "^1.19.0", "tiny-cookie": "^2.3.1", "vue": "^2.6.10" } @@ -917,9 +1031,9 @@ "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==" }, "@types/node": { - "version": "13.7.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-13.7.0.tgz", - "integrity": "sha512-GnZbirvmqZUzMgkFn70c74OQpTTUcCzlhQliTzYjQMqg+hVKcDnxdL19Ne3UdYzdMA/+W3eb646FWn/ZaT1NfQ==" + "version": "13.9.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-13.9.0.tgz", + "integrity": "sha512-0ARSQootUG1RljH2HncpsY2TJBfGQIKOOi7kxzUY6z54ePu/ZD+wJA8zI2Q6v8rol2qpG/rvqsReco8zNMPvhQ==" }, "@types/q": { "version": "1.5.2", @@ -945,23 +1059,31 @@ } }, "@vue/babel-preset-app": { - "version": "3.12.1", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-3.12.1.tgz", - "integrity": "sha512-Zjy5jQaikV1Pz+ri0YgXFS7q4/5wCxB5tRkDOEIt5+4105u0Feb/pvH20nVL6nx9GyXrECFfcm7Yxr/z++OaPQ==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.2.3.tgz", + "integrity": "sha512-Xlc8d9Ebgu9pNZMUxKZWVP2CctVZzfX3LAxjBDWAAIiVpdXX4IkQQCevDhgiANFzlmE3KXtiSgPGs57Sso2g7Q==", "requires": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-proposal-class-properties": "^7.0.0", - "@babel/plugin-proposal-decorators": "^7.1.0", - "@babel/plugin-syntax-dynamic-import": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.0.0", - "@babel/plugin-transform-runtime": "^7.4.0", - "@babel/preset-env": "^7.0.0 < 7.4.0", - "@babel/runtime": "^7.0.0", - "@babel/runtime-corejs2": "^7.2.0", - "@vue/babel-preset-jsx": "^1.0.0", - "babel-plugin-dynamic-import-node": "^2.2.0", - "babel-plugin-module-resolver": "3.2.0", - "core-js": "^2.6.5" 
+ "@babel/core": "^7.8.4", + "@babel/helper-compilation-targets": "^7.8.4", + "@babel/helper-module-imports": "^7.8.3", + "@babel/plugin-proposal-class-properties": "^7.8.3", + "@babel/plugin-proposal-decorators": "^7.8.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-jsx": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.8.3", + "@babel/preset-env": "^7.8.4", + "@babel/runtime": "^7.8.4", + "@vue/babel-preset-jsx": "^1.1.2", + "babel-plugin-dynamic-import-node": "^2.3.0", + "core-js": "^3.6.4", + "core-js-compat": "^3.6.4" + }, + "dependencies": { + "core-js": { + "version": "3.6.4", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.4.tgz", + "integrity": "sha512-4paDGScNgZP2IXXilaffL9X7968RuvwlkK3xWtZRVqgd8SYNiVKRJvkFd1aqqEuPfN7E68ZHEp9hDj6lHj4Hyw==" + } } }, "@vue/babel-preset-jsx": { @@ -1063,23 +1185,24 @@ } }, "@vuepress/core": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.3.0.tgz", - "integrity": "sha512-/KaH10ggZeEnwh/i8A02VtGHfuIfTEf/pIPV9BBVjK5M6ToPhF2pkcXlPk5PbCWam2dKm7ZDQddJzev1dY5TNA==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.3.1.tgz", + "integrity": "sha512-BBtM3imJUPwCTz0Fzl++ZLgf1afcsas4jo/wbVvroIdI0R6GEbXdivnisVGD48tZ10WcwvY94tlL1jWO8xV6bg==", "requires": { - "@babel/core": "^7.0.0", - "@vue/babel-preset-app": "^3.1.1", - "@vuepress/markdown": "^1.3.0", - "@vuepress/markdown-loader": "^1.3.0", - "@vuepress/plugin-last-updated": "^1.3.0", - "@vuepress/plugin-register-components": "^1.3.0", - "@vuepress/shared-utils": "^1.3.0", + "@babel/core": "^7.8.4", + "@vue/babel-preset-app": "^4.1.2", + "@vuepress/markdown": "^1.3.1", + "@vuepress/markdown-loader": "^1.3.1", + "@vuepress/plugin-last-updated": "^1.3.1", + "@vuepress/plugin-register-components": "^1.3.1", + "@vuepress/shared-utils": "^1.3.1", "autoprefixer": "^9.5.1", "babel-loader": "^8.0.4", "cache-loader": "^3.0.0", "chokidar": "^2.0.3", 
"connect-history-api-fallback": "^1.5.0", "copy-webpack-plugin": "^5.0.2", + "core-js": "^3.6.4", "cross-spawn": "^6.0.5", "css-loader": "^2.1.1", "file-loader": "^3.0.1", @@ -1104,14 +1227,21 @@ "webpack-dev-server": "^3.5.1", "webpack-merge": "^4.1.2", "webpackbar": "3.2.0" + }, + "dependencies": { + "core-js": { + "version": "3.6.4", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.4.tgz", + "integrity": "sha512-4paDGScNgZP2IXXilaffL9X7968RuvwlkK3xWtZRVqgd8SYNiVKRJvkFd1aqqEuPfN7E68ZHEp9hDj6lHj4Hyw==" + } } }, "@vuepress/markdown": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.3.0.tgz", - "integrity": "sha512-h4FCAxcYLSGuoftbumsesqquRuQksb98sygiP/EV1J7z3qVj8r/1YdRRoUoE0Yd9hw0izN52KJRYZC7tlUmBnw==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.3.1.tgz", + "integrity": "sha512-UJoGHR9GsFnPk+Jot8tieO4M6WJQ5CkdIWlQfbpC1+Z0ETJjlNIel23BKLNzqfo3NhLq+/i33RnzMVzkBKlVvQ==", "requires": { - "@vuepress/shared-utils": "^1.3.0", + "@vuepress/shared-utils": "^1.3.1", "markdown-it": "^8.4.1", "markdown-it-anchor": "^5.0.2", "markdown-it-chain": "^1.3.0", @@ -1140,56 +1270,61 @@ } }, "@vuepress/markdown-loader": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.3.0.tgz", - "integrity": "sha512-20J9+wuyCxhwOWfb7aDY0F/+j2oQYaoDE1VbH3zaqI9XesPl42DsEwA1Nw1asEm3yXdh+uC2scBCiNcv94tsHg==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.3.1.tgz", + "integrity": "sha512-JxjQgSClW51hE0bCrcAqnG0yrvVURzcZwP2zbWkcCMD7vomHbvkHyPmuf6oa8Jk4S//RQUYINrzC/KrDjVuzIQ==", "requires": { - "@vuepress/markdown": "^1.3.0", + "@vuepress/markdown": "^1.3.1", "loader-utils": "^1.1.0", "lru-cache": "^5.1.1" } }, "@vuepress/plugin-active-header-links": { - "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.3.0.tgz", - "integrity": "sha512-C+EhZefAOxN83jVZebRWqFUBUklTsTtWRiDFczxcxqH995ZZumi1UFKj9TurOjrZppUDr4ftfxIqGkj4QSUeWw==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.3.1.tgz", + "integrity": "sha512-mrawXXAv2K1GrD1JNoFHxF8xX3KiphVcwvf+58GXpsyAQ5ag5X1BZG3gCA1JdNFUe3SXRh5jF6HTBuM2dc6Ovg==", "requires": { "lodash.debounce": "^4.0.8" } }, + "@vuepress/plugin-google-analytics": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.3.1.tgz", + "integrity": "sha512-Xb7g86JT/LD1sLYG2txvpp4ztDTOqN5yNRZK5OtzEekCh0NWxIxz0fEYKqVlaskIFnzoA1dfizudyjGCKY+hMw==" + }, "@vuepress/plugin-last-updated": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.3.0.tgz", - "integrity": "sha512-zCg98YiCFzBo7hHh5CE4H7lO13QaexeNXKC8SC7aNopjhg1/+rzFKEWt5frARnYqhMrkhEqcegSuB4xWxNV+zQ==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.3.1.tgz", + "integrity": "sha512-n1EhhFcaWxQtbC9ICyLg8kmSULjV18wYMbHCyaKRYAvyhlPau95zbSpQfG2Nl3ZgFR6kRodK6AmZUOgho0zh/g==", "requires": { "cross-spawn": "^6.0.5" } }, "@vuepress/plugin-nprogress": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.3.0.tgz", - "integrity": "sha512-PuBDAhaYLvwG63LamIc1fMk+s4kUqPuvNYKfZjQlF3LtXjlCMvd6YEQyogfB9cZnFOg1nryeHJwWoAdFvzw29Q==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.3.1.tgz", + "integrity": "sha512-vDBnIhTgGZbADwhaatSLsFnuj+MDDpCWQ79m9o+8RtMZO2HemedcCRNIj/ZLRJSBFjXrDdnXF5lpW4EEIeRaew==", "requires": { "nprogress": "^0.2.0" } }, "@vuepress/plugin-register-components": { - "version": "1.3.0", - 
"resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.3.0.tgz", - "integrity": "sha512-IkBacuTDHSHhI3qWXPQtVWTEAL+wprrbaYrD+g2n9xV3dzMkhHJxbpRpw7eAbvsP85a03rVouwRukZ+YlhYPPQ==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.3.1.tgz", + "integrity": "sha512-ae/94omRTPZkJKuVic8Rvzfnu2NtqsyVPYTL6qcnjDgxieR3L7EAYLNEvYpg1jof+QTHoEDCaVU2c63chZcfEQ==", "requires": { - "@vuepress/shared-utils": "^1.3.0" + "@vuepress/shared-utils": "^1.3.1" } }, "@vuepress/plugin-search": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.3.0.tgz", - "integrity": "sha512-buoQ6gQ2MLbLQ7Nhg5KJWPzKo7NtvdK/e6Fo1ig/kbOG5HyYKHCyqLjbQ/ZqT+fGbaSeEjH3DaVYTNx55GRX5A==" + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.3.1.tgz", + "integrity": "sha512-iOIvMWUTPHrGxjDprFoGTcuI8Y8/6e6JjLO4mO6qe6qVqR1yCQ8cJzVYXIizjEHUFYJ04uZ3jF9gBV8npS+3ZQ==" }, "@vuepress/shared-utils": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.3.0.tgz", - "integrity": "sha512-n1AFgt8SiMDdc5aIj5yOqS3E6+dAZ+9tPw6qf1mBiqvdZzwaUtlydvXqVkskrwUo18znLrUr55VYwubMOaxFnQ==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.3.1.tgz", + "integrity": "sha512-MlIAlnptjDC9+l0SJKW6BpkuwtxfKDzq4Rmag75RdyIqkkNv4EsCXZ8Y3HSuzENWFBwoD31jLC+nCZ3hULcvSg==", "requires": { "chalk": "^2.3.2", "diacritics": "^1.3.0", @@ -1203,13 +1338,13 @@ } }, "@vuepress/theme-default": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.3.0.tgz", - "integrity": "sha512-0KKTIQQAyO3xE9Gn5vdQYWY+B1onzMm2i3Td610FiLsCRqeHsWs/stl6tlP3nV75OUHwBRH/w0ITrIF4kMR7GQ==", + "version": "1.3.1", + "resolved": 
"https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.3.1.tgz", + "integrity": "sha512-CihkB6/+5vfgeTI5HRDs4+QgTkIN4/K54OpQCGLW51OinXuz4rjMVQW2uSlSqSeKEr+MERHa+Jc5deIpA0opoA==", "requires": { - "@vuepress/plugin-active-header-links": "^1.3.0", - "@vuepress/plugin-nprogress": "^1.3.0", - "@vuepress/plugin-search": "^1.3.0", + "@vuepress/plugin-active-header-links": "^1.3.1", + "@vuepress/plugin-nprogress": "^1.3.1", + "@vuepress/plugin-search": "^1.3.1", "docsearch.js": "^2.5.2", "lodash": "^4.17.15", "stylus": "^0.54.5", @@ -1426,9 +1561,9 @@ "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=" }, "ajv": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.11.0.tgz", - "integrity": "sha512-nCprB/0syFYy9fVYU1ox1l2KN8S9I+tziH8D4zdZuLT3N6RMlGSGt5FSTpAiHB/Whv8Qs1cWHma1aMKZyaHRKA==", + "version": "6.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz", + "integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==", "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -1557,11 +1692,11 @@ "integrity": "sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==" }, "ansi-escapes": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.0.tgz", - "integrity": "sha512-EiYhwo0v255HUL6eDyuLrXEkTi7WwVCLAw+SeOQ7M7qdun1z1pum4DEm/nuqIVbPvi9RPPc9k9LbyBv6H0DwVg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz", + "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==", "requires": { - "type-fest": "^0.8.1" + "type-fest": "^0.11.0" } }, "ansi-html": { @@ -1796,18 +1931,6 @@ "object.assign": "^4.1.0" } }, - "babel-plugin-module-resolver": { - "version": "3.2.0", - "resolved": 
"https://registry.npmjs.org/babel-plugin-module-resolver/-/babel-plugin-module-resolver-3.2.0.tgz", - "integrity": "sha512-tjR0GvSndzPew/Iayf4uICWZqjBwnlMWjSx6brryfQ81F9rxBVqwDJtFCV8oOs0+vJeefK9TmdZtkIFdFe1UnA==", - "requires": { - "find-babel-config": "^1.1.0", - "glob": "^7.1.2", - "pkg-up": "^2.0.0", - "reselect": "^3.0.1", - "resolve": "^1.4.0" - } - }, "babel-runtime": { "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", @@ -2081,6 +2204,11 @@ "requires": { "has-flag": "^4.0.0" } + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==" } } }, @@ -2181,13 +2309,13 @@ } }, "browserslist": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.8.6.tgz", - "integrity": "sha512-ZHao85gf0eZ0ESxLfCp73GG9O/VTytYDIkIiZDlURppLTI9wErSM/5yAKEq6rcUdxBLjMELmrYUJGg5sxGKMHg==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.9.1.tgz", + "integrity": "sha512-Q0DnKq20End3raFulq6Vfp1ecB9fh8yUNV55s8sekaDDeqBaCtWlRHCUdaWyUeSSBJM7IbM6HcsyaeYqgeDhnw==", "requires": { - "caniuse-lite": "^1.0.30001023", - "electron-to-chromium": "^1.3.341", - "node-releases": "^1.1.47" + "caniuse-lite": "^1.0.30001030", + "electron-to-chromium": "^1.3.363", + "node-releases": "^1.1.50" } }, "buffer": { @@ -2231,9 +2359,9 @@ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" }, "cac": { - "version": "6.5.6", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.5.6.tgz", - "integrity": "sha512-8jsGLeBiYEVYTDExaj/rDPG4tyra4yjjacIL10TQ+MobPcg9/IST+dkKLu6sOzq0GcIC6fQqX1nkH9HoskQLAw==" + "version": "6.5.7", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.5.7.tgz", + "integrity": "sha512-DjjOqLvoX/oO/snovTNm553kRYWTmIIQBfHQ2UqktbCudoHJuxzvRhjwdCHkXrQwp/lnu3bYyZ+LfaHtwk0Wjw==" }, "cacache": { 
"version": "12.0.3", @@ -2372,9 +2500,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001027", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001027.tgz", - "integrity": "sha512-7xvKeErvXZFtUItTHgNtLgS9RJpVnwBlWX8jSo/BO8VsF6deszemZSkJJJA1KOKrXuzZH4WALpAJdq5EyfgMLg==" + "version": "1.0.30001033", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001033.tgz", + "integrity": "sha512-8Ibzxee6ibc5q88cM1usPsMpJOG5CTq0s/dKOmlekPbDGKt+UrnOOTPSjQz3kVo6yL7N4SB5xd+FGLHQmbzh6A==" }, "caseless": { "version": "0.12.0", @@ -2448,9 +2576,9 @@ } }, "chownr": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.3.tgz", - "integrity": "sha512-i70fVHhmV3DtTl6nqvZOnIjbY0Pe4kAUjwHj8z0zAdgBtYrJyYwLKCCuRBQ5ppkyL0AkN7HKRnETdmdp1zqNXw==" + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" }, "chrome-trace-event": { "version": "1.0.2", @@ -2509,9 +2637,9 @@ "integrity": "sha512-gpaBrMAizVEANOpfZp/EEUixTXDyGt7DFzdK5hU+UbWt/J0lB0w20ncZj59Z9a93xHb9u12zF5BS6i9RKbtg4w==" }, "clipboard": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.4.tgz", - "integrity": "sha512-Vw26VSLRpJfBofiVaFb/I8PVfdI1OxKcYShe6fm0sP/DtmiWQNCjhM/okTvdCo0G+lMMm1rMYbk4IK4x1X+kgQ==", + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.6.tgz", + "integrity": "sha512-g5zbiixBRk/wyKakSwCKd7vQXDjFnAMGHoEyBogG/bw9kTD9GvdAvaoRR1ALcEzt3pVKxZR0pViekPMIS0QyGg==", "optional": true, "requires": { "good-listener": "^1.2.2", @@ -2703,11 +2831,11 @@ } }, "configstore": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.0.tgz", - "integrity": "sha512-eE/hvMs7qw7DlcB5JPRnthmrITuHMmACUJAp89v6PT6iOqzoLS7HRWhBtuHMlhNHo2AhUSA/3Dh1bKNJHcublQ==", + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", + "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", "requires": { - "dot-prop": "^5.1.0", + "dot-prop": "^5.2.0", "graceful-fs": "^4.1.2", "make-dir": "^3.0.0", "unique-string": "^2.0.0", @@ -2715,23 +2843,10 @@ "xdg-basedir": "^4.0.0" }, "dependencies": { - "dot-prop": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.2.0.tgz", - "integrity": "sha512-uEUyaDKoSQ1M4Oq8l45hSE26SnTxL6snNnqvK/VWx5wJhmff5z0FUVJDKDanor/6w3kzE3i7XZOk+7wC0EXr1A==", - "requires": { - "is-obj": "^2.0.0" - } - }, - "is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==" - }, "make-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.0.0.tgz", - "integrity": "sha512-grNJDhb8b1Jm1qeqW5R/O63wUo4UXo2v2HMic6YT9i/HBlF93S8jkMgH7yugvY9ABDShH4VZMn8I+U8+fCNegw==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.0.2.tgz", + "integrity": "sha512-rYKABKutXa6vXTXhoV18cBE7PaewPXHe/Bdq4v+ZLMhxbWApkFFplT0LcbMW+6BbjnQXzZ/sAvSE/JdguApG5w==", "requires": { "semver": "^6.0.0" } @@ -2877,19 +2992,6 @@ "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" }, - "p-limit": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", - "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", - "requires": { - "p-try": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": 
"sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - }, "pify": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", @@ -2907,6 +3009,22 @@ "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz", "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==" }, + "core-js-compat": { + "version": "3.6.4", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.6.4.tgz", + "integrity": "sha512-zAa3IZPvsJ0slViBQ2z+vgyyTuhd3MFn1rBQjZSKVEgB0UMYhUkCj9jJUVPgGTGqWvsBVmfnruXgTcNyTlEiSA==", + "requires": { + "browserslist": "^4.8.3", + "semver": "7.0.0" + }, + "dependencies": { + "semver": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz", + "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==" + } + } + }, "core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", @@ -3087,11 +3205,6 @@ "source-map": "^0.6.1" } }, - "css-unit-converter": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/css-unit-converter/-/css-unit-converter-1.1.1.tgz", - "integrity": "sha1-2bkoGtz9jO2TW9urqDeGiX9k6ZY=" - }, "css-what": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz", @@ -3498,11 +3611,11 @@ } }, "dot-prop": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-4.2.0.tgz", - "integrity": "sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.2.0.tgz", + "integrity": "sha512-uEUyaDKoSQ1M4Oq8l45hSE26SnTxL6snNnqvK/VWx5wJhmff5z0FUVJDKDanor/6w3kzE3i7XZOk+7wC0EXr1A==", "requires": { - "is-obj": "^1.0.0" + "is-obj": "^2.0.0" } }, "duplexer3": { @@ -3565,9 
+3678,9 @@ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" }, "electron-to-chromium": { - "version": "1.3.346", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.346.tgz", - "integrity": "sha512-Yy4jF5hJd57BWmGPt0KjaXc25AmWZeQK75kdr4zIzksWVtiT6DwaNtvTb9dt+LkQKwUpvBfCyyPsXXtbY/5GYw==" + "version": "1.3.375", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.375.tgz", + "integrity": "sha512-zmaFnYVBtfpF8bGRYxgPeVAlXB7N3On8rjBE2ROc6wOpTPpzRWaiHo6KkbJMvlH07CH33uks/TEb6kuMMn8q6A==" }, "elliptic": { "version": "6.5.2", @@ -3589,9 +3702,9 @@ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" }, "emojis-list": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", - "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==" }, "encodeurl": { "version": "1.0.2", @@ -3722,6 +3835,11 @@ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" }, + "escape-goat": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", + "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==" + }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -4005,9 +4123,9 @@ "integrity": "sha512-vNKxJHTEKNThjfrdJwHc7brvM6eVevuO5nTj6ez8ZQ1qbXTvGthucRF7S4vf2cr71QVnT70V34v0S1DyQsti0w==" }, "figures": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.1.0.tgz", - "integrity": 
"sha512-ravh8VRXqHuMvZt/d8GblBeqDMkdJMBdv/2KntFH+ra5MXkO7nxNKpzQ3n6QD/2da1kH0aWmNISdvhM7gl2gVg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", "requires": { "escape-string-regexp": "^1.0.5" } @@ -4062,22 +4180,6 @@ } } }, - "find-babel-config": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/find-babel-config/-/find-babel-config-1.2.0.tgz", - "integrity": "sha512-jB2CHJeqy6a820ssiqwrKMeyC6nNdmrcgkKWJWmpoxpE8RKciYJXCcXRq1h2AzCo5I5BJeN2tkGEO3hLTuePRA==", - "requires": { - "json5": "^0.5.1", - "path-exists": "^3.0.0" - }, - "dependencies": { - "json5": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=" - } - } - }, "find-cache-dir": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", @@ -4089,11 +4191,11 @@ } }, "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", "requires": { - "locate-path": "^2.0.0" + "locate-path": "^3.0.0" } }, "flush-write-stream": { @@ -4774,9 +4876,9 @@ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" }, "fuse.js": { - "version": "3.4.6", - "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.4.6.tgz", - "integrity": "sha512-H6aJY4UpLFwxj1+5nAvufom5b2BT2v45P1MkPvdGIK8fWjQx/7o6tTT1+ALV0yawQvbmvCF0ufl2et8eJ7v7Cg==" + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz", + "integrity": 
"sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==" }, "gensync": { "version": "1.0.0-beta.1", @@ -5187,9 +5289,9 @@ } }, "http-cache-semantics": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.0.3.tgz", - "integrity": "sha512-TcIMG3qeVLgDr1TEd2XvHaTnMPwYQUQMIBLy+5pLSDKYFc7UIqj39w8EGzZkaxoLv/l2K8HaI0t5AVA+YYgUew==" + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", + "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" }, "http-deceiver": { "version": "1.2.7", @@ -5403,9 +5505,9 @@ "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=" }, "ipaddr.js": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.0.tgz", - "integrity": "sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA==" + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" }, "is-absolute-url": { "version": "2.1.0", @@ -5610,9 +5712,9 @@ } }, "is-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==" }, "is-path-cwd": { "version": "2.2.0", @@ -5732,10 +5834,10 @@ "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-1.6.0.tgz", "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM=" }, - "js-levenshtein": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz", - 
"integrity": "sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==" + "js-base64": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.5.2.tgz", + "integrity": "sha512-Vg8czh0Q7sFBSUMWWArX/miJeBWYBPpdU/3M/DKSaekLMqrqVPaedp+5mZhie/r0lgrcaYBfwXatEew6gwgiQQ==" }, "js-stringify": { "version": "1.0.2", @@ -5880,6 +5982,19 @@ "invert-kv": "^2.0.0" } }, + "leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==" + }, + "levenary": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/levenary/-/levenary-1.1.1.tgz", + "integrity": "sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ==", + "requires": { + "leven": "^3.1.0" + } + }, "linkify-it": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", @@ -5899,21 +6014,21 @@ "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==" }, "loader-utils": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.2.3.tgz", - "integrity": "sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", + "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==", "requires": { "big.js": "^5.2.2", - "emojis-list": "^2.0.0", + "emojis-list": "^3.0.0", "json5": "^1.0.1" } }, "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", "requires": { - "p-locate": "^2.0.0", + "p-locate": "^3.0.0", "path-exists": "^3.0.0" } }, @@ -5985,9 +6100,9 @@ "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" }, "loglevel": { - "version": "1.6.6", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.6.6.tgz", - "integrity": "sha512-Sgr5lbboAUBo3eXCSPL4/KoVz3ROKquOjcctxmHIt+vol2DrqTQe3SwkKKuYhEiWB5kYa13YyopJ69deJ1irzQ==" + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.6.7.tgz", + "integrity": "sha512-cY2eLFrQSAfVPhCgH1s7JI73tMbg9YC3v3+ZHVW67sBS7UxWzNEk/ZBbSfLykBWHp33dqqtOv82gjhKEi81T/A==" }, "longest": { "version": "1.0.1", @@ -6328,9 +6443,9 @@ } }, "minimist": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.4.tgz", + "integrity": "sha512-wTiNDqe4D2rbTJGZk1qcdZgFtY0/r+iuE6GDT7V0/+Gu5MLpIDm4+CssDECR79OJs/OxLPXMzdxy153b5Qy3hg==" }, "mississippi": { "version": "3.0.0", @@ -6553,9 +6668,9 @@ } }, "node-releases": { - "version": "1.1.48", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.48.tgz", - "integrity": "sha512-Hr8BbmUl1ujAST0K0snItzEA5zkJTQup8VNTKNfT6Zw8vTJkIiagUPNfxHmgDOyfFYNfKAul40sD0UEYTvwebw==", + "version": "1.1.51", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.51.tgz", + "integrity": "sha512-1eQEs6HFYY1kMXQPOLzCf7HdjReErmvn85tZESMczdCNVWP3Y7URYLBAyYynuI7yef1zj4HN5q+oB2x67QU0lw==", "requires": { "semver": "^6.3.0" } @@ -6811,19 +6926,19 @@ "integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==" }, "p-limit": { - "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", + "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", "requires": { - "p-try": "^1.0.0" + "p-try": "^2.0.0" } }, "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", "requires": { - "p-limit": "^1.1.0" + "p-limit": "^2.0.0" } }, "p-map": { @@ -6840,9 +6955,9 @@ } }, "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=" + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" }, "package-json": { "version": "6.5.0", @@ -7043,54 +7158,6 @@ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", "requires": { "find-up": "^3.0.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": 
"sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", - "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - } - } - }, - "pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-2.0.0.tgz", - "integrity": "sha1-yBmscoBZpGHKscOImivjxJoATX8=", - "requires": { - "find-up": "^2.1.0" } }, "portfinder": { @@ -7124,9 +7191,9 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.26", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.26.tgz", - "integrity": "sha512-IY4oRjpXWYshuTDFxMVkJDtWIk2LhsTlu8bZnbEJA4+bYT16Lvpo8Qv6EvDumhYRgzjZl489pmsY3qVgJQ08nA==", + "version": "7.0.27", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.27.tgz", + "integrity": "sha512-WuQETPMcW9Uf1/22HWUWP9lgsIC+KEHg2kozMflKjbeUtw9ujvFX6QmIfozaErDkmLWS9WEnEdEe6Uo9/BNTdQ==", "requires": { "chalk": "^2.4.2", "source-map": "^0.6.1", @@ -7144,36 +7211,13 @@ } }, "postcss-calc": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.1.tgz", - "integrity": "sha512-oXqx0m6tb4N3JGdmeMSc/i91KppbYsFZKdH0xMOqK8V1rJlzrKlTdokz8ozUXLVejydRN6u2IddxpcijRj2FqQ==", + 
"version": "7.0.2", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.2.tgz", + "integrity": "sha512-rofZFHUg6ZIrvRwPeFktv06GdbDYLcGqh9EwiMutZg+a0oePCCw1zHOEiji6LCpyRcjTREtPASuUqeAvYlEVvQ==", "requires": { - "css-unit-converter": "^1.1.1", - "postcss": "^7.0.5", - "postcss-selector-parser": "^5.0.0-rc.4", - "postcss-value-parser": "^3.3.1" - }, - "dependencies": { - "cssesc": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-2.0.0.tgz", - "integrity": "sha512-MsCAG1z9lPdoO/IUMLSBWBSVxVtJ1395VGIQ+Fc2gNdkQ1hNDnQdw3YhA71WJCBW1vdwA0cAnk/DnW6bqoEUYg==" - }, - "postcss-selector-parser": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-5.0.0.tgz", - "integrity": "sha512-w+zLE5Jhg6Liz8+rQOWEAwtwkyqpfnmsinXjXg6cY7YIONZZtgvE0v2O0uhQBs0peNomOJwWRKt6JBfTdTd3OQ==", - "requires": { - "cssesc": "^2.0.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - } - }, - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } + "postcss": "^7.0.27", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.0.2" } }, "postcss-colormin": { @@ -7295,11 +7339,11 @@ }, "dependencies": { "postcss-selector-parser": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", - "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", "requires": { - "dot-prop": "^4.1.1", + "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", "uniq": "^1.0.1" } @@ -7372,11 +7416,11 @@ }, 
"dependencies": { "postcss-selector-parser": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", - "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", "requires": { - "dot-prop": "^4.1.1", + "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", "uniq": "^1.0.1" } @@ -7670,9 +7714,9 @@ } }, "postcss-value-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.0.2.tgz", - "integrity": "sha512-LmeoohTpp/K4UiyQCwuGWlONxXamGzCMtFxLq4W1nZVGIQLYvMCJx3yAF9qyyuFpflABI9yVdtJAqbihOsCsJQ==" + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.0.3.tgz", + "integrity": "sha512-N7h4pG+Nnu5BEIzyeaaIYWs0LI5XC40OrRh5L60z0QjFsqGWcHcbkBvpe1WYpcIS9yQ8sOi/vIPt1ejQCrMVrg==" }, "prepend-http": { "version": "2.0.0", @@ -7735,12 +7779,12 @@ "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=" }, "proxy-addr": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.5.tgz", - "integrity": "sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==", + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", + "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", "requires": { "forwarded": "~0.1.2", - "ipaddr.js": "1.9.0" + "ipaddr.js": "1.9.1" } }, "prr": { @@ -7928,6 +7972,14 @@ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, + "pupa": { + "version": "2.0.1", + 
"resolved": "https://registry.npmjs.org/pupa/-/pupa-2.0.1.tgz", + "integrity": "sha512-hEJH0s8PXLY/cdXh66tNEQGndDrIKNqNC5xmrysZy3i5C3oEoLna7YAOad+7u125+zH1HNXUmGEkrhb3c2VriA==", + "requires": { + "escape-goat": "^2.0.0" + } + }, "q": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", @@ -8015,9 +8067,9 @@ } }, "readable-stream": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.5.0.tgz", - "integrity": "sha512-gSz026xs2LfxBPudDuI41V1lka8cxg64E66SGe78zJlsUofOg/yqwezdIcdfwik6B4h8LFmWPA9ef9X3FiNFLA==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", "requires": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -8077,9 +8129,9 @@ "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==" }, "regenerate-unicode-properties": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.1.0.tgz", - "integrity": "sha512-LGZzkgtLY79GeXLm8Dp0BVLdQlWICzBnJz/ipWUgo59qBaZ+BHtq51P2q1uVZlppMuUAT37SDk39qUbjTWB7bA==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz", + "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==", "requires": { "regenerate": "^1.4.0" } @@ -8090,11 +8142,12 @@ "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==" }, "regenerator-transform": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.1.tgz", - "integrity": "sha512-flVuee02C3FKRISbxhXl9mGzdbWUVHubl1SMaknjxkFB1/iqpJhArQUvRxOOPEc/9tAiX0BaQ28FJH10E4isSQ==", + 
"version": "0.14.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.2.tgz", + "integrity": "sha512-V4+lGplCM/ikqi5/mkkpJ06e9Bujq1NFmNLvsCs56zg3ZbzrnUzAtizZ24TXxtRX/W2jcdScwQCnbL0CICTFkQ==", "requires": { - "private": "^0.1.6" + "@babel/runtime": "^7.8.4", + "private": "^0.1.8" } }, "regex-not": { @@ -8135,16 +8188,16 @@ } }, "regexpu-core": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.6.0.tgz", - "integrity": "sha512-YlVaefl8P5BnFYOITTNzDvan1ulLOiXJzCNZxduTIosN17b87h3bvG9yHMoHaRuo88H4mQ06Aodj5VtYGGGiTg==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.0.tgz", + "integrity": "sha512-TQ4KXRnIn6tz6tjnrXEkD/sshygKH/j5KzK86X8MkeHyZ8qst/LZ89j3X4/8HEIfHANTFIP/AbXakeRhWIl5YQ==", "requires": { "regenerate": "^1.4.0", - "regenerate-unicode-properties": "^8.1.0", - "regjsgen": "^0.5.0", - "regjsparser": "^0.6.0", + "regenerate-unicode-properties": "^8.2.0", + "regjsgen": "^0.5.1", + "regjsparser": "^0.6.4", "unicode-match-property-ecmascript": "^1.0.4", - "unicode-match-property-value-ecmascript": "^1.1.0" + "unicode-match-property-value-ecmascript": "^1.2.0" } }, "registry-auth-token": { @@ -8169,9 +8222,9 @@ "integrity": "sha512-5qxzGZjDs9w4tzT3TPhCJqWdCc3RLYwy9J2NB0nm5Lz+S273lvWcpjaTGHsT1dc6Hhfq41uSEOw8wBmxrKOuyg==" }, "regjsparser": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.2.tgz", - "integrity": "sha512-E9ghzUtoLwDekPT0DYCp+c4h+bvuUpe6rRHCTYn6eGoqj1LgKXxT6I0Il4WbjhQkOghzi/V+y03bPKvbllL93Q==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.4.tgz", + "integrity": "sha512-64O87/dPDgfk8/RQqC4gkZoGyyWFIEUTTh80CU6CWuK5vkCGyekIx+oKcEIYtP/RAxSQltCZHCNu/mdd7fqlJw==", "requires": { "jsesc": "~0.5.0" }, @@ -8216,9 +8269,9 @@ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=" }, "request": { - "version": "2.88.0", - "resolved": 
"https://registry.npmjs.org/request/-/request-2.88.0.tgz", - "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==", + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", "requires": { "aws-sign2": "~0.7.0", "aws4": "^1.8.0", @@ -8227,7 +8280,7 @@ "extend": "~3.0.2", "forever-agent": "~0.6.1", "form-data": "~2.3.2", - "har-validator": "~5.1.0", + "har-validator": "~5.1.3", "http-signature": "~1.2.0", "is-typedarray": "~1.0.0", "isstream": "~0.1.2", @@ -8237,7 +8290,7 @@ "performance-now": "^2.1.0", "qs": "~6.5.2", "safe-buffer": "^5.1.2", - "tough-cookie": "~2.4.3", + "tough-cookie": "~2.5.0", "tunnel-agent": "^0.6.0", "uuid": "^3.3.2" }, @@ -8264,11 +8317,6 @@ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" }, - "reselect": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/reselect/-/reselect-3.0.1.tgz", - "integrity": "sha1-79qpjqdFEyTQkrKyFjpqHXqaIUc=" - }, "resolve": { "version": "1.15.1", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", @@ -9153,11 +9201,11 @@ }, "dependencies": { "postcss-selector-parser": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", - "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", "requires": { - "dot-prop": "^4.1.1", + "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", "uniq": "^1.0.1" } @@ -9267,9 +9315,9 @@ "integrity": 
"sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw==" }, "terser": { - "version": "4.6.3", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.6.3.tgz", - "integrity": "sha512-Lw+ieAXmY69d09IIc/yqeBqXpEQIpDGZqT34ui1QWXIUpR2RjbqEkT8X7Lgex19hslSqcWM5iMN2kM11eMsESQ==", + "version": "4.6.6", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.6.6.tgz", + "integrity": "sha512-4lYPyeNmstjIIESr/ysHg2vUPRGf2tzF9z2yYwnowXVuVzLEamPN1Gfrz7f8I9uEPuHcbFlW4PLIAsJoxXyJ1g==", "requires": { "commander": "^2.20.0", "source-map": "~0.6.1", @@ -9474,19 +9522,12 @@ "integrity": "sha1-LmhELZ9k7HILjMieZEOsbKqVACk=" }, "tough-cookie": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz", - "integrity": "sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", "requires": { - "psl": "^1.1.24", - "punycode": "^1.4.1" - }, - "dependencies": { - "punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" - } + "psl": "^1.1.28", + "punycode": "^2.1.1" } }, "tr46": { @@ -9498,9 +9539,9 @@ } }, "tslib": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.10.0.tgz", - "integrity": "sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ==" + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.1.tgz", + "integrity": "sha512-aZW88SY8kQbU7gpV19lN24LtXh/yD4ZZg6qieAJDDg+YBsJcSmLGK9QpnUjAKVG/xefmvJGd1WUmfpT/g6AJGA==" }, "tty-browserify": { "version": "0.0.0", @@ -9521,9 +9562,9 @@ "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" }, 
"type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==" + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", + "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==" }, "type-is": { "version": "1.6.18", @@ -9590,14 +9631,14 @@ } }, "unicode-match-property-value-ecmascript": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.1.0.tgz", - "integrity": "sha512-hDTHvaBk3RmFzvSl0UVrUmC3PuW9wKVnpoUDYH0JDkSIovzw+J5viQmeYHxVSBptubnr7PbH2e0fnpDRQnQl5g==" + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz", + "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ==" }, "unicode-property-aliases-ecmascript": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.5.tgz", - "integrity": "sha512-L5RAqCfXqAwR3RriF8pM0lU0w4Ryf/GgzONwi6KnL1taJQa7x1TCxdJnILX59WIGOwR57IVxn7Nej0fz1Ny6fw==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz", + "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg==" }, "union-value": { "version": "1.0.1", @@ -9701,13 +9742,13 @@ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==" }, "update-notifier": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.0.0.tgz", - "integrity": 
"sha512-p9zf71hWt5GVXM4iEBujpUgx8mK9AWiCCapEJm/O1z5ntCim83Z1ATqzZFBHFYqx03laMqv8LiDgs/7ikXjf/g==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.0.tgz", + "integrity": "sha512-w3doE1qtI0/ZmgeoDoARmI5fjDoT93IfKgEGqm26dGUOh8oNpaSTsGNdYRN/SjOuo10jcJGwkEL3mroKzktkew==", "requires": { "boxen": "^4.2.0", "chalk": "^3.0.0", - "configstore": "^5.0.0", + "configstore": "^5.0.1", "has-yarn": "^2.1.0", "import-lazy": "^2.1.0", "is-ci": "^2.0.0", @@ -9715,6 +9756,7 @@ "is-npm": "^4.0.0", "is-yarn-global": "^0.3.0", "latest-version": "^5.0.0", + "pupa": "^2.0.1", "semver-diff": "^3.1.1", "xdg-basedir": "^4.0.0" }, @@ -9923,9 +9965,9 @@ "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" }, "vue-loader": { - "version": "15.8.3", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.8.3.tgz", - "integrity": "sha512-yFksTFbhp+lxlm92DrKdpVIWMpranXnTEuGSc0oW+Gk43M9LWaAmBTnfj5+FCdve715mTHvo78IdaXf5TbiTJg==", + "version": "15.9.0", + "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.0.tgz", + "integrity": "sha512-FeDHvTSpwyLeF7LIV1PYkvqUQgTJ8UmOxhSlCyRSxaXCKk+M6NF4tDQsLsPPNeDPyR7TfRQ8MLg6v+8PsDV9xQ==", "requires": { "@vue/component-compiler-utils": "^3.1.0", "hash-sum": "^1.0.2", @@ -9935,9 +9977,9 @@ } }, "vue-router": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.1.5.tgz", - "integrity": "sha512-BszkPvhl7I9h334GjckCh7sVFyjTPMMJFJ4Bsrem/Ik+B/9gt5tgrk8k4gGLO4ZpdvciVdg7O41gW4DisQWurg==" + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.1.6.tgz", + "integrity": "sha512-GYhn2ynaZlysZMkFE5oCHRUTqE8BWs/a9YbKpNLi0i7xD6KG1EzDqpHQmv1F5gXjr8kL5iIVS8EOtRaVUEXTqA==" }, "vue-server-renderer": { "version": "2.6.11", @@ -10007,13 +10049,13 @@ "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" }, "vuepress": { 
- "version": "1.3.0", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.3.0.tgz", - "integrity": "sha512-TmPmHiT70aq4xqy4XczUJmUdpGlMSheOGGVwA2nhYSIS9IEd4ngPbfT9oEcAFTsGHXsr5KH8EgEU7G+3wWzY/A==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.3.1.tgz", + "integrity": "sha512-i0f0JB0zdmdVH8P8cO4w7PljPQpf8ObiVk/1pOidvMQCMEhFmIpYz+730Wlf0rtB/GG4QUsqQ27Ckp5Rfob+hQ==", "requires": { - "@vuepress/core": "^1.3.0", - "@vuepress/theme-default": "^1.3.0", - "cac": "^6.5.5", + "@vuepress/core": "^1.3.1", + "@vuepress/theme-default": "^1.3.1", + "cac": "^6.5.6", "envinfo": "^7.2.0", "opencollective-postinstall": "^2.0.2", "update-notifier": "^4.0.0" @@ -10038,6 +10080,11 @@ "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==" }, + "emojis-list": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", + "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=" + }, "json5": { "version": "0.5.1", "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", @@ -10090,11 +10137,11 @@ } }, "vuepress-theme-cosmos": { - "version": "1.0.150", - "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.150.tgz", - "integrity": "sha512-f4McVndkB+CqJ6mWpOG4UZSR14LJyXqwcgwoDoDUx149g2PKU3qI/AF5AcrM25+4UKMCXFKcJloQCl/aWq+1ig==", + "version": "1.0.156", + "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.156.tgz", + "integrity": "sha512-g5KqkeRu8Dg+eYUwWYy4TfdMKp9yYy+BQpD+gegwqIPbHSBIvFV+0w3olk5/mFaCzXKfEmTXtLltok9arrayLg==", "requires": { - "@cosmos-ui/vue": "^0.5.20", + "@cosmos-ui/vue": "^0.10.0", "axios": "^0.19.0", "cheerio": "^1.0.0-rc.3", "clipboard-copy": "^3.1.0", @@ -10138,9 +10185,9 @@ "integrity": 
"sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" }, "webpack": { - "version": "4.41.5", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.41.5.tgz", - "integrity": "sha512-wp0Co4vpyumnp3KlkmpM5LWuzvZYayDwM2n17EHFr4qxBBbRokC7DJawPJC7TfSFZ9HZ6GsdH40EBj4UV0nmpw==", + "version": "4.42.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.42.0.tgz", + "integrity": "sha512-EzJRHvwQyBiYrYqhyjW9AqM90dE4+s1/XtCfn7uWg6cS72zH+2VPFAlsnW0+W0cDi0XRjNKUMoJtpSi50+Ph6w==", "requires": { "@webassemblyjs/ast": "1.8.5", "@webassemblyjs/helper-module-context": "1.8.5", @@ -10168,9 +10215,9 @@ }, "dependencies": { "acorn": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.0.tgz", - "integrity": "sha512-gac8OEcQ2Li1dxIEWGZzsp2BitJxwkwcOm0zHAJLcPJaVvm58FRnk6RkuLRpU1EujipU2ZFODv2P9DLMfnV8mw==" + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.1.tgz", + "integrity": "sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA==" } } }, @@ -10275,54 +10322,16 @@ "ms": "^2.1.1" } }, - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "requires": { - "locate-path": "^3.0.0" - } - }, "is-absolute-url": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==" }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, "ms": { "version": "2.1.2", 
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, - "p-limit": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", - "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - }, "supports-color": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", @@ -10576,9 +10585,9 @@ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "write-file-atomic": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.1.tgz", - "integrity": "sha512-JPStrIyyVJ6oCSz/691fAjFtefZ6q+fP6tm+OS4Qw6o+TGQxNp1ziY2PgS+X/m0V8OWhZiO/m4xSj+Pr4RrZvw==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", "requires": { "imurmurhash": "^0.1.4", "is-typedarray": "^1.0.0", diff --git a/docs/package.json b/docs/package.json index 8ce869057..dfef8c2de 100644 --- a/docs/package.json +++ b/docs/package.json @@ -4,7 +4,8 @@ "description": "Welcome to the Tendermint Core documentation!", "main": "index.js", "dependencies": { - "vuepress-theme-cosmos": "^1.0.150" + 
"@vuepress/plugin-google-analytics": "^1.3.1", + "vuepress-theme-cosmos": "^1.0.156" }, "scripts": { "preserve": "./pre.sh", @@ -16,4 +17,4 @@ }, "author": "", "license": "ISC" -} \ No newline at end of file +} diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md index 4031a178a..195a515a5 100644 --- a/docs/tendermint-core/how-to-read-logs.md +++ b/docs/tendermint-core/how-to-read-logs.md @@ -66,9 +66,8 @@ I[10-04|13:54:30.392] Started node module=main n Next follows a standard block creation cycle, where we enter a new round, propose a block, receive more than 2/3 of prevotes, then precommits and finally have a chance to commit a block. For details, -please refer to [Consensus -Overview](../introduction/introduction.md#consensus-overview) or [Byzantine Consensus -Algorithm](../spec/consensus/consensus.md). +please refer to [Byzantine Consensus +Algorithm](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md). ``` I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus diff --git a/docs/tendermint-core/light-client-protocol.md b/docs/tendermint-core/light-client-protocol.md index 4537412c1..41b7a0e07 100644 --- a/docs/tendermint-core/light-client-protocol.md +++ b/docs/tendermint-core/light-client-protocol.md @@ -55,9 +55,9 @@ passing them back to the caller. Other than that, it will present the same interface as a full Tendermint node. ```sh -$ tendermint lite --chain-id=supernova --primary=tcp://233.123.0.140:26657 \ - --witnesses=tcp://179.63.29.15:26657,tcp://144.165.223.135:26657 \ - --trusted-height=10 --trusted-hash=37E9A6DD3FA25E83B22C18835401E8E56088D0D7ABC6FD99FCDC920DD76C1C57 +$ tendermint lite supernova -p tcp://233.123.0.140:26657 \ + -w tcp://179.63.29.15:26657,tcp://144.165.223.135:26657 \ + --height=10 --hash=37E9A6DD3FA25E83B22C18835401E8E56088D0D7ABC6FD99FCDC920DD76C1C57 ``` For additional options, run `tendermint lite --help`. 
diff --git a/docs/tendermint-core/local_config.png b/docs/tendermint-core/local_config.png new file mode 100644 index 000000000..050a6df2f Binary files /dev/null and b/docs/tendermint-core/local_config.png differ diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index 7a436ec95..d386308de 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -106,59 +106,49 @@ Rate-limiting and authentication are another key aspects to help protect against DOS attacks. While in the future we may implement these features, for now, validators are supposed to use external tools like [NGINX](https://www.nginx.com/blog/rate-limiting-nginx/) or -[traefik](https://docs.traefik.io/configuration/commons/#rate-limiting) +[traefik](https://docs.traefik.io/middlewares/ratelimit/) to achieve the same things. ## Debugging Tendermint -If you ever have to debug Tendermint, the first thing you should -probably do is to check out the logs. See [How to read -logs](./how-to-read-logs.md), where we explain what certain log -statements mean. +If you ever have to debug Tendermint, the first thing you should probably do is +check out the logs. See [How to read logs](./how-to-read-logs.md), where we +explain what certain log statements mean. -If, after skimming through the logs, things are not clear still, the -next thing to try is query the /status RPC endpoint. It provides the -necessary info: whenever the node is syncing or not, what height it is -on, etc. +If, after skimming through the logs, things are not clear still, the next thing +to try is querying the `/status` RPC endpoint. It provides the necessary info: +whenever the node is syncing or not, what height it is on, etc. -``` +```sh curl http(s)://{ip}:{rpcPort}/status ``` -`dump_consensus_state` will give you a detailed overview of the -consensus state (proposer, lastest validators, peers states). 
From it, -you should be able to figure out why, for example, the network had -halted. +`/dump_consensus_state` will give you a detailed overview of the consensus +state (proposer, latest validators, peers states). From it, you should be able +to figure out why, for example, the network had halted. -``` +```sh curl http(s)://{ip}:{rpcPort}/dump_consensus_state ``` -There is a reduced version of this endpoint - `consensus_state`, which -returns just the votes seen at the current height. +There is a reduced version of this endpoint - `/consensus_state`, which returns +just the votes seen at the current height. -- [Github Issues](https://github.com/tendermint/tendermint/issues) -- [StackOverflow - questions](https://stackoverflow.com/questions/tagged/tendermint) +If, after consulting with the logs and above endpoints, you still have no idea +what's happening, consider using `tendermint debug kill` sub-command. This +command will scrap all the available info and kill the process. See +[Debugging](../tools/debugging.md) for the exact format. -### Debug Utility - -Tendermint also ships with a `debug` sub-command that allows you to kill a live -Tendermint process while collecting useful information in a compressed archive -such as the configuration used, consensus state, network state, the node' status, -the WAL, and even the stacktrace of the process before exit. These files can be -useful to examine when debugging a faulty Tendermint process. - -In addition, the `debug` sub-command also allows you to dump debugging data into -compressed archives at a regular interval. These archives contain the goroutine -and heap profiles in addition to the consensus state, network info, node status, -and even the WAL. +You can inspect the resulting archive yourself or create an issue on +[Github](https://github.com/tendermint/tendermint). Before opening an issue +however, be sure to check if there's [no existing +issue](https://github.com/tendermint/tendermint/issues) already. 
## Monitoring Tendermint -Each Tendermint instance has a standard `/health` RPC endpoint, which -responds with 200 (OK) if everything is fine and 500 (or no response) - -if something is wrong. +Each Tendermint instance has a standard `/health` RPC endpoint, which responds +with 200 (OK) if everything is fine and 500 (or no response) - if something is +wrong. Other useful endpoints include mentioned earlier `/status`, `/net_info` and `/validators`. @@ -166,6 +156,10 @@ Other useful endpoints include mentioned earlier `/status`, `/net_info` and Tendermint also can report and serve Prometheus metrics. See [Metrics](./metrics.md). +`tendermint debug dump` sub-command can be used to periodically dump useful +information into an archive. See [Debugging](../tools/debugging.md) for more +information. + ## What happens when my app dies? You are supposed to run Tendermint under a [process diff --git a/docs/tendermint-core/sentry_layout.png b/docs/tendermint-core/sentry_layout.png new file mode 100644 index 000000000..240abde18 Binary files /dev/null and b/docs/tendermint-core/sentry_layout.png differ diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index 2c6370e84..b33e770d7 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -104,36 +104,37 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g ## Run -To run a Tendermint node, use +To run a Tendermint node, use: -``` +```sh tendermint node ``` By default, Tendermint will try to connect to an ABCI application on -[127.0.0.1:26658](127.0.0.1:26658). If you have the `kvstore` ABCI app -installed, run it in another window. If you don't, kill Tendermint and -run an in-process version of the `kvstore` app: +`127.0.0.1:26658`. If you have the `kvstore` ABCI app installed, run it in +another window. 
If you don't, kill Tendermint and run an in-process version of +the `kvstore` app: -``` +```sh tendermint node --proxy_app=kvstore ``` -After a few seconds you should see blocks start streaming in. Note that -blocks are produced regularly, even if there are no transactions. See -_No Empty Blocks_, below, to modify this setting. +After a few seconds, you should see blocks start streaming in. Note that blocks +are produced regularly, even if there are no transactions. See _No Empty +Blocks_, below, to modify this setting. -Tendermint supports in-process versions of the `counter`, `kvstore` and -`noop` apps that ship as examples with `abci-cli`. It's easy to compile -your own app in-process with Tendermint if it's written in Go. If your -app is not written in Go, simply run it in another process, and use the -`--proxy_app` flag to specify the address of the socket it is listening -on, for instance: +Tendermint supports in-process versions of the `counter`, `kvstore`, and `noop` +apps that ship as examples with `abci-cli`. It's easy to compile your app +in-process with Tendermint if it's written in Go. If your app is not written in +Go, run it in another process, and use the `--proxy_app` flag to specify the +address of the socket it is listening on, for instance: -``` +```sh tendermint node --proxy_app=/var/run/abci.sock ``` +You can find out what flags are supported by running `tendermint node --help`. + ## Transactions To send a transaction, use `curl` to make requests to the Tendermint RPC @@ -155,10 +156,14 @@ and the `latest_app_hash` in particular: curl http://localhost:26657/status | json_pp | grep latest_app_hash ``` + + Visit http://localhost:26657 in your browser to see the list of other endpoints. Some take no arguments (like `/status`), while others specify the argument name and use `_` as a placeholder. 
+ + ::: tip Find the RPC Documentation [here](https://docs.tendermint.com/master/rpc/) ::: diff --git a/docs/tendermint-core/validators.md b/docs/tendermint-core/validators.md index 97a5da8ca..911405d58 100644 --- a/docs/tendermint-core/validators.md +++ b/docs/tendermint-core/validators.md @@ -22,9 +22,78 @@ Validators have a cryptographic key-pair and an associated amount of There are two ways to become validator. -1. They can be pre-established in the [genesis state](./using-tendermint.md#genesis) -2. The ABCI app responds to the EndBlock message with changes to the - existing validator set. +1. They can be pre-established in the [genesis state](./using-tendermint.md#genesis) +2. The ABCI app responds to the EndBlock message with changes to the + existing validator set. + +## Setting up a Validator + +When setting up a validator there are countless ways to configure your setup. This guide is aimed at showing one of them, the sentry node design. This design is mainly for DDoS prevention. + +### Network Layout + +![ALT Network Layout](./sentry_layout.png) + +The diagram is based on AWS; other cloud providers will have similar solutions for designing such a setup. Running nodes is not limited to cloud providers; you can run nodes on bare metal systems as well. The architecture will be the same no matter which setup you decide to go with. + +The proposed network diagram is similar to the classical backend/frontend separation of services in a corporate environment. The “backend” in this case is the private network of the validator in the data center. The data center network might involve multiple subnets, firewalls and redundancy devices, which is not detailed on this diagram. The important point is that the data center allows direct connectivity to the chosen cloud environment. Amazon AWS has “Direct Connect”, while Google Cloud has “Partner Interconnect”.
This is a dedicated connection to the cloud provider (usually directly to your virtual private cloud instance in one of the regions). + +All sentry nodes (the “frontend”) connect to the validator using this private connection. The validator does not have a public IP address to provide its services. + +Amazon has multiple availability zones within a region. One can install sentry nodes in other regions too. In this case the second, third and further regions need to have a private connection to the validator node. This can be achieved by VPC Peering (“VPC Network Peering” in Google Cloud). In this case, the second, third and further region sentry nodes will be directed to the first region and through the direct connect to the data center, arriving at the validator. + +A more persistent solution (not detailed on the diagram) is to have multiple direct connections to different regions from the data center. This way VPC Peering is not mandatory, although still beneficial for the sentry nodes. This overcomes the risk of depending on one region. It is more costly. + +### Local Configuration + +![ALT Local Configuration](./local_config.png) + +The validator will only talk to the sentry nodes that are provided. The sentry nodes will communicate with the validator via a secret connection and the rest of the network through a normal connection. The sentry nodes do have the option of communicating with each other as well. + +When initializing nodes there are five parameters in the `config.toml` that may need to be altered. + +- `pex:` boolean. This turns the peer exchange reactor on or off for a node. When `pex=false`, only the `persistent_peers` list is available for connection. +- `persistent_peers:` a comma separated list of `nodeID@ip:port` values that define a list of peers that are expected to be online at all times. This is necessary at first startup because by setting `pex=false` the node will not be able to join the network.
+- `unconditional_peer_ids:` comma separated list of nodeID's. These nodes will be connected to no matter the limits of inbound and outbound peers. This is useful for when sentry nodes have full address books. +- `private_peer_ids:` comma separated list of nodeID's. These nodes will not be gossiped to the network. This is an important field as you do not want your validator IP gossiped to the network. +- `addr_book_strict:` boolean. By default nodes with a routable address will be considered for connection. If this setting is turned off (false), non-routable IP addresses, like addresses in a private network can be added to the address book. + +#### Validator Node Configuration + +| Config Option | Setting | +| ---------------------- | -------------------------- | +| pex | false | +| persistent_peers | list of sentry nodes | +| private_peer_ids | none | +| unconditional_peer_ids | optionally sentry node IDs | +| addr_book_strict | false | + +The validator node should have `pex=false` so it does not gossip to the entire network. The persistent peers will be your sentry nodes. Private peers can be left empty as the validator is not trying to hide who it is communicating with. Setting unconditional peers is optional for a validator because they will not have a full address book. + +#### Sentry Node Configuration + +| Config Option | Setting | +| ---------------------- | --------------------------------------------- | +| pex | true | +| persistent_peers | validator node, optionally other sentry nodes | +| private_peer_ids | validator node ID | +| unconditional_peer_ids | validator node ID, optionally sentry node IDs | +| addr_book_strict | false | + +The sentry nodes should be able to talk to the entire network hence `pex=true`. The persistent peers of a sentry node will be the validator, and optionally other sentry nodes. The sentry nodes should make sure that they do not gossip the validator's IP. To do this, you must put the validator's node ID as a private peer.
The unconditional peer IDs will be the validator ID and optionally other sentry node IDs. + +> Note: Do not forget to secure your node's firewalls when setting them up. + +More information can be found at these links: + +- https://kb.certus.one/ +- https://forum.cosmos.network/t/sentry-node-architecture-overview/454 + +### Validator keys + +Protecting a validator's consensus key is the most important factor to take into account when designing your setup. The key that a validator is given upon creation of the node is called a consensus key; it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io/) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms); it is used extensively in production. You are not limited to using this tool; there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), though there is no single recommended HSM. + +Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys, which are widely supported across the security sector and HSMs.
## Committing a Block diff --git a/docs/tools/README.md b/docs/tools/README.md index c326cde5b..86ba128f6 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -9,16 +9,21 @@ parent: Tendermint has some tools that are associated with it for: +- [Debugging](./debugging.md) - [Benchmarking](#benchmarking) -- [Validation of remote signers](./remote-signer-validation.md) - [Testnets](#testnets) - +- [Validation of remote signers](./remote-signer-validation.md) ## Benchmarking -Benchmarking is done with tm-load-test, for information on how to use the tool please visit the docs: https://github.com/interchainio/tm-load-test +- https://github.com/informalsystems/tm-load-test +`tm-load-test` is a distributed load testing tool (and framework) for load +testing Tendermint networks. ## Testnets -The testnets tool is aimed at testing Tendermint with different configurations. For more information please visit: https://github.com/interchainio/testnets. +- https://github.com/informalsystems/testnets + +This repository contains various configurations of test networks for, +and relating to, Tendermint. diff --git a/docs/tools/debugging.md b/docs/tools/debugging.md new file mode 100644 index 000000000..50961dd3b --- /dev/null +++ b/docs/tools/debugging.md @@ -0,0 +1,57 @@ +# Debugging + +## tendermint debug kill + +Tendermint comes with a `debug` sub-command that allows you to kill a live +Tendermint process while collecting useful information in a compressed archive. +The information includes the configuration used, consensus state, network +state, the node's status, the WAL, and even the stack trace of the process +before exit. These files can be useful to examine when debugging a faulty +Tendermint process. + +```sh +tendermint debug kill --home= +``` + +will write debug info into a compressed archive.
The archive will contain the +following: + +``` +├── config.toml +├── consensus_state.json +├── net_info.json +├── stacktrace.out +├── status.json +└── wal +``` + +Under the hood, `debug kill` fetches info from `/status`, `/net_info`, and +`/dump_consensus_state` HTTP endpoints, and kills the process with `-6`, which +catches the go-routine dump. + +## tendermint debug dump + +Also, the `debug dump` sub-command allows you to dump debugging data into +compressed archives at a regular interval. These archives contain the goroutine +and heap profiles in addition to the consensus state, network info, node +status, and even the WAL. + +```sh +tendermint debug dump --home= +``` + +will perform similarly to `kill` except it only polls the node and +dumps debugging data every frequency seconds to a compressed archive under a +given destination directory. Each archive will contain: + +``` +├── consensus_state.json +├── goroutine.out +├── heap.out +├── net_info.json +├── status.json +└── wal +``` + +Note: goroutine.out and heap.out will only be written if a profile address is +provided and is operational. This command is blocking and will log any error. 
diff --git a/evidence/codec.go b/evidence/codec.go index 135341068..650a34607 100644 --- a/evidence/codec.go +++ b/evidence/codec.go @@ -2,6 +2,7 @@ package evidence import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" "github.com/tendermint/tendermint/types" ) diff --git a/evidence/pool.go b/evidence/pool.go index 62b0a3325..8c7d78694 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -5,10 +5,10 @@ import ( "sync" "time" - clist "github.com/tendermint/tendermint/libs/clist" - "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" + clist "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -95,7 +95,7 @@ func (evpool *Pool) Update(block *types.Block, state sm.State) { } // AddEvidence checks the evidence is valid and adds it to the pool. -func (evpool *Pool) AddEvidence(evidence types.Evidence) (err error) { +func (evpool *Pool) AddEvidence(evidence types.Evidence) error { // TODO: check if we already have evidence for this // validator at this height so we dont get spammed @@ -106,14 +106,17 @@ func (evpool *Pool) AddEvidence(evidence types.Evidence) (err error) { // fetch the validator and return its voting power as its priority // TODO: something better ? 
- valset, _ := sm.LoadValidators(evpool.stateDB, evidence.Height()) + valset, err := sm.LoadValidators(evpool.stateDB, evidence.Height()) + if err != nil { + return err + } _, val := valset.GetByAddress(evidence.Address()) priority := val.VotingPower added := evpool.store.AddNewEvidence(evidence, priority) if !added { // evidence already known, just ignore - return + return nil } evpool.logger.Info("Verified new evidence of byzantine behaviour", "evidence", evidence) diff --git a/evidence/pool_test.go b/evidence/pool_test.go index a39ae3eb5..844ca18af 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -8,10 +8,11 @@ import ( "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) func TestMain(m *testing.M) { diff --git a/evidence/reactor.go b/evidence/reactor.go index 19ad0f135..e4dbd51ad 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -34,7 +34,7 @@ func NewReactor(evpool *Pool) *Reactor { evR := &Reactor{ evpool: evpool, } - evR.BaseReactor = *p2p.NewBaseReactor("Reactor", evR) + evR.BaseReactor = *p2p.NewBaseReactor("Evidence", evR) return evR } diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index b013b7715..135c191da 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -9,12 +9,13 @@ import ( "github.com/go-kit/kit/log/term" "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // evidenceLogger is a TestingLogger which uses a different diff --git a/evidence/store.go b/evidence/store.go index 3547b5ffc..2bfe68523 100644 --- 
a/evidence/store.go +++ b/evidence/store.go @@ -3,8 +3,9 @@ package evidence import ( "fmt" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/types" ) /* diff --git a/evidence/store_test.go b/evidence/store_test.go index b85a6437b..351abfee2 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -5,8 +5,9 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/types" ) //------------------------------------------- diff --git a/go.mod b/go.mod index ccd7b4929..7bdcdb501 100644 --- a/go.mod +++ b/go.mod @@ -3,30 +3,30 @@ module github.com/tendermint/tendermint go 1.14 require ( - github.com/ChainSafe/go-schnorrkel v0.0.0-20200102211924-4bcbc698314f - github.com/Workiva/go-datastructures v1.0.50 - github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d - github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a + github.com/ChainSafe/go-schnorrkel v0.0.0-20200115165343-aa45d48b5ed6 + github.com/Workiva/go-datastructures v1.0.52 + github.com/btcsuite/btcd v0.20.1-beta + github.com/btcsuite/btcutil v1.0.1 github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.10.0 github.com/go-logfmt/logfmt v0.5.0 github.com/gogo/protobuf v1.3.1 - github.com/golang/protobuf v1.3.4 - github.com/gorilla/websocket v1.4.1 - github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f + github.com/golang/protobuf v1.3.5 + github.com/gorilla/websocket v1.4.2 + github.com/gtank/merlin v0.1.1 github.com/libp2p/go-buffer-pool v0.0.2 github.com/magiconair/properties v1.8.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.4.1 - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a + github.com/prometheus/client_golang v1.5.1 + github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 github.com/rs/cors v1.7.0 github.com/snikch/goodman 
v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v0.0.6 + github.com/spf13/cobra v0.0.7 github.com/spf13/viper v1.6.2 github.com/stretchr/testify v1.5.1 github.com/tendermint/go-amino v0.14.1 - github.com/tendermint/tm-db v0.4.1 - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 - golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 - google.golang.org/grpc v1.27.1 + github.com/tendermint/tm-db v0.5.1 + golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 + golang.org/x/net v0.0.0-20200301022130-244492dfa37a + google.golang.org/grpc v1.28.0 ) diff --git a/go.sum b/go.sum index c8eb10010..7f28d157d 100644 --- a/go.sum +++ b/go.sum @@ -2,16 +2,17 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200102211924-4bcbc698314f h1:4O1om+UVU+Hfcihr1timk8YNXHxzZWgCo7ofnrZRApw= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200102211924-4bcbc698314f/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200115165343-aa45d48b5ed6 h1:wfM1NefWZdwihyFmcpWjBSP2lMEIWj+WHpbjNi+jIQo= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200115165343-aa45d48b5ed6/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 
h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.50 h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo= -github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= +github.com/Workiva/go-datastructures v1.0.52 h1:PLSK6pwn8mYdaoaCZEMsXBpBotr4HHn9abU0yMQt0NI= +github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= +github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -34,16 +35,25 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d h1:xG8Pj6Y6J760xwETNmMzmlt38QSwz0BLp1cZ09g27uw= -github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= 
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE= -github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.1 h1:GKOz8BnRjYrb/JTKgaOk+zh26NWNdSNvdvv0xoAZMSA= +github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff 
v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -54,6 +64,7 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -80,7 +91,9 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= @@ -130,14 +143,14 @@ github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -154,11 +167,12 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 
h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -166,6 +180,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f h1:8N8XWLZelZNibkhM1FuF+3Ad3YIbgirjdMiVA0eUkaM= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -196,11 +212,13 @@ github.com/hudl/fargo v1.3.0/go.mod 
h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -212,6 +230,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= @@ -240,6 +259,7 @@ github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -302,8 +322,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8= -github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -335,6 +355,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= @@ -363,6 +385,8 @@ github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -382,24 +406,20 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw= -github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= -github.com/tecbot/gorocksdb v0.0.0-20191017175515-d217d93fd4c5 h1:gVwAW5OwaZlDB5/CfqcGFM9p9C+KxvQKyNOltQ8orj0= -github.com/tecbot/gorocksdb v0.0.0-20191017175515-d217d93fd4c5/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= -github.com/tendermint/tm-db v0.4.0 h1:iPbCcLbf4nwDFhS39Zo1lpdS1X/cT9CkTlUx17FHQgA= -github.com/tendermint/tm-db v0.4.0/go.mod h1:+Cwhgowrf7NBGXmsqFMbwEtbo80XmyrlY5Jsk95JubQ= -github.com/tendermint/tm-db v0.4.1 h1:TvX7JWjJOVZ+N3y+I86wddrGttOdMmmBxXcu0/Y7ZJ0= -github.com/tendermint/tm-db v0.4.1/go.mod h1:JsJ6qzYkCGiGwm5GHl/H5GLI9XLb6qZX7PRe425dHAY= +github.com/tendermint/tm-db v0.5.0 
h1:qtM5UTr1dlRnHtDY6y7MZO5Di8XAE2j3lc/pCnKJ5hQ= +github.com/tendermint/tm-db v0.5.0/go.mod h1:lSq7q5WRR/njf1LnhiZ/lIJHk2S8Y1Zyq5oP/3o9C2U= +github.com/tendermint/tm-db v0.5.1 h1:H9HDq8UEA7Eeg13kdYckkgwwkQLBnJGgX4PgLJRhieY= +github.com/tendermint/tm-db v0.5.1/go.mod h1:g92zWjHpCYlEvQXvy9M168Su8V1IBEeawpXVVBaK4f4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -432,6 +452,9 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -461,6 +484,8 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8 golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 
h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= @@ -532,10 +557,11 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go 
index 1e25946ac..14b7e37c0 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + tmrand "github.com/tendermint/tendermint/libs/rand" ) diff --git a/libs/kv/kvpair.go b/libs/kv/kvpair.go index 3007c0272..2474b2e47 100644 --- a/libs/kv/kvpair.go +++ b/libs/kv/kvpair.go @@ -35,33 +35,3 @@ func (kvs Pairs) Less(i, j int) bool { } func (kvs Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } func (kvs Pairs) Sort() { sort.Sort(kvs) } - -//---------------------------------------- -// KI64Pair - -/* -Defined in types.proto -type KI64Pair struct { - Key []byte - Value int64 -} -*/ - -type KI64Pairs []KI64Pair - -// Sorting -func (kvs KI64Pairs) Len() int { return len(kvs) } -func (kvs KI64Pairs) Less(i, j int) bool { - switch bytes.Compare(kvs[i].Key, kvs[j].Key) { - case -1: - return true - case 0: - return kvs[i].Value < kvs[j].Value - case 1: - return false - default: - panic("invalid comparison result") - } -} -func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } -func (kvs KI64Pairs) Sort() { sort.Sort(kvs) } diff --git a/libs/kv/result.go b/libs/kv/result.go index b2d855cbc..fd40450b1 100644 --- a/libs/kv/result.go +++ b/libs/kv/result.go @@ -30,16 +30,6 @@ func (r *Pair) UnmarshalJSON(b []byte) error { return jsonpbUnmarshaller.Unmarshal(reader, r) } -func (r *KI64Pair) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *KI64Pair) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - // Some compile time assertions to ensure we don't // have accidental runtime surprises later on. 
// jsonEncodingRoundTripper ensures that asserted @@ -51,4 +41,3 @@ type jsonRoundTripper interface { } var _ jsonRoundTripper = (*Pair)(nil) -var _ jsonRoundTripper = (*KI64Pair)(nil) diff --git a/libs/kv/types.pb.go b/libs/kv/types.pb.go index b572ac205..7a6e6e12e 100644 --- a/libs/kv/types.pb.go +++ b/libs/kv/types.pb.go @@ -26,7 +26,6 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Define these here for compatibility but use tmlibs/common.KVPair. type Pair struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -82,74 +81,16 @@ func (m *Pair) GetValue() []byte { return nil } -// Define these here for compatibility but use tmlibs/common.KI64Pair. -type KI64Pair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KI64Pair) Reset() { *m = KI64Pair{} } -func (m *KI64Pair) String() string { return proto.CompactTextString(m) } -func (*KI64Pair) ProtoMessage() {} -func (*KI64Pair) Descriptor() ([]byte, []int) { - return fileDescriptor_31432671d164f444, []int{1} -} -func (m *KI64Pair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KI64Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KI64Pair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KI64Pair) XXX_Merge(src proto.Message) { - xxx_messageInfo_KI64Pair.Merge(m, src) -} -func (m *KI64Pair) XXX_Size() int { - return m.Size() -} -func (m *KI64Pair) XXX_DiscardUnknown() { - 
xxx_messageInfo_KI64Pair.DiscardUnknown(m) -} - -var xxx_messageInfo_KI64Pair proto.InternalMessageInfo - -func (m *KI64Pair) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *KI64Pair) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - func init() { proto.RegisterType((*Pair)(nil), "tendermint.libs.kv.Pair") golang_proto.RegisterType((*Pair)(nil), "tendermint.libs.kv.Pair") - proto.RegisterType((*KI64Pair)(nil), "tendermint.libs.kv.KI64Pair") - golang_proto.RegisterType((*KI64Pair)(nil), "tendermint.libs.kv.KI64Pair") } func init() { proto.RegisterFile("libs/kv/types.proto", fileDescriptor_31432671d164f444) } func init() { golang_proto.RegisterFile("libs/kv/types.proto", fileDescriptor_31432671d164f444) } var fileDescriptor_31432671d164f444 = []byte{ - // 196 bytes of a gzipped FileDescriptorProto + // 182 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xc9, 0x4c, 0x2a, 0xd6, 0xcf, 0x2e, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x2a, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x03, 0xc9, 0xeb, 0x65, @@ -157,12 +98,11 @@ var fileDescriptor_31432671d164f444 = []byte{ 0x95, 0xe9, 0xa7, 0xe7, 0xa7, 0xe7, 0x23, 0x58, 0x10, 0xbd, 0x4a, 0x7a, 0x5c, 0x2c, 0x01, 0x89, 0x99, 0x45, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x20, 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04, 0x13, 0x58, 0x0c, 0xc2, - 0x51, 0x32, 0xe2, 0xe2, 0xf0, 0xf6, 0x34, 0x33, 0x21, 0x46, 0x0f, 0x33, 0x54, 0x8f, 0x93, 0xdb, - 0x8f, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0xee, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, - 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x1e, 0x78, 0x2c, 0xc7, 0x18, 0xa5, 0x91, - 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0xf0, 0x08, 0x32, 0x13, 0xea, - 0xe7, 0x24, 0x36, 0xb0, 0x93, 0x8d, 0x01, 0x01, 
0x00, 0x00, 0xff, 0xff, 0xc5, 0x5f, 0x67, 0xcb, - 0x05, 0x01, 0x00, 0x00, + 0x71, 0x72, 0xfb, 0xf1, 0x50, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x1d, 0x8f, 0xe4, 0x18, 0x4f, + 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x03, 0x8f, 0xe5, 0x18, + 0xa3, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0x8e, 0x42, + 0x66, 0x42, 0xdd, 0x9f, 0xc4, 0x06, 0xb6, 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x3a, + 0xdc, 0xba, 0xd1, 0x00, 0x00, 0x00, } func (this *Pair) Equal(that interface{}) bool { @@ -195,36 +135,6 @@ func (this *Pair) Equal(that interface{}) bool { } return true } -func (this *KI64Pair) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*KI64Pair) - if !ok { - that2, ok := that.(KI64Pair) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Key, that1.Key) { - return false - } - if this.Value != that1.Value { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} func (m *Pair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -266,45 +176,6 @@ func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *KI64Pair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KI64Pair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KI64Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Value != 0 { - i = 
encodeVarintTypes(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x10 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -334,23 +205,6 @@ func NewPopulatedPair(r randyTypes, easy bool) *Pair { return this } -func NewPopulatedKI64Pair(r randyTypes, easy bool) *KI64Pair { - this := &KI64Pair{} - v3 := r.Intn(100) - this.Key = make([]byte, v3) - for i := 0; i < v3; i++ { - this.Key[i] = byte(r.Intn(256)) - } - this.Value = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Value *= -1 - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - type randyTypes interface { Float32() float32 Float64() float64 @@ -370,9 +224,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v4 := r.Intn(100) - tmps := make([]rune, v4) - for i := 0; i < v4; i++ { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -394,11 +248,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v5 := r.Int63() + v4 := r.Int63() if r.Intn(2) == 0 { - v5 *= -1 + v4 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v5)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v4)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -443,25 +297,6 @@ func (m *Pair) Size() (n int) { return n } -func (m *KI64Pair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 
{ - n += 1 + l + sovTypes(uint64(l)) - } - if m.Value != 0 { - n += 1 + sovTypes(uint64(m.Value)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -590,113 +425,6 @@ func (m *Pair) Unmarshal(dAtA []byte) error { } return nil } -func (m *KI64Pair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KI64Pair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KI64Pair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/libs/kv/types.proto b/libs/kv/types.proto index 7e1375c21..1b6a7a58d 100644 --- a/libs/kv/types.proto +++ b/libs/kv/types.proto @@ -16,14 +16,7 @@ option (gogoproto.testgen_all) = true; //---------------------------------------- // Abstract types -// Define these here for compatibility but use tmlibs/kv.Pair. message Pair { bytes key = 1; bytes value = 2; } - -// Define these here for compatibility but use tmlibs/kv.KI64Pair. 
-message KI64Pair { - bytes key = 1; - int64 value = 2; -} diff --git a/libs/kv/typespb_test.go b/libs/kv/typespb_test.go index 38656ecf9..dc45bf7f2 100644 --- a/libs/kv/typespb_test.go +++ b/libs/kv/typespb_test.go @@ -78,62 +78,6 @@ func TestPairMarshalTo(t *testing.T) { } } -func TestKI64PairProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedKI64Pair(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &KI64Pair{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestKI64PairMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedKI64Pair(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &KI64Pair{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - func TestPairJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) 
@@ -152,24 +96,6 @@ func TestPairJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestKI64PairJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedKI64Pair(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &KI64Pair{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} func TestPairProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -198,34 +124,6 @@ func TestPairProtoCompactText(t *testing.T) { } } -func TestKI64PairProtoText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedKI64Pair(popr, true) - dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &KI64Pair{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestKI64PairProtoCompactText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedKI64Pair(popr, true) - dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &KI64Pair{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - func TestPairSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -248,26 +146,4 @@ func TestPairSize(t *testing.T) { } } 
-func TestKI64PairSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedKI64Pair(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - //These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go index d6f039ce4..86d0e5693 100644 --- a/libs/log/tmfmt_logger_test.go +++ b/libs/log/tmfmt_logger_test.go @@ -10,6 +10,7 @@ import ( kitlog "github.com/go-kit/kit/log" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go index b40d2b9e0..354476755 100644 --- a/libs/log/tracing_logger_test.go +++ b/libs/log/tracing_logger_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 325403cd8..2a89e7591 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -39,6 +39,7 @@ import ( "sync" "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/service" ) diff --git a/lite/client/provider.go b/lite/client/provider.go index e24dbe0e4..a79a3b9fc 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -1,5 +1,5 @@ /* -Package client defines a provider that uses a rpcclient +Package client defines a provider that uses a rpchttp to get information, 
which is used to get new headers and validators directly from a Tendermint client. */ @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/lite" lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" ) @@ -40,7 +41,7 @@ func NewProvider(chainID string, client SignStatusClient) lite.Provider { // NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. func NewHTTPProvider(chainID, remote string) (lite.Provider, error) { - httpClient, err := rpcclient.NewHTTP(remote, "/websocket") + httpClient, err := rpchttp.New(remote, "/websocket") if err != nil { return nil, err } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 35f7270ae..9b1580314 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -6,11 +6,12 @@ import ( "strconv" amino "github.com/tendermint/go-amino" + dbm "github.com/tendermint/tm-db" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) var _ PersistentProvider = (*DBProvider)(nil) diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index 441010efb..fdb89052b 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -8,9 +8,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + log "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const testChainID = "inquiry-test" diff --git a/lite/provider_test.go b/lite/provider_test.go index 98fff8cb4..b820418ff 100644 --- 
a/lite/provider_test.go +++ b/lite/provider_test.go @@ -7,10 +7,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // missingProvider doesn't store anything, always a miss. diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 34d9d1d4c..766a86040 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -15,6 +15,7 @@ import ( certclient "github.com/tendermint/tendermint/lite/client" nm "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/rpc/client" + rpclocal "github.com/tendermint/tendermint/rpc/client/local" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) @@ -47,7 +48,7 @@ func _TestAppProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) prt := defaultProofRuntime() - cl := client.NewLocal(node) + cl := rpclocal.New(node) client.WaitForHeight(cl, 1, nil) // This sets up our trust on the node based on some past point. 
@@ -126,7 +127,7 @@ func _TestAppProofs(t *testing.T) { func TestTxProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) - cl := client.NewLocal(node) + cl := rpclocal.New(node) client.WaitForHeight(cl, 1, nil) tx := kvstoreTx([]byte("key-a"), []byte("value-a")) diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go index b5fc3af3f..5486a3ea9 100644 --- a/lite/proxy/verifier.go +++ b/lite/proxy/verifier.go @@ -3,10 +3,11 @@ package proxy import ( "github.com/pkg/errors" + dbm "github.com/tendermint/tm-db" + log "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/lite" lclient "github.com/tendermint/tendermint/lite/client" - dbm "github.com/tendermint/tm-db" ) func NewVerifier( diff --git a/lite2/client.go b/lite2/client.go index b70123099..215e26bdb 100644 --- a/lite2/client.go +++ b/lite2/client.go @@ -22,9 +22,19 @@ const ( sequential mode = iota + 1 skipping - defaultUpdatePeriod = 5 * time.Second defaultPruningSize = 1000 defaultMaxRetryAttempts = 10 + // For bisection, when using the cache of headers from the previous batch, + // they will always be at a height greater than 1/2 (normal bisection) so to + // find something in between the range, 9/16 is used. + bisectionNumerator = 9 + bisectionDenominator = 16 + + // 10s should cover most of the clients. + // References: + // - http://vancouver-webpages.com/time/web.html + // - https://blog.codinghorror.com/keeping-time-on-the-pc/ + defaultMaxClockDrift = 10 * time.Second ) // Option sets a parameter for the light client. @@ -55,13 +65,6 @@ func SkippingVerification(trustLevel tmmath.Fraction) Option { } } -// UpdatePeriod option can be used to change default polling period (5s). -func UpdatePeriod(d time.Duration) Option { - return func(c *Client) { - c.updatePeriod = d - } -} - // PruningSize option sets the maximum amount of headers & validator set pairs // that the light client stores. 
When Prune() is run, all headers (along with // the associated validator sets) that are earlier than the h amount of headers @@ -97,13 +100,18 @@ func MaxRetryAttempts(max uint16) Option { } } +// MaxClockDrift defines how much new (untrusted) header's Time can drift into +// the future. Default: 10s. +func MaxClockDrift(d time.Duration) Option { + return func(c *Client) { + c.maxClockDrift = d + } +} + // Client represents a light client, connected to a single chain, which gets // headers from a primary provider, verifies them either sequentially or by // skipping some and stores them in a trusted store (usually, a local FS). // -// By default, the client will poll the primary provider for new headers every -// 5s (UpdatePeriod). If there are any, it will try to advance the state. -// // Default verification: SkippingVerification(DefaultTrustLevel) type Client struct { chainID string @@ -111,6 +119,7 @@ type Client struct { verificationMode mode trustLevel tmmath.Fraction maxRetryAttempts uint16 // see MaxRetryAttempts option + maxClockDrift time.Duration // Mutex for locking during changes of the lite clients providers providerMutex sync.Mutex @@ -126,8 +135,6 @@ type Client struct { // Highest validator set from the store (height=H). latestTrustedVals *types.ValidatorSet - // See UpdatePeriod option - updatePeriod time.Duration // See RemoveNoLongerTrustedHeadersPeriod option pruningSize uint16 // See ConfirmationFunction option @@ -157,7 +164,7 @@ func NewClient( options ...Option) (*Client, error) { if err := trustOptions.ValidateBasic(); err != nil { - return nil, errors.Wrap(err, "invalid TrustOptions") + return nil, fmt.Errorf("invalid TrustOptions: %w", err) } c, err := NewClientFromTrustedStore(chainID, trustOptions.Period, primary, witnesses, trustedStore, options...) 
@@ -166,12 +173,14 @@ func NewClient( } if c.latestTrustedHeader != nil { + c.logger.Info("Checking trusted header using options") if err := c.checkTrustedHeaderUsingOptions(trustOptions); err != nil { return nil, err } } if c.latestTrustedHeader == nil || c.latestTrustedHeader.Height < trustOptions.Height { + c.logger.Info("Downloading trusted header using options") if err := c.initializeWithTrustOptions(trustOptions); err != nil { return nil, err } @@ -197,10 +206,10 @@ func NewClientFromTrustedStore( verificationMode: skipping, trustLevel: DefaultTrustLevel, maxRetryAttempts: defaultMaxRetryAttempts, + maxClockDrift: defaultMaxClockDrift, primary: primary, witnesses: witnesses, trustedStore: trustedStore, - updatePeriod: defaultUpdatePeriod, pruningSize: defaultPruningSize, confirmationFn: func(action string) bool { return true }, quit: make(chan struct{}), @@ -213,13 +222,13 @@ func NewClientFromTrustedStore( // Validate the number of witnesses. if len(c.witnesses) < 1 { - return nil, errors.New("expected at least one witness") + return nil, errNoWitnesses{} } // Verify witnesses are all on the same chain. 
for i, w := range witnesses { if w.ChainID() != chainID { - return nil, errors.Errorf("witness #%d: %v is on another chain %s, expected %s", + return nil, fmt.Errorf("witness #%d: %v is on another chain %s, expected %s", i, w, w.ChainID(), chainID) } } @@ -241,24 +250,24 @@ func NewClientFromTrustedStore( func (c *Client) restoreTrustedHeaderAndVals() error { lastHeight, err := c.trustedStore.LastSignedHeaderHeight() if err != nil { - return errors.Wrap(err, "can't get last trusted header height") + return fmt.Errorf("can't get last trusted header height: %w", err) } if lastHeight > 0 { trustedHeader, err := c.trustedStore.SignedHeader(lastHeight) if err != nil { - return errors.Wrap(err, "can't get last trusted header") + return fmt.Errorf("can't get last trusted header: %w", err) } trustedVals, err := c.trustedStore.ValidatorSet(lastHeight) if err != nil { - return errors.Wrap(err, "can't get last trusted validators") + return fmt.Errorf("can't get last trusted validators: %w", err) } c.latestTrustedHeader = trustedHeader c.latestTrustedVals = trustedVals - c.logger.Debug("Restored trusted header and vals", "height", lastHeight) + c.logger.Info("Restored trusted header and vals", "height", lastHeight) } return nil @@ -307,7 +316,7 @@ func (c *Client) checkTrustedHeaderUsingOptions(options TrustOptions) error { // remove all the headers (options.Height, trustedHeader.Height] err := c.cleanupAfter(options.Height) if err != nil { - return errors.Wrapf(err, "cleanupAfter(%d)", options.Height) + return fmt.Errorf("cleanupAfter(%d): %w", options.Height, err) } c.logger.Info("Rolled back to older header (newer headers were removed)", @@ -329,7 +338,7 @@ func (c *Client) checkTrustedHeaderUsingOptions(options TrustOptions) error { if c.confirmationFn(action) { err := c.Cleanup() if err != nil { - return errors.Wrap(err, "failed to cleanup") + return fmt.Errorf("failed to cleanup: %w", err) } } else { return errors.New("refused to remove the stored headers despite hashes 
mismatch") @@ -357,7 +366,7 @@ func (c *Client) initializeWithTrustOptions(options TrustOptions) error { } if !bytes.Equal(h.Hash(), options.Hash) { - return errors.Errorf("expected header's hash %X, but got %X", options.Hash, h.Hash()) + return fmt.Errorf("expected header's hash %X, but got %X", options.Hash, h.Hash()) } err = c.compareNewHeaderWithWitnesses(h) @@ -372,7 +381,7 @@ func (c *Client) initializeWithTrustOptions(options TrustOptions) error { } if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { - return errors.Errorf("expected header's validators (%X) to match those that were supplied (%X)", + return fmt.Errorf("expected header's validators (%X) to match those that were supplied (%X)", h.ValidatorsHash, vals.Hash(), ) @@ -381,34 +390,13 @@ func (c *Client) initializeWithTrustOptions(options TrustOptions) error { // Ensure that +2/3 of validators signed correctly. err = vals.VerifyCommit(c.chainID, h.Commit.BlockID, h.Height, h.Commit) if err != nil { - return errors.Wrap(err, "invalid commit") + return fmt.Errorf("invalid commit: %w", err) } // 3) Persist both of them and continue. return c.updateTrustedHeaderAndVals(h, vals) } -// Start starts two processes: 1) auto updating 2) removing outdated headers. -func (c *Client) Start() error { - c.logger.Info("Starting light client") - - if c.updatePeriod > 0 { - c.routinesWaitGroup.Add(1) - go c.autoUpdateRoutine() - } - - return nil -} - -// Stop stops two processes: 1) auto updating 2) removing outdated headers. -// Stop only returns after both of them are finished running. If you wish to -// remove all the data, call Cleanup. -func (c *Client) Stop() { - c.logger.Info("Stopping light client") - close(c.quit) - c.routinesWaitGroup.Wait() -} - // TrustedHeader returns a trusted header at the given height (0 - the latest). 
// // Headers along with validator sets, which can't be trusted anymore, are @@ -464,7 +452,7 @@ func (c *Client) TrustedValidatorSet(height int64) (valSet *types.ValidatorSet, func (c *Client) compareWithLatestHeight(height int64) (int64, error) { latestHeight, err := c.LastTrustedHeight() if err != nil { - return 0, errors.Wrap(err, "can't get last trusted height") + return 0, fmt.Errorf("can't get last trusted height: %w", err) } if latestHeight == -1 { return 0, errors.New("no headers exist") @@ -472,7 +460,7 @@ func (c *Client) compareWithLatestHeight(height int64) (int64, error) { switch { case height > latestHeight: - return 0, errors.Errorf("unverified header/valset requested (latest: %d)", latestHeight) + return 0, fmt.Errorf("unverified header/valset requested (latest: %d)", latestHeight) case height == 0: return latestHeight, nil case height < 0: @@ -482,36 +470,14 @@ func (c *Client) compareWithLatestHeight(height int64) (int64, error) { return height, nil } -// LastTrustedHeight returns a last trusted height. -1 and nil are returned if -// there are no trusted headers. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) LastTrustedHeight() (int64, error) { - return c.trustedStore.LastSignedHeaderHeight() -} - -// FirstTrustedHeight returns a first trusted height. -1 and nil are returned if -// there are no trusted headers. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) FirstTrustedHeight() (int64, error) { - return c.trustedStore.FirstSignedHeaderHeight() -} - -// ChainID returns the chain ID the light client was configured with. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) ChainID() string { - return c.chainID -} - // VerifyHeaderAtHeight fetches header and validators at the given height // and calls VerifyHeader. It returns header immediately if such exists in // trustedStore (no verification is needed). // +// height must be > 0. 
+// // It returns provider.ErrSignedHeaderNotFound if header is not found by // primary. -// It returns ErrOldHeaderExpired if header expired. func (c *Client) VerifyHeaderAtHeight(height int64, now time.Time) (*types.SignedHeader, error) { if height <= 0 { return nil, errors.New("negative or zero height") @@ -536,19 +502,28 @@ func (c *Client) VerifyHeaderAtHeight(height int64, now time.Time) (*types.Signe // VerifyHeader verifies new header against the trusted state. It returns // immediately if newHeader exists in trustedStore (no verification is -// needed). +// needed). Else it performs one of the two types of verification: // // SequentialVerification: verifies that 2/3 of the trusted validator set has // signed the new header. If the headers are not adjacent, **all** intermediate -// headers will be requested. +// headers will be requested. Intermediate headers are not saved to database. // // SkippingVerification(trustLevel): verifies that {trustLevel} of the trusted // validator set has signed the new header. If it's not the case and the // headers are not adjacent, bisection is performed and necessary (not all) // intermediate headers will be requested. See the specification for details. +// Intermediate headers are not saved to database. // https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md // -// It returns ErrOldHeaderExpired if newHeader expired. +// If the header, which is older than the currently trusted header, is +// requested and the light client does not have it, VerifyHeader will perform: +// a) bisection verification if nearest trusted header is found & not expired +// b) backwards verification in all other cases +// +// It returns ErrOldHeaderExpired if the latest trusted header expired. +// +// If the primary provides an invalid header (ErrInvalidHeader), it is rejected +// and replaced by another provider until all are exhausted. 
// // If, at any moment, SignedHeader or ValidatorSet are not found by the primary // provider, provider.ErrSignedHeaderNotFound / @@ -563,7 +538,7 @@ func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.Vali if err == nil { // Make sure it's the same header. if !bytes.Equal(h.Hash(), newHeader.Hash()) { - return errors.Errorf("existing trusted header %X does not match newHeader %X", h.Hash(), newHeader.Hash()) + return fmt.Errorf("existing trusted header %X does not match newHeader %X", h.Hash(), newHeader.Hash()) } c.logger.Info("Header has already been verified", "height", newHeader.Height, "hash", hash2str(newHeader.Hash())) @@ -579,7 +554,7 @@ func (c *Client) verifyHeader(newHeader *types.SignedHeader, newVals *types.Vali var err error - // 1) If going forward, perform either bisection or sequential verification + // 1) If going forward, perform either bisection or sequential verification. if newHeader.Height >= c.latestTrustedHeader.Height { switch c.verificationMode { case sequential: @@ -590,29 +565,223 @@ func (c *Client) verifyHeader(newHeader *types.SignedHeader, newVals *types.Vali panic(fmt.Sprintf("Unknown verification mode: %b", c.verificationMode)) } } else { - // 2) Otherwise, perform backwards verification - // Find the closest trusted header after newHeader.Height - var closestHeader *types.SignedHeader - closestHeader, err = c.trustedStore.SignedHeaderAfter(newHeader.Height) + // 2) If verifying before the first trusted header, perform backwards + // verification. 
+ var ( + closestHeader *types.SignedHeader + firstHeaderHeight int64 + ) + firstHeaderHeight, err = c.FirstTrustedHeight() if err != nil { - return errors.Wrapf(err, "can't get signed header after height %d", newHeader.Height) + return fmt.Errorf("can't get first header height: %w", err) + } + if newHeader.Height < firstHeaderHeight { + closestHeader, err = c.TrustedHeader(firstHeaderHeight) + if err != nil { + return fmt.Errorf("can't get first signed header: %w", err) + } + if HeaderExpired(closestHeader, c.trustingPeriod, now) { + closestHeader = c.latestTrustedHeader + } + err = c.backwards(closestHeader, newHeader, now) + } else { + // 3) OR if between trusted headers where the nearest has not expired, + // perform bisection verification, else backwards. + closestHeader, err = c.trustedStore.SignedHeaderBefore(newHeader.Height) + if err != nil { + return fmt.Errorf("can't get signed header before height %d: %w", newHeader.Height, err) + } + var closestValidatorSet *types.ValidatorSet + if c.verificationMode == sequential || HeaderExpired(closestHeader, c.trustingPeriod, now) { + err = c.backwards(c.latestTrustedHeader, newHeader, now) + } else { + closestValidatorSet, _, err = c.TrustedValidatorSet(closestHeader.Height) + if err != nil { + return fmt.Errorf("can't get validator set at height %d: %w", closestHeader.Height, err) + } + err = c.bisection(closestHeader, closestValidatorSet, newHeader, newVals, now) + } } - - err = c.backwards(closestHeader, newHeader, now) } if err != nil { c.logger.Error("Can't verify", "err", err) return err } - + // 4) Compare header with other witnesses if err := c.compareNewHeaderWithWitnesses(newHeader); err != nil { c.logger.Error("Error when comparing new header with witnesses", "err", err) return err } + // 5) Once verified, save and return return c.updateTrustedHeaderAndVals(newHeader, newVals) } +// see VerifyHeader +func (c *Client) sequence( + initiallyTrustedHeader *types.SignedHeader, + newHeader 
*types.SignedHeader, + newVals *types.ValidatorSet, + now time.Time) error { + + var ( + trustedHeader = initiallyTrustedHeader + + interimHeader *types.SignedHeader + interimVals *types.ValidatorSet + + err error + ) + + for height := initiallyTrustedHeader.Height + 1; height <= newHeader.Height; height++ { + // 1) Fetch interim headers and vals if needed. + if height == newHeader.Height { // last header + interimHeader, interimVals = newHeader, newVals + } else { // intermediate headers + interimHeader, interimVals, err = c.fetchHeaderAndValsAtHeight(height) + if err != nil { + return err + } + } + + // 2) Verify them + c.logger.Debug("Verify newHeader against trustedHeader", + "trustedHeight", trustedHeader.Height, + "trustedHash", hash2str(trustedHeader.Hash()), + "newHeight", interimHeader.Height, + "newHash", hash2str(interimHeader.Hash())) + + err = VerifyAdjacent(c.chainID, trustedHeader, interimHeader, interimVals, + c.trustingPeriod, now, c.maxClockDrift) + if err != nil { + err = fmt.Errorf("verify adjacent from #%d to #%d failed: %w", + trustedHeader.Height, interimHeader.Height, err) + + switch errors.Unwrap(err).(type) { + case ErrInvalidHeader: + c.logger.Error("primary sent invalid header -> replacing", "err", err) + replaceErr := c.replacePrimaryProvider() + if replaceErr != nil { + c.logger.Error("Can't replace primary", "err", replaceErr) + return err // return original error + } + // attempt to verify header again + height-- + continue + default: + return err + } + } + + // 3) Update trustedHeader + trustedHeader = interimHeader + } + + return nil +} + +// see VerifyHeader +// Bisection finds the middle header between a trusted and new header, reiterating the action until it +// verifies a header. A cache of headers requested by the primary is kept such that when a +// verification is made, and the light client tries again to verify the new header in the middle, +// the light client does not need to ask for all the same headers again. 
+func (c *Client) bisection( + initiallyTrustedHeader *types.SignedHeader, + initiallyTrustedVals *types.ValidatorSet, + newHeader *types.SignedHeader, + newVals *types.ValidatorSet, + now time.Time) error { + + type headerSet struct { + sh *types.SignedHeader + valSet *types.ValidatorSet + } + + var ( + headerCache = []headerSet{{newHeader, newVals}} + depth = 0 + + trustedHeader = initiallyTrustedHeader + trustedVals = initiallyTrustedVals + ) + + for { + c.logger.Debug("Verify newHeader against trustedHeader", + "trustedHeight", trustedHeader.Height, + "trustedHash", hash2str(trustedHeader.Hash()), + "newHeight", headerCache[depth].sh.Height, + "newHash", hash2str(headerCache[depth].sh.Hash())) + + err := Verify(c.chainID, trustedHeader, trustedVals, headerCache[depth].sh, headerCache[depth].valSet, + c.trustingPeriod, now, c.maxClockDrift, c.trustLevel) + switch err.(type) { + case nil: + // Have we verified the last header + if depth == 0 { + return nil + } + // If not, update the lower bound to the previous upper bound + trustedHeader, trustedVals = headerCache[depth].sh, headerCache[depth].valSet + // Remove the untrusted header at the lower bound in the header cache - it's no longer useful + headerCache = headerCache[:depth] + // Reset the cache depth so that we start from the upper bound again + depth = 0 + + case ErrNewValSetCantBeTrusted: + // do add another header to the end of the cache + if depth == len(headerCache)-1 { + pivotHeight := (headerCache[depth].sh.Height + trustedHeader. 
+ Height) * bisectionNumerator / bisectionDenominator + interimHeader, interimVals, err := c.fetchHeaderAndValsAtHeight(pivotHeight) + if err != nil { + return err + } + headerCache = append(headerCache, headerSet{interimHeader, interimVals}) + } + depth++ + + case ErrInvalidHeader: + c.logger.Error("primary sent invalid header -> replacing", "err", err) + replaceErr := c.replacePrimaryProvider() + if replaceErr != nil { + c.logger.Error("Can't replace primary", "err", replaceErr) + // return original error + return fmt.Errorf("verify non adjacent from #%d to #%d failed: %w", + trustedHeader.Height, headerCache[depth].sh.Height, err) + } + // attempt to verify the header again + continue + + default: + return fmt.Errorf("verify non adjacent from #%d to #%d failed: %w", + trustedHeader.Height, headerCache[depth].sh.Height, err) + } + } +} + +// LastTrustedHeight returns a last trusted height. -1 and nil are returned if +// there are no trusted headers. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) LastTrustedHeight() (int64, error) { + return c.trustedStore.LastSignedHeaderHeight() +} + +// FirstTrustedHeight returns a first trusted height. -1 and nil are returned if +// there are no trusted headers. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) FirstTrustedHeight() (int64, error) { + return c.trustedStore.FirstSignedHeaderHeight() +} + +// ChainID returns the chain ID the light client was configured with. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) ChainID() string { + return c.chainID +} + // Primary returns the primary provider. // // NOTE: provider may be not safe for concurrent access. @@ -643,14 +812,14 @@ func (c *Client) Cleanup() error { // cleanupAfter deletes all headers & validator sets after +height+. It also // resets latestTrustedHeader to the latest header. 
func (c *Client) cleanupAfter(height int64) error { - nextHeight := height + prevHeight := c.latestTrustedHeader.Height for { - h, err := c.trustedStore.SignedHeaderAfter(nextHeight) - if err == store.ErrSignedHeaderNotFound { + h, err := c.trustedStore.SignedHeaderBefore(prevHeight) + if err == store.ErrSignedHeaderNotFound || (h != nil && h.Height <= height) { break } else if err != nil { - return errors.Wrapf(err, "failed to get header after %d", nextHeight) + return fmt.Errorf("failed to get header before %d: %w", prevHeight, err) } err = c.trustedStore.DeleteSignedHeaderAndValidatorSet(h.Height) @@ -659,7 +828,7 @@ func (c *Client) cleanupAfter(height int64) error { "height", h.Height) } - nextHeight = h.Height + prevHeight = h.Height } c.latestTrustedHeader = nil @@ -672,114 +841,18 @@ func (c *Client) cleanupAfter(height int64) error { return nil } -// see VerifyHeader -func (c *Client) sequence( - initiallyTrustedHeader *types.SignedHeader, - newHeader *types.SignedHeader, - newVals *types.ValidatorSet, - now time.Time) error { - - var ( - trustedHeader = initiallyTrustedHeader - - interimHeader *types.SignedHeader - interimVals *types.ValidatorSet - - err error - ) - - for height := initiallyTrustedHeader.Height + 1; height <= newHeader.Height; height++ { - // 1) Fetch interim headers and vals if needed. 
- if height == newHeader.Height { // last header - interimHeader, interimVals = newHeader, newVals - } else { // intermediate headers - interimHeader, interimVals, err = c.fetchHeaderAndValsAtHeight(height) - if err != nil { - return errors.Wrapf(err, "failed to obtain the header #%d", height) - } - } - - // 2) Verify them - c.logger.Debug("Verify newHeader against trustedHeader", - "trustedHeight", trustedHeader.Height, - "trustedHash", hash2str(trustedHeader.Hash()), - "newHeight", interimHeader.Height, - "newHash", hash2str(interimHeader.Hash())) - - err = VerifyAdjacent(c.chainID, trustedHeader, interimHeader, interimVals, - c.trustingPeriod, now) - if err != nil { - return errors.Wrapf(err, "failed to verify the header #%d", height) - } - - // 3) Update trustedHeader - trustedHeader = interimHeader - } - - return nil -} - -// see VerifyHeader -func (c *Client) bisection( - initiallyTrustedHeader *types.SignedHeader, - initiallyTrustedVals *types.ValidatorSet, - newHeader *types.SignedHeader, - newVals *types.ValidatorSet, - now time.Time) error { - - var ( - trustedHeader = initiallyTrustedHeader - trustedVals = initiallyTrustedVals - - interimHeader = newHeader - interimVals = newVals - ) - - for { - c.logger.Debug("Verify newHeader against trustedHeader", - "trustedHeight", trustedHeader.Height, - "trustedHash", hash2str(trustedHeader.Hash()), - "newHeight", interimHeader.Height, - "newHash", hash2str(interimHeader.Hash())) - - err := Verify(c.chainID, trustedHeader, trustedVals, interimHeader, interimVals, c.trustingPeriod, now, - c.trustLevel) - switch err.(type) { - case nil: - if interimHeader.Height == newHeader.Height { - return nil - } - - // Update the lower bound to the previous upper bound - trustedHeader, trustedVals = interimHeader, interimVals - // Update the upper bound to the untrustedHeader - interimHeader, interimVals = newHeader, newVals - - case ErrNewValSetCantBeTrusted: - pivotHeight := (interimHeader.Height + trustedHeader.Height) / 2 - 
interimHeader, interimVals, err = c.fetchHeaderAndValsAtHeight(pivotHeight) - if err != nil { - return err - } - - default: - return errors.Wrapf(err, "failed to verify the header #%d", newHeader.Height) - } - } -} - func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.ValidatorSet) error { if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { - return errors.Errorf("expected validator's hash %X, but got %X", h.ValidatorsHash, vals.Hash()) + return fmt.Errorf("expected validator's hash %X, but got %X", h.ValidatorsHash, vals.Hash()) } if err := c.trustedStore.SaveSignedHeaderAndValidatorSet(h, vals); err != nil { - return errors.Wrap(err, "failed to save trusted header") + return fmt.Errorf("failed to save trusted header: %w", err) } if c.pruningSize > 0 { if err := c.trustedStore.Prune(c.pruningSize); err != nil { - return errors.Wrap(err, "prune") + return fmt.Errorf("prune: %w", err) } } @@ -796,22 +869,25 @@ func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.V func (c *Client) fetchHeaderAndValsAtHeight(height int64) (*types.SignedHeader, *types.ValidatorSet, error) { h, err := c.signedHeaderFromPrimary(height) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to obtain the header #%d", height) + return nil, nil, fmt.Errorf("failed to obtain the header #%d: %w", height, err) } vals, err := c.validatorSetFromPrimary(height) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to obtain the vals #%d", height) + return nil, nil, fmt.Errorf("failed to obtain the vals #%d: %w", height, err) } return h, vals, nil } -// Backwards verification (see VerifyHeaderBackwards func in the spec) +// backwards verification (see VerifyHeaderBackwards func in the spec) verifies +// headers before a trusted header. If a sent header is invalid the primary is +// replaced with another provider and the operation is repeated. 
func (c *Client) backwards( initiallyTrustedHeader *types.SignedHeader, newHeader *types.SignedHeader, now time.Time) error { if HeaderExpired(initiallyTrustedHeader, c.trustingPeriod, now) { + c.logger.Error("Header Expired") return ErrOldHeaderExpired{initiallyTrustedHeader.Time.Add(c.trustingPeriod), now} } @@ -824,23 +900,21 @@ func (c *Client) backwards( for trustedHeader.Height > newHeader.Height { interimHeader, err = c.signedHeaderFromPrimary(trustedHeader.Height - 1) if err != nil { - return errors.Wrapf(err, "failed to obtain the header at height #%d", trustedHeader.Height-1) + return fmt.Errorf("failed to obtain the header at height #%d: %w", trustedHeader.Height-1, err) } - - if err := interimHeader.ValidateBasic(c.chainID); err != nil { - return errors.Wrap(err, "untrustedHeader.ValidateBasic failed") - } - - if !interimHeader.Time.Before(trustedHeader.Time) { - return errors.Errorf("expected older header time %v to be before newer header time %v", - interimHeader.Time, - trustedHeader.Time) - } - - if !bytes.Equal(interimHeader.Hash(), trustedHeader.LastBlockID.Hash) { - return errors.Errorf("older header hash %X does not match trusted header's last block %X", - interimHeader.Hash(), - trustedHeader.LastBlockID.Hash) + c.logger.Debug("Verify newHeader against trustedHeader", + "trustedHeight", trustedHeader.Height, + "trustedHash", hash2str(trustedHeader.Hash()), + "newHeight", interimHeader.Height, + "newHash", hash2str(interimHeader.Hash())) + if err := VerifyBackwards(c.chainID, interimHeader, trustedHeader); err != nil { + c.logger.Error("primary sent invalid header -> replacing", "err", err) + if replaceErr := c.replacePrimaryProvider(); replaceErr != nil { + c.logger.Error("Can't replace primary", "err", replaceErr) + // return original error + return fmt.Errorf("verify backwards from %d to %d failed: %w", + trustedHeader.Height, interimHeader.Height, err) + } } trustedHeader = interimHeader @@ -864,7 +938,7 @@ func (c *Client) 
compareNewHeaderWithWitnesses(h *types.SignedHeader) error { witnessesToRemove := make([]int, 0) for attempt := uint16(1); attempt <= c.maxRetryAttempts; attempt++ { if len(c.witnesses) == 0 { - return errors.New("could not find any witnesses. please reset the light client") + return errNoWitnesses{} } for i, witness := range c.witnesses { @@ -890,7 +964,7 @@ func (c *Client) compareNewHeaderWithWitnesses(h *types.SignedHeader) error { // TODO: send the diverged headers to primary && all witnesses - return errors.Errorf( + return fmt.Errorf( "header hash %X does not match one %X from the witness %v", h.Hash(), altH.Hash(), witness) } @@ -927,56 +1001,35 @@ func (c *Client) removeWitness(idx int) { } } -func (c *Client) autoUpdateRoutine() { - defer c.routinesWaitGroup.Done() - - ticker := time.NewTicker(c.updatePeriod) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - err := c.Update(time.Now()) - if err != nil { - c.logger.Error("Error during auto update", "err", err) - } - case <-c.quit: - return - } - } -} - -// Update attempts to advance the state making exponential steps (note: -// when SequentialVerification is being used, the client will still be -// downloading all intermediate headers). -// -// Exposed for testing. -func (c *Client) Update(now time.Time) error { +// Update attempts to advance the state by downloading the latest header and +// comparing it with the existing one. It returns a new header on a successful +// update. Otherwise, it returns nil (plus an error, if any). 
+func (c *Client) Update(now time.Time) (*types.SignedHeader, error) { lastTrustedHeight, err := c.LastTrustedHeight() if err != nil { - return errors.Wrap(err, "can't get last trusted height") + return nil, fmt.Errorf("can't get last trusted height: %w", err) } if lastTrustedHeight == -1 { // no headers yet => wait - return nil + return nil, nil } latestHeader, latestVals, err := c.fetchHeaderAndValsAtHeight(0) if err != nil { - return errors.Wrapf(err, "can't get latest header and vals") + return nil, err } if latestHeader.Height > lastTrustedHeight { err = c.VerifyHeader(latestHeader, latestVals, now) if err != nil { - return err + return nil, err } - c.logger.Info("Advanced to new state", "height", latestHeader.Height, "hash", hash2str(latestHeader.Hash())) + return latestHeader, nil } - return nil + return nil, nil } // replaceProvider takes the first alternative provider and promotes it as the @@ -986,7 +1039,7 @@ func (c *Client) replacePrimaryProvider() error { defer c.providerMutex.Unlock() if len(c.witnesses) <= 1 { - return errors.Errorf("only one witness left. 
please reset the light client") + return errNoWitnesses{} } c.primary = c.witnesses[0] c.witnesses = c.witnesses[1:] @@ -1006,13 +1059,14 @@ func (c *Client) signedHeaderFromPrimary(height int64) (*types.SignedHeader, err if err == nil { // sanity check if height > 0 && h.Height != height { - return nil, errors.Errorf("expected %d height, got %d", height, h.Height) + return nil, fmt.Errorf("expected %d height, got %d", height, h.Height) } return h, nil } if err == provider.ErrSignedHeaderNotFound { return nil, err } + c.logger.Error("Failed to get signed header from primary", "attempt", attempt, "err", err) time.Sleep(backoffTimeout(attempt)) } @@ -1036,6 +1090,7 @@ func (c *Client) validatorSetFromPrimary(height int64) (*types.ValidatorSet, err if err == nil || err == provider.ErrValidatorSetNotFound { return vals, err } + c.logger.Error("Failed to get validator set from primary", "attempt", attempt, "err", err) time.Sleep(backoffTimeout(attempt)) } diff --git a/lite2/client_benchmark_test.go b/lite2/client_benchmark_test.go new file mode 100644 index 000000000..5877dbc3c --- /dev/null +++ b/lite2/client_benchmark_test.go @@ -0,0 +1,106 @@ +package lite_test + +import ( + "testing" + "time" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/log" + lite "github.com/tendermint/tendermint/lite2" + "github.com/tendermint/tendermint/lite2/provider" + mockp "github.com/tendermint/tendermint/lite2/provider/mock" + dbs "github.com/tendermint/tendermint/lite2/store/db" +) + +// NOTE: block is produced every minute. Make sure the verification time +// provided in the function call is correct for the size of the blockchain. The +// benchmarking may take some time hence it can be more useful to set the time +// or the amount of iterations use the flag -benchtime t -> i.e. -benchtime 5m +// or -benchtime 100x. +// +// Remember that none of these benchmarks account for network latency. 
+var ( + benchmarkFullNode = mockp.New(GenMockNode(chainID, 1000, 100, 1, bTime)) + genesisHeader, _ = benchmarkFullNode.SignedHeader(1) +) + +func BenchmarkSequence(b *testing.B) { + c, err := lite.NewClient( + chainID, + lite.TrustOptions{ + Period: 24 * time.Hour, + Height: 1, + Hash: genesisHeader.Hash(), + }, + benchmarkFullNode, + []provider.Provider{benchmarkFullNode}, + dbs.New(dbm.NewMemDB(), chainID), + lite.Logger(log.TestingLogger()), + lite.SequentialVerification(), + ) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err = c.VerifyHeaderAtHeight(1000, bTime.Add(1000*time.Minute)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBisection(b *testing.B) { + c, err := lite.NewClient( + chainID, + lite.TrustOptions{ + Period: 24 * time.Hour, + Height: 1, + Hash: genesisHeader.Hash(), + }, + benchmarkFullNode, + []provider.Provider{benchmarkFullNode}, + dbs.New(dbm.NewMemDB(), chainID), + lite.Logger(log.TestingLogger()), + ) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err = c.VerifyHeaderAtHeight(1000, bTime.Add(1000*time.Minute)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBackwards(b *testing.B) { + trustedHeader, _ := benchmarkFullNode.SignedHeader(0) + c, err := lite.NewClient( + chainID, + lite.TrustOptions{ + Period: 24 * time.Hour, + Height: trustedHeader.Height, + Hash: trustedHeader.Hash(), + }, + benchmarkFullNode, + []provider.Provider{benchmarkFullNode}, + dbs.New(dbm.NewMemDB(), chainID), + lite.Logger(log.TestingLogger()), + ) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err = c.VerifyHeaderAtHeight(1, bTime) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/lite2/client_test.go b/lite2/client_test.go index b1079390a..a54786ab7 100644 --- a/lite2/client_test.go +++ b/lite2/client_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "sync" @@ -11,6 +11,7 @@ import 
( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" + lite "github.com/tendermint/tendermint/lite2" "github.com/tendermint/tendermint/lite2/provider" mockp "github.com/tendermint/tendermint/lite2/provider/mock" dbs "github.com/tendermint/tendermint/lite2/store/db" @@ -34,7 +35,7 @@ var ( h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) trustPeriod = 4 * time.Hour - trustOptions = TrustOptions{ + trustOptions = lite.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: h1.Hash(), @@ -57,7 +58,8 @@ var ( headerSet, valSet, ) - deadNode = mockp.NewDeadMock(chainID) + deadNode = mockp.NewDeadMock(chainID) + largeFullNode = mockp.New(GenMockNode(chainID, 10, 3, 0, bTime)) ) func TestClient_SequentialVerification(t *testing.T) { @@ -139,7 +141,7 @@ func TestClient_SequentialVerification(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, mockp.New( @@ -153,7 +155,7 @@ func TestClient_SequentialVerification(t *testing.T) { tc.vals, )}, dbs.New(dbm.NewMemDB(), chainID), - SequentialVerification(), + lite.SequentialVerification(), ) if tc.initErr { @@ -162,9 +164,6 @@ func TestClient_SequentialVerification(t *testing.T) { } require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() _, err = c.VerifyHeaderAtHeight(3, bTime.Add(3*time.Hour)) if tc.verifyErr { @@ -265,7 +264,7 @@ func TestClient_SkippingVerification(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, mockp.New( @@ -279,7 +278,7 @@ func TestClient_SkippingVerification(t *testing.T) { tc.vals, )}, dbs.New(dbm.NewMemDB(), chainID), - SkippingVerification(DefaultTrustLevel), + 
lite.SkippingVerification(lite.DefaultTrustLevel), ) if tc.initErr { require.Error(t, err) @@ -287,9 +286,6 @@ func TestClient_SkippingVerification(t *testing.T) { } require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() _, err = c.VerifyHeaderAtHeight(3, bTime.Add(3*time.Hour)) if tc.verifyErr { @@ -302,13 +298,13 @@ func TestClient_SkippingVerification(t *testing.T) { } func TestClient_Cleanup(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) _, err = c.TrustedHeader(1) @@ -335,13 +331,13 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) require.NoError(t, err) - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -377,9 +373,9 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { valSet, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: header1.Hash(), @@ -387,7 +383,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -414,9 +410,9 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) require.NoError(t, err) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 2, Hash: h2.Hash(), @@ -424,12 +420,9 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { 
fullNode, []provider.Provider{fullNode}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() // Check we still have the 1st header (+header+). h, err := c.TrustedHeader(1) @@ -468,9 +461,9 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { valSet, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 2, Hash: diffHeader2.Hash(), @@ -478,12 +471,9 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() // Check we no longer have the invalid 1st header (+header+). h, err := c.TrustedHeader(1) @@ -509,18 +499,15 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { err = trustedStore.SaveSignedHeaderAndValidatorSet(h2, vals) require.NoError(t, err) - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() // Check we still have the 1st header (+header+). 
h, err := c.TrustedHeader(1) @@ -569,9 +556,9 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { valSet, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: header1.Hash(), @@ -579,12 +566,9 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() // Check we have swapped invalid 1st header (+header+) with correct one (+header1+). h, err := c.TrustedHeader(1) @@ -611,27 +595,22 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { } func TestClient_Update(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() // should result in downloading & verifying header #3 - err = c.Update(bTime.Add(2 * time.Hour)) - require.NoError(t, err) - - h, err := c.TrustedHeader(3) + h, err := c.Update(bTime.Add(2 * time.Hour)) assert.NoError(t, err) - require.NotNil(t, h) - assert.EqualValues(t, 3, h.Height) + if assert.NotNil(t, h) { + assert.EqualValues(t, 3, h.Height) + } valSet, _, err := c.TrustedValidatorSet(3) assert.NoError(t, err) @@ -641,19 +620,15 @@ func TestClient_Update(t *testing.T) { } func TestClient_Concurrency(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - err = c.Start() - require.NoError(t, err) - defer c.Stop() _, err = 
c.VerifyHeaderAtHeight(2, bTime.Add(2*time.Hour)) require.NoError(t, err) @@ -689,19 +664,18 @@ func TestClient_Concurrency(t *testing.T) { } func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, deadNode, []provider.Provider{fullNode, fullNode}, dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), - MaxRetryAttempts(1), + lite.Logger(log.TestingLogger()), + lite.MaxRetryAttempts(1), ) require.NoError(t, err) - err = c.Update(bTime.Add(2 * time.Hour)) + _, err = c.Update(bTime.Add(2 * time.Hour)) require.NoError(t, err) assert.NotEqual(t, c.Primary(), deadNode) @@ -710,64 +684,65 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { func TestClient_BackwardsVerification(t *testing.T) { { - c, err := NewClient( + trustHeader, _ := largeFullNode.SignedHeader(6) + c, err := lite.NewClient( chainID, - TrustOptions{ - Period: 1 * time.Hour, - Height: 3, - Hash: h3.Hash(), + lite.TrustOptions{ + Period: 4 * time.Minute, + Height: trustHeader.Height, + Hash: trustHeader.Hash(), }, - fullNode, - []provider.Provider{fullNode}, + largeFullNode, + []provider.Provider{largeFullNode}, dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - // 1) header is missing => expect no error - h, err := c.VerifyHeaderAtHeight(2, bTime.Add(1*time.Hour).Add(1*time.Second)) + // 1) verify before the trusted header using backwards => expect no error + h, err := c.VerifyHeaderAtHeight(5, bTime.Add(6*time.Minute)) require.NoError(t, err) if assert.NotNil(t, h) { - assert.EqualValues(t, 2, h.Height) + assert.EqualValues(t, 5, h.Height) } // 2) untrusted header is expired but trusted header is not => expect no error - h, err = c.VerifyHeaderAtHeight(1, bTime.Add(1*time.Hour).Add(1*time.Second)) + h, err = c.VerifyHeaderAtHeight(3, 
bTime.Add(8*time.Minute)) assert.NoError(t, err) assert.NotNil(t, h) // 3) already stored headers should return the header without error - h, err = c.VerifyHeaderAtHeight(2, bTime.Add(1*time.Hour).Add(1*time.Second)) + h, err = c.VerifyHeaderAtHeight(5, bTime.Add(6*time.Minute)) assert.NoError(t, err) assert.NotNil(t, h) - } - { - c, err := NewClient( - chainID, - TrustOptions{ - Period: 1 * time.Hour, - Height: 3, - Hash: h3.Hash(), - }, - fullNode, - []provider.Provider{fullNode}, - dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), - ) + + // 4a) First verify latest header + _, err = c.VerifyHeaderAtHeight(9, bTime.Add(9*time.Minute)) require.NoError(t, err) - // 3) trusted header has expired => expect error - _, err = c.VerifyHeaderAtHeight(1, bTime.Add(4*time.Hour).Add(1*time.Second)) + // 4b) Verify backwards using bisection => expect no error + _, err = c.VerifyHeaderAtHeight(7, bTime.Add(10*time.Minute)) + assert.NoError(t, err) + // shouldn't have verified this header in the process + _, err = c.TrustedHeader(8) assert.Error(t, err) + + // 5) trusted header has expired => expect error + _, err = c.VerifyHeaderAtHeight(1, bTime.Add(20*time.Minute)) + assert.Error(t, err) + + // 6) Try bisection method, but closest header (at 7) has expired + // so change to backwards => expect no error + _, err = c.VerifyHeaderAtHeight(8, bTime.Add(12*time.Minute)) + assert.NoError(t, err) + } { testCases := []struct { provider provider.Provider }{ { - // provides incorrect height + // 7) provides incorrect height mockp.New( chainID, map[int64]*types.SignedHeader{ @@ -780,7 +755,7 @@ func TestClient_BackwardsVerification(t *testing.T) { ), }, { - // provides incorrect hash + // 8) provides incorrect hash mockp.New( chainID, map[int64]*types.SignedHeader{ @@ -795,9 +770,9 @@ func TestClient_BackwardsVerification(t *testing.T) { } for _, tc := range testCases { - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + 
lite.TrustOptions{ Period: 1 * time.Hour, Height: 3, Hash: h3.Hash(), @@ -805,8 +780,7 @@ func TestClient_BackwardsVerification(t *testing.T) { tc.provider, []provider.Provider{tc.provider}, dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -822,7 +796,7 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { err := db.SaveSignedHeaderAndValidatorSet(h1, vals) require.NoError(t, err) - c, err := NewClientFromTrustedStore( + c, err := lite.NewClientFromTrustedStore( chainID, trustPeriod, deadNode, @@ -846,15 +820,14 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { } func TestNewClientErrorsIfAllWitnessesUnavailable(t *testing.T) { - _, err := NewClient( + _, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{deadNode, deadNode}, dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), - MaxRetryAttempts(1), + lite.Logger(log.TestingLogger()), + lite.MaxRetryAttempts(1), ) if assert.Error(t, err) { assert.Contains(t, err.Error(), "awaiting response from all witnesses exceeded dropout time") @@ -890,15 +863,14 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { }, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{badProvider1, badProvider2}, dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), - MaxRetryAttempts(1), + lite.Logger(log.TestingLogger()), + lite.MaxRetryAttempts(1), ) // witness should have behaved properly -> no error require.NoError(t, err) @@ -918,14 +890,13 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { } func TestClientTrustedValidatorSet(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - UpdatePeriod(0), - Logger(log.TestingLogger()), 
+ lite.Logger(log.TestingLogger()), ) require.NoError(t, err) diff --git a/lite2/doc.go b/lite2/doc.go index aa280e64f..f42aa64f1 100644 --- a/lite2/doc.go +++ b/lite2/doc.go @@ -65,33 +65,29 @@ Example usage: db, err := dbm.NewGoLevelDB("lite-client-db", dbDir) if err != nil { - // return err - t.Fatal(err) + // handle error } - c, err := NewClient( + + c, err := NewHTTPClient( chainID, TrustOptions{ Period: 504 * time.Hour, // 21 days Height: 100, Hash: header.Hash(), }, - httpp.New(chainID, "tcp://localhost:26657"), - []provider.Provider{httpp.New(chainID, "tcp://witness1:26657")}, - dbs.New(db, chainID), + "http://localhost:26657", + []string{"http://witness1:26657"}, + dbs.New(db, ""), ) - - err = c.Start() - if err != nil { - // return err - t.Fatal(err) - } - defer c.Stop() - - h, err := c.TrustedHeader(101) if err != nil { // handle error } - fmt.Println("got header", h) + + h, err := c.TrustedHeader(100) + if err != nil { + // handle error + } + fmt.Println("header", h) Check out other examples in example_test.go @@ -101,6 +97,18 @@ Verify function verifies a new header against some trusted header. See https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/verification.md for details. +There are two methods of verification: sequential and bisection + +Sequential uses the headers hashes and the validator sets to verify each adjacent header until +it reaches the target header. + +Bisection finds the middle header between a trusted and new header, reiterating the action until it +verifies a header. A cache of headers requested by the primary is kept such that when a +verification is made, and the light client tries again to verify the new header in the middle, +the light client does not need to ask for all the same headers again. + +refer to docs/imgs/light_client_bisection_alg.png + ## 3. Secure RPC proxy Tendermint RPC exposes a lot of info, but a malicious node could return any @@ -112,5 +120,8 @@ some other node. 
See https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html for usage example. +Or see +https://github.com/tendermint/spec/tree/master/spec/consensus/light-client +for the full spec */ package lite diff --git a/lite2/errors.go b/lite2/errors.go index d0b5d2d31..7bc70f698 100644 --- a/lite2/errors.go +++ b/lite2/errors.go @@ -28,3 +28,21 @@ type ErrNewValSetCantBeTrusted struct { func (e ErrNewValSetCantBeTrusted) Error() string { return fmt.Sprintf("cant trust new val set: %v", e.Reason) } + +// ErrInvalidHeader means the header either failed the basic validation or +// commit is not signed by 2/3+. +type ErrInvalidHeader struct { + Reason error +} + +func (e ErrInvalidHeader) Error() string { + return fmt.Sprintf("invalid header: %v", e.Reason) +} + +// errNoWitnesses means that there are not enough witnesses connected to +// continue running the light client. +type errNoWitnesses struct{} + +func (e errNoWitnesses) Error() string { + return fmt.Sprint("no witnesses connected. 
please reset light client") +} diff --git a/lite2/example_test.go b/lite2/example_test.go index 65997f906..0de5f1349 100644 --- a/lite2/example_test.go +++ b/lite2/example_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "fmt" @@ -11,6 +11,7 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/kvstore" + lite "github.com/tendermint/tendermint/lite2" "github.com/tendermint/tendermint/lite2/provider" httpp "github.com/tendermint/tendermint/lite2/provider/http" dbs "github.com/tendermint/tendermint/lite2/store/db" @@ -48,9 +49,9 @@ func ExampleClient_Update() { stdlog.Fatal(err) } - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 504 * time.Hour, // 21 days Height: 2, Hash: header.Hash(), @@ -58,18 +59,12 @@ func ExampleClient_Update() { primary, []provider.Provider{primary}, // NOTE: primary should not be used here dbs.New(db, chainID), - UpdatePeriod(0), // NOTE: value should be greater than zero // Logger(log.TestingLogger()), ) if err != nil { stdlog.Fatal(err) } - err = c.Start() - if err != nil { - stdlog.Fatal(err) - } defer func() { - c.Stop() c.Cleanup() }() @@ -78,17 +73,13 @@ func ExampleClient_Update() { // XXX: 30 * time.Minute clock drift is needed because a) Tendermint strips // monotonic component (see types/time/time.go) b) single instance is being // run. 
- err = c.Update(time.Now().Add(30 * time.Minute)) + // https://github.com/tendermint/tendermint/issues/4489 + h, err := c.Update(time.Now().Add(30 * time.Minute)) if err != nil { stdlog.Fatal(err) } - h, err := c.TrustedHeader(0) - if err != nil { - stdlog.Fatal(err) - } - - if h.Height > 2 { + if h != nil && h.Height > 2 { fmt.Println("successful update") } else { fmt.Println("update failed") @@ -127,17 +118,16 @@ func ExampleClient_VerifyHeaderAtHeight() { stdlog.Fatal(err) } - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 504 * time.Hour, // 21 days Height: 2, Hash: header.Hash(), }, primary, - []provider.Provider{primary}, // TODO: primary should not be used here + []provider.Provider{primary}, // NOTE: primary should not be used here dbs.New(db, chainID), - UpdatePeriod(0), // Logger(log.TestingLogger()), ) if err != nil { diff --git a/lite2/test_helpers.go b/lite2/helpers_test.go similarity index 71% rename from lite2/test_helpers.go rename to lite2/helpers_test.go index cc1bf4eb9..d9ab46f92 100644 --- a/lite2/test_helpers.go +++ b/lite2/helpers_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "time" @@ -160,3 +160,61 @@ func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTi Commit: pkz.signHeader(header, first, last), } } + +func (pkz privKeys) ChangeKeys(delta int) privKeys { + newKeys := pkz[delta:] + return newKeys.Extend(delta) +} + +// Generates the header and validator set to create a full entire mock node with blocks to height ( +// blockSize) and with variation in validator sets. BlockIntervals are in per minute. +// NOTE: Expected to have a large validator set size ~ 100 validators. 
+func GenMockNode( + chainID string, + blockSize int64, + valSize int, + valVariation float32, + bTime time.Time) ( + string, + map[int64]*types.SignedHeader, + map[int64]*types.ValidatorSet) { + + var ( + headers = make(map[int64]*types.SignedHeader, blockSize) + valset = make(map[int64]*types.ValidatorSet, blockSize) + keys = genPrivKeys(valSize) + totalVariation = valVariation + valVariationInt int + newKeys privKeys + ) + + valVariationInt = int(totalVariation) + totalVariation = -float32(valVariationInt) + newKeys = keys.ChangeKeys(valVariationInt) + + // genesis header and vals + lastHeader := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Minute), nil, + keys.ToValidators(2, 2), newKeys.ToValidators(2, 2), []byte("app_hash"), []byte("cons_hash"), + []byte("results_hash"), 0, len(keys)) + currentHeader := lastHeader + headers[1] = currentHeader + valset[1] = keys.ToValidators(2, 2) + keys = newKeys + + for height := int64(2); height <= blockSize; height++ { + totalVariation += valVariation + valVariationInt = int(totalVariation) + totalVariation = -float32(valVariationInt) + newKeys = keys.ChangeKeys(valVariationInt) + currentHeader = keys.GenSignedHeaderLastBlockID(chainID, height, bTime.Add(time.Duration(height)*time.Minute), + nil, + keys.ToValidators(2, 2), newKeys.ToValidators(2, 2), []byte("app_hash"), []byte("cons_hash"), + []byte("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()}) + headers[height] = currentHeader + valset[height] = keys.ToValidators(2, 2) + lastHeader = currentHeader + keys = newKeys + } + + return chainID, headers, valset +} diff --git a/lite2/provider/http/http.go b/lite2/provider/http/http.go index 130bf0a24..dff5e4d1d 100644 --- a/lite2/provider/http/http.go +++ b/lite2/provider/http/http.go @@ -7,6 +7,7 @@ import ( "github.com/tendermint/tendermint/lite2/provider" rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" 
"github.com/tendermint/tendermint/types" ) @@ -21,14 +22,14 @@ type SignStatusClient interface { // http provider uses an RPC client (or SignStatusClient more generally) to // obtain the necessary information. type http struct { - chainID string - client SignStatusClient + SignStatusClient // embed so interface can be converted to SignStatusClient for tests + chainID string } -// New creates a HTTP provider, which is using the rpcclient.HTTP +// New creates a HTTP provider, which is using the rpchttp.HTTP // client under the hood. func New(chainID, remote string) (provider.Provider, error) { - httpClient, err := rpcclient.NewHTTP(remote, "/websocket") + httpClient, err := rpchttp.New(remote, "/websocket") if err != nil { return nil, err } @@ -38,8 +39,8 @@ func New(chainID, remote string) (provider.Provider, error) { // NewWithClient allows you to provide custom SignStatusClient. func NewWithClient(chainID string, client SignStatusClient) provider.Provider { return &http{ - chainID: chainID, - client: client, + SignStatusClient: client, + chainID: chainID, } } @@ -49,7 +50,7 @@ func (p *http) ChainID() string { } func (p *http) String() string { - return fmt.Sprintf("http{%s}", p.client.Remote()) + return fmt.Sprintf("http{%s}", p.Remote()) } // SignedHeader fetches a SignedHeader at the given height and checks the @@ -60,7 +61,7 @@ func (p *http) SignedHeader(height int64) (*types.SignedHeader, error) { return nil, err } - commit, err := p.client.Commit(h) + commit, err := p.SignStatusClient.Commit(h) if err != nil { // TODO: standartise errors on the RPC side if strings.Contains(err.Error(), "height must be less than or equal") { @@ -90,7 +91,7 @@ func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) { } const maxPerPage = 100 - res, err := p.client.Validators(h, 0, maxPerPage) + res, err := p.SignStatusClient.Validators(h, 0, maxPerPage) if err != nil { // TODO: standartise errors on the RPC side if strings.Contains(err.Error(), "height must be 
less than or equal") { @@ -106,7 +107,7 @@ func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) { // Check if there are more validators. for len(res.Validators) == maxPerPage { - res, err = p.client.Validators(h, page, maxPerPage) + res, err = p.SignStatusClient.Validators(h, page, maxPerPage) if err != nil { return nil, err } diff --git a/lite2/provider/http/http_test.go b/lite2/provider/http/http_test.go index 1e5f4cb2b..73706434b 100644 --- a/lite2/provider/http/http_test.go +++ b/lite2/provider/http/http_test.go @@ -1,4 +1,4 @@ -package http +package http_test import ( "os" @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" + litehttp "github.com/tendermint/tendermint/lite2/provider/http" rpcclient "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" @@ -33,12 +34,12 @@ func TestProvider(t *testing.T) { } chainID := genDoc.ChainID t.Log("chainID:", chainID) - p, err := New(chainID, rpcAddr) + p, err := litehttp.New(chainID, rpcAddr) require.Nil(t, err) require.NotNil(t, p) // let it produce some blocks - err = rpcclient.WaitForHeight(p.(*http).client, 6, nil) + err = rpcclient.WaitForHeight(p.(rpcclient.StatusClient), 6, nil) require.Nil(t, err) // let's get the highest block diff --git a/lite2/rpc/client.go b/lite2/rpc/client.go index b59d459bc..abd15adc2 100644 --- a/lite2/rpc/client.go +++ b/lite2/rpc/client.go @@ -322,20 +322,8 @@ func (c *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { } func (c *Client) updateLiteClientIfNeededTo(height int64) (*types.SignedHeader, error) { - lastTrustedHeight, err := c.lc.LastTrustedHeight() - if err != nil { - return nil, errors.Wrap(err, "LastTrustedHeight") - } - - if lastTrustedHeight < height { - return c.lc.VerifyHeaderAtHeight(height, time.Now()) - } - - h, err := c.lc.TrustedHeader(height) - if err != nil { - return nil, 
errors.Wrapf(err, "TrustedHeader(#%d)", height) - } - return h, nil + h, err := c.lc.VerifyHeaderAtHeight(height, time.Now()) + return h, errors.Wrapf(err, "failed to update light client to %d", height) } func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { diff --git a/lite2/setup.go b/lite2/setup.go new file mode 100644 index 000000000..50a4a9d21 --- /dev/null +++ b/lite2/setup.go @@ -0,0 +1,77 @@ +package lite + +import ( + "time" + + "github.com/tendermint/tendermint/lite2/provider" + "github.com/tendermint/tendermint/lite2/provider/http" + "github.com/tendermint/tendermint/lite2/store" +) + +// NewHTTPClient initiates an instance of a lite client using HTTP addresses +// for both the primary provider and witnesses of the lite client. A trusted +// header and hash must be passed to initialize the client. +// +// See all Option(s) for the additional configuration. +// See NewClient. +func NewHTTPClient( + chainID string, + trustOptions TrustOptions, + primaryAddress string, + witnessesAddresses []string, + trustedStore store.Store, + options ...Option) (*Client, error) { + + providers, err := providersFromAddresses(append(witnessesAddresses, primaryAddress), chainID) + if err != nil { + return nil, err + } + + return NewClient( + chainID, + trustOptions, + providers[len(providers)-1], + providers[:len(providers)-1], + trustedStore, + options...) +} + +// NewHTTPClientFromTrustedStore initiates an instance of a lite client using +// HTTP addresses for both the primary provider and witnesses and uses a +// trusted store as the root of trust. +// +// See all Option(s) for the additional configuration. +// See NewClientFromTrustedStore. 
+func NewHTTPClientFromTrustedStore( + chainID string, + trustingPeriod time.Duration, + primaryAddress string, + witnessesAddresses []string, + trustedStore store.Store, + options ...Option) (*Client, error) { + + providers, err := providersFromAddresses(append(witnessesAddresses, primaryAddress), chainID) + if err != nil { + return nil, err + } + + return NewClientFromTrustedStore( + chainID, + trustingPeriod, + providers[len(providers)-1], + providers[:len(providers)-1], + trustedStore, + options...) +} + +func providersFromAddresses(addrs []string, chainID string) ([]provider.Provider, error) { + providers := make([]provider.Provider, len(addrs)) + for idx, address := range addrs { + p, err := http.New(chainID, address) + if err != nil { + return nil, err + } + providers[idx] = p + } + return providers, nil +} diff --git a/lite2/store/db/db.go b/lite2/store/db/db.go index d405b9865..8d37ace9f 100644 --- a/lite2/store/db/db.go +++ b/lite2/store/db/db.go @@ -203,18 +203,18 @@ func (s *dbs) FirstSignedHeaderHeight() (int64, error) { return -1, nil } -// SignedHeaderAfter iterates over headers until it finds a header after one at -// height. It returns ErrSignedHeaderNotFound if no such header exists. +// SignedHeaderBefore iterates over headers until it finds a header before +// the given height. It returns ErrSignedHeaderNotFound if no such header exists. // // Safe for concurrent use by multiple goroutines. 
-func (s *dbs) SignedHeaderAfter(height int64) (*types.SignedHeader, error) { +func (s *dbs) SignedHeaderBefore(height int64) (*types.SignedHeader, error) { if height <= 0 { panic("negative or zero height") } - itr, err := s.db.Iterator( - s.shKey(height+1), - append(s.shKey(1<<63-1), byte(0x00)), + itr, err := s.db.ReverseIterator( + s.shKey(1), + s.shKey(height), ) if err != nil { panic(err) diff --git a/lite2/store/db/db_test.go b/lite2/store/db/db_test.go index 2b82de8f3..ce45f3bcf 100644 --- a/lite2/store/db/db_test.go +++ b/lite2/store/db/db_test.go @@ -76,19 +76,19 @@ func Test_SaveSignedHeaderAndValidatorSet(t *testing.T) { assert.Nil(t, valSet) } -func Test_SignedHeaderAfter(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_SignedHeaderAfter") +func Test_SignedHeaderBefore(t *testing.T) { + dbStore := New(dbm.NewMemDB(), "Test_SignedHeaderBefore") assert.Panics(t, func() { - dbStore.SignedHeaderAfter(0) - dbStore.SignedHeaderAfter(100) + _, _ = dbStore.SignedHeaderBefore(0) + _, _ = dbStore.SignedHeaderBefore(100) }) err := dbStore.SaveSignedHeaderAndValidatorSet( &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.ValidatorSet{}) require.NoError(t, err) - h, err := dbStore.SignedHeaderAfter(1) + h, err := dbStore.SignedHeaderBefore(3) require.NoError(t, err) if assert.NotNil(t, h) { assert.EqualValues(t, 2, h.Height) diff --git a/lite2/store/store.go b/lite2/store/store.go index 7ea6b9c6b..0d36c48b6 100644 --- a/lite2/store/store.go +++ b/lite2/store/store.go @@ -41,10 +41,10 @@ type Store interface { // If the store is empty, -1 and nil error are returned. FirstSignedHeaderHeight() (int64, error) - // SignedHeaderAfter returns the SignedHeader after the certain height. + // SignedHeaderBefore returns the SignedHeader before a certain height. // // height must be > 0 && <= LastSignedHeaderHeight. 
- SignedHeaderAfter(height int64) (*types.SignedHeader, error) + SignedHeaderBefore(height int64) (*types.SignedHeader, error) // Prune removes headers & the associated validator sets when Store reaches a // defined size (number of header & validator set pairs). diff --git a/lite2/verifier.go b/lite2/verifier.go index 250c8b204..1ef54677b 100644 --- a/lite2/verifier.go +++ b/lite2/verifier.go @@ -10,10 +10,6 @@ import ( "github.com/tendermint/tendermint/types" ) -const ( - maxClockDrift = 10 * time.Second -) - var ( // DefaultTrustLevel - new header can be trusted if at least one correct // validator signed it. @@ -23,13 +19,16 @@ var ( // VerifyNonAdjacent verifies non-adjacent untrustedHeader against // trustedHeader. It ensures that: // -// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned); -// b) untrustedHeader is valid; +// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) +// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) // c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals) -// signed correctly (if not, ErrNewValSetCantBeTrusted is returned); -// d) more than 2/3 of untrustedVals have signed h2 (if not, -// ErrNotEnoughVotingPowerSigned is returned); +// signed correctly (if not, ErrNewValSetCantBeTrusted is returned) +// d) more than 2/3 of untrustedVals have signed h2 +// (otherwise, ErrInvalidHeader is returned) // e) headers are non-adjacent. +// +// maxClockDrift defines how much untrustedHeader.Time can drift into the +// future. 
func VerifyNonAdjacent( chainID string, trustedHeader *types.SignedHeader, // height=X @@ -38,6 +37,7 @@ func VerifyNonAdjacent( untrustedVals *types.ValidatorSet, // height=Y trustingPeriod time.Duration, now time.Time, + maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { if untrustedHeader.Height == trustedHeader.Height+1 { @@ -48,8 +48,12 @@ func VerifyNonAdjacent( return ErrOldHeaderExpired{trustedHeader.Time.Add(trustingPeriod), now} } - if err := verifyNewHeaderAndVals(chainID, untrustedHeader, untrustedVals, trustedHeader, now); err != nil { - return err + if err := verifyNewHeaderAndVals( + chainID, + untrustedHeader, untrustedVals, + trustedHeader, + now, maxClockDrift); err != nil { + return ErrInvalidHeader{err} } // Ensure that +`trustLevel` (default 1/3) or more of last trusted validators signed correctly. @@ -67,11 +71,11 @@ func VerifyNonAdjacent( // Ensure that +2/3 of new validators signed correctly. // // NOTE: this should always be the last check because untrustedVals can be - // intentionaly made very large to DOS the light client. not the case for + // intentionally made very large to DOS the light client. not the case for // VerifyAdjacent, where validator set is known in advance. if err := untrustedVals.VerifyCommit(chainID, untrustedHeader.Commit.BlockID, untrustedHeader.Height, untrustedHeader.Commit); err != nil { - return err + return ErrInvalidHeader{err} } return nil @@ -80,19 +84,23 @@ func VerifyNonAdjacent( // VerifyAdjacent verifies directly adjacent untrustedHeader against // trustedHeader. 
It ensures that: // -// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned); -// b) untrustedHeader is valid; -// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash; -// d) more than 2/3 of new validators (untrustedVals) have signed h2 (if not, -// ErrNotEnoughVotingPowerSigned is returned); +// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) +// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) +// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash +// d) more than 2/3 of new validators (untrustedVals) have signed h2 +// (otherwise, ErrInvalidHeader is returned) // e) headers are adjacent. +// +// maxClockDrift defines how much untrustedHeader.Time can drift into the +// future. func VerifyAdjacent( chainID string, trustedHeader *types.SignedHeader, // height=X untrustedHeader *types.SignedHeader, // height=X+1 untrustedVals *types.ValidatorSet, // height=X+1 trustingPeriod time.Duration, - now time.Time) error { + now time.Time, + maxClockDrift time.Duration) error { if untrustedHeader.Height != trustedHeader.Height+1 { return errors.New("headers must be adjacent in height") @@ -102,8 +110,12 @@ func VerifyAdjacent( return ErrOldHeaderExpired{trustedHeader.Time.Add(trustingPeriod), now} } - if err := verifyNewHeaderAndVals(chainID, untrustedHeader, untrustedVals, trustedHeader, now); err != nil { - return err + if err := verifyNewHeaderAndVals( + chainID, + untrustedHeader, untrustedVals, + trustedHeader, + now, maxClockDrift); err != nil { + return ErrInvalidHeader{err} } // Check the validator hashes are the same @@ -118,7 +130,7 @@ func VerifyAdjacent( // Ensure that +2/3 of new validators signed correctly. 
if err := untrustedVals.VerifyCommit(chainID, untrustedHeader.Commit.BlockID, untrustedHeader.Height, untrustedHeader.Commit); err != nil { - return err + return ErrInvalidHeader{err} } return nil @@ -133,14 +145,15 @@ func Verify( untrustedVals *types.ValidatorSet, // height=Y trustingPeriod time.Duration, now time.Time, + maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { if untrustedHeader.Height != trustedHeader.Height+1 { return VerifyNonAdjacent(chainID, trustedHeader, trustedVals, untrustedHeader, untrustedVals, - trustingPeriod, now, trustLevel) + trustingPeriod, now, maxClockDrift, trustLevel) } - return VerifyAdjacent(chainID, trustedHeader, untrustedHeader, untrustedVals, trustingPeriod, now) + return VerifyAdjacent(chainID, trustedHeader, untrustedHeader, untrustedVals, trustingPeriod, now, maxClockDrift) } func verifyNewHeaderAndVals( @@ -148,7 +161,8 @@ func verifyNewHeaderAndVals( untrustedHeader *types.SignedHeader, untrustedVals *types.ValidatorSet, trustedHeader *types.SignedHeader, - now time.Time) error { + now time.Time, + maxClockDrift time.Duration) error { if err := untrustedHeader.ValidateBasic(chainID); err != nil { return errors.Wrap(err, "untrustedHeader.ValidateBasic failed") @@ -174,9 +188,10 @@ func verifyNewHeaderAndVals( } if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { - return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X)", + return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d", untrustedHeader.ValidatorsHash, untrustedVals.Hash(), + untrustedHeader.Height, ) } @@ -200,3 +215,34 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time expirationTime := h.Time.Add(trustingPeriod) return !expirationTime.After(now) } + +// VerifyBackwards verifies an untrusted header with a height one less than +// that of an adjacent trusted header. 
It ensures that: +// +// a) untrusted header is valid +// b) untrusted header has a time before the trusted header +// c) that the LastBlockID hash of the trusted header is the same as the hash +// of the untrusted header +// +// For any of these cases ErrInvalidHeader is returned. +func VerifyBackwards(chainID string, untrustedHeader, trustedHeader *types.SignedHeader) error { + if err := untrustedHeader.ValidateBasic(chainID); err != nil { + return ErrInvalidHeader{err} + } + + if !untrustedHeader.Time.Before(trustedHeader.Time) { + return ErrInvalidHeader{ + errors.Errorf("expected older header time %v to be before new header time %v", + untrustedHeader.Time, + trustedHeader.Time)} + } + + if !bytes.Equal(untrustedHeader.Hash(), trustedHeader.LastBlockID.Hash) { + return ErrInvalidHeader{ + errors.Errorf("older header hash %X does not match trusted header's last block %X", + untrustedHeader.Hash(), + trustedHeader.LastBlockID.Hash)} + } + + return nil +} diff --git a/lite2/verifier_test.go b/lite2/verifier_test.go index 241d33b05..5a207321d 100644 --- a/lite2/verifier_test.go +++ b/lite2/verifier_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "fmt" @@ -8,9 +8,14 @@ import ( "github.com/stretchr/testify/assert" tmmath "github.com/tendermint/tendermint/libs/math" + lite "github.com/tendermint/tendermint/lite2" "github.com/tendermint/tendermint/types" ) +const ( + maxClockDrift = 10 * time.Second +) + func TestVerifyAdjacentHeaders(t *testing.T) { const ( chainID = "TestVerifyAdjacentHeaders" @@ -113,7 +118,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}, + lite.ErrInvalidHeader{Reason: types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // vals does not match with what we have -> error @@ -151,7 +156,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { for i, tc := range testCases { tc := tc t.Run(fmt.Sprintf("#%d", i), func(t 
*testing.T) { - err := VerifyAdjacent(chainID, header, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now) + err := lite.VerifyAdjacent(chainID, header, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, maxClockDrift) switch { case tc.expErr != nil && assert.Error(t, err): assert.Equal(t, tc.expErr, err) @@ -227,7 +232,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}, + lite.ErrInvalidHeader{types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // 3/3 new vals signed, 2/3 old vals present -> no error @@ -257,7 +262,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { lessThanOneThirdVals, 3 * time.Hour, bTime.Add(2 * time.Hour), - ErrNewValSetCantBeTrusted{types.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, + lite.ErrNewValSetCantBeTrusted{types.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, "", }, } @@ -265,8 +270,9 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { for i, tc := range testCases { tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - err := VerifyNonAdjacent(chainID, header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, - DefaultTrustLevel) + err := lite.VerifyNonAdjacent(chainID, header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, + tc.now, maxClockDrift, + lite.DefaultTrustLevel) switch { case tc.expErr != nil && assert.Error(t, err): @@ -295,7 +301,7 @@ func TestVerifyReturnsErrorIfTrustLevelIsInvalid(t *testing.T) { []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) ) - err := Verify(chainID, header, vals, header, vals, 2*time.Hour, time.Now(), + err := lite.Verify(chainID, header, vals, header, vals, 2*time.Hour, time.Now(), maxClockDrift, tmmath.Fraction{Numerator: 2, Denominator: 1}) assert.Error(t, err) } @@ -322,7 +328,7 @@ func TestValidateTrustLevel(t *testing.T) { } for _, tc := range testCases { - err := ValidateTrustLevel(tc.lvl) + err := 
lite.ValidateTrustLevel(tc.lvl) if !tc.valid { assert.Error(t, err) } else { diff --git a/mempool/reactor.go b/mempool/reactor.go index 161fc7212..1ae3592a0 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -110,7 +110,7 @@ func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor { mempool: mempool, ids: newMempoolIDs(), } - memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR) + memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) return memR } diff --git a/node/codec.go b/node/codec.go index 7607b0dd0..e172b9696 100644 --- a/node/codec.go +++ b/node/codec.go @@ -2,6 +2,7 @@ package node import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/node/node.go b/node/node.go index 485709120..70e725d42 100644 --- a/node/node.go +++ b/node/node.go @@ -17,9 +17,12 @@ import ( "github.com/rs/cors" amino "github.com/tendermint/go-amino" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" bcv0 "github.com/tendermint/tendermint/blockchain/v0" bcv1 "github.com/tendermint/tendermint/blockchain/v1" + bcv2 "github.com/tendermint/tendermint/blockchain/v2" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" cs "github.com/tendermint/tendermint/consensus" @@ -45,7 +48,6 @@ import ( "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) //------------------------------------------------------------------------------ @@ -309,12 +311,12 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL } } -func onlyValidatorIsUs(state sm.State, privVal types.PrivValidator) bool { +func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { if state.Validators.Size() > 1 { return false } addr, _ := state.Validators.GetByIndex(0) - return 
bytes.Equal(privVal.GetPubKey().Address(), addr) + return bytes.Equal(pubKey.Address(), addr) } func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, @@ -365,6 +367,8 @@ func createBlockchainReactor(config *cfg.Config, bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) case "v1": bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + case "v2": + bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) default: return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) } @@ -613,17 +617,16 @@ func NewNode(config *cfg.Config, } } - pubKey := privValidator.GetPubKey() - if pubKey == nil { - // TODO: GetPubKey should return errors - https://github.com/tendermint/tendermint/issues/3602 - return nil, errors.New("could not retrieve public key from private validator") + pubKey, err := privValidator.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") } logNodeStartupInfo(state, pubKey, logger, consensusLogger) // Decide whether to fast-sync or not // We don't fast-sync when the only validator is us. 
- fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, privValidator) + fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) @@ -856,7 +859,10 @@ func (n *Node) ConfigureRPC() { rpccore.SetEvidencePool(n.evidencePool) rpccore.SetP2PPeers(n.sw) rpccore.SetP2PTransport(n) - pubKey := n.privValidator.GetPubKey() + pubKey, err := n.privValidator.GetPubKey() + if err != nil { + panic(err) + } rpccore.SetPubKey(pubKey) rpccore.SetGenesisDoc(n.genesisDoc) rpccore.SetProxyAppQuery(n.proxyApp.Query()) @@ -1091,6 +1097,8 @@ func makeNodeInfo( bcChannel = bcv0.BlockchainChannel case "v1": bcChannel = bcv1.BlockchainChannel + case "v2": + bcChannel = bcv2.BlockchainChannel default: return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) } diff --git a/node/node_test.go b/node/node_test.go index ec8510d58..a9a43a362 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" @@ -27,7 +29,6 @@ import ( "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) func TestNodeStartStop(t *testing.T) { diff --git a/p2p/codec.go b/p2p/codec.go index 6368b7d68..463276318 100644 --- a/p2p/codec.go +++ b/p2p/codec.go @@ -2,6 +2,7 @@ package p2p import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/p2p/conn/codec.go b/p2p/conn/codec.go index 149a09638..0625c7a38 100644 --- a/p2p/conn/codec.go +++ b/p2p/conn/codec.go @@ -2,6 +2,7 @@ package conn import ( amino 
"github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 1c2088636..0436e115c 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -16,6 +16,7 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + flow "github.com/tendermint/tendermint/libs/flowrate" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 7daa6076d..29d29fc6e 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 5ac3b8509..9044d73be 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" diff --git a/p2p/key_test.go b/p2p/key_test.go index e0579dde6..6f8e9b0f8 100644 --- a/p2p/key_test.go +++ b/p2p/key_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + tmrand "github.com/tendermint/tendermint/libs/rand" ) diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index cfce12bd1..40f918e9f 100644 --- a/p2p/mock/reactor.go +++ b/p2p/mock/reactor.go @@ -12,7 +12,7 @@ type Reactor struct { func NewReactor() *Reactor { r := &Reactor{} - r.BaseReactor = *p2p.NewBaseReactor("Reactor", r) + r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r) r.SetLogger(log.TestingLogger()) return r } diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go index 6937affb8..8896efe1d 100644 --- 
a/p2p/node_info_test.go +++ b/p2p/node_info_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/ed25519" ) diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index dbba71345..c9c372638 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -59,9 +59,12 @@ type AddrBook interface { // Mark address MarkGood(p2p.ID) MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress) + MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list + // Add bad peers back to addrBook + ReinstateBadPeers() IsGood(*p2p.NetAddress) bool + IsBanned(*p2p.NetAddress) bool // Send a selection of addresses to peers GetSelection() []*p2p.NetAddress @@ -87,6 +90,7 @@ type addrBook struct { ourAddrs map[string]struct{} privateIDs map[p2p.ID]struct{} addrLookup map[p2p.ID]*knownAddress // new & old + badPeers map[p2p.ID]*knownAddress // blacklisted peers bucketsOld []map[string]*knownAddress bucketsNew []map[string]*knownAddress nOld int @@ -108,6 +112,7 @@ func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { ourAddrs: make(map[string]struct{}), privateIDs: make(map[p2p.ID]struct{}), addrLookup: make(map[p2p.ID]*knownAddress), + badPeers: make(map[p2p.ID]*knownAddress), filePath: filePath, routabilityStrict: routabilityStrict, } @@ -205,12 +210,7 @@ func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { a.mtx.Lock() defer a.mtx.Unlock() - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - a.Logger.Info("Remove address from book", "addr", addr) - a.removeFromAllBuckets(ka) + a.removeAddress(addr) } // IsGood returns true if peer was ever marked as good and haven't @@ -222,6 +222,15 @@ func (a *addrBook) IsGood(addr *p2p.NetAddress) bool { return a.addrLookup[addr.ID].isOld() } +// IsBanned returns true if the peer is currently banned +func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool { + a.mtx.Lock() + _, ok := a.badPeers[addr.ID] + a.mtx.Unlock() + + return ok +} + // 
HasAddress returns true if the address is in the book. func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool { a.mtx.Lock() @@ -324,10 +333,28 @@ func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { ka.markAttempt() } -// MarkBad implements AddrBook. Currently it just ejects the address. -// TODO: black list for some amount of time -func (a *addrBook) MarkBad(addr *p2p.NetAddress) { - a.RemoveAddress(addr) +// MarkBad implements AddrBook. Kicks address out from book, places +// the address in the badPeers pool. +func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { + a.mtx.Lock() + defer a.mtx.Unlock() + + if a.addBadPeer(addr, banTime) { + a.removeAddress(addr) + } +} + +func (a *addrBook) ReinstateBadPeers() { + a.mtx.Lock() + defer a.mtx.Unlock() + for _, ka := range a.badPeers { + if !ka.isBanned() { + bucket := a.calcNewBucket(ka.Addr, ka.Src) + a.addToNewBucket(ka, bucket) + delete(a.badPeers, ka.ID()) + a.Logger.Info("Reinstated address", "addr", ka.Addr) + } + } } // GetSelection implements AddrBook. 
@@ -592,6 +619,10 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err} } + if _, ok := a.badPeers[addr.ID]; ok { + return ErrAddressBanned{addr} + } + if _, ok := a.privateIDs[addr.ID]; ok { return ErrAddrBookPrivate{addr} } @@ -725,6 +756,32 @@ func (a *addrBook) moveToOld(ka *knownAddress) { } } +func (a *addrBook) removeAddress(addr *p2p.NetAddress) { + ka := a.addrLookup[addr.ID] + if ka == nil { + return + } + a.Logger.Info("Remove address from book", "addr", addr) + a.removeFromAllBuckets(ka) +} + +func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { + // check it exists in addrbook + ka := a.addrLookup[addr.ID] + // check address is not already there + if ka == nil { + return false + } + + if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer { + // add to bad peer list + ka.ban(banTime) + a.badPeers[addr.ID] = ka + a.Logger.Info("Add address to blacklist", "addr", addr) + } + return true +} + //--------------------------------------------------------------------- // calculate bucket placements diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index 4942b403b..739fff185 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -7,6 +7,7 @@ import ( "math" "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -60,15 +61,15 @@ func TestAddrBookSaveLoad(t *testing.T) { defer deleteTempFile(fname) // 0 addresses - book := NewAddrBook(fname, true).(*addrBook) + book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) - book.saveToFile(fname) + book.Save() - book = NewAddrBook(fname, true).(*addrBook) + book = NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) - book.loadFromFile(fname) + book.Start() - assert.Zero(t, book.Size()) + assert.True(t, book.Empty()) // 100 addresses randAddrs := randNetAddressPairs(t, 100) @@ -78,11 +79,11 @@ func 
TestAddrBookSaveLoad(t *testing.T) { } assert.Equal(t, 100, book.Size()) - book.saveToFile(fname) + book.Save() - book = NewAddrBook(fname, true).(*addrBook) + book = NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) - book.loadFromFile(fname) + book.Start() assert.Equal(t, 100, book.Size()) } @@ -93,19 +94,15 @@ func TestAddrBookLookup(t *testing.T) { randAddrs := randNetAddressPairs(t, 100) - book := NewAddrBook(fname, true).(*addrBook) + book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) for _, addrSrc := range randAddrs { addr := addrSrc.addr src := addrSrc.src book.AddAddress(addr, src) - ka := book.addrLookup[addr.ID] - assert.NotNil(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) - - if !(ka.Addr.Equals(addr) && ka.Src.Equals(src)) { - t.Fatalf("KnownAddress doesn't match addr & src") - } + ka := book.HasAddress(addr) + assert.True(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) } } @@ -347,7 +344,7 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) { } } - got, expected := int((float64(good)/float64(len(selection)))*100), (100 - biasTowardsNewAddrs) + got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs // compute some slack to protect against small differences due to rounding: slack := int(math.Round(float64(100) / float64(len(selection)))) @@ -400,6 +397,33 @@ func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []st return addrs, private } +func TestBanBadPeers(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + + addr := randIPv4Address(t) + _ = book.AddAddress(addr, addr) + + book.MarkBad(addr, 1*time.Second) + // addr should not be reachable + assert.False(t, book.HasAddress(addr)) + assert.True(t, book.IsBanned(addr)) + + err := book.AddAddress(addr, addr) + // book should not add address from the blacklist 
+ assert.Error(t, err) + + time.Sleep(1 * time.Second) + book.ReinstateBadPeers() + // address should be reinstated in the new bucket + assert.EqualValues(t, 1, book.Size()) + assert.True(t, book.HasAddress(addr)) + assert.False(t, book.IsGood(addr)) +} + func TestAddrBookEmpty(t *testing.T) { fname := createTempFileName("addrbook_test") defer deleteTempFile(fname) diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 911389a9e..1fc54ea50 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -63,3 +63,12 @@ type ErrAddrBookInvalidAddr struct { func (err ErrAddrBookInvalidAddr) Error() string { return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) } + +// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used +type ErrAddressBanned struct { + Addr *p2p.NetAddress +} + +func (err ErrAddressBanned) Error() string { + return fmt.Sprintf("Address: %v is currently banned", err.Addr) +} diff --git a/p2p/pex/known_address.go b/p2p/pex/known_address.go index af40d6ff0..e98a9e97e 100644 --- a/p2p/pex/known_address.go +++ b/p2p/pex/known_address.go @@ -16,6 +16,7 @@ type knownAddress struct { BucketType byte `json:"bucket_type"` LastAttempt time.Time `json:"last_attempt"` LastSuccess time.Time `json:"last_success"` + LastBanTime time.Time `json:"last_ban_time"` } func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { @@ -54,6 +55,16 @@ func (ka *knownAddress) markGood() { ka.LastSuccess = now } +func (ka *knownAddress) ban(banTime time.Duration) { + if ka.LastBanTime.Before(time.Now().Add(banTime)) { + ka.LastBanTime = time.Now().Add(banTime) + } +} + +func (ka *knownAddress) isBanned() bool { + return ka.LastBanTime.After(time.Now()) +} + func (ka *knownAddress) addBucketRef(bucketIdx int) int { for _, bucket := range ka.Buckets { if bucket == bucketIdx { diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 3a3d2d7de..192e75fa1 100644 --- a/p2p/pex/pex_reactor.go +++ 
b/p2p/pex/pex_reactor.go @@ -8,7 +8,8 @@ import ( "github.com/pkg/errors" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/cmap" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/rand" @@ -50,6 +51,9 @@ const ( // Especially in the beginning, node should have more trusted peers than // untrusted. biasToSelectNewPeers = 30 // 70 to select good peers + + // if a peer is marked bad, it will be banned for at least this time period + defaultBanTime = 24 * time.Hour ) type errMaxAttemptsToDial struct { @@ -137,7 +141,7 @@ func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { lastReceivedRequests: cmap.NewCMap(), crawlPeerInfos: make(map[p2p.ID]crawlPeerInfo), } - r.BaseReactor = *p2p.NewBaseReactor("Reactor", r) + r.BaseReactor = *p2p.NewBaseReactor("PEX", r) return r } @@ -494,6 +498,12 @@ func (r *Reactor) ensurePeers() { } if r.book.NeedMoreAddrs() { + // Check if banned nodes can be reinstated + r.book.ReinstateBadPeers() + } + + if r.book.NeedMoreAddrs() { + // 1) Pick a random peer and ask for more. peers := r.Switch.Peers().List() peersCount := len(peers) @@ -525,11 +535,7 @@ func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDial func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { attempts, lastDialed := r.dialAttemptsInfo(addr) if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { - // TODO(melekes): have a blacklist in the addrbook with peers whom we've - // failed to connect to. Then we can clean up attemptsToDial, which acts as - // a blacklist currently. 
- // https://github.com/tendermint/tendermint/issues/3572 - r.book.MarkBad(addr) + r.book.MarkBad(addr, defaultBanTime) return errMaxAttemptsToDial{} } @@ -741,7 +747,7 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { // TODO: detect more "bad peer" scenarios switch err.(type) { case p2p.ErrSwitchAuthenticationFailure: - book.MarkBad(addr) + book.MarkBad(addr, defaultBanTime) default: book.MarkAttempt(addr) } diff --git a/p2p/trust/store.go b/p2p/trust/store.go index 502c88f90..166b26b1c 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -9,8 +9,9 @@ import ( "sync" "time" - "github.com/tendermint/tendermint/libs/service" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/service" ) const defaultStorePeriodicSaveInterval = 1 * time.Minute diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index 76dbaac1c..1cd83916c 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/log" ) func TestTrustMetricStoreSaveLoad(t *testing.T) { diff --git a/privval/codec.go b/privval/codec.go index 9edcc7741..d1f2eafa2 100644 --- a/privval/codec.go +++ b/privval/codec.go @@ -2,6 +2,7 @@ package privval import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/privval/file.go b/privval/file.go index c31e95a6a..5f07ac525 100644 --- a/privval/file.go +++ b/privval/file.go @@ -237,8 +237,8 @@ func (pv *FilePV) GetAddress() types.Address { // GetPubKey returns the public key of the validator. // Implements PrivValidator. 
-func (pv *FilePV) GetPubKey() crypto.PubKey { - return pv.Key.PubKey +func (pv *FilePV) GetPubKey() (crypto.PubKey, error) { + return pv.Key.PubKey, nil } // SignVote signs a canonical representation of the vote, along with the diff --git a/privval/file_deprecated_test.go b/privval/file_deprecated_test.go index 498c04bd4..300a09c14 100644 --- a/privval/file_deprecated_test.go +++ b/privval/file_deprecated_test.go @@ -61,7 +61,9 @@ func assertEqualPV(t *testing.T, oldPV *privval.OldFilePV, newPV *privval.FilePV assert.Equal(t, oldPV.Address, newPV.Key.Address) assert.Equal(t, oldPV.Address, newPV.GetAddress()) assert.Equal(t, oldPV.PubKey, newPV.Key.PubKey) - assert.Equal(t, oldPV.PubKey, newPV.GetPubKey()) + npv, err := newPV.GetPubKey() + require.NoError(t, err) + assert.Equal(t, oldPV.PubKey, npv) assert.Equal(t, oldPV.PrivKey, newPV.Key.PrivKey) assert.Equal(t, oldPV.LastHeight, newPV.LastSignState.Height) diff --git a/privval/file_test.go b/privval/file_test.go index 38f6e6fe3..343131e1a 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" diff --git a/privval/messages.go b/privval/messages.go index c172a5ea1..fa7a0b09d 100644 --- a/privval/messages.go +++ b/privval/messages.go @@ -2,6 +2,7 @@ package privval import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" ) diff --git a/privval/signer_client.go b/privval/signer_client.go index 0885ee4aa..3e69c6c08 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -1,6 +1,7 @@ package privval import ( + "fmt" "time" "github.com/pkg/errors" @@ -66,25 +67,26 @@ func (sc *SignerClient) Ping() error { } // GetPubKey retrieves a public key from a remote signer -func (sc 
*SignerClient) GetPubKey() crypto.PubKey { +// returns an error if client is not able to provide the key +func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { response, err := sc.endpoint.SendRequest(&PubKeyRequest{}) if err != nil { sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", err) - return nil + return nil, errors.Wrap(err, "send") } pubKeyResp, ok := response.(*PubKeyResponse) if !ok { sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != PubKeyResponse") - return nil + return nil, errors.Errorf("unexpected response type %T", response) } if pubKeyResp.Error != nil { sc.endpoint.Logger.Error("failed to get private validator's public key", "err", pubKeyResp.Error) - return nil + return nil, fmt.Errorf("remote error: %w", pubKeyResp.Error) } - return pubKeyResp.PubKey + return pubKeyResp.PubKey, nil } // SignVote requests a remote signer to sign a vote diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 1776e6efe..43631ff8d 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -74,15 +74,20 @@ func TestSignerGetPubKey(t *testing.T) { t.Cleanup(func() { tc.signerServer.Stop() }) t.Cleanup(func() { tc.signerClient.Close() }) - pubKey := tc.signerClient.GetPubKey() - expectedPubKey := tc.mockPV.GetPubKey() + pubKey, err := tc.signerClient.GetPubKey() + require.NoError(t, err) + expectedPubKey, err := tc.mockPV.GetPubKey() + require.NoError(t, err) assert.Equal(t, expectedPubKey, pubKey) - addr := tc.signerClient.GetPubKey().Address() - expectedAddr := tc.mockPV.GetPubKey().Address() + pubKey, err = tc.signerClient.GetPubKey() + require.NoError(t, err) + expectedpk, err := tc.mockPV.GetPubKey() + require.NoError(t, err) + expectedAddr := expectedpk.Address() - assert.Equal(t, expectedAddr, addr) + assert.Equal(t, expectedAddr, pubKey.Address()) } } diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index 2562764ba..c658abdfd 100644 --- 
a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -17,9 +17,13 @@ func DefaultValidationRequestHandler( switch r := req.(type) { case *PubKeyRequest: - var p crypto.PubKey - p = privVal.GetPubKey() - res = &PubKeyResponse{p, nil} + var pubKey crypto.PubKey + pubKey, err = privVal.GetPubKey() + if err != nil { + res = &PubKeyResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &PubKeyResponse{pubKey, nil} + } case *SignVoteRequest: err = privVal.SignVote(chainID, r.Vote) diff --git a/privval/socket_dialers.go b/privval/socket_dialers.go index 1945e7728..f9e5c7879 100644 --- a/privval/socket_dialers.go +++ b/privval/socket_dialers.go @@ -5,6 +5,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto" tmnet "github.com/tendermint/tendermint/libs/net" p2pconn "github.com/tendermint/tendermint/p2p/conn" diff --git a/rpc/client/codec.go b/rpc/client/codec.go index ef1a00ec4..2dc0f6319 100644 --- a/rpc/client/codec.go +++ b/rpc/client/codec.go @@ -2,6 +2,7 @@ package client import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 5c9d902fd..a25b6ebb2 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -1,11 +1,13 @@ package client_test import ( + "context" "fmt" "reflect" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" @@ -135,3 +137,21 @@ func testTxEventsSent(t *testing.T, broadcastMethod string) { func TestClientsResubscribe(t *testing.T) { // TODO(melekes) } + +func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) { + c := getHTTPClient() + + // on Subscribe + _, err := c.Subscribe(context.Background(), "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeader).String()) + assert.Error(t, err) + + // on Unsubscribe + err = c.Unsubscribe(context.Background(), 
"TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeader).String()) + assert.Error(t, err) + + // on UnsubscribeAll + err = c.UnsubscribeAll(context.Background(), "TestHeaderEvents") + assert.Error(t, err) +} diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index a543de70d..5d87a3a98 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -3,9 +3,10 @@ package client_test import ( "bytes" "fmt" + "log" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctest "github.com/tendermint/tendermint/rpc/test" ) @@ -18,9 +19,9 @@ func ExampleHTTP_simple() { // Create our RPC client rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTP(rpcAddr, "/websocket") + c, err := rpchttp.New(rpcAddr, "/websocket") if err != nil { - panic(err) + log.Fatal(err) } // Create a transaction @@ -29,28 +30,28 @@ func ExampleHTTP_simple() { tx := append(k, append([]byte("="), v...)...) // Broadcast the transaction and wait for it to commit (rather use - // c.BroadcastTxSync though in production) + // c.BroadcastTxSync though in production). 
bres, err := c.BroadcastTxCommit(tx) if err != nil { - panic(err) + log.Fatal(err) } if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() { - panic("BroadcastTxCommit transaction failed") + log.Fatal("BroadcastTxCommit transaction failed") } // Now try to fetch the value for the key qres, err := c.ABCIQuery("/key", k) if err != nil { - panic(err) + log.Fatal(err) } if qres.Response.IsErr() { - panic("ABCIQuery failed") + log.Fatal("ABCIQuery failed") } if !bytes.Equal(qres.Response.Key, k) { - panic("returned key does not match queried key") + log.Fatal("returned key does not match queried key") } if !bytes.Equal(qres.Response.Value, v) { - panic("returned value does not match sent value") + log.Fatal("returned value does not match sent value") } fmt.Println("Sent tx :", string(tx)) @@ -71,9 +72,9 @@ func ExampleHTTP_batching() { // Create our RPC client rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTP(rpcAddr, "/websocket") + c, err := rpchttp.New(rpcAddr, "/websocket") if err != nil { - panic(err) + log.Fatal(err) } // Create our two transactions @@ -92,28 +93,30 @@ func ExampleHTTP_batching() { // Queue up our transactions for _, tx := range txs { + // Broadcast the transaction and wait for it to commit (rather use + // c.BroadcastTxSync though in production). 
if _, err := batch.BroadcastTxCommit(tx); err != nil { - panic(err) + log.Fatal(err) } } // Send the batch of 2 transactions if _, err := batch.Send(); err != nil { - panic(err) + log.Fatal(err) } // Now let's query for the original results as a batch keys := [][]byte{k1, k2} for _, key := range keys { if _, err := batch.ABCIQuery("/key", key); err != nil { - panic(err) + log.Fatal(err) } } // Send the 2 queries and keep the results results, err := batch.Send() if err != nil { - panic(err) + log.Fatal(err) } // Each result in the returned list is the deserialized result of each @@ -121,7 +124,7 @@ func ExampleHTTP_batching() { for _, result := range results { qr, ok := result.(*ctypes.ResultABCIQuery) if !ok { - panic("invalid result type from ABCIQuery request") + log.Fatal("invalid result type from ABCIQuery request") } fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) } diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 756ba2818..0e54ec03b 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -5,6 +5,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 8b843fcdb..3b78dfe5f 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" diff --git a/rpc/client/httpclient.go b/rpc/client/http/http.go similarity index 85% rename from rpc/client/httpclient.go rename to rpc/client/http/http.go index 98875c91e..9af0b6cf4 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/http/http.go @@ -1,4 +1,4 @@ -package client +package http import ( "context" @@ -15,8 +15,9 @@ import ( "github.com/tendermint/tendermint/libs/log" tmpubsub 
"github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" + rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" + rpcclientlib "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" ) @@ -37,10 +38,31 @@ indefinitely until successful. Request batching is available for JSON RPC requests over HTTP, which conforms to the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See the example for more details. + +Example: + + c, err := New("http://192.168.1.10:26657", "/websocket") + if err != nil { + // handle error + } + + // call Start/Stop if you're subscribing to events + err = c.Start() + if err != nil { + // handle error + } + defer c.Stop() + + res, err := c.Status() + if err != nil { + // handle error + } + + // handle result */ type HTTP struct { remote string - rpc *rpcclient.JSONRPCClient + rpc *rpcclientlib.JSONRPCClient *baseRPCClient *WSEvents @@ -57,7 +79,7 @@ type HTTP struct { // batch, but ordering of transactions in the batch cannot be guaranteed in such // an example. type BatchHTTP struct { - rpcBatch *rpcclient.JSONRPCRequestBatch + rpcBatch *rpcclientlib.JSONRPCRequestBatch *baseRPCClient } @@ -65,17 +87,17 @@ type BatchHTTP struct { // non-batch) must conform. Acts as an additional code-level sanity check to // make sure the implementations stay coherent. type rpcClient interface { - ABCIClient - HistoryClient - NetworkClient - SignClient - StatusClient + rpcclient.ABCIClient + rpcclient.HistoryClient + rpcclient.NetworkClient + rpcclient.SignClient + rpcclient.StatusClient } // baseRPCClient implements the basic RPC method logic without the actual // underlying RPC call functionality, which is provided by `caller`. 
type baseRPCClient struct { - caller rpcclient.JSONRPCCaller + caller rpcclientlib.JSONRPCCaller } var _ rpcClient = (*HTTP)(nil) @@ -85,35 +107,35 @@ var _ rpcClient = (*baseRPCClient)(nil) //----------------------------------------------------------------------------- // HTTP -// NewHTTP takes a remote endpoint in the form ://: and +// New takes a remote endpoint in the form ://: and // the websocket path (which always seems to be "/websocket") // An error is returned on invalid remote. The function panics when remote is nil. -func NewHTTP(remote, wsEndpoint string) (*HTTP, error) { - httpClient, err := rpcclient.DefaultHTTPClient(remote) +func New(remote, wsEndpoint string) (*HTTP, error) { + httpClient, err := rpcclientlib.DefaultHTTPClient(remote) if err != nil { return nil, err } - return NewHTTPWithClient(remote, wsEndpoint, httpClient) + return NewWithClient(remote, wsEndpoint, httpClient) } // Create timeout enabled http client -func NewHTTPWithTimeout(remote, wsEndpoint string, timeout uint) (*HTTP, error) { - httpClient, err := rpcclient.DefaultHTTPClient(remote) +func NewWithTimeout(remote, wsEndpoint string, timeout uint) (*HTTP, error) { + httpClient, err := rpcclientlib.DefaultHTTPClient(remote) if err != nil { return nil, err } httpClient.Timeout = time.Duration(timeout) * time.Second - return NewHTTPWithClient(remote, wsEndpoint, httpClient) + return NewWithClient(remote, wsEndpoint, httpClient) } -// NewHTTPWithClient allows for setting a custom http client (See NewHTTP). +// NewWithClient allows for setting a custom http client (See New). // An error is returned on invalid remote. The function panics when remote is nil. 
-func NewHTTPWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, error) { +func NewWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, error) { if client == nil { panic("nil http.Client provided") } - rc, err := rpcclient.NewJSONRPCClientWithHTTPClient(remote, client) + rc, err := rpcclientlib.NewJSONRPCClientWithHTTPClient(remote, client) if err != nil { return nil, err } @@ -121,17 +143,22 @@ func NewHTTPWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, e ctypes.RegisterAmino(cdc) rc.SetCodec(cdc) + wsEvents, err := newWSEvents(cdc, remote, wsEndpoint) + if err != nil { + return nil, err + } + httpClient := &HTTP{ rpc: rc, remote: remote, baseRPCClient: &baseRPCClient{caller: rc}, - WSEvents: newWSEvents(cdc, remote, wsEndpoint), + WSEvents: wsEvents, } return httpClient, nil } -var _ Client = (*HTTP)(nil) +var _ rpcclient.Client = (*HTTP)(nil) // SetLogger sets a logger. func (c *HTTP) SetLogger(l log.Logger) { @@ -198,13 +225,13 @@ func (c *baseRPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) { } func (c *baseRPCClient) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) + return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions) } func (c *baseRPCClient) ABCIQueryWithOptions( path string, data bytes.HexBytes, - opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.caller.Call("abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, @@ -406,48 +433,51 @@ func (c *baseRPCClient) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroa //----------------------------------------------------------------------------- // WSEvents +var errNotRunning = errors.New("client is not running. 
Use .Start() method to start") + +// WSEvents is a wrapper around WSClient, which implements EventsClient. type WSEvents struct { service.BaseService cdc *amino.Codec remote string endpoint string - ws *rpcclient.WSClient + ws *rpcclientlib.WSClient - mtx sync.RWMutex - // query -> chan - subscriptions map[string]chan ctypes.ResultEvent + mtx sync.RWMutex + subscriptions map[string]chan ctypes.ResultEvent // query -> chan } -func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents { - wsEvents := &WSEvents{ +func newWSEvents(cdc *amino.Codec, remote, endpoint string) (*WSEvents, error) { + w := &WSEvents{ cdc: cdc, endpoint: endpoint, remote: remote, subscriptions: make(map[string]chan ctypes.ResultEvent), } + w.BaseService = *service.NewBaseService(nil, "WSEvents", w) - wsEvents.BaseService = *service.NewBaseService(nil, "WSEvents", wsEvents) - return wsEvents -} - -// OnStart implements service.Service by starting WSClient and event loop. -func (w *WSEvents) OnStart() (err error) { - w.ws, err = rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() { + var err error + w.ws, err = rpcclientlib.NewWSClient(w.remote, w.endpoint, rpcclientlib.OnReconnect(func() { // resubscribe immediately w.redoSubscriptionsAfter(0 * time.Second) })) if err != nil { - return err + return nil, err } w.ws.SetCodec(w.cdc) w.ws.SetLogger(w.Logger) - err = w.ws.Start() - if err != nil { + return w, nil +} + +// OnStart implements service.Service by starting WSClient and event loop. +func (w *WSEvents) OnStart() error { + if err := w.ws.Start(); err != nil { return err } go w.eventListener() + return nil } @@ -459,10 +489,17 @@ func (w *WSEvents) OnStop() { // Subscribe implements EventsClient by using WSClient to subscribe given // subscriber to query. By default, returns a channel with cap=1. Error is // returned if it fails to subscribe. -// Channel is never closed to prevent clients from seeing an erroneus event. 
+// +// Channel is never closed to prevent clients from seeing an erroneous event. +// +// It returns an error if WSEvents is not running. func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + if !w.IsRunning() { + return nil, errNotRunning + } + if err := w.ws.Subscribe(ctx, query); err != nil { return nil, err } @@ -484,7 +521,13 @@ func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, // Unsubscribe implements EventsClient by using WSClient to unsubscribe given // subscriber from query. +// +// It returns an error if WSEvents is not running. func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { + if !w.IsRunning() { + return errNotRunning + } + if err := w.ws.Unsubscribe(ctx, query); err != nil { return err } @@ -501,7 +544,13 @@ func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // UnsubscribeAll implements EventsClient by using WSClient to unsubscribe // given subscriber from all the queries. +// +// It returns an error if WSEvents is not running. 
func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { + if !w.IsRunning() { + return errNotRunning + } + if err := w.ws.UnsubscribeAll(ctx); err != nil { return err } diff --git a/rpc/client/localclient.go b/rpc/client/local/local.go similarity index 96% rename from rpc/client/localclient.go rename to rpc/client/local/local.go index e6b0eb937..a28e9f939 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/local/local.go @@ -1,4 +1,4 @@ -package client +package local import ( "context" @@ -11,6 +11,7 @@ import ( tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" nm "github.com/tendermint/tendermint/node" + rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/lib/types" @@ -49,7 +50,7 @@ type Local struct { // you can only have one node per process. So make sure test cases // don't run in parallel, or try to simulate an entire network in // one process... -func NewLocal(node *nm.Node) *Local { +func New(node *nm.Node) *Local { node.ConfigureRPC() return &Local{ EventBus: node.EventBus(), @@ -58,7 +59,7 @@ func NewLocal(node *nm.Node) *Local { } } -var _ Client = (*Local)(nil) +var _ rpcclient.Client = (*Local)(nil) // SetLogger allows to set a logger on the client. 
func (c *Local) SetLogger(l log.Logger) { @@ -74,13 +75,13 @@ func (c *Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { } func (c *Local) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) + return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions) } func (c *Local) ABCIQueryWithOptions( path string, data bytes.HexBytes, - opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index d1f84f2b1..ebee8b4e8 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -63,7 +63,13 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck } - return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil + return &ctypes.ResultBroadcastTx{ + Code: c.Code, + Data: c.Data, + Log: c.Log, + Codespace: c.Codespace, + Hash: tx.Hash(), + }, nil } func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { @@ -72,7 +78,13 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck } - return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil + return &ctypes.ResultBroadcastTx{ + Code: c.Code, + Data: c.Data, + Log: c.Log, + Codespace: c.Codespace, + Hash: tx.Hash(), + }, nil } // ABCIMock will send all abci related request to the named app, diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 9196bead0..62b9f23ef 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -23,15 +23,17 @@ import ( 
mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + rpclocal "github.com/tendermint/tendermint/rpc/client/local" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) -func getHTTPClient() *client.HTTP { +func getHTTPClient() *rpchttp.HTTP { rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTP(rpcAddr, "/websocket") + c, err := rpchttp.New(rpcAddr, "/websocket") if err != nil { panic(err) } @@ -39,9 +41,9 @@ func getHTTPClient() *client.HTTP { return c } -func getHTTPClientWithTimeout(timeout uint) *client.HTTP { +func getHTTPClientWithTimeout(timeout uint) *rpchttp.HTTP { rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTPWithTimeout(rpcAddr, "/websocket", timeout) + c, err := rpchttp.NewWithTimeout(rpcAddr, "/websocket", timeout) if err != nil { panic(err) } @@ -49,8 +51,8 @@ func getHTTPClientWithTimeout(timeout uint) *client.HTTP { return c } -func getLocalClient() *client.Local { - return client.NewLocal(node) +func getLocalClient() *rpclocal.Local { + return rpclocal.New(node) } // GetClients returns a slice of clients for table-driven tests @@ -63,7 +65,7 @@ func GetClients() []client.Client { func TestNilCustomHTTPClient(t *testing.T) { require.Panics(t, func() { - _, _ = client.NewHTTPWithClient("http://example.com", "/websocket", nil) + _, _ = rpchttp.NewWithClient("http://example.com", "/websocket", nil) }) require.Panics(t, func() { _, _ = rpcclient.NewJSONRPCClientWithHTTPClient("http://example.com", nil) @@ -72,7 +74,7 @@ func TestNilCustomHTTPClient(t *testing.T) { func TestCustomHTTPClient(t *testing.T) { remote := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTPWithClient(remote, 
"/websocket", http.DefaultClient) + c, err := rpchttp.NewWithClient(remote, "/websocket", http.DefaultClient) require.Nil(t, err) status, err := c.Status() require.NoError(t, err) @@ -701,7 +703,7 @@ func TestBatchedJSONRPCCalls(t *testing.T) { testBatchedJSONRPCCalls(t, c) } -func testBatchedJSONRPCCalls(t *testing.T, c *client.HTTP) { +func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) { k1, v1, tx1 := MakeTxKV() k2, v2, tx2 := MakeTxKV() diff --git a/rpc/core/README.md b/rpc/core/README.md index d767c5f71..f62d2dbf4 100644 --- a/rpc/core/README.md +++ b/rpc/core/README.md @@ -5,3 +5,14 @@ Requests that return multiple items will be paginated to 30 items by default. You can specify further pages with the ?page parameter. You can also set a custom page size up to 100 with the ?per_page parameter. + +## Subscribing to events + +The user can subscribe to events emitted by Tendermint, using `/subscribe`. If +the maximum number of clients is reached or the client has too many +subscriptions, an error will be returned. The subscription timeout is 5 sec. +Each subscription has a buffer to accommodate short bursts of events or some +slowness in clients. If the buffer gets full, the subscription will be canceled +("client is not pulling messages fast enough"). If Tendermint exits, all +subscriptions are canceled ("Tendermint exited"). The user can unsubscribe +using either `/unsubscribe` or `/unsubscribe_all`. diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index e340d4dfb..1d608534a 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -17,7 +17,7 @@ func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes. 
// maximum 20 block metas const limit int64 = 20 var err error - minHeight, maxHeight, err = filterMinMax(blockStore.Height(), minHeight, maxHeight, limit) + minHeight, maxHeight, err = filterMinMax(blockStore.Base(), blockStore.Height(), minHeight, maxHeight, limit) if err != nil { return nil, err } @@ -34,11 +34,10 @@ func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes. BlockMetas: blockMetas}, nil } -// error if either min or max are negative or min < max -// if 0, use 1 for min, latest block height for max +// error if either min or max are negative or min > max +// if 0, use blockstore base for min, latest block height for max // enforce limit. -// error if min > max -func filterMinMax(height, min, max, limit int64) (int64, int64, error) { +func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // filter negatives if min < 0 || max < 0 { return min, max, fmt.Errorf("heights must be non-negative") @@ -55,6 +54,9 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // limit max to the height max = tmmath.MinInt64(height, max) + // limit min to the base + min = tmmath.MaxInt64(base, min) + // limit min to within `limit` of max // so the total number of blocks returned will be `limit` min = tmmath.MaxInt64(min, max-limit+1) @@ -69,8 +71,7 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // If no height is provided, it will fetch the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/block func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -99,8 +100,7 @@ func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error // If no height is provided, it will fetch the commit for the latest block. 
// More: https://docs.tendermint.com/master/rpc/#/Info/commit func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -113,7 +113,7 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // If the next block has not been committed yet, // use a non-canonical commit - if height == storeHeight { + if height == blockStore.Height() { commit := blockStore.LoadSeenCommit(height) return ctypes.NewResultCommit(&header, commit, false), nil } @@ -131,8 +131,7 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // getBlock(h).Txs[5] // More: https://docs.tendermint.com/master/rpc/#/Info/block_results func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -152,7 +151,7 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR }, nil } -func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { +func getHeight(currentBase int64, currentHeight int64, heightPtr *int64) (int64, error) { if heightPtr != nil { height := *heightPtr if height <= 0 { @@ -161,6 +160,10 @@ func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { if height > currentHeight { return 0, fmt.Errorf("height must be less than or equal to the current blockchain height") } + if height < currentBase { + return 0, fmt.Errorf("height %v is not available, blocks pruned at height %v", + height, currentBase) + } return height, nil } return currentHeight, nil diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 
d537f6c3a..c0561647f 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -7,53 +7,58 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/lib/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestBlockchainInfo(t *testing.T) { cases := []struct { min, max int64 - height int64 + base, height int64 limit int64 resultLength int64 wantErr bool }{ // min > max - {0, 0, 0, 10, 0, true}, // min set to 1 - {0, 1, 0, 10, 0, true}, // max set to height (0) - {0, 0, 1, 10, 1, false}, // max set to height (1) - {2, 0, 1, 10, 0, true}, // max set to height (1) - {2, 1, 5, 10, 0, true}, + {0, 0, 0, 0, 10, 0, true}, // min set to 1 + {0, 1, 0, 0, 10, 0, true}, // max set to height (0) + {0, 0, 0, 1, 10, 1, false}, // max set to height (1) + {2, 0, 0, 1, 10, 0, true}, // max set to height (1) + {2, 1, 0, 5, 10, 0, true}, // negative - {1, 10, 14, 10, 10, false}, // control - {-1, 10, 14, 10, 0, true}, - {1, -10, 14, 10, 0, true}, - {-9223372036854775808, -9223372036854775788, 100, 20, 0, true}, + {1, 10, 0, 14, 10, 10, false}, // control + {-1, 10, 0, 14, 10, 0, true}, + {1, -10, 0, 14, 10, 0, true}, + {-9223372036854775808, -9223372036854775788, 0, 100, 20, 0, true}, + + // check base + {1, 1, 1, 1, 1, 1, false}, + {2, 5, 3, 5, 5, 3, false}, // check limit and height - {1, 1, 1, 10, 1, false}, - {1, 1, 5, 10, 1, false}, - {2, 2, 5, 10, 1, false}, - {1, 2, 5, 10, 2, false}, - {1, 5, 1, 10, 1, false}, - {1, 5, 10, 10, 5, false}, - {1, 15, 10, 10, 10, false}, - {1, 15, 15, 10, 10, false}, - {1, 15, 15, 20, 15, false}, - {1, 20, 15, 20, 15, false}, - {1, 20, 20, 20, 20, false}, + {1, 1, 0, 1, 10, 1, false}, + {1, 1, 0, 5, 10, 1, false}, + {2, 2, 0, 5, 10, 1, 
false}, + {1, 2, 0, 5, 10, 2, false}, + {1, 5, 0, 1, 10, 1, false}, + {1, 5, 0, 10, 10, 5, false}, + {1, 15, 0, 10, 10, 10, false}, + {1, 15, 0, 15, 10, 10, false}, + {1, 15, 0, 15, 20, 15, false}, + {1, 20, 0, 15, 20, 15, false}, + {1, 20, 0, 20, 20, 20, false}, } for i, c := range cases { caseString := fmt.Sprintf("test %d failed", i) - min, max, err := filterMinMax(c.height, c.min, c.max, c.limit) + min, max, err := filterMinMax(c.base, c.height, c.min, c.max, c.limit) if c.wantErr { require.Error(t, err, caseString) } else { @@ -111,12 +116,15 @@ type mockBlockStore struct { height int64 } +func (mockBlockStore) Base() int64 { return 1 } func (store mockBlockStore) Height() int64 { return store.height } +func (store mockBlockStore) Size() int64 { return store.height } func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return nil } func (mockBlockStore) LoadBlock(height int64) *types.Block { return nil } func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } func (mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } func (mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return nil } func (mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return nil } +func (mockBlockStore) PruneBlocks(height int64) (uint64, error) { return 0, nil } func (mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index a2a619ea5..8ea2dde4f 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -18,7 +18,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ct // The latest validator that we know is the // NextValidator of the last block. 
height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) + height, err := getHeight(blockStore.Base(), height, heightPtr) if err != nil { return nil, err } @@ -91,7 +91,7 @@ func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) + height, err := getHeight(blockStore.Base(), height, heightPtr) if err != nil { return nil, err } diff --git a/rpc/core/events.go b/rpc/core/events.go index 165aa6e54..7802f160e 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -12,6 +12,11 @@ import ( rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) +const ( + // Buffer on the Tendermint (server) side to allow some slowness in clients. + subBufferSize = 100 +) + // Subscribe for events via WebSocket. 
// More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { @@ -33,7 +38,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() - sub, err := eventBus.Subscribe(subCtx, addr, q) + sub, err := eventBus.Subscribe(subCtx, addr, q, subBufferSize) if err != nil { return nil, err } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 28b73ab33..0e6ce7a2c 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -43,10 +43,11 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas res := <-resCh r := res.GetCheckTx() return &ctypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Hash: tx.Hash(), + Code: r.Code, + Data: r.Data, + Log: r.Log, + Codespace: r.Codespace, + Hash: tx.Hash(), }, nil } diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 532493451..4fb3b9b13 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -4,6 +4,8 @@ import ( "fmt" "time" + dbm "github.com/tendermint/tm-db" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" @@ -14,7 +16,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( diff --git a/rpc/core/status.go b/rpc/core/status.go index e6438009a..4e950d4a3 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -16,6 +16,20 @@ import ( // hash, app hash, block height and time. 
// More: https://docs.tendermint.com/master/rpc/#/Info/status func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + var ( + earliestBlockMeta *types.BlockMeta + earliestBlockHash tmbytes.HexBytes + earliestAppHash tmbytes.HexBytes + earliestBlockTimeNano int64 + ) + earliestBlockHeight := blockStore.Base() + earliestBlockMeta = blockStore.LoadBlockMeta(earliestBlockHeight) + if earliestBlockMeta != nil { + earliestAppHash = earliestBlockMeta.Header.AppHash + earliestBlockHash = earliestBlockMeta.BlockID.Hash + earliestBlockTimeNano = earliestBlockMeta.Header.Time.UnixNano() + } + var latestHeight int64 if consensusReactor.FastSync() { latestHeight = blockStore.Height() @@ -36,8 +50,6 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() } - latestBlockTime := time.Unix(0, latestBlockTimeNano) - var votingPower int64 if val := validatorAtHeight(latestHeight); val != nil { votingPower = val.VotingPower @@ -46,11 +58,15 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { result := &ctypes.ResultStatus{ NodeInfo: p2pTransport.NodeInfo().(p2p.DefaultNodeInfo), SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: latestBlockHash, - LatestAppHash: latestAppHash, - LatestBlockHeight: latestHeight, - LatestBlockTime: latestBlockTime, - CatchingUp: consensusReactor.FastSync(), + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, + LatestBlockHeight: latestHeight, + LatestBlockTime: time.Unix(0, latestBlockTimeNano), + EarliestBlockHash: earliestBlockHash, + EarliestAppHash: earliestAppHash, + EarliestBlockHeight: earliestBlockHeight, + EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), + CatchingUp: consensusReactor.FastSync(), }, ValidatorInfo: ctypes.ValidatorInfo{ Address: pubKey.Address(), diff --git a/rpc/core/types/codec.go b/rpc/core/types/codec.go index 82543ff6e..8e0b5303f 100644 --- a/rpc/core/types/codec.go +++ b/rpc/core/types/codec.go @@ 
-2,6 +2,7 @@ package coretypes import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 9aee485e9..18b2109ed 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -65,7 +65,13 @@ type SyncInfo struct { LatestAppHash bytes.HexBytes `json:"latest_app_hash"` LatestBlockHeight int64 `json:"latest_block_height"` LatestBlockTime time.Time `json:"latest_block_time"` - CatchingUp bool `json:"catching_up"` + + EarliestBlockHash bytes.HexBytes `json:"earliest_block_hash"` + EarliestAppHash bytes.HexBytes `json:"earliest_app_hash"` + EarliestBlockHeight int64 `json:"earliest_block_height"` + EarliestBlockTime time.Time `json:"earliest_block_time"` + + CatchingUp bool `json:"catching_up"` } // Info about the node's validator @@ -148,9 +154,10 @@ type ResultConsensusState struct { // CheckTx result type ResultBroadcastTx struct { - Code uint32 `json:"code"` - Data bytes.HexBytes `json:"data"` - Log string `json:"log"` + Code uint32 `json:"code"` + Data bytes.HexBytes `json:"data"` + Log string `json:"log"` + Codespace string `json:"codespace"` Hash bytes.HexBytes `json:"hash"` } diff --git a/rpc/lib/client/integration_test.go b/rpc/lib/client/integration_test.go index 393783c51..5fee3752b 100644 --- a/rpc/lib/client/integration_test.go +++ b/rpc/lib/client/integration_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 4d8a58b8e..ddddc97cf 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -14,6 +14,7 @@ import ( metrics "github.com/rcrowley/go-metrics" amino "github.com/tendermint/go-amino" + tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" types "github.com/tendermint/tendermint/rpc/lib/types" diff --git 
a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index 33a65dcbe..a4f033867 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -11,6 +11,7 @@ import ( "github.com/gorilla/websocket" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" types "github.com/tendermint/tendermint/rpc/lib/types" diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 5b95666a7..aef795d3e 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -17,6 +17,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" diff --git a/rpc/lib/server/http_json_handler_test.go b/rpc/lib/server/http_json_handler_test.go index e4ae2f8bf..ef1fcc9f5 100644 --- a/rpc/lib/server/http_json_handler_test.go +++ b/rpc/lib/server/http_json_handler_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" types "github.com/tendermint/tendermint/rpc/lib/types" ) diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go index 3780861e4..09a3d18ee 100644 --- a/rpc/lib/server/parse_test.go +++ b/rpc/lib/server/parse_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/bytes" types "github.com/tendermint/tendermint/rpc/lib/types" ) diff --git a/rpc/swagger/swagger.yaml b/rpc/swagger/swagger.yaml index 40b6e0169..000ea972c 100644 --- a/rpc/swagger/swagger.yaml +++ b/rpc/swagger/swagger.yaml @@ -253,9 +253,10 @@ paths: https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query. 
```go + import rpchttp "github.com/tendermint/tendermint/rpc/client/http" import "github.com/tendermint/tendermint/types" - client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") err := client.Start() if err != nil { handle error @@ -309,7 +310,7 @@ paths: operationId: unsubscribe description: | ```go - client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") err := client.Start() if err != nil { handle error @@ -1122,6 +1123,18 @@ components: latest_block_time: type: string example: "2019-08-01T11:52:22.818762194Z" + earliest_block_hash: + type: string + example: "790BA84C3545FCCC49A5C629CEE6EA58A6E875C3862175BDC11EE7AF54703501" + earliest_app_hash: + type: string + example: "C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" + earliest_block_height: + type: string + example: "1262196" + earliest_block_time: + type: string + example: "2019-08-01T11:52:22.818762194Z" catching_up: type: boolean example: false @@ -2947,6 +2960,9 @@ components: log: type: "string" example: "" + codespace: + type: "string" + example: "ibc" hash: type: "string" example: "0D33F2F03A5234F38706E43004489E061AC40A2E" diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index 1d1f6256c..9f6cdb2b6 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -15,6 +15,7 @@ import ( "strings" amino "github.com/tendermint/go-amino" + cs "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/types" ) diff --git a/scripts/privValUpgrade_test.go b/scripts/privValUpgrade_test.go index d62d4ceee..287c4fc50 100644 --- a/scripts/privValUpgrade_test.go +++ b/scripts/privValUpgrade_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/privval" ) @@ -100,7 +101,9 @@ func TestLoadAndUpgrade(t *testing.T) { assert.Equal(t, oldPV.Address, 
upgradedPV.Key.Address) assert.Equal(t, oldPV.Address, upgradedPV.GetAddress()) assert.Equal(t, oldPV.PubKey, upgradedPV.Key.PubKey) - assert.Equal(t, oldPV.PubKey, upgradedPV.GetPubKey()) + upv, err := upgradedPV.GetPubKey() + require.NoError(t, err) + assert.Equal(t, oldPV.PubKey, upv) assert.Equal(t, oldPV.PrivKey, upgradedPV.Key.PrivKey) assert.Equal(t, oldPV.LastHeight, upgradedPV.LastSignState.Height) diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 922512ace..e6226dc1a 100644 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -8,4 +8,4 @@ for dir in $proto_dirs; do -I. \ --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative:. \ $(find "${dir}" -name '*.proto') -done \ No newline at end of file +done diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 48195eead..181f40c75 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -13,6 +13,7 @@ import ( "os" amino "github.com/tendermint/go-amino" + cs "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/types" ) diff --git a/state/codec.go b/state/codec.go index abbec6e39..df2c15545 100644 --- a/state/codec.go +++ b/state/codec.go @@ -2,6 +2,7 @@ package state import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/state/errors.go b/state/errors.go index cd4cd7824..6e0cdfa47 100644 --- a/state/errors.go +++ b/state/errors.go @@ -21,6 +21,11 @@ type ( AppHeight int64 } + ErrAppBlockHeightTooLow struct { + AppHeight int64 + StoreBase int64 + } + ErrLastStateMismatch struct { Height int64 Core []byte @@ -46,12 +51,12 @@ type ( ) func (e ErrUnknownBlock) Error() string { - return fmt.Sprintf("Could not find block #%d", e.Height) + return fmt.Sprintf("could not find block #%d", e.Height) } func (e ErrBlockHashMismatch) 
Error() string { return fmt.Sprintf( - "App block hash (%X) does not match core block hash (%X) for height %d", + "app block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height, @@ -59,11 +64,16 @@ func (e ErrBlockHashMismatch) Error() string { } func (e ErrAppBlockHeightTooHigh) Error() string { - return fmt.Sprintf("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) + return fmt.Sprintf("app block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) } + +func (e ErrAppBlockHeightTooLow) Error() string { + return fmt.Sprintf("app block height (%d) is too far below block store base (%d)", e.AppHeight, e.StoreBase) +} + func (e ErrLastStateMismatch) Error() string { return fmt.Sprintf( - "Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", + "latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App, @@ -72,20 +82,20 @@ func (e ErrLastStateMismatch) Error() string { func (e ErrStateMismatch) Error() string { return fmt.Sprintf( - "State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", + "state after replay does not match saved state. 
Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected, ) } func (e ErrNoValSetForHeight) Error() string { - return fmt.Sprintf("Could not find validator set for height #%d", e.Height) + return fmt.Sprintf("could not find validator set for height #%d", e.Height) } func (e ErrNoConsensusParamsForHeight) Error() string { - return fmt.Sprintf("Could not find consensus params for height #%d", e.Height) + return fmt.Sprintf("could not find consensus params for height #%d", e.Height) } func (e ErrNoABCIResponsesForHeight) Error() string { - return fmt.Sprintf("Could not find results for height #%d", e.Height) + return fmt.Sprintf("could not find results for height #%d", e.Height) } diff --git a/state/execution.go b/state/execution.go index 4025b6e7b..c6d0b8475 100644 --- a/state/execution.go +++ b/state/execution.go @@ -4,13 +4,14 @@ import ( "fmt" "time" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) //----------------------------------------------------------------------------- @@ -118,13 +119,14 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e // ApplyBlock validates the block against the state, executes it against the app, // fires the relevant events, commits the app, and saves the new state and responses. +// It returns the new state and the block height to retain (pruning older blocks). // It's the only function that needs to be called // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. 
-func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, block *types.Block) (State, error) { +func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, block *types.Block) (State, int64, error) { if err := blockExec.ValidateBlock(state, block); err != nil { - return state, ErrInvalidBlock(err) + return state, 0, ErrInvalidBlock(err) } startTime := time.Now().UnixNano() @@ -132,7 +134,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { - return state, ErrProxyAppConn(err) + return state, 0, ErrProxyAppConn(err) } fail.Fail() // XXX @@ -146,11 +148,11 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b abciValUpdates := abciResponses.EndBlock.ValidatorUpdates err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator) if err != nil { - return state, fmt.Errorf("error in validator updates: %v", err) + return state, 0, fmt.Errorf("error in validator updates: %v", err) } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) if err != nil { - return state, err + return state, 0, err } if len(validatorUpdates) > 0 { blockExec.logger.Info("Updates to validators", "updates", types.ValidatorListString(validatorUpdates)) @@ -159,13 +161,13 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b // Update the state with the block and responses. state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, 0, fmt.Errorf("commit failed for application: %v", err) } // Lock mempool, commit app state, update mempoool. 
- appHash, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) + appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, 0, fmt.Errorf("commit failed for application: %v", err) } // Update evpool with the block and state. @@ -183,12 +185,12 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b // NOTE: if we crash between Commit and Save, events wont be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses, validatorUpdates) - return state, nil + return state, retainHeight, nil } // Commit locks the mempool, runs the ABCI Commit message, and updates the // mempool. -// It returns the result of calling abci.Commit (the AppHash), and an error. +// It returns the result of calling abci.Commit (the AppHash) and the height to retain (if any). // The Mempool must be locked during commit and update because state is // typically reset on Commit and old txs must be replayed against committed // state before new txs are run in the mempool, lest they be invalid. 
@@ -196,7 +198,7 @@ func (blockExec *BlockExecutor) Commit( state State, block *types.Block, deliverTxResponses []*abci.ResponseDeliverTx, -) ([]byte, error) { +) ([]byte, int64, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -205,7 +207,7 @@ func (blockExec *BlockExecutor) Commit( err := blockExec.mempool.FlushAppConn() if err != nil { blockExec.logger.Error("Client error during mempool.FlushAppConn", "err", err) - return nil, err + return nil, 0, err } // Commit block, get hash back @@ -215,7 +217,7 @@ func (blockExec *BlockExecutor) Commit( "Client error during proxyAppConn.CommitSync", "err", err, ) - return nil, err + return nil, 0, err } // ResponseCommit has no error code - just data @@ -235,7 +237,7 @@ func (blockExec *BlockExecutor) Commit( TxPostCheck(state), ) - return res.Data, err + return res.Data, res.RetainHeight, err } //--------------------------------------------------------- diff --git a/state/execution_test.go b/state/execution_test.go index 78f4d6ca4..041f232bd 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" @@ -26,7 +27,9 @@ var ( ) func TestApplyBlock(t *testing.T) { - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) + app := kvstore.NewApplication() + app.RetainBlocks = 1 + cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -40,9 +43,9 @@ func TestApplyBlock(t *testing.T) { block := makeBlock(state, 1) blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} - //nolint:ineffassign - state, err = blockExec.ApplyBlock(state, blockID, block) + _, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) require.Nil(t, 
err) + assert.EqualValues(t, retainHeight, 1) // TODO check state and mempool } @@ -355,7 +358,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { {PubKey: types.TM2PB.PubKey(pubkey), Power: 10}, } - state, err = blockExec.ApplyBlock(state, blockID, block) + state, _, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) // test new validator was added to NextValidators @@ -409,7 +412,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { {PubKey: types.TM2PB.PubKey(state.Validators.Validators[0].PubKey), Power: 0}, } - assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) }) + assert.NotPanics(t, func() { state, _, err = blockExec.ApplyBlock(state, blockID, block) }) assert.NotNil(t, err) assert.NotEmpty(t, state.NextValidators.Validators) diff --git a/state/export_test.go b/state/export_test.go index 1f3990bbd..cba07eca3 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -1,9 +1,10 @@ package state import ( + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // diff --git a/state/helpers_test.go b/state/helpers_test.go index f8758f987..a85e35748 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -66,7 +66,7 @@ func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commi } blockID := types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} - state, err := blockExec.ApplyBlock(state, blockID, block) + state, _, err := blockExec.ApplyBlock(state, blockID, block) if err != nil { return state, types.BlockID{}, err } diff --git a/state/services.go b/state/services.go index bf9942811..a30956bdc 100644 --- a/state/services.go +++ b/state/services.go @@ -12,12 +12,19 @@ import ( //------------------------------------------------------ // blockstore -// BlockStoreRPC is the block store interface 
used by the RPC. -type BlockStoreRPC interface { +// BlockStore defines the interface used by the ConsensusState. +type BlockStore interface { + Base() int64 Height() int64 + Size() int64 LoadBlockMeta(height int64) *types.BlockMeta LoadBlock(height int64) *types.Block + + SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) + + PruneBlocks(height int64) (uint64, error) + LoadBlockByHash(hash []byte) *types.Block LoadBlockPart(height int64, index int) *types.Part @@ -25,13 +32,7 @@ type BlockStoreRPC interface { LoadSeenCommit(height int64) *types.Commit } -// BlockStore defines the BlockStore interface used by the ConsensusState. -type BlockStore interface { - BlockStoreRPC - SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) -} - -//----------------------------------------------------------------------------------------------------- +//----------------------------------------------------------------------------- // evidence pool // EvidencePool defines the EvidencePool interface used by the ConsensusState. 
diff --git a/state/state_test.go b/state/state_test.go index b015a4e17..746f7837c 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -11,15 +11,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/libs/rand" tmrand "github.com/tendermint/tendermint/libs/rand" sm "github.com/tendermint/tendermint/state" - dbm "github.com/tendermint/tm-db" - - cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" ) @@ -326,7 +327,8 @@ func TestProposerFrequency(t *testing.T) { votePower := int64(tmrand.Int()%maxPower) + 1 totalVotePower += votePower privVal := types.NewMockPV() - pubKey := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) val := types.NewValidator(pubKey, votePower) val.ProposerPriority = tmrand.Int64() vals[j] = val diff --git a/state/store.go b/state/store.go index e49e289f0..08b695f8a 100644 --- a/state/store.go +++ b/state/store.go @@ -3,11 +3,12 @@ package state import ( "fmt" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( @@ -124,6 +125,102 @@ type ABCIResponses struct { BeginBlock *abci.ResponseBeginBlock `json:"begin_block"` } +// PruneStates deletes states between the given heights (including from, excluding to). It is not +// guaranteed to delete all states, since the last checkpointed state and states being pointed to by +// e.g. `LastHeightChanged` must remain. The state at to must also exist. 
+// +// The from parameter is necessary since we can't do a key scan in a performant way due to the key +// encoding not preserving ordering: https://github.com/tendermint/tendermint/issues/4567 +// This will cause some old states to be left behind when doing incremental partial prunes, +// specifically older checkpoints and LastHeightChanged targets. +func PruneStates(db dbm.DB, from int64, to int64) error { + if from <= 0 || to <= 0 { + return fmt.Errorf("from height %v and to height %v must be greater than 0", from, to) + } + if from >= to { + return fmt.Errorf("from height %v must be lower than to height %v", from, to) + } + valInfo := loadValidatorsInfo(db, to) + if valInfo == nil { + return fmt.Errorf("validators at height %v not found", to) + } + paramsInfo := loadConsensusParamsInfo(db, to) + if paramsInfo == nil { + return fmt.Errorf("consensus params at height %v not found", to) + } + + keepVals := make(map[int64]bool) + if valInfo.ValidatorSet == nil { + keepVals[valInfo.LastHeightChanged] = true + keepVals[lastStoredHeightFor(to, valInfo.LastHeightChanged)] = true // keep last checkpoint too + } + keepParams := make(map[int64]bool) + if paramsInfo.ConsensusParams.Equals(&types.ConsensusParams{}) { + keepParams[paramsInfo.LastHeightChanged] = true + } + + batch := db.NewBatch() + defer batch.Close() + pruned := uint64(0) + var err error + + // We have to delete in reverse order, to avoid deleting previous heights that have validator + // sets and consensus params that we may need to retrieve. + for h := to - 1; h >= from; h-- { + // For heights we keep, we must make sure they have the full validator set or consensus + // params, otherwise they will panic if they're retrieved directly (instead of + // indirectly via a LastHeightChanged pointer). 
+ if keepVals[h] { + v := loadValidatorsInfo(db, h) + if v.ValidatorSet == nil { + v.ValidatorSet, err = LoadValidators(db, h) + if err != nil { + return err + } + v.LastHeightChanged = h + batch.Set(calcValidatorsKey(h), v.Bytes()) + } + } else { + batch.Delete(calcValidatorsKey(h)) + } + + if keepParams[h] { + p := loadConsensusParamsInfo(db, h) + if p.ConsensusParams.Equals(&types.ConsensusParams{}) { + p.ConsensusParams, err = LoadConsensusParams(db, h) + if err != nil { + return err + } + p.LastHeightChanged = h + batch.Set(calcConsensusParamsKey(h), p.Bytes()) + } + } else { + batch.Delete(calcConsensusParamsKey(h)) + } + + batch.Delete(calcABCIResponsesKey(h)) + pruned++ + + // avoid batches growing too large by flushing to database regularly + if pruned%1000 == 0 && pruned > 0 { + err := batch.Write() + if err != nil { + return err + } + batch.Close() + batch = db.NewBatch() + defer batch.Close() + } + } + + err = batch.WriteSync() + if err != nil { + return err + } + + return nil +} + // NewABCIResponses returns a new ABCIResponses func NewABCIResponses(block *types.Block) *ABCIResponses { resDeliverTxs := make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) diff --git a/state/store_test.go b/state/store_test.go index 0f38f21a5..d46eaa9eb 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -8,10 +8,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + cfg "github.com/tendermint/tendermint/config" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestStoreLoadValidators(t *testing.T) { @@ -64,3 +65,118 @@ func BenchmarkLoadValidators(b *testing.B) { }) } } + +func TestPruneStates(t *testing.T) { + testcases := map[string]struct { + makeHeights int64 + pruneFrom int64 + pruneTo int64 + expectErr bool + expectVals []int64 + expectParams []int64 + expectABCI []int64 + }{ + "error on pruning from 0": 
{100, 0, 5, true, nil, nil, nil}, + "error when from > to": {100, 3, 2, true, nil, nil, nil}, + "error when from == to": {100, 3, 3, true, nil, nil, nil}, + "error when to does not exist": {100, 1, 101, true, nil, nil, nil}, + "prune all": {100, 1, 100, false, []int64{93, 100}, []int64{95, 100}, []int64{100}}, + "prune some": {10, 2, 8, false, []int64{1, 3, 8, 9, 10}, []int64{1, 5, 8, 9, 10}, []int64{1, 8, 9, 10}}, + "prune across checkpoint": {100001, 1, 100001, false, []int64{99993, 100000, 100001}, []int64{99995, 100001}, []int64{100001}}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + db := dbm.NewMemDB() + + // Generate a bunch of state data. Validators change for heights ending with 3, and + // parameters when ending with 5. + validator := &types.Validator{Address: []byte{1, 2, 3}, VotingPower: 100} + validatorSet := &types.ValidatorSet{ + Validators: []*types.Validator{validator}, + Proposer: validator, + } + valsChanged := int64(0) + paramsChanged := int64(0) + + for h := int64(1); h <= tc.makeHeights; h++ { + if valsChanged == 0 || h%10 == 2 { + valsChanged = h + 1 // Have to add 1, since NextValidators is what's stored + } + if paramsChanged == 0 || h%10 == 5 { + paramsChanged = h + } + + sm.SaveState(db, sm.State{ + LastBlockHeight: h - 1, + Validators: validatorSet, + NextValidators: validatorSet, + ConsensusParams: types.ConsensusParams{ + Block: types.BlockParams{MaxBytes: 10e6}, + }, + LastHeightValidatorsChanged: valsChanged, + LastHeightConsensusParamsChanged: paramsChanged, + }) + sm.SaveABCIResponses(db, h, sm.NewABCIResponses(&types.Block{ + Header: types.Header{Height: h}, + Data: types.Data{ + Txs: types.Txs{ + []byte{1}, + []byte{2}, + []byte{3}, + }, + }, + })) + } + + // Test assertions + err := sm.PruneStates(db, tc.pruneFrom, tc.pruneTo) + if tc.expectErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + expectVals := sliceToMap(tc.expectVals) + expectParams := 
sliceToMap(tc.expectParams) + expectABCI := sliceToMap(tc.expectABCI) + + for h := int64(1); h <= tc.makeHeights; h++ { + vals, err := sm.LoadValidators(db, h) + if expectVals[h] { + require.NoError(t, err, "validators height %v", h) + require.NotNil(t, vals) + } else { + require.Error(t, err, "validators height %v", h) + require.Equal(t, sm.ErrNoValSetForHeight{Height: h}, err) + } + + params, err := sm.LoadConsensusParams(db, h) + if expectParams[h] { + require.NoError(t, err, "params height %v", h) + require.False(t, params.Equals(&types.ConsensusParams{})) + } else { + require.Error(t, err, "params height %v", h) + require.Equal(t, sm.ErrNoConsensusParamsForHeight{Height: h}, err) + } + + abci, err := sm.LoadABCIResponses(db, h) + if expectABCI[h] { + require.NoError(t, err, "abci height %v", h) + require.NotNil(t, abci) + } else { + require.Error(t, err, "abci height %v", h) + require.Equal(t, sm.ErrNoABCIResponsesForHeight{Height: h}, err) + } + } + }) + } +} + +func sliceToMap(s []int64) map[int64]bool { + m := make(map[int64]bool, len(s)) + for _, i := range s { + m[i] = true + } + return m +} diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 9e666265c..2dac856bd 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -7,10 +7,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + tmrand "github.com/tendermint/tendermint/libs/rand" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestTxFilter(t *testing.T) { diff --git a/state/txindex/indexer_service_test.go b/state/txindex/indexer_service_test.go index 6df7c984a..23968dbca 100644 --- a/state/txindex/indexer_service_test.go +++ b/state/txindex/indexer_service_test.go @@ -7,12 +7,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + db "github.com/tendermint/tm-db" + abci 
"github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) func TestIndexerServiceIndexesBlocks(t *testing.T) { diff --git a/state/txindex/kv/kv_bench_test.go b/state/txindex/kv/kv_bench_test.go index 34d770040..31267f54c 100644 --- a/state/txindex/kv/kv_bench_test.go +++ b/state/txindex/kv/kv_bench_test.go @@ -7,11 +7,12 @@ import ( "io/ioutil" "testing" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func BenchmarkTxSearch(b *testing.B) { diff --git a/state/validation.go b/state/validation.go index 6c306e2df..41f12cc40 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,9 +5,10 @@ import ( "errors" "fmt" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) //----------------------------------------------------- diff --git a/state/validation_test.go b/state/validation_test.go index da975351c..373b77dc1 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -5,11 +5,11 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" @@ -171,8 +171,12 @@ func TestValidateBlockCommit(t *testing.T) { time.Now(), ) require.NoError(t, err, "height %d", height) + + 
bpvPubKey, err := badPrivVal.GetPubKey() + require.NoError(t, err) + badVote := &types.Vote{ - ValidatorAddress: badPrivVal.GetPubKey().Address(), + ValidatorAddress: bpvPubKey.Address(), ValidatorIndex: 0, Height: height, Round: 0, diff --git a/store/codec.go b/store/codec.go index 4895e8994..29a59948d 100644 --- a/store/codec.go +++ b/store/codec.go @@ -2,6 +2,7 @@ package store import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/store/store.go b/store/store.go index 2f9ba93fd..38a53d590 100644 --- a/store/store.go +++ b/store/store.go @@ -7,6 +7,7 @@ import ( "github.com/pkg/errors" + db "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/types" @@ -24,6 +25,8 @@ Currently the precommit signatures are duplicated in the Block parts as well as the Commit. In the future this may change, perhaps by moving the Commit data outside the Block. (TODO) +The store can be assumed to contain all contiguous blocks between base and height (inclusive). + // NOTE: BlockStore methods will panic if they encounter errors // deserializing loaded data, indicating probable corruption on disk. */ @@ -31,6 +34,7 @@ type BlockStore struct { db dbm.DB mtx sync.RWMutex + base int64 height int64 } @@ -39,18 +43,36 @@ type BlockStore struct { func NewBlockStore(db dbm.DB) *BlockStore { bsjson := LoadBlockStoreStateJSON(db) return &BlockStore{ + base: bsjson.Base, height: bsjson.Height, db: db, } } -// Height returns the last known contiguous block height. +// Base returns the first known contiguous block height, or 0 for empty block stores. +func (bs *BlockStore) Base() int64 { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + return bs.base +} + +// Height returns the last known contiguous block height, or 0 for empty block stores. func (bs *BlockStore) Height() int64 { bs.mtx.RLock() defer bs.mtx.RUnlock() return bs.height } +// Size returns the number of blocks in the block store. 
+func (bs *BlockStore) Size() int64 { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + if bs.height == 0 { + return 0 + } + return bs.height - bs.base + 1 +} + // LoadBlock returns the block with the given height. // If no block is found for that height, it returns nil. func (bs *BlockStore) LoadBlock(height int64) *types.Block { @@ -171,6 +193,74 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { return commit } +// PruneBlocks removes block up to (but not including) a height. It returns number of blocks pruned. +func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { + if height <= 0 { + return 0, fmt.Errorf("height must be greater than 0") + } + bs.mtx.RLock() + if height > bs.height { + bs.mtx.RUnlock() + return 0, fmt.Errorf("cannot prune beyond the latest height %v", bs.height) + } + base := bs.base + bs.mtx.RUnlock() + if height < base { + return 0, fmt.Errorf("cannot prune to height %v, it is lower than base height %v", + height, base) + } + + pruned := uint64(0) + batch := bs.db.NewBatch() + defer batch.Close() + flush := func(batch db.Batch, base int64) error { + // We can't trust batches to be atomic, so update base first to make sure noone + // tries to access missing blocks. 
+ bs.mtx.Lock() + bs.base = base + bs.mtx.Unlock() + bs.saveState() + + err := batch.WriteSync() + if err != nil { + return fmt.Errorf("failed to prune up to height %v: %w", base, err) + } + batch.Close() + return nil + } + + for h := base; h < height; h++ { + meta := bs.LoadBlockMeta(h) + if meta == nil { // assume already deleted + continue + } + batch.Delete(calcBlockMetaKey(h)) + batch.Delete(calcBlockHashKey(meta.BlockID.Hash)) + batch.Delete(calcBlockCommitKey(h)) + batch.Delete(calcSeenCommitKey(h)) + for p := 0; p < meta.BlockID.PartsHeader.Total; p++ { + batch.Delete(calcBlockPartKey(h, p)) + } + pruned++ + + // flush every 1000 blocks to avoid batches becoming too large + if pruned%1000 == 0 && pruned > 0 { + err := flush(batch, h) + if err != nil { + return 0, err + } + batch = bs.db.NewBatch() + defer batch.Close() + } + } + + err := flush(batch, height) + if err != nil { + return 0, err + } + return pruned, nil +} + // SaveBlock persists the given block, blockParts, and seenCommit to the underlying db. // blockParts: Must be parts of the block // seenCommit: The +2/3 precommits that were seen which committed at height. @@ -213,14 +303,17 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit) bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) - // Save new BlockStoreStateJSON descriptor - BlockStoreStateJSON{Height: height}.Save(bs.db) - // Done! 
bs.mtx.Lock() bs.height = height + if bs.base == 0 && height == 1 { + bs.base = 1 + } bs.mtx.Unlock() + // Save new BlockStoreStateJSON descriptor + bs.saveState() + // Flush bs.db.SetSync(nil, nil) } @@ -233,6 +326,16 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { bs.db.Set(calcBlockPartKey(height, index), partBytes) } +func (bs *BlockStore) saveState() { + bs.mtx.RLock() + bsJSON := BlockStoreStateJSON{ + Base: bs.base, + Height: bs.height, + } + bs.mtx.RUnlock() + bsJSON.Save(bs.db) +} + //----------------------------------------------------------------------------- func calcBlockMetaKey(height int64) []byte { @@ -261,6 +364,7 @@ var blockStoreKey = []byte("blockStore") // BlockStoreStateJSON is the block store state JSON structure. type BlockStoreStateJSON struct { + Base int64 `json:"base"` Height int64 `json:"height"` } @@ -282,6 +386,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { } if len(bytes) == 0 { return BlockStoreStateJSON{ + Base: 0, Height: 0, } } @@ -290,5 +395,9 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { if err != nil { panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes)) } + // Backwards compatibility with persisted data from before Base existed. 
+ if bsj.Height > 0 && bsj.Base == 0 { + bsj.Base = 1 + } return bsj } diff --git a/store/store_test.go b/store/store_test.go index 7fedf8606..16f52aa88 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -65,20 +65,39 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu func TestLoadBlockStoreStateJSON(t *testing.T) { db := db.NewMemDB() + bsj := &BlockStoreStateJSON{Base: 100, Height: 1000} + bsj.Save(db) + + retrBSJ := LoadBlockStoreStateJSON(db) + assert.Equal(t, *bsj, retrBSJ, "expected the retrieved DBs to match") +} + +func TestLoadBlockStoreStateJSON_Empty(t *testing.T) { + db := db.NewMemDB() + + bsj := &BlockStoreStateJSON{} + bsj.Save(db) + + retrBSJ := LoadBlockStoreStateJSON(db) + assert.Equal(t, BlockStoreStateJSON{}, retrBSJ, "expected the retrieved DBs to match") +} + +func TestLoadBlockStoreStateJSON_NoBase(t *testing.T) { + db := db.NewMemDB() bsj := &BlockStoreStateJSON{Height: 1000} bsj.Save(db) retrBSJ := LoadBlockStoreStateJSON(db) - - assert.Equal(t, *bsj, retrBSJ, "expected the retrieved DBs to match") + assert.Equal(t, BlockStoreStateJSON{Base: 1, Height: 1000}, retrBSJ, "expected the retrieved DBs to match") } func TestNewBlockStore(t *testing.T) { db := db.NewMemDB() - err := db.Set(blockStoreKey, []byte(`{"height": "10000"}`)) + err := db.Set(blockStoreKey, []byte(`{"base": "100", "height": "10000"}`)) require.NoError(t, err) bs := NewBlockStore(db) + require.Equal(t, int64(100), bs.Base(), "failed to properly parse blockstore") require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore") panicCausers := []struct { @@ -140,6 +159,7 @@ func TestMain(m *testing.M) { func TestBlockStoreSaveLoadBlock(t *testing.T) { state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) defer cleanup() + require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") // check there 
are no blocks at various heights @@ -155,7 +175,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { validPartSet := block.MakePartSet(2) seenCommit := makeTestCommit(10, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) - require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") + require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") + require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2}) uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0}) @@ -364,6 +385,92 @@ func TestLoadBlockPart(t *testing.T) { "expecting successful retrieval of previously saved block") } +func TestPruneBlocks(t *testing.T) { + config := cfg.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(config.RootDir) + state, err := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile()) + require.NoError(t, err) + db := dbm.NewMemDB() + bs := NewBlockStore(db) + assert.EqualValues(t, 0, bs.Base()) + assert.EqualValues(t, 0, bs.Height()) + assert.EqualValues(t, 0, bs.Size()) + + // pruning an empty store should error, even when pruning to 0 + _, err = bs.PruneBlocks(1) + require.Error(t, err) + + _, err = bs.PruneBlocks(0) + require.Error(t, err) + + // make more than 1000 blocks, to test batch deletions + for h := int64(1); h <= 1500; h++ { + block := makeBlock(h, state, new(types.Commit)) + partSet := block.MakePartSet(2) + seenCommit := makeTestCommit(h, tmtime.Now()) + bs.SaveBlock(block, partSet, seenCommit) + } + + assert.EqualValues(t, 1, bs.Base()) + assert.EqualValues(t, 1500, bs.Height()) + assert.EqualValues(t, 1500, bs.Size()) + + prunedBlock := bs.LoadBlock(1199) + + // Check that basic pruning works + pruned, err := bs.PruneBlocks(1200) + require.NoError(t, err) + assert.EqualValues(t, 1199, pruned) + assert.EqualValues(t, 1200, bs.Base()) + 
assert.EqualValues(t, 1500, bs.Height()) + assert.EqualValues(t, 301, bs.Size()) + assert.EqualValues(t, BlockStoreStateJSON{ + Base: 1200, + Height: 1500, + }, LoadBlockStoreStateJSON(db)) + + require.NotNil(t, bs.LoadBlock(1200)) + require.Nil(t, bs.LoadBlock(1199)) + require.Nil(t, bs.LoadBlockByHash(prunedBlock.Hash())) + require.Nil(t, bs.LoadBlockCommit(1199)) + require.Nil(t, bs.LoadBlockMeta(1199)) + require.Nil(t, bs.LoadBlockPart(1199, 1)) + + for i := int64(1); i < 1200; i++ { + require.Nil(t, bs.LoadBlock(i)) + } + for i := int64(1200); i <= 1500; i++ { + require.NotNil(t, bs.LoadBlock(i)) + } + + // Pruning below the current base should error + _, err = bs.PruneBlocks(1199) + require.Error(t, err) + + // Pruning to the current base should work + pruned, err = bs.PruneBlocks(1200) + require.NoError(t, err) + assert.EqualValues(t, 0, pruned) + + // Pruning again should work + pruned, err = bs.PruneBlocks(1300) + require.NoError(t, err) + assert.EqualValues(t, 100, pruned) + assert.EqualValues(t, 1300, bs.Base()) + + // Pruning beyond the current height should error + _, err = bs.PruneBlocks(1501) + require.Error(t, err) + + // Pruning to the current height should work + pruned, err = bs.PruneBlocks(1500) + require.NoError(t, err) + assert.EqualValues(t, 200, pruned) + assert.Nil(t, bs.LoadBlock(1499)) + assert.NotNil(t, bs.LoadBlock(1500)) + assert.Nil(t, bs.LoadBlock(1501)) +} + func TestLoadBlockMeta(t *testing.T) { bs, db := freshBlockStore() height := int64(10) diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index 354443e5c..88e4650ab 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -8,6 +8,7 @@ import ( "context" amino "github.com/tendermint/go-amino" + coregrpc "github.com/tendermint/tendermint/rpc/grpc" ) diff --git a/tools.mk b/tools.mk index 66c6f2978..de722ae89 100644 --- a/tools.mk +++ b/tools.mk @@ -38,14 +38,8 @@ mkfile_dir := $(shell cd $(shell dirname $(mkfile_path)); pwd) # Go tools ### -BIN ?= 
/usr/local/bin -UNAME_S ?= $(shell uname -s) -UNAME_M ?= $(shell uname -m) - TOOLS_DESTDIR ?= $(GOPATH)/bin -BUF_VERSION ?= 0.7.0 - CERTSTRAP = $(TOOLS_DESTDIR)/certstrap PROTOBUF = $(TOOLS_DESTDIR)/protoc GOODMAN = $(TOOLS_DESTDIR)/goodman @@ -77,29 +71,6 @@ $(PROTOBUF): @go get github.com/gogo/protobuf/protoc-gen-gogo@v1.3.1 .PHONY: protobuf -buf: protoc-gen-buf-check-breaking protoc-gen-buf-check-lint - @echo "Installing buf..." - @curl -sSL \ - "https://github.com/bufbuild/buf/releases/download/v${BUF_VERSION}/buf-${UNAME_S}-${UNAME_M}" \ - -o "${BIN}/buf" && \ - chmod +x "${BIN}/buf" -.PHONY: buf - -protoc-gen-buf-check-breaking: - @echo "Installing protoc-gen-buf-check-breaking..." - @curl -sSL \ - "https://github.com/bufbuild/buf/releases/download/v${BUF_VERSION}/protoc-gen-buf-check-breaking-${UNAME_S}-${UNAME_M}" \ - -o "${BIN}/protoc-gen-buf-check-breaking" && \ - chmod +x "${BIN}/protoc-gen-buf-check-breaking" - -protoc-gen-buf-check-lint: - @echo "Installing protoc-gen-buf-check-lint..." 
- @curl -sSL \ - "https://github.com/bufbuild/buf/releases/download/v${BUF_VERSION}/protoc-gen-buf-check-lint-${UNAME_S}-${UNAME_M}" \ - -o "${BIN}/protoc-gen-buf-check-lint" && \ - chmod +x "${BIN}/protoc-gen-buf-check-lint" -.PHONY: protoc-gen-buf-check-lint - goodman: $(GOODMAN) $(GOODMAN): @echo "Get Goodman" diff --git a/tools/build/.gitignore b/tools/build/.gitignore deleted file mode 100644 index 9974388f1..000000000 --- a/tools/build/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -BUILD -RPMS -SPECS -tmp diff --git a/tools/build/RPM-GPG-KEY-Tendermint b/tools/build/RPM-GPG-KEY-Tendermint deleted file mode 100644 index e6f200d87..000000000 --- a/tools/build/RPM-GPG-KEY-Tendermint +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v2.0.22 (GNU/Linux) - -mQENBFk97ngBCADaiPQFKJI7zWYdUKqC490DzY9g9LatsWoJErK5LuMXwEnF5i+a -UkygueukA4C5U7L71l5EeOB9rtb6AbkF4IEZsmmp93APec/3Vfbac9xvK4dBdiht -F8SrazPdHeR6AKcZH8ZpG/+mdONvGb/gEgtxVjaeIJFpCbjKLlKEXazh2zamhhth -q+Nn/17QmI3KBiaGqQK5w4kGZ4mZPy6fXMQhW5dDMq9f4anlGIAYi9O53dVxsx2S -5d+NHuGer5Ps0u6WMJi/e+UT2EGwzP6ygOxkIjyhMFuVftabOtSSrRHHetw8UAaI -N/RPn2gSbQtOQ7unzHDXp3/o6/r2nDEErPyJABEBAAG0LkdyZWcgU3phYm8gKFRl -bmRlcm1pbnQpIDxncmVnQHBoaWxvc29iZWFyLmNvbT6JATkEEwECACMFAlk97ngC -GwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAKCRDIkIHIISLL6bX/CACXTKmO -u5XgvJICH0pHNeVS5/4Om1Rsg1xNmEkGFBP8N2fqn576exbOLgWLSyNHTEyrJNoc -iTeUtod2qqbVGwRgWm1zeiP8NBYiQ9SUbqskIqcPavJNGWIxsCB0p/odoZah8xSj -tGrkoyoxrc+7z2JgKYK8SVSkJXQkzuc5/ZlY85ci5gPKQhlo5YDqGo+4U9n/Ieo5 -nkF8LBalFC2j7A7sQNroEicpulpGhIq3jyUHtadX01z3pNzuX+wfHX9futoet0YS -tG2007WoPGV0whGnoKxmk0JhwzhscC2XNtJl1GZcwqOOlPU9eGtZuPKj/HBAlRtz -4xTOAcklpg8soqRA -=jNDW ------END PGP PUBLIC KEY BLOCK----- diff --git a/tools/build/Release b/tools/build/Release deleted file mode 100644 index 9003d1320..000000000 --- a/tools/build/Release +++ /dev/null @@ -1,7 +0,0 @@ -Origin: Tendermint -Label: Tendermint -Suite: stable -Date: Fri, 16 Jun 2017 19:44:00 UTC -Architectures: amd64 
-Components: main -Description: Tendermint repository diff --git a/tools/build/Release_amd64 b/tools/build/Release_amd64 deleted file mode 100644 index 1f2ecbfe2..000000000 --- a/tools/build/Release_amd64 +++ /dev/null @@ -1,5 +0,0 @@ -Archive: stable -Component: main -Origin: Tendermint -Label: Tendermint -Architecture: amd64 diff --git a/tools/build/_gpg b/tools/build/_gpg deleted file mode 100644 index 73742b5d8..000000000 --- a/tools/build/_gpg +++ /dev/null @@ -1,8 +0,0 @@ -Version: 4 -Signer: -Date: @DATETIMESTAMP@ -Role: builder -Files: - @BINMD5@ @BINSHA1@ @BINSIZE@ debian-binary - @CONMD5@ @CONSHA1@ @CONSIZE@ control.tar.gz - @DATMD5@ @DATSHA1@ @DATSIZE@ data.tar.xz diff --git a/tools/build/basecoind/DEBIAN/changelog b/tools/build/basecoind/DEBIAN/changelog deleted file mode 100644 index 260718eaf..000000000 --- a/tools/build/basecoind/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -basecoind (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/cosmos/cosmos-sdk for more information. - - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/basecoind/DEBIAN/compat b/tools/build/basecoind/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/basecoind/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/basecoind/DEBIAN/control b/tools/build/basecoind/DEBIAN/control deleted file mode 100644 index c15d49110..000000000 --- a/tools/build/basecoind/DEBIAN/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: basecoind -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Standards-Version: 3.9.6 -Homepage: https://tendermint.com -Package: basecoind -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: basecoind is a Proof-of-Stake cryptocurrency and framework - Basecoind is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. 
It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins. - diff --git a/tools/build/basecoind/DEBIAN/copyright b/tools/build/basecoind/DEBIAN/copyright deleted file mode 100644 index fe449650c..000000000 --- a/tools/build/basecoind/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: basecoind -Source: https://github.com/cosmos/cosmos-sdk - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. 
diff --git a/tools/build/basecoind/DEBIAN/postinst b/tools/build/basecoind/DEBIAN/postinst deleted file mode 100644 index d7d8f4413..000000000 --- a/tools/build/basecoind/DEBIAN/postinst +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postinst script for basecoind -# - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - configure) - chown basecoind.basecoind /etc/basecoind - sudo -Hu basecoind basecoind node init --home /etc/basecoind 2B24DEE2364762300168DF19B6C18BCE2D399EA2 - systemctl daemon-reload - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/DEBIAN/postrm b/tools/build/basecoind/DEBIAN/postrm deleted file mode 100644 index b84c9f2a4..000000000 --- a/tools/build/basecoind/DEBIAN/postrm +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postrm script for basecoin -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - upgrade|failed-upgrade|abort-upgrade) - systemctl daemon-reload - ;; - - purge|remove|abort-install|disappear) - systemctl daemon-reload - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/DEBIAN/preinst b/tools/build/basecoind/DEBIAN/preinst deleted file mode 100644 index 53124c0ce..000000000 --- a/tools/build/basecoind/DEBIAN/preinst +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# preinst script for basecoind -# - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - if ! grep -q '^basecoind:' /etc/passwd ; then - useradd -k /dev/null -r -m -b /etc basecoind - chmod 755 /etc/basecoind - fi - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/DEBIAN/prerm b/tools/build/basecoind/DEBIAN/prerm deleted file mode 100644 index 18ef42079..000000000 --- a/tools/build/basecoind/DEBIAN/prerm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# prerm script for basecoin -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - systemctl stop basecoind 2> /dev/null || : - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset b/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset deleted file mode 100644 index 358334fc3..000000000 --- a/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset +++ /dev/null @@ -1,2 +0,0 @@ -disable basecoind.service - diff --git a/tools/build/basecoind/etc/systemd/system/basecoind.service b/tools/build/basecoind/etc/systemd/system/basecoind.service deleted file mode 100644 index 68b46d84f..000000000 --- a/tools/build/basecoind/etc/systemd/system/basecoind.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=Basecoind -Requires=network-online.target -After=network-online.target - -[Service] -Environment="BCHOME=/etc/basecoind" -Restart=on-failure -User=basecoind -Group=basecoind -PermissionsStartOnly=true -ExecStart=/usr/bin/basecoind start -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/tools/build/basecoind/usr/share/basecoind/key.json b/tools/build/basecoind/usr/share/basecoind/key.json deleted file mode 100644 index bdefe8fd4..000000000 --- a/tools/build/basecoind/usr/share/basecoind/key.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": "1B1BE55F969F54064628A63B9559E7C21C925165", - "priv_key": { - "type": "ed25519", - "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - }, - "pub_key": { - "type": "ed25519", - "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - } -} - diff --git a/tools/build/basecoind/usr/share/basecoind/key2.json b/tools/build/basecoind/usr/share/basecoind/key2.json deleted file mode 100644 index ddfc6809b..000000000 --- a/tools/build/basecoind/usr/share/basecoind/key2.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", - "priv_key": { - "type": "ed25519", - "data": 
"34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - }, - "pub_key": { - "type": "ed25519", - "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - } -} - diff --git a/tools/build/ethermint/DEBIAN/changelog b/tools/build/ethermint/DEBIAN/changelog deleted file mode 100644 index 76a1fb154..000000000 --- a/tools/build/ethermint/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -ethermint (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/tendermint/tendermint for more information. - - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/ethermint/DEBIAN/compat b/tools/build/ethermint/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/ethermint/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/ethermint/DEBIAN/control b/tools/build/ethermint/DEBIAN/control deleted file mode 100644 index 2d8b3b002..000000000 --- a/tools/build/ethermint/DEBIAN/control +++ /dev/null @@ -1,15 +0,0 @@ -Source: ethermint -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Depends: tendermint (>=0.11.0) -Standards-Version: 3.9.6 -Homepage: https://tendermint.com -Package: ethermint -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub - Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. This application allows you to get all the benefits of ethereum without having to run your own miners. 
- diff --git a/tools/build/ethermint/DEBIAN/copyright b/tools/build/ethermint/DEBIAN/copyright deleted file mode 100644 index 6d1bab01b..000000000 --- a/tools/build/ethermint/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: ethermint -Source: https://github.com/tendermint/ethermint - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. 
diff --git a/tools/build/ethermint/DEBIAN/postinst b/tools/build/ethermint/DEBIAN/postinst deleted file mode 100644 index 439fdc395..000000000 --- a/tools/build/ethermint/DEBIAN/postinst +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh -# postinst script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - configure) - chown ethermint.ethermint /etc/ethermint - chown ethermint.ethermint /etc/ethermint/genesis.json - chown ethermint.ethermint /etc/ethermint/keystore - chown ethermint.ethermint /etc/ethermint/keystore/UTC--2016-10-21T22-30-03.071787745Z--7eff122b94897ea5b0e2a9abf47b86337fafebdc - - sudo -Hu ethermint /usr/bin/ethermint --datadir /etc/ethermint init /etc/ethermint/genesis.json - sudo -Hu ethermint tendermint init --home /etc/ethermint - systemctl daemon-reload - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/DEBIAN/postrm b/tools/build/ethermint/DEBIAN/postrm deleted file mode 100644 index f1d9d6afc..000000000 --- a/tools/build/ethermint/DEBIAN/postrm +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postrm script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - upgrade|failed-upgrade|abort-upgrade) - systemctl daemon-reload - ;; - - purge|remove|abort-install|disappear) - systemctl daemon-reload - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/DEBIAN/preinst b/tools/build/ethermint/DEBIAN/preinst deleted file mode 100644 index 829112e6b..000000000 --- a/tools/build/ethermint/DEBIAN/preinst +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# preinst script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - if ! grep -q '^ethermint:' /etc/passwd ; then - useradd -k /dev/null -r -m -b /etc ethermint - chmod 755 /etc/ethermint - fi - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/DEBIAN/prerm b/tools/build/ethermint/DEBIAN/prerm deleted file mode 100644 index 00a775cef..000000000 --- a/tools/build/ethermint/DEBIAN/prerm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# prerm script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - systemctl stop ethermint 2> /dev/null || : - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset b/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset deleted file mode 100644 index 836a28c30..000000000 --- a/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset +++ /dev/null @@ -1,2 +0,0 @@ -disable ethermint.service - diff --git a/tools/build/ethermint/etc/systemd/system/ethermint.service b/tools/build/ethermint/etc/systemd/system/ethermint.service deleted file mode 100644 index f71a074ea..000000000 --- a/tools/build/ethermint/etc/systemd/system/ethermint.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Ethermint -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User=ethermint -Group=ethermint -PermissionsStartOnly=true -ExecStart=/usr/bin/ethermint --datadir /etc/ethermint -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/tools/build/gaia/DEBIAN/changelog b/tools/build/gaia/DEBIAN/changelog deleted file mode 100644 index eca5fbc3d..000000000 --- 
a/tools/build/gaia/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -gaia (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/tendermint/basecoin for more information. - - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/gaia/DEBIAN/compat b/tools/build/gaia/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/gaia/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/gaia/DEBIAN/control b/tools/build/gaia/DEBIAN/control deleted file mode 100644 index 55d1cd5dd..000000000 --- a/tools/build/gaia/DEBIAN/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: gaia -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Standards-Version: 3.9.6 -Homepage: https://cosmos.network -Package: gaia -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: gaia - Tendermint Cosmos delegation game chain - Gaia description comes later. - diff --git a/tools/build/gaia/DEBIAN/copyright b/tools/build/gaia/DEBIAN/copyright deleted file mode 100644 index ffc230134..000000000 --- a/tools/build/gaia/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: gaia -Source: https://github.com/cosmos/gaia - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . 
- On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/build/gaia/DEBIAN/postinst b/tools/build/gaia/DEBIAN/postinst deleted file mode 100644 index 427b7c493..000000000 --- a/tools/build/gaia/DEBIAN/postinst +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postinst script for gaia -# - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - configure) - chown gaia.gaia /etc/gaia - sudo -Hu gaia gaia node init --home /etc/gaia 2B24DEE2364762300168DF19B6C18BCE2D399EA2 - systemctl daemon-reload - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/DEBIAN/postrm b/tools/build/gaia/DEBIAN/postrm deleted file mode 100644 index da526ec30..000000000 --- a/tools/build/gaia/DEBIAN/postrm +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postrm script for gaia -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - upgrade|failed-upgrade|abort-upgrade) - systemctl daemon-reload - ;; - - purge|remove|abort-install|disappear) - systemctl daemon-reload - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/DEBIAN/preinst b/tools/build/gaia/DEBIAN/preinst deleted file mode 100644 index 382fa419f..000000000 --- a/tools/build/gaia/DEBIAN/preinst +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# preinst script for gaia -# - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - if ! grep -q '^gaia:' /etc/passwd ; then - useradd -k /dev/null -r -m -b /etc gaia - chmod 755 /etc/gaia - fi - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/DEBIAN/prerm b/tools/build/gaia/DEBIAN/prerm deleted file mode 100644 index 165c1ab6a..000000000 --- a/tools/build/gaia/DEBIAN/prerm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# prerm script for gaia -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - systemctl stop gaia 2> /dev/null || : - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset b/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset deleted file mode 100644 index dfbf0bc06..000000000 --- a/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset +++ /dev/null @@ -1,2 +0,0 @@ -disable gaia.service - diff --git a/tools/build/gaia/etc/systemd/system/gaia.service b/tools/build/gaia/etc/systemd/system/gaia.service deleted file mode 100644 index 372fe9343..000000000 --- a/tools/build/gaia/etc/systemd/system/gaia.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Gaia -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User=gaia -Group=gaia -PermissionsStartOnly=true -ExecStart=/usr/bin/gaia node start --home=/etc/gaia -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/tools/build/gaia/usr/share/gaia/key.json b/tools/build/gaia/usr/share/gaia/key.json deleted file mode 100644 index bdefe8fd4..000000000 --- a/tools/build/gaia/usr/share/gaia/key.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": 
"1B1BE55F969F54064628A63B9559E7C21C925165", - "priv_key": { - "type": "ed25519", - "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - }, - "pub_key": { - "type": "ed25519", - "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - } -} - diff --git a/tools/build/gaia/usr/share/gaia/key2.json b/tools/build/gaia/usr/share/gaia/key2.json deleted file mode 100644 index ddfc6809b..000000000 --- a/tools/build/gaia/usr/share/gaia/key2.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", - "priv_key": { - "type": "ed25519", - "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - }, - "pub_key": { - "type": "ed25519", - "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - } -} - diff --git a/tools/build/generate-spec b/tools/build/generate-spec deleted file mode 100755 index 4ca60a1d4..000000000 --- a/tools/build/generate-spec +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -if [ $# -ne 3 ]; then - echo "Usage: $0 " - exit 1 -fi - -app=$1 -src=$2 -dst=$3 - -# Find spectemplate -if [ ! -f "$src/$app.spec" ]; then - if [ ! -f "$src/app-template.spec" ]; then - echo "Source template not found." 
- exit 1 - else - srcfile="$src/app-template.spec" - fi -else - srcfile="$src/$app.spec" -fi - -# Copy spectemplate to SPECS -cp "$srcfile" "$dst/$app.spec" - -# Apply any variables defined in .data -if [ -f "$src/$app.data" ]; then - srcdata="$src/$app.data" - source "$srcdata" - for var in `grep -v -e ^# -e ^\s*$ "$srcdata" | grep = | sed 's/\s*=.*$//'` - do - sed -i "s\\@${var}@\\${!var}\\g" "$dst/$app.spec" - done -fi - diff --git a/tools/build/sign b/tools/build/sign deleted file mode 100755 index 0371b5d4b..000000000 --- a/tools/build/sign +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/expect -f -set timeout 3 -set PACKAGE [lindex $argv 0] -set GPG_NAME [lindex $argv 1] -set GPG_PATH [lindex $argv 2] -set GPG_PASSPHRASE $env(GPG_PASSPHRASE) - -if {[llength $argv] == 0} { - send_user "Usage: ./sign \n" - exit 1 -} - -send_user "\nSigning $PACKAGE\n" -spawn rpmsign --resign $PACKAGE --define "_signature gpg" --define "_gpg_name $GPG_NAME" --define "_gpgbin $GPG_PATH" -expect { - timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } - "Enter pass phrase:" -} -send "$GPG_PASSPHRASE\r" -expect { - timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } - "Pass phrase is good." -} -interact -sleep 3 - diff --git a/tools/build/spectemplates/app-template.spec b/tools/build/spectemplates/app-template.spec deleted file mode 100644 index 6cb8145bb..000000000 --- a/tools/build/spectemplates/app-template.spec +++ /dev/null @@ -1,55 +0,0 @@ -Version: @VERSION@ -Release: @BUILD_NUMBER@ - -%define __spec_install_post %{nil} -%define debug_package %{nil} -%define __os_install_post %{nil} - -Name: @PACKAGE_NAME@ -Summary: @PACKAGE_SUMMARY@ -License: Apache 2.0 -URL: @PACKAGE_URL@ -Packager: Greg Szabo -@PACKAGE_ADDITIONAL_HEADER@ - -%description -@PACKAGE_DESCRIPTION@ - -%pre -if ! 
%{__grep} -q '^%{name}:' /etc/passwd ; then - useradd -r -b %{_sysconfdir} %{name} - mkdir -p %{_sysconfdir}/%{name} - chmod 755 %{_sysconfdir}/%{name} - chown %{name}.%{name} %{_sysconfdir}/%{name} -fi - -%prep -# Nothing to do here. - It is done in the Makefile. - -%build -# Nothing to do here. - -%install -cd %{name}-%{version}-%{release} -%{__cp} -a * %{buildroot} - -%post -sudo -Hu %{name} %{name} node init --home %{_sysconfdir}/%{name} 2B24DEE2364762300168DF19B6C18BCE2D399EA2 -systemctl daemon-reload - -%preun -systemctl stop %{name} 2> /dev/null || : - -%postun -systemctl daemon-reload - -%files -%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} -%{_bindir}/* -%{_sysconfdir}/systemd/system/* -%{_sysconfdir}/systemd/system-preset/* -%dir %{_datadir}/%{name} -%{_datadir}/%{name}/* -%dir %{_defaultlicensedir}/%{name} -%doc %{_defaultlicensedir}/%{name}/LICENSE - diff --git a/tools/build/spectemplates/basecoind.data b/tools/build/spectemplates/basecoind.data deleted file mode 100644 index 36b172ecf..000000000 --- a/tools/build/spectemplates/basecoind.data +++ /dev/null @@ -1,5 +0,0 @@ -PACKAGE_SUMMARY="basecoind is a Proof-of-Stake cryptocurrency and framework" -PACKAGE_URL="https://cosmos.network/" -PACKAGE_ADDITIONAL_HEADER="Provides: basecoind" -PACKAGE_DESCRIPTION="Basecoind is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins." 
- diff --git a/tools/build/spectemplates/ethermint.data b/tools/build/spectemplates/ethermint.data deleted file mode 100644 index e9d403db7..000000000 --- a/tools/build/spectemplates/ethermint.data +++ /dev/null @@ -1,5 +0,0 @@ -PACKAGE_SUMMARY="ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub" -PACKAGE_URL="https://tendermint.com/" -PACKAGE_ADDITIONAL_HEADER="Provides: ethermint" -PACKAGE_DESCRIPTION="Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. This application allows you to get all the benefits of ethereum without having to run your own miners." - diff --git a/tools/build/spectemplates/ethermint.spec b/tools/build/spectemplates/ethermint.spec deleted file mode 100644 index fc443e35b..000000000 --- a/tools/build/spectemplates/ethermint.spec +++ /dev/null @@ -1,60 +0,0 @@ -Version: @VERSION@ -Release: @BUILD_NUMBER@ - -%define __spec_install_post %{nil} -%define debug_package %{nil} -%define __os_install_post %{nil} - -Name: @PACKAGE_NAME@ -Summary: @PACKAGE_SUMMARY@ -License: Apache 2.0 -URL: @PACKAGE_URL@ -Packager: Greg Szabo -Requires: tendermint >= 0.11.0 -@PACKAGE_ADDITIONAL_HEADER@ - -%description -@PACKAGE_DESCRIPTION@ - -%pre -if ! %{__grep} -q '^%{name}:' /etc/passwd ; then - useradd -r -b %{_sysconfdir} %{name} - mkdir -p %{_sysconfdir}/%{name} - chmod 755 %{_sysconfdir}/%{name} - chown %{name}.%{name} %{_sysconfdir}/%{name} -fi - -%prep -# Nothing to do here. - It is done in the Makefile. - -%build -# Nothing to do here. 
- -%install -cd %{name}-%{version}-%{release} -%{__cp} -a * %{buildroot} - -%post -sudo -Hu %{name} tendermint init --home %{_sysconfdir}/%{name} -sudo -Hu %{name} %{name} --datadir %{_sysconfdir}/%{name} init %{_sysconfdir}/%{name}/genesis.json - -systemctl daemon-reload - -%preun -systemctl stop %{name} 2> /dev/null || : -systemctl stop %{name}-service 2> /dev/null || : - -%postun -systemctl daemon-reload - -%files -%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} -%config(noreplace) %attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/genesis.json -%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/keystore -%attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/keystore/* -%{_bindir}/* -%{_sysconfdir}/systemd/system/* -%{_sysconfdir}/systemd/system-preset/* -%dir %{_defaultlicensedir}/%{name} -%doc %{_defaultlicensedir}/%{name}/LICENSE - diff --git a/tools/build/spectemplates/gaia.data b/tools/build/spectemplates/gaia.data deleted file mode 100644 index 7152b1b51..000000000 --- a/tools/build/spectemplates/gaia.data +++ /dev/null @@ -1,5 +0,0 @@ -PACKAGE_SUMMARY="gaia - Tendermint Cosmos delegation game chain" -PACKAGE_URL="https://cosmos.network/" -PACKAGE_ADDITIONAL_HEADER="" -PACKAGE_DESCRIPTION="Gaia description comes later." - diff --git a/tools/build/spectemplates/tendermint.spec b/tools/build/spectemplates/tendermint.spec deleted file mode 100644 index 68902a170..000000000 --- a/tools/build/spectemplates/tendermint.spec +++ /dev/null @@ -1,31 +0,0 @@ -Version: @VERSION@ -Release: @BUILD_NUMBER@ - -%define __spec_install_post %{nil} -%define debug_package %{nil} -%define __os_install_post %{nil} - -Name: tendermint -Summary: securely and consistently replicate an application on many machines -License: Apache 2.0 -URL: https://tendermint.com/ -Packager: Greg Szabo - -%description -Tendermint is software for securely and consistently replicating an application on many machines. 
By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. - -%prep -# Nothing to do here. - It is done in the Makefile. - -%build -# Nothing to do here. - -%install -cd %{name}-%{version}-%{release} -%{__cp} -a * %{buildroot} - -%files -%{_bindir}/tendermint -%dir %{_defaultlicensedir}/%{name} -%doc %{_defaultlicensedir}/%{name}/LICENSE - diff --git a/tools/build/tendermint.list b/tools/build/tendermint.list deleted file mode 100644 index bba521af5..000000000 --- a/tools/build/tendermint.list +++ /dev/null @@ -1 +0,0 @@ -deb http://tendermint-packages.s3-website-us-west-1.amazonaws.com/debian stable main diff --git a/tools/build/tendermint.repo b/tools/build/tendermint.repo deleted file mode 100644 index 439f98ecb..000000000 --- a/tools/build/tendermint.repo +++ /dev/null @@ -1,12 +0,0 @@ -#This is the .repo file for the Tendermint CentOS repositories. -#Although it has only been tested under CentOS 7, it should work under Fedora and RedHat 7 too. -#Currently only 64-bit packages are built. - -[tendermint] -name=Tendermint stable releases repository -baseurl=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64 -gpgcheck=1 -gpgkey=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint -enabled=1 -#sslverify = 1 - diff --git a/tools/build/tendermint/DEBIAN/changelog b/tools/build/tendermint/DEBIAN/changelog deleted file mode 100644 index 4b016f845..000000000 --- a/tools/build/tendermint/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -tendermint (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/tendermint/tendermint for more information. 
- - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/tendermint/DEBIAN/compat b/tools/build/tendermint/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/tendermint/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/tendermint/DEBIAN/control b/tools/build/tendermint/DEBIAN/control deleted file mode 100644 index d9da17dd1..000000000 --- a/tools/build/tendermint/DEBIAN/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: tendermint -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Standards-Version: 3.9.6 -Homepage: https://tendermint.com -Package: tendermint -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: securely and consistently replicate an application on many machines - Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. - diff --git a/tools/build/tendermint/DEBIAN/copyright b/tools/build/tendermint/DEBIAN/copyright deleted file mode 100644 index 15ee960dd..000000000 --- a/tools/build/tendermint/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: tendermint -Source: https://github.com/tendermint/tendermint - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index c489a2fd4..f9d48fdcb 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -190,9 +190,17 @@ func (th *TestHarness) Run() { // local Tendermint version. func (th *TestHarness) TestPublicKey() error { th.logger.Info("TEST: Public key of remote signer") - th.logger.Info("Local", "pubKey", th.fpv.GetPubKey()) - th.logger.Info("Remote", "pubKey", th.signerClient.GetPubKey()) - if th.fpv.GetPubKey() != th.signerClient.GetPubKey() { + fpvk, err := th.fpv.GetPubKey() + if err != nil { + return err + } + th.logger.Info("Local", "pubKey", fpvk) + sck, err := th.signerClient.GetPubKey() + if err != nil { + return err + } + th.logger.Info("Remote", "pubKey", sck) + if fpvk != sck { th.logger.Error("FAILED: Local and remote public keys do not match") return newTestHarnessError(ErrTestPublicKeyFailed, nil, "") } @@ -230,8 +238,12 @@ func (th *TestHarness) TestSignProposal() error { th.logger.Error("FAILED: Signed proposal is invalid", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") } + sck, err := th.signerClient.GetPubKey() + if err != nil { + return err + } // now validate the signature on the proposal - if th.signerClient.GetPubKey().VerifyBytes(propBytes, prop.Signature) { + if sck.VerifyBytes(propBytes, prop.Signature) { th.logger.Info("Successfully validated proposal signature") } else { th.logger.Error("FAILED: Proposal 
signature validation failed") @@ -274,8 +286,13 @@ func (th *TestHarness) TestSignVote() error { th.logger.Error("FAILED: Signed vote is invalid", "err", err) return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } + sck, err := th.signerClient.GetPubKey() + if err != nil { + return err + } + // now validate the signature on the proposal - if th.signerClient.GetPubKey().VerifyBytes(voteBytes, vote.Signature) { + if sck.VerifyBytes(voteBytes, vote.Signature) { th.logger.Info("Successfully validated vote signature", "type", voteType) } else { th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) diff --git a/types/block_test.go b/types/block_test.go index a7ae31e36..fe3da920c 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -475,9 +475,10 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { vi := 0 for n := range tc.blockIDs { for i := 0; i < tc.numVotes[n]; i++ { - addr := vals[vi].GetPubKey().Address() + pubKey, err := vals[vi].GetPubKey() + require.NoError(t, err) vote := &Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: vi, Height: height - 1, Round: round, diff --git a/types/codec.go b/types/codec.go index d77f2b29d..b4989d267 100644 --- a/types/codec.go +++ b/types/codec.go @@ -2,6 +2,7 @@ package types import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/types/events.go b/types/events.go index fb80db0f0..c257ba328 100644 --- a/types/events.go +++ b/types/events.go @@ -4,6 +4,7 @@ import ( "fmt" amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/types/evidence.go b/types/evidence.go index 199a01c70..244244f9e 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -7,6 +7,7 @@ import ( 
"time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" amino "github.com/tendermint/go-amino" @@ -59,7 +60,7 @@ type Evidence interface { Height() int64 // height of the equivocation Time() time.Time // time of the equivocation Address() []byte // address of the equivocating validator - Bytes() []byte // bytes which compromise the evidence + Bytes() []byte // bytes which comprise the evidence Hash() []byte // hash of the evidence Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence Equal(Evidence) bool // check equality of evidence diff --git a/types/evidence_test.go b/types/evidence_test.go index bb04d9a4b..40e096fcd 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -17,17 +18,20 @@ type voteData struct { valid bool } -func makeVote(val PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID) *Vote { - addr := val.GetPubKey().Address() +func makeVote( + t *testing.T, val PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID, +) *Vote { + pubKey, err := val.GetPubKey() + require.NoError(t, err) v := &Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, Height: height, Round: round, Type: SignedMsgType(step), BlockID: blockID, } - err := val.SignVote(chainID, v) + err = val.SignVote(chainID, v) if err != nil { panic(err) } @@ -45,28 +49,27 @@ func TestEvidence(t *testing.T) { const chainID = "mychain" - vote1 := makeVote(val, chainID, 0, 10, 2, 1, blockID) - badVote := makeVote(val, chainID, 0, 10, 2, 1, blockID) + vote1 := makeVote(t, val, chainID, 0, 10, 2, 1, blockID) + badVote := makeVote(t, val, chainID, 0, 10, 2, 1, blockID) err := val2.SignVote(chainID, badVote) - if err != nil 
{ - panic(err) - } + assert.NoError(t, err) cases := []voteData{ - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID2), true}, // different block ids - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID3), true}, - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID4), true}, - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID), false}, // wrong block id - {vote1, makeVote(val, "mychain2", 0, 10, 2, 1, blockID2), false}, // wrong chain id - {vote1, makeVote(val, chainID, 1, 10, 2, 1, blockID2), false}, // wrong val index - {vote1, makeVote(val, chainID, 0, 11, 2, 1, blockID2), false}, // wrong height - {vote1, makeVote(val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round - {vote1, makeVote(val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step - {vote1, makeVote(val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), true}, // different block ids + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID3), true}, + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID4), true}, + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID), false}, // wrong block id + {vote1, makeVote(t, val, "mychain2", 0, 10, 2, 1, blockID2), false}, // wrong chain id + {vote1, makeVote(t, val, chainID, 1, 10, 2, 1, blockID2), false}, // wrong val index + {vote1, makeVote(t, val, chainID, 0, 11, 2, 1, blockID2), false}, // wrong height + {vote1, makeVote(t, val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round + {vote1, makeVote(t, val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step + {vote1, makeVote(t, val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator {vote1, badVote, false}, // signed by wrong key } - pubKey := val.GetPubKey() + pubKey, err := val.GetPubKey() + require.NoError(t, err) for _, c := range cases { ev := &DuplicateVoteEvidence{ VoteA: c.vote1, @@ -81,14 +84,14 @@ func TestEvidence(t *testing.T) { } func TestDuplicatedVoteEvidence(t *testing.T) { - ev := 
randomDuplicatedVoteEvidence() + ev := randomDuplicatedVoteEvidence(t) assert.True(t, ev.Equal(ev)) assert.False(t, ev.Equal(&DuplicateVoteEvidence{})) } func TestEvidenceList(t *testing.T) { - ev := randomDuplicatedVoteEvidence() + ev := randomDuplicatedVoteEvidence(t) evl := EvidenceList([]Evidence{ev}) assert.NotNil(t, evl.Hash()) @@ -103,8 +106,8 @@ func TestMaxEvidenceBytes(t *testing.T) { const chainID = "mychain" ev := &DuplicateVoteEvidence{ PubKey: secp256k1.GenPrivKey().PubKey(), // use secp because it's pubkey is longer - VoteA: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID), - VoteB: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2), + VoteA: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID), + VoteB: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2), } bz, err := cdc.MarshalBinaryLengthPrefixed(ev) @@ -113,14 +116,14 @@ func TestMaxEvidenceBytes(t *testing.T) { assert.EqualValues(t, MaxEvidenceBytes, len(bz)) } -func randomDuplicatedVoteEvidence() *DuplicateVoteEvidence { +func randomDuplicatedVoteEvidence(t *testing.T) *DuplicateVoteEvidence { val := NewMockPV() blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" return &DuplicateVoteEvidence{ - VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID), - VoteB: makeVote(val, chainID, 0, 10, 2, 1, blockID2), + VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID), + VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), } } @@ -143,7 +146,7 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { ev.VoteB = nil }, true}, {"Invalid vote type", func(ev *DuplicateVoteEvidence) { - ev.VoteA = makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, blockID2) + ev.VoteA = makeVote(t, val, chainID, 
math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, blockID2) }, true}, {"Invalid vote order", func(ev *DuplicateVoteEvidence) { swap := ev.VoteA.Copy() @@ -155,8 +158,8 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { tc := tc t.Run(tc.testName, func(t *testing.T) { pk := secp256k1.GenPrivKey().PubKey() - vote1 := makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID) - vote2 := makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID2) + vote1 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID) + vote2 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID2) ev := NewDuplicateVoteEvidence(pk, vote1, vote2) tc.malleateEvidence(ev) assert.Equal(t, tc.expectErr, ev.ValidateBasic() != nil, "Validate Basic had an unexpected result") diff --git a/types/params_test.go b/types/params_test.go index 719d51da0..b446bda33 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/assert" + abci "github.com/tendermint/tendermint/abci/types" ) diff --git a/types/priv_validator.go b/types/priv_validator.go index f2af88750..fbe8cebf0 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -12,8 +12,7 @@ import ( // PrivValidator defines the functionality of a local Tendermint validator // that signs votes and proposals, and never double signs. type PrivValidator interface { - // TODO: Extend the interface to return errors too. 
Issue: https://github.com/tendermint/tendermint/issues/3602 - GetPubKey() crypto.PubKey + GetPubKey() (crypto.PubKey, error) SignVote(chainID string, vote *Vote) error SignProposal(chainID string, proposal *Proposal) error @@ -29,7 +28,16 @@ func (pvs PrivValidatorsByAddress) Len() int { } func (pvs PrivValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(pvs[i].GetPubKey().Address(), pvs[j].GetPubKey().Address()) == -1 + pvi, err := pvs[i].GetPubKey() + if err != nil { + panic(err) + } + pvj, err := pvs[j].GetPubKey() + if err != nil { + panic(err) + } + + return bytes.Compare(pvi.Address(), pvj.Address()) == -1 } func (pvs PrivValidatorsByAddress) Swap(i, j int) { @@ -61,8 +69,8 @@ func NewMockPVWithParams(privKey crypto.PrivKey, breakProposalSigning, breakVote } // Implements PrivValidator. -func (pv MockPV) GetPubKey() crypto.PubKey { - return pv.PrivKey.PubKey() +func (pv MockPV) GetPubKey() (crypto.PubKey, error) { + return pv.PrivKey.PubKey(), nil } // Implements PrivValidator. @@ -97,8 +105,8 @@ func (pv MockPV) SignProposal(chainID string, proposal *Proposal) error { // String returns a string representation of the MockPV. func (pv MockPV) String() string { - addr := pv.GetPubKey().Address() - return fmt.Sprintf("MockPV{%v}", addr) + mpv, _ := pv.GetPubKey() // mockPV will never return an error, ignored here + return fmt.Sprintf("MockPV{%v}", mpv.Address()) } // XXX: Implement. 
diff --git a/types/proposal_test.go b/types/proposal_test.go index 1b30a7286..e4ea19183 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -45,7 +46,8 @@ func TestProposalString(t *testing.T) { func TestProposalVerifySignature(t *testing.T) { privVal := NewMockPV() - pubKey := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) prop := NewProposal( 4, 2, 2, @@ -53,7 +55,7 @@ func TestProposalVerifySignature(t *testing.T) { signBytes := prop.SignBytes("test_chain_id") // sign it - err := privVal.SignProposal("test_chain_id", prop) + err = privVal.SignProposal("test_chain_id", prop) require.NoError(t, err) // verify the same proposal @@ -93,8 +95,9 @@ func BenchmarkProposalSign(b *testing.B) { func BenchmarkProposalVerifySignature(b *testing.B) { privVal := NewMockPV() err := privVal.SignProposal("test_chain_id", testProposal) - require.Nil(b, err) - pubKey := privVal.GetPubKey() + require.NoError(b, err) + pubKey, err := privVal.GetPubKey() + require.NoError(b, err) for i := 0; i < b.N; i++ { pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), testProposal.Signature) diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 7d5434be6..636091b5c 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -6,8 +6,10 @@ import ( "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -131,11 +133,12 @@ func TestABCIEvidence(t *testing.T) { blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" - 
pubKey := val.GetPubKey() + pubKey, err := val.GetPubKey() + require.NoError(t, err) ev := &DuplicateVoteEvidence{ PubKey: pubKey, - VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID), - VoteB: makeVote(val, chainID, 0, 10, 2, 1, blockID2), + VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID), + VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), } abciEv := TM2PB.Evidence( ev, diff --git a/types/results_test.go b/types/results_test.go index a37de9ec4..9ecfe35ca 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" ) diff --git a/types/test_util.go b/types/test_util.go index 48913f308..377c965a8 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -2,6 +2,8 @@ package types import ( "time" + + "github.com/pkg/errors" ) func MakeCommit(blockID BlockID, height int64, round int, @@ -9,9 +11,12 @@ func MakeCommit(blockID BlockID, height int64, round int, // all sign for i := 0; i < len(validators); i++ { - addr := validators[i].GetPubKey().Address() + pubKey, err := validators[i].GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } vote := &Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: i, Height: height, Round: round, @@ -20,7 +25,7 @@ func MakeCommit(blockID BlockID, height int64, round int, Timestamp: now, } - _, err := signAddVote(validators[i], vote, voteSet) + _, err = signAddVote(validators[i], vote, voteSet) if err != nil { return nil, err } @@ -45,7 +50,11 @@ func MakeVote( chainID string, now time.Time, ) (*Vote, error) { - addr := privVal.GetPubKey().Address() + pubKey, err := privVal.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } + addr := pubKey.Address() idx, _ := valSet.GetByAddress(addr) vote := &Vote{ ValidatorAddress: addr, diff --git a/types/validator.go 
b/types/validator.go index c3cadc4d3..359a19114 100644 --- a/types/validator.go +++ b/types/validator.go @@ -105,7 +105,10 @@ func RandValidator(randPower bool, minPower int64) (*Validator, PrivValidator) { if randPower { votePower += int64(tmrand.Uint32()) } - pubKey := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey() + if err != nil { + panic(fmt.Errorf("could not retrieve pubkey %w", err)) + } val := NewValidator(pubKey, votePower) return val, privVal } diff --git a/types/validator_set.go b/types/validator_set.go index 04de50646..1a17d1e83 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" tmmath "github.com/tendermint/tendermint/libs/math" ) diff --git a/types/vote_set_test.go b/types/vote_set_test.go index ab4433a39..fc4eb76f3 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -73,7 +74,10 @@ func TestAddVote(t *testing.T) { // t.Logf(">> %v", voteSet) - val0Addr := val0.GetPubKey().Address() + val0p, err := val0.GetPubKey() + require.NoError(t, err) + val0Addr := val0p.Address() + if voteSet.GetByAddress(val0Addr) != nil { t.Errorf("expected GetByAddress(val0.Address) to be nil") } @@ -94,7 +98,7 @@ func TestAddVote(t *testing.T) { Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } - _, err := signAddVote(val0, vote, voteSet) + _, err = signAddVote(val0, vote, voteSet) if err != nil { t.Error(err) } @@ -126,9 +130,11 @@ func Test2_3Majority(t *testing.T) { } // 6 out of 10 voted for nil. 
for i := 0; i < 6; i++ { - addr := privValidators[i].GetPubKey().Address() + pubKey, err := privValidators[i].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, i) - _, err := signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -140,9 +146,11 @@ func Test2_3Majority(t *testing.T) { // 7th validator voted for some blockhash { - addr := privValidators[6].GetPubKey().Address() + pubKey, err := privValidators[6].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 6) - _, err := signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if err != nil { t.Error(err) } @@ -154,9 +162,11 @@ func Test2_3Majority(t *testing.T) { // 8th validator voted for nil. { - addr := privValidators[7].GetPubKey().Address() + pubKey, err := privValidators[7].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 7) - _, err := signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(privValidators[7], vote, voteSet) if err != nil { t.Error(err) } @@ -187,9 +197,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 66 out of 100 voted for nil. 
for i := 0; i < 66; i++ { - addr := privValidators[i].GetPubKey().Address() + pubKey, err := privValidators[i].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, i) - _, err := signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -201,9 +213,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 67th validator voted for nil { - adrr := privValidators[66].GetPubKey().Address() + pubKey, err := privValidators[66].GetPubKey() + require.NoError(t, err) + adrr := pubKey.Address() vote := withValidator(voteProto, adrr, 66) - _, err := signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) + _, err = signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) if err != nil { t.Error(err) } @@ -215,10 +229,12 @@ func Test2_3MajorityRedux(t *testing.T) { // 68th validator voted for a different BlockParts PartSetHeader { - addr := privValidators[67].GetPubKey().Address() + pubKey, err := privValidators[67].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 67) blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} - _, err := signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + _, err = signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) if err != nil { t.Error(err) } @@ -230,10 +246,12 @@ func Test2_3MajorityRedux(t *testing.T) { // 69th validator voted for different BlockParts Total { - addr := privValidators[68].GetPubKey().Address() + pubKey, err := privValidators[68].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 68) blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash} - _, err := signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + _, err = 
signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) if err != nil { t.Error(err) } @@ -245,9 +263,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 70th validator voted for different BlockHash { - addr := privValidators[69].GetPubKey().Address() + pubKey, err := privValidators[69].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 69) - _, err := signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if err != nil { t.Error(err) } @@ -259,9 +279,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 71st validator voted for the right BlockHash & BlockPartsHeader { - addr := privValidators[70].GetPubKey().Address() + pubKey, err := privValidators[70].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 70) - _, err := signAddVote(privValidators[70], vote, voteSet) + _, err = signAddVote(privValidators[70], vote, voteSet) if err != nil { t.Error(err) } @@ -288,7 +310,9 @@ func TestBadVotes(t *testing.T) { // val0 votes for nil. { - addr := privValidators[0].GetPubKey().Address() + pubKey, err := privValidators[0].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) added, err := signAddVote(privValidators[0], vote, voteSet) if !added || err != nil { @@ -298,7 +322,9 @@ func TestBadVotes(t *testing.T) { // val0 votes again for some block. 
{ - addr := privValidators[0].GetPubKey().Address() + pubKey, err := privValidators[0].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) added, err := signAddVote(privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if added || err == nil { @@ -308,7 +334,9 @@ func TestBadVotes(t *testing.T) { // val1 votes on another height { - addr := privValidators[1].GetPubKey().Address() + pubKey, err := privValidators[1].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 1) added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet) if added || err == nil { @@ -318,7 +346,9 @@ func TestBadVotes(t *testing.T) { // val2 votes on another round { - addr := privValidators[2].GetPubKey().Address() + pubKey, err := privValidators[2].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet) if added || err == nil { @@ -328,7 +358,9 @@ func TestBadVotes(t *testing.T) { // val3 votes of another type. { - addr := privValidators[3].GetPubKey().Address() + pubKey, err := privValidators[3].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 3) added, err := signAddVote(privValidators[3], withType(vote, byte(PrecommitType)), voteSet) if added || err == nil { @@ -353,7 +385,10 @@ func TestConflicts(t *testing.T) { BlockID: BlockID{nil, PartSetHeader{}}, } - val0Addr := privValidators[0].GetPubKey().Address() + val0, err := privValidators[0].GetPubKey() + require.NoError(t, err) + val0Addr := val0.Address() + // val0 votes for nil. { vote := withValidator(voteProto, val0Addr, 0) @@ -407,7 +442,9 @@ func TestConflicts(t *testing.T) { // val1 votes for blockHash1. 
{ - addr := privValidators[1].GetPubKey().Address() + pv, err := privValidators[1].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 1) added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet) if !added || err != nil { @@ -425,7 +462,9 @@ func TestConflicts(t *testing.T) { // val2 votes for blockHash2. { - addr := privValidators[2].GetPubKey().Address() + pv, err := privValidators[2].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet) if !added || err != nil { @@ -446,7 +485,9 @@ func TestConflicts(t *testing.T) { // val2 votes for blockHash1. { - addr := privValidators[2].GetPubKey().Address() + pv, err := privValidators[2].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet) if !added { @@ -488,9 +529,11 @@ func TestMakeCommit(t *testing.T) { // 6 out of 10 voted for some block. for i := 0; i < 6; i++ { - addr := privValidators[i].GetPubKey().Address() + pv, err := privValidators[i].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, i) - _, err := signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -501,12 +544,14 @@ func TestMakeCommit(t *testing.T) { // 7th voted for some other block. 
{ - addr := privValidators[6].GetPubKey().Address() + pv, err := privValidators[6].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 6) vote = withBlockHash(vote, tmrand.Bytes(32)) vote = withBlockPartsHeader(vote, PartSetHeader{123, tmrand.Bytes(32)}) - _, err := signAddVote(privValidators[6], vote, voteSet) + _, err = signAddVote(privValidators[6], vote, voteSet) if err != nil { t.Error(err) } @@ -514,9 +559,11 @@ func TestMakeCommit(t *testing.T) { // The 8th voted like everyone else. { - addr := privValidators[7].GetPubKey().Address() + pv, err := privValidators[7].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 7) - _, err := signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(privValidators[7], vote, voteSet) if err != nil { t.Error(err) } @@ -524,11 +571,13 @@ func TestMakeCommit(t *testing.T) { // The 9th voted for nil. { - addr := privValidators[8].GetPubKey().Address() + pv, err := privValidators[8].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 8) vote.BlockID = BlockID{} - _, err := signAddVote(privValidators[8], vote, voteSet) + _, err = signAddVote(privValidators[8], vote, voteSet) if err != nil { t.Error(err) } diff --git a/types/vote_test.go b/types/vote_test.go index 40a9d650a..eb4f6a955 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -143,13 +143,14 @@ func TestVoteProposalNotEq(t *testing.T) { func TestVoteVerifySignature(t *testing.T) { privVal := NewMockPV() - pubkey := privVal.GetPubKey() + pubkey, err := privVal.GetPubKey() + require.NoError(t, err) vote := examplePrecommit() signBytes := vote.SignBytes("test_chain_id") // sign it - err := privVal.SignVote("test_chain_id", vote) + err = privVal.SignVote("test_chain_id", vote) require.NoError(t, err) // verify the same vote @@ -193,12 +194,13 @@ func TestIsVoteTypeValid(t *testing.T) { func TestVoteVerify(t 
*testing.T) { privVal := NewMockPV() - pubkey := privVal.GetPubKey() + pubkey, err := privVal.GetPubKey() + require.NoError(t, err) vote := examplePrevote() vote.ValidatorAddress = pubkey.Address() - err := vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey()) + err = vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey()) if assert.Error(t, err) { assert.Equal(t, ErrVoteInvalidValidatorAddress, err) } diff --git a/version/version.go b/version/version.go index 57ef21f1f..9a16e5d58 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.33.1" + TMCoreSemVer = "0.33.2" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.16.1"