Compare commits

..

2 Commits

Author          SHA1        Message                                           Date
Emmanuel Odeke  acc161f7cc  version: bump to v0.10.5                          2017-11-01 07:03:14 -07:00
Anton Kaliaev   eebc7f9511  only notify when there are some txs (Refs #753)   2017-11-01 06:19:39 -07:00
970 changed files with 17644 additions and 121669 deletions

File diff suppressed because it is too large.


@@ -1,370 +0,0 @@
version: 2
defaults: &defaults
working_directory: /go/src/github.com/tendermint/tendermint
docker:
- image: circleci/golang:1.10.3
environment:
GOBIN: /tmp/workspace/bin
jobs:
setup_dependencies:
<<: *defaults
steps:
- run: mkdir -p /tmp/workspace/bin
- run: mkdir -p /tmp/workspace/profiles
- checkout
- restore_cache:
keys:
- v3-pkg-cache
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run:
name: binaries
command: |
export PATH="$GOBIN:$PATH"
make install install_abci
- persist_to_workspace:
root: /tmp/workspace
paths:
- bin
- profiles
- save_cache:
key: v3-pkg-cache
paths:
- /go/pkg
# - save_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# paths:
# - /go/src/github.com/tendermint/tendermint
build_slate:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: slate docs
command: |
set -ex
export PATH="$GOBIN:$PATH"
make build-slate
lint:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: metalinter
command: |
set -ex
export PATH="$GOBIN:$PATH"
make metalinter
- run:
name: check_dep
command: |
set -ex
export PATH="$GOBIN:$PATH"
make check_dep
test_abci_apps:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run abci apps tests
command: |
export PATH="$GOBIN:$PATH"
bash abci/tests/test_app/test.sh
# if this test fails, fix it and update the docs at:
# https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md
test_abci_cli:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run abci-cli tests
command: |
export PATH="$GOBIN:$PATH"
bash abci/tests/test_cli/test.sh
test_apps:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
- run:
name: Run tests
command: bash test/app/test.sh
test_cover:
<<: *defaults
parallelism: 4
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: mkdir -p /tmp/logs
- run:
name: Run tests
command: |
for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
id=$(basename "$pkg")
GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
done
- persist_to_workspace:
root: /tmp/workspace
paths:
- "profiles/*"
- store_artifacts:
path: /tmp/logs
test_persistence:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh
localnet:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- checkout
- run:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost
test_p2p:
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace
machine:
image: circleci/classic:latest
steps:
- checkout
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: bash test/p2p/circleci.sh
- store_artifacts:
path: /home/circleci/project/test/p2p/logs
upload_coverage:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: gather
command: |
set -ex
echo "mode: atomic" > coverage.txt
for prof in $(ls /tmp/workspace/profiles/); do
tail -n +2 /tmp/workspace/profiles/"$prof" >> coverage.txt
done
- run:
name: upload
command: bash .circleci/codecov.sh -f coverage.txt
workflows:
version: 2
test-suite:
jobs:
- setup_dependencies
- lint:
requires:
- setup_dependencies
- test_abci_apps:
requires:
- setup_dependencies
- test_abci_cli:
requires:
- setup_dependencies
- test_apps:
requires:
- setup_dependencies
- test_cover:
requires:
- setup_dependencies
- test_persistence:
requires:
- setup_dependencies
- localnet:
requires:
- setup_dependencies
- test_p2p
- upload_coverage:
requires:
- test_cover

.codecov.yml (new file)

@@ -0,0 +1,21 @@
#
# This codecov.yml is the default configuration for
# all repositories on Codecov. You may adjust the settings
# below in your own codecov.yml in your repository.
#
coverage:
precision: 2
round: down
range: 70...100
status:
# Learn more at https://codecov.io/docs#yaml_default_commit_status
project:
default:
threshold: 1% # allow this much decrease on project
changes: false
comment:
layout: "header, diff"
behavior: default # update if exists else create new


@@ -8,7 +8,10 @@ end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.{sh,Makefile}]
[Makefile]
indent_style = tab
[*.sh]
indent_style = tab
[*.proto]

.github/CODEOWNERS (vendored)

@@ -1,7 +0,0 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/
# Everything goes through Bucky, Anton, Alex. For now.
* @ebuchman @melekes @xla
# Precious documentation
/docs/ @zramsay

.github/ISSUE_TEMPLATE (vendored, new file)

@@ -0,0 +1,41 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app)
in the case of a bug.
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Merkleeyes version** (use `git rev-parse --verify HEAD`, skip if you don't use it):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Anything else we need to know**:


@@ -1,42 +0,0 @@
---
name: Bug Report
about: Create a report to help us squash bugs!
---
<!--
Please fill in as much of the template below as you can.
Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**Have you tried the latest version**: yes/no
**How to reproduce it** (as minimally and precisely as possible):
**Logs (paste a small part showing an error (< 10 lines) or link a pastebin, gist, etc. containing more of the log file)**:
**Config (you can paste only the changes you've made)**:
**node command runtime flags**:
**`/dump_consensus_state` output for consensus bugs**
**Anything else we need to know**:


@@ -1,13 +0,0 @@
---
name: Feature Request
about: Create a proposal to request a feature
---
<!--
Please describe *in detail* the feature/behavior/change you'd like to see.
Be ready for followup questions, and please respond in a timely
manner.
Word of caution: poorly thought out proposals may be rejected without deliberation
-->


@@ -1,6 +0,0 @@
<!-- Thanks for filing a PR! Before hitting the button, please check the following items.-->
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests
* [ ] Updated CHANGELOG_PENDING.md

.gitignore (vendored)

@@ -5,6 +5,7 @@
.DS_Store
build/*
rpc/test/.tendermint
.debora
.tendermint
remote_dump
.revision
@@ -12,33 +13,5 @@ vendor
.vagrant
test/p2p/data/
test/logs
.glide
coverage.txt
docs/_build
docs/dist
*.log
abci-cli
docs/node_modules/
index.html.md
scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil
.idea/
*.iml
.vscode/
libs/pubsub/query/fuzz_test/output
shunit2
.tendermint-lite
addrbook.json
*/vendor
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
.vscode

File diff suppressed because it is too large.


@@ -1,107 +0,0 @@
# Pending
## v0.26.0
*October 19, 2018*
Special thanks to external contributors on this release:
@bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu,
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995
This release is primarily about adding Version fields to various data structures,
optimizing consensus messages for signing and verification in
restricted environments (like HSMs and the Ethereum Virtual Machine), and
aligning the consensus code with the [specification](https://arxiv.org/abs/1807.04938).
It also includes our first take at a generalized merkle proof system.
See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new
version.
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
BREAKING CHANGES:
* CLI/RPC/Config
* [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints
* [config] [\#2505](https://github.com/tendermint/tendermint/issues/2505) Remove Mempool.RecheckEmpty (it was effectively useless anyways)
* [config] [\#2490](https://github.com/tendermint/tendermint/issues/2490) `mempool.wal` is disabled by default
* [privval] [\#2459](https://github.com/tendermint/tendermint/issues/2459) Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain an error message (returned by the remote signer)
* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as
encoded on disk.
* [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default
behaviour to `prove=false`
* [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and
`/net_info`
* Apps
* [abci] [\#2298](https://github.com/tendermint/tendermint/issues/2298) ResponseQuery.Proof is now a structured merkle.Proof, not just
arbitrary bytes
* [abci] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version to Header and shift all fields by one
* [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Bump the field numbers for some `ResponseInfo` fields to make room for
`AppVersion`
* Go API
* [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints
* [crypto/merkle & lite] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Various changes to accommodate General Merkle trees
* [crypto/merkle] [\#2595](https://github.com/tendermint/tendermint/issues/2595) Remove all Hasher objects in favor of byte slices
* [crypto/merkle] [\#2635](https://github.com/tendermint/tendermint/issues/2635) merkle.SimpleHashFromTwoHashes is no longer exported
* [node] [\#2479](https://github.com/tendermint/tendermint/issues/2479) Remove node.RunForever
* [rpc/client] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove`
* [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`.
* [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`,
`PrecommitType`.
* Blockchain Protocol
* [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`:
* [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`.
* [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding.
* [\#2598](https://github.com/tendermint/tendermint/issues/2598) Change `Type` field from `string` to `byte` and use new
`SignedMsgType` to enumerate.
* [types] [\#2512](https://github.com/tendermint/tendermint/issues/2512) Remove the pubkey field from the validator hash
* [types] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version struct to Header
* [types] [\#2609](https://github.com/tendermint/tendermint/issues/2609) ConsensusParams.Hash() is the hash of the amino encoded
struct instead of the Merkle tree of the fields
* [state] [\#2587](https://github.com/tendermint/tendermint/issues/2587) Require block.Time of the first block to be genesis time
* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version
* [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same
order they appear in the header, instead of sorting by field name
* P2P Protocol
* [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of
DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake
FEATURES:
- [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}`
- [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo`
- [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together
IMPROVEMENTS:
- Additional Metrics
- [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169)
- [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169)
- [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Added ValidateBasic method, which performs basic checks
- [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at
github.com/tendermint/crypto
- [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit
BUG FIXES:
- [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile needs to call Flush() before rename (@goolAdapter)
- [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method
- [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray)
- [common] [\#2534](https://github.com/tendermint/tendermint/issues/2534) Fix `BitArray.PickRandom` to choose uniformly from true bits
- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for
timeoutPrecommit before starting next round
- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for
Proposal or timeoutProposal before entering prevote
- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock
- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1
- [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a
block
- [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter)
- [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter)
- [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time
- [state] [\#2616](https://github.com/tendermint/tendermint/issues/2616) Pass nil to NewValidatorSet() when genesis file's Validators field is nil
- [p2p] [\#2555](https://github.com/tendermint/tendermint/issues/2555) Fix p2p switch FlushThrottle value (@goolAdapter)
- [p2p] [\#2668](https://github.com/tendermint/tendermint/issues/2668) Reconnect to originally dialed address (not self-reported
address) for persistent peers


@@ -1,6 +1,6 @@
# Contributing
Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.
Thank you for considering making contributions to Tendermint and related repositories (Basecoin, Merkleeyes, etc.)!
Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
@@ -8,16 +8,16 @@ Please make sure to use `gofmt` before every commit - the easiest way to do this
## Forking
Please note that Go requires code to live under absolute paths, which complicates forking.
While my fork lives at `https://github.com/ebuchman/tendermint`,
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
Please note that Go requires code to live under absolute paths, which complicates forking.
While my fork lives at `https://github.com/ebuchman/tendermint`,
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
Instead, we use `git remote` to add the fork as a new remote for the original repo,
`$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there.
For instance, to create a fork and work on a branch of it, I would:
* Create the fork on github, using the fork button.
* Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
* Go to the original repo checked out locally (ie. `$GOPATH/src/github.com/tendermint/tendermint`)
* `git remote rename origin upstream`
* `git remote add origin git@github.com:ebuchman/basecoin.git`
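Taken together, the fork setup above amounts to roughly the following shell commands (the fork URL and branch name here are illustrative, not prescribed by this document):
```
cd "$GOPATH/src/github.com/tendermint/tendermint"   # always work from the canonical import path
git remote rename origin upstream                   # upstream -> tendermint/tendermint
git remote add origin git@github.com:<your-user>/tendermint.git   # your fork
git checkout -b my-feature develop                  # branch from the tip of develop
# ...commit your changes...
git push origin my-feature                          # then open a pull request against develop
```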
@@ -34,44 +34,16 @@ Please don't make Pull Requests to `master`.
## Dependencies
We use [dep](https://github.com/golang/dep) to manage dependencies.
We use [glide](https://github.com/masterminds/glide) to manage dependencies.
That said, the master branch of every Tendermint repository should just build with `go get`, which means they should be kept up-to-date with their dependencies so we can get away with telling people they can just `go get` our software.
Since some dependencies are not under our control, a third party may break our build, in which case we can fall back on `glide install`. Even for dependencies under our control, glide helps us keeps multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use glide.
That said, the master branch of every Tendermint repository should just build
with `go get`, which means they should be kept up-to-date with their
dependencies so we can get away with telling people they can just `go get` our
software.
Since some dependencies are not under our control, a third party may break our
build, in which case we can fall back on `dep ensure` (or `make
get_vendor_deps`). Even for dependencies under our control, dep helps us to
keep multiple repos in sync as they evolve. Anything with an executable, such
as apps, tools, and the core, should use dep.
Run `dep status` to get a list of vendor dependencies that may not be
up-to-date.
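A minimal sketch of that dependency workflow, assuming the tools installed by `make get_tools` (including `dep`) are on your `PATH`:
```
make get_tools        # install dep and the other pinned tools
dep ensure            # populate vendor/ (what make get_vendor_deps runs under the hood)
dep status            # list vendored dependencies that may not be up-to-date
```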
## Vagrant
If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
hacking Tendermint with the commands below.
NOTE: In case you installed Vagrant in 2017, you might need to run
`vagrant box update` to upgrade to the latest `ubuntu/xenial64`.
```
vagrant up
vagrant ssh
make test
```
Run `bash scripts/glide/status.sh` to get a list of vendored dependencies that may not be up-to-date.
## Testing
All repos should be hooked up to [CircleCI](https://circleci.com/).
If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
All repos should be hooked up to circle.
If they have `.go` files in the root directory, they will be automatically tested by circle using `go test -v -race ./...`. If not, they will need a `circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and includes its continuous integration status using a badge in the `README.md`.
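In concrete terms, the default CI check described above is just the race-enabled test run, with `make test` as the per-repo wrapper (a sketch, not an exact CI config):
```
go test -v -race ./...   # what circle runs automatically when .go files sit in the repo root
make test                # the target every repo's Makefile should ideally define
```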
## Branching Model and Release
@@ -85,7 +57,7 @@ especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint
- the latest state of development is on `develop`
- `develop` must never fail `make test`
- no --force onto `develop` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git add origin`)
- before submitting a pull request, begin `git rebase` on top of `develop`
### Pull Merge Procedure:
@@ -103,15 +75,3 @@ especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint
- push to release-vX.X.X to run the extended integration tests on the CI
- merge to master
- merge master back to develop
### Hotfix Procedure:
- start on `master`
- checkout a new branch named hotfix-vX.X.X
- make the required changes
- these changes should be small and an absolute necessity
- add a note to CHANGELOG.md
- bump versions
- push to hotfix-vX.X.X to run the extended integration tests on the CI
- merge hotfix-vX.X.X to master
- merge hotfix-vX.X.X to develop
- delete the hotfix-vX.X.X branch
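As a rough shell sketch of the hotfix procedure above (vX.X.X stands in for the real version):
```
git checkout master
git checkout -b hotfix-vX.X.X
# make the (small, necessary) changes, note them in CHANGELOG.md, bump versions
git push origin hotfix-vX.X.X                     # runs the extended integration tests on the CI
git checkout master  && git merge --no-ff hotfix-vX.X.X
git checkout develop && git merge --no-ff hotfix-vX.X.X
git branch -d hotfix-vX.X.X                       # delete the hotfix branch
```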

DOCKER/.gitignore (vendored)

@@ -1 +0,0 @@
tendermint


@@ -1,39 +1,45 @@
FROM alpine:3.7
MAINTAINER Greg Szabo <greg@tendermint.com>
FROM alpine:3.6
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
# (unless you change `genesis_file` in config.toml). You can put your config.toml and
# private validator file into /tendermint/config.
# This is the release of tendermint to pull in.
ENV TM_VERSION 0.10.0
ENV TM_SHA256SUM a29852b8d51c00db93c87c3d148fa419a047abd38f32b2507a905805131acc19
# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private
# validator file into /tendermint.
#
# The /tendermint/data dir is used by tendermint to store state.
ENV TMHOME /tendermint
ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT
# Set user right away for determinism
RUN addgroup tmuser && \
adduser -S -G tmuser tmuser
# Create directory for persistence and give our user ownership
RUN mkdir -p $DATA_ROOT && \
chown -R tmuser:tmuser $DATA_ROOT
# OS environment setup
# Set user right away for determinism, create directory for persistence and give our user ownership
# jq and curl used for extracting `pub_key` from private validator while
# deploying tendermint with Kubernetes. It is nice to have bash so the users
# could execute bash commands.
RUN apk update && \
apk upgrade && \
apk --no-cache add curl jq bash && \
addgroup tmuser && \
adduser -S -G tmuser tmuser -h "$TMHOME"
RUN apk add --no-cache bash curl jq
# Run the container with tmuser by default. (UID=100, GID=1000)
USER tmuser
RUN apk add --no-cache openssl && \
wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
apk del openssl && \
rm -f tendermint_${TM_VERSION}_linux_amd64.zip
# Expose the data directory as a volume since there's mutable state in there
VOLUME [ $TMHOME ]
VOLUME $DATA_ROOT
WORKDIR $TMHOME
# p2p port
EXPOSE 46656
# rpc port
EXPOSE 46657
# p2p and rpc port
EXPOSE 26656 26657
ENTRYPOINT ["tendermint"]
ENTRYPOINT ["/usr/bin/tendermint"]
CMD ["node", "--moniker=`hostname`"]
STOPSIGNAL SIGTERM
ARG BINARY=tendermint
COPY $BINARY /usr/bin/tendermint


@@ -1,23 +0,0 @@
FROM golang:latest
RUN mkdir -p /go/src/github.com/tendermint/abci
WORKDIR /go/src/github.com/tendermint/abci
COPY Makefile /go/src/github.com/tendermint/abci/
# see make protoc for details on ldconfig
RUN make get_protoc && ldconfig
# killall is used in tests
RUN apt-get update && apt-get install -y \
psmisc \
&& rm -rf /var/lib/apt/lists/*
COPY Gopkg.toml /go/src/github.com/tendermint/abci/
COPY Gopkg.lock /go/src/github.com/tendermint/abci/
RUN make get_tools
# see https://github.com/golang/dep/issues/1312
RUN dep ensure -vendor-only
COPY . /go/src/github.com/tendermint/abci


@@ -1,4 +1,4 @@
FROM alpine:3.7
FROM alpine:3.6
ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT
@@ -18,18 +18,18 @@ RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
cd /go/src/github.com/tendermint/tendermint && \
git clone https://github.com/tendermint/tendermint . && \
git checkout develop && \
make get_tools && \
make get_vendor_deps && \
make install && \
glide cc && \
cd - && \
rm -rf /go/src/github.com/tendermint/tendermint && \
apk del go build-base git
VOLUME $DATA_ROOT
EXPOSE 26656
EXPOSE 26657
EXPOSE 46656
EXPOSE 46657
ENTRYPOINT ["tendermint"]
CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"]
CMD ["node", "--moniker=`hostname`", "--proxy_app=dummy"]


@@ -1,18 +0,0 @@
FROM golang:1.10.1
# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq bsdmainutils vim-common psmisc netcat
# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends curl
VOLUME /go
EXPOSE 26656
EXPOSE 26657


@@ -7,9 +7,6 @@ push:
build_develop:
docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .
build_testing:
docker build --tag tendermint/testing -f ./Dockerfile.testing .
push_develop:
docker push "tendermint/tendermint:develop"


@@ -1,14 +1,6 @@
# Docker
# Supported tags and respective `Dockerfile` links
## Supported tags and respective `Dockerfile` links
- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile)
- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
- `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
- `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile)
- `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)
- `0.11.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile)
- `0.10.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile)
- `0.10.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile)
- `0.9.1`, `0.9`, [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
@@ -16,52 +8,51 @@
`develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.
## Quick reference
* **Where to get help:**
https://cosmos.network/community
* **Where to file issues:**
https://github.com/tendermint/tendermint/issues
* **Supported Docker versions:**
[the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis)
## Tendermint
# Tendermint
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
For more background, see the [introduction](https://tendermint.readthedocs.io/en/master/introduction.html).
For more background, see the [introduction](https://tendermint.com/intro).
To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html).
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
## How to use this image
# How to use this image
### Start one instance of the Tendermint core with the `kvstore` app
## Start one instance of the Tendermint core with the `dummy` app
A quick example of a built-in app and Tendermint core in one container.
A very simple example of a built-in app and Tendermint core in one container.
```
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=dummy
```
## Local cluster
## mintnet-kubernetes
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:
If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/tools/tree/master/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
```
make build-linux
make build-docker-localnode
make localnet-start
```
# Supported Docker versions
Note that this will build and use a different image than the ones provided here.
This image is officially supported on Docker version 1.13.1.
## License
Support for older versions (down to 1.6) is provided on a best-effort basis.
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).
Please see [the Docker installation documentation](https://docs.docker.com/installation/) for details on how to upgrade your Docker daemon.
# License
View [license information](https://raw.githubusercontent.com/tendermint/tendermint/master/LICENSE) for the software contained in this image.
# User Feedback
## Issues
If you have any problems with or questions about this image, please contact us through a [GitHub](https://github.com/tendermint/tendermint/issues) issue. If the issue is related to a CVE, please check for [a `cve-tracker` issue on the `official-images` repository](https://github.com/docker-library/official-images/issues?q=label%3Acve-tracker) first.
You can also reach the image maintainers via [Slack](http://forum.tendermint.com:3000/).
## Contributing
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.
Before you start to code, we recommend discussing your plans through a [GitHub](https://github.com/tendermint/tendermint/issues) issue, especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing.

Gopkg.lock (generated)

@@ -1,552 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "UT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
branch = "master"
digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"
[[projects]]
digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
name = "github.com/btcsuite/btcutil"
packages = [
"base58",
"bech32",
]
pruneopts = "UT"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b"
name = "github.com/ebuchman/fail-test"
packages = ["."]
pruneopts = "UT"
revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
[[projects]]
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
name = "github.com/fortytw2/leaktest"
packages = ["."]
pruneopts = "UT"
revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e"
version = "v1.2.0"
[[projects]]
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
pruneopts = "UT"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11"
name = "github.com/go-kit/kit"
packages = [
"log",
"log/level",
"log/term",
"metrics",
"metrics/discard",
"metrics/internal/lv",
"metrics/prometheus",
]
pruneopts = "UT"
revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
version = "v0.6.0"
[[projects]]
digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659"
name = "github.com/go-logfmt/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
name = "github.com/go-stack/stack"
packages = ["."]
pruneopts = "UT"
revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
version = "v1.8.0"
[[projects]]
digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
"jsonpb",
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
"types",
]
pruneopts = "UT"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
[[projects]]
digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = "UT"
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e"
name = "github.com/gorilla/websocket"
packages = ["."]
pruneopts = "UT"
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
version = "v1.2.0"
[[projects]]
digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
name = "github.com/hashicorp/hcl"
packages = [
".",
"hcl/ast",
"hcl/parser",
"hcl/scanner",
"hcl/strconv",
"hcl/token",
"json/parser",
"json/scanner",
"json/token",
]
pruneopts = "UT"
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
version = "v1.0.0"
[[projects]]
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = "UT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214"
name = "github.com/jmhodges/levigo"
packages = ["."]
pruneopts = "UT"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
[[projects]]
branch = "master"
digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72"
name = "github.com/kr/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
name = "github.com/magiconair/properties"
packages = ["."]
pruneopts = "UT"
revision = "c2353362d570a7bfa228149c62842019201cfb71"
version = "v1.8.0"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "UT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = "UT"
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
version = "v1.1.2"
[[projects]]
digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
name = "github.com/pelletier/go-toml"
packages = ["."]
pruneopts = "UT"
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
version = "v1.2.0"
[[projects]]
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp",
]
pruneopts = "UT"
revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"
[[projects]]
branch = "master"
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "UT"
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
[[projects]]
branch = "master"
digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = "UT"
revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
[[projects]]
branch = "master"
digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs",
]
pruneopts = "UT"
revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
[[projects]]
digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
pruneopts = "UT"
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd"
name = "github.com/spf13/afero"
packages = [
".",
"mem",
]
pruneopts = "UT"
revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
version = "v1.1.2"
[[projects]]
digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f"
name = "github.com/spf13/cast"
packages = ["."]
pruneopts = "UT"
revision = "8965335b8c7107321228e3e3702cab9832751bac"
version = "v1.2.0"
[[projects]]
digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "UT"
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
version = "v0.0.1"
[[projects]]
digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
pruneopts = "UT"
revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
version = "v1.0.0"
[[projects]]
digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
name = "github.com/spf13/viper"
packages = ["."]
pruneopts = "UT"
revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
version = "v1.0.0"
[[projects]]
digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require",
]
pruneopts = "UT"
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
branch = "master"
digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c"
name = "github.com/syndtr/goleveldb"
packages = [
"leveldb",
"leveldb/cache",
"leveldb/comparer",
"leveldb/errors",
"leveldb/filter",
"leveldb/iterator",
"leveldb/journal",
"leveldb/memdb",
"leveldb/opt",
"leveldb/storage",
"leveldb/table",
"leveldb/util",
]
pruneopts = "UT"
revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"
[[projects]]
digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
name = "github.com/tendermint/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
[[projects]]
digest = "1:5f52e817b6c9d52ddba70dece0ea31134d82a52c05bce98fbc739ab2a832df28"
name = "github.com/tendermint/go-amino"
packages = ["."]
pruneopts = "UT"
revision = "cb07448b240918aa8d8df4505153549b86b77134"
version = "v0.13.0"
[[projects]]
digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"chacha20poly1305",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"hkdf",
"internal/chacha20",
"internal/subtle",
"nacl/box",
"nacl/secretbox",
"openpgp/armor",
"openpgp/errors",
"poly1305",
"ripemd160",
"salsa20/salsa",
]
pruneopts = "UT"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
source = "github.com/tendermint/crypto"
[[projects]]
digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
name = "golang.org/x/net"
packages = [
"context",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"netutil",
"trace",
]
pruneopts = "UT"
revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
[[projects]]
branch = "master"
digest = "1:d1da39c9bac61327dbef1d8ef9f210425e99fd2924b6fb5f0bc587a193353637"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix",
]
pruneopts = "UT"
revision = "8a28ead16f52c8aaeffbf79239b251dfdf6c4f96"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "UT"
revision = "94acd270e44e65579b9ee3cdab25034d33fed608"
[[projects]]
digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/grpcrand",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport",
]
pruneopts = "UT"
revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
version = "v1.13.0"
[[projects]]
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/btcsuite/btcutil/base58",
"github.com/btcsuite/btcutil/bech32",
"github.com/ebuchman/fail-test",
"github.com/fortytw2/leaktest",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",
"github.com/go-kit/kit/log/term",
"github.com/go-kit/kit/metrics",
"github.com/go-kit/kit/metrics/discard",
"github.com/go-kit/kit/metrics/prometheus",
"github.com/go-logfmt/logfmt",
"github.com/gogo/protobuf/gogoproto",
"github.com/gogo/protobuf/jsonpb",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/types",
"github.com/golang/protobuf/proto",
"github.com/golang/protobuf/ptypes/timestamp",
"github.com/gorilla/websocket",
"github.com/jmhodges/levigo",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/rcrowley/go-metrics",
"github.com/spf13/cobra",
"github.com/spf13/viper",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/syndtr/goleveldb/leveldb",
"github.com/syndtr/goleveldb/leveldb/errors",
"github.com/syndtr/goleveldb/leveldb/iterator",
"github.com/syndtr/goleveldb/leveldb/opt",
"github.com/tendermint/btcd/btcec",
"github.com/tendermint/go-amino",
"golang.org/x/crypto/bcrypt",
"golang.org/x/crypto/chacha20poly1305",
"golang.org/x/crypto/curve25519",
"golang.org/x/crypto/ed25519",
"golang.org/x/crypto/hkdf",
"golang.org/x/crypto/nacl/box",
"golang.org/x/crypto/nacl/secretbox",
"golang.org/x/crypto/openpgp/armor",
"golang.org/x/crypto/ripemd160",
"golang.org/x/net/context",
"golang.org/x/net/netutil",
"google.golang.org/grpc",
"google.golang.org/grpc/credentials",
]
solver-name = "gps-cdcl"
solver-version = 1


@@ -1,112 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
#
###########################################################
# NOTE: All packages should be pinned to specific versions.
# Packages without releases must pin to a commit.
[[constraint]]
name = "github.com/go-kit/kit"
version = "=0.6.0"
[[constraint]]
name = "github.com/gogo/protobuf"
version = "=1.1.1"
[[constraint]]
name = "github.com/golang/protobuf"
version = "=1.1.0"
[[constraint]]
name = "github.com/gorilla/websocket"
version = "=1.2.0"
[[constraint]]
name = "github.com/pkg/errors"
version = "=0.8.0"
[[constraint]]
name = "github.com/spf13/cobra"
version = "=0.0.1"
[[constraint]]
name = "github.com/spf13/viper"
version = "=1.0.0"
[[constraint]]
name = "github.com/stretchr/testify"
version = "=1.2.1"
[[constraint]]
name = "github.com/tendermint/go-amino"
version = "v0.13.0"
[[constraint]]
name = "google.golang.org/grpc"
version = "=1.13.0"
[[constraint]]
name = "github.com/fortytw2/leaktest"
version = "=1.2.0"
###################################
## Some repos dont have releases.
## Pin to revision
[[constraint]]
name = "golang.org/x/crypto"
source = "github.com/tendermint/crypto"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
[[override]]
name = "github.com/jmhodges/levigo"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
[[constraint]]
name = "github.com/ebuchman/fail-test"
revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
# last revision used by go-crypto
[[constraint]]
name = "github.com/btcsuite/btcutil"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[constraint]]
name = "github.com/tendermint/btcd"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
# Haven't made a release since 2016.
[[constraint]]
name = "github.com/prometheus/client_golang"
revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"
[[constraint]]
name = "github.com/rcrowley/go-metrics"
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[constraint]]
name = "golang.org/x/net"
revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
[prune]
go-tests = true
unused-packages = true

INSTALL.md (new file)

@@ -0,0 +1 @@
The installation guide has moved to the [docs directory](docs/guides/install-from-source.md) in order to easily be rendered by the website. Please update your links accordingly.

Makefile

@@ -1,328 +1,83 @@
GOTOOLS = \
github.com/mitchellh/gox \
github.com/golang/dep/cmd/dep \
github.com/alecthomas/gometalinter \
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/square/certstrap
GOBIN?=${GOPATH}/bin
PACKAGES=$(shell go list ./...)
github.com/mitchellh/gox \
github.com/Masterminds/glide \
honnef.co/go/tools/cmd/megacheck
INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
BUILD_TAGS?='tendermint'
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
TMHOME = $${TMHOME:-$$HOME/.tendermint}
LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s
all: install test
all: check build test install
check: check_tools get_vendor_deps
########################################
### Build Tendermint
install: get_vendor_deps
@go install --ldflags '-extldflags "-static"' \
--ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" ./cmd/tendermint
build:
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
build_c:
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
go build \
--ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" -o build/tendermint ./cmd/tendermint/
build_race:
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
########################################
### Protobuf
protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc
%.pb.go: %.proto
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.
########################################
### Build ABCI
# see protobuf section above
protoc_abci: abci/types/types.pb.go
build_abci:
@go build -i ./abci/cmd/...
install_abci:
@go install ./abci/cmd/...
########################################
### Distribution
go build -race -o build/tendermint ./cmd/tendermint
# dist builds binaries for all platforms and packages them for distribution
# TODO add abci to these scripts
dist:
@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
########################################
### Tools & dependencies
test:
@echo "--> Running go test"
@go test $(PACKAGES)
check_tools:
@# https://stackoverflow.com/a/25668869
@echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\
$(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))"
get_tools:
@echo "--> Installing tools"
./scripts/get_tools.sh
@echo "--> Downloading linters (this may take awhile)"
$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)
update_tools:
@echo "--> Updating tools"
./scripts/get_tools.sh
#Update dependencies
get_vendor_deps:
@echo "--> Running dep"
@dep ensure
#For ABCI and libs
get_protoc:
@# https://github.com/google/protobuf/releases
curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
cd protobuf-3.6.1 && \
DIST_LANG=cpp ./configure && \
make && \
make check && \
sudo make install && \
sudo ldconfig && \
cd .. && \
rm -rf protobuf-3.6.1
draw_deps:
@# requires brew install graphviz or apt-get install graphviz
go get github.com/RobotsAndPencils/goviz
@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
get_deps_bin_size:
@# Copy of build recipe with additional flags to perform binary size analysis
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
########################################
### Libs
protoc_libs: libs/common/types.pb.go
gen_certs: clean_certs
## Generating certificates for TLS testing...
certstrap init --common-name "tendermint.com" --passphrase ""
certstrap request-cert -ip "::" --passphrase ""
certstrap sign "::" --CA "tendermint.com" --passphrase ""
mv out/::.crt out/::.key db/remotedb
clean_certs:
## Cleaning TLS testing certificates...
rm -rf out
rm -f db/remotedb/::.crt db/remotedb/::.key
test_libs: gen_certs
GOCACHE=off go test -tags gcc $(PACKAGES)
make clean_certs
grpc_dbserver:
protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
protoc_grpc: rpc/grpc/types.pb.go
protoc_merkle: crypto/merkle/merkle.pb.go
########################################
### Testing
## required to be run first by most tests
build_docker_test_image:
docker build -t tester -f ./test/docker/Dockerfile .
### coverage, app, persistence, and libs tests
test_cover:
# run the go unit tests with coverage
bash test/test_cover.sh
test_apps:
# run the app tests using bash
# requires `abci-cli` and `tendermint` binaries installed
bash test/app/test.sh
test_abci_apps:
bash abci/tests/test_app/test.sh
test_abci_cli:
# test the cli against the examples in the tutorial at:
# ./docs/abci-cli.md
# if test fails, update the docs ^
@ bash abci/tests/test_cli/test.sh
test_persistence:
# run the persistence tests using bash
# requires `abci-cli` installed
docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh
# TODO undockerize
# bash test/persist/test_failure_indices.sh
test_p2p:
docker rm -f rsyslog || true
rm -rf test/logs || true
mkdir test/logs
cd test/
docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog
cd ..
# requires 'tester' the image from above
bash test/p2p/test.sh tester
# the `docker cp` takes a really long time; uncomment for debugging
#
# mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs
test_race:
@echo "--> Running go test --race"
@go test -v -race $(PACKAGES)
test_integrations:
make build_docker_test_image
make get_tools
make get_vendor_deps
make install
make test_cover
make test_apps
make test_abci_apps
make test_abci_cli
make test_libs
make test_persistence
make test_p2p
test_release:
@go test -tags release $(PACKAGES)
@bash ./test/test.sh
test100:
@for i in {1..100}; do make test; done
vagrant_test:
vagrant up
vagrant ssh -c 'make test_integrations'
draw_deps:
# requires brew install graphviz or apt-get install graphviz
go get github.com/RobotsAndPencils/goviz
@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
### go tests
test:
@echo "--> Running go test"
@GOCACHE=off go test -p 1 $(PACKAGES)
list_deps:
@go list -f '{{join .Deps "\n"}}' ./... | \
grep -v /vendor/ | sort | uniq | \
xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
test_race:
@echo "--> Running go test --race"
@GOCACHE=off go test -p 1 -v -race $(PACKAGES)
get_deps:
@echo "--> Running go get"
@go get -v -d $(PACKAGES)
@go list -f '{{join .TestImports "\n"}}' ./... | \
grep -v /vendor/ | sort | uniq | \
xargs go get -v -d
get_vendor_deps: ensure_tools
@rm -rf vendor/
@echo "--> Running glide install"
@glide install
update_deps: tools
@echo "--> Updating dependencies"
@go get -d -u ./...
revision:
-echo `git rev-parse --verify HEAD` > $(TMHOME)/revision
-echo `git rev-parse --verify HEAD` >> $(TMHOME)/revision_history
tools:
go get -u -v $(GOTOOLS)
ensure_tools:
go get $(GOTOOLS)
########################################
### Formatting, linting, and vetting
fmt:
@go fmt ./...
megacheck:
@for pkg in ${PACKAGES}; do megacheck "$$pkg"; done
metalinter:
@echo "--> Running linter"
@gometalinter $(LINT_FLAGS) --disable-all \
--enable=deadcode \
--enable=gosimple \
--enable=misspell \
--enable=safesql \
./...
#--enable=gas \
#--enable=maligned \
#--enable=dupl \
#--enable=errcheck \
#--enable=goconst \
#--enable=gocyclo \
#--enable=goimports \
#--enable=golint \ <== comments on anything exported
#--enable=gotype \
#--enable=ineffassign \
#--enable=interfacer \
#--enable=megacheck \
#--enable=staticcheck \
#--enable=structcheck \
#--enable=unconvert \
#--enable=unparam \
#--enable=unused \
#--enable=varcheck \
#--enable=vet \
#--enable=vetshadow \
metalinter_all:
@echo "--> Running linter (all)"
gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...
DESTINATION = ./index.html.md
rpc-docs:
cat rpc/core/slate_header.txt > $(DESTINATION)
godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION)
check_dep:
dep status >> /dev/null
!(grep -n branch Gopkg.toml)
###########################################################
### Docker image
build-docker:
cp build/tendermint DOCKER/tendermint
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
rm -rf DOCKER/tendermint
###########################################################
### Local testnet using docker
# Build linux binary on other platforms
build-linux:
GOOS=linux GOARCH=amd64 $(MAKE) build
build-docker-localnode:
cd networks/local
make
# Run a 4-node testnet locally
localnet-start: localnet-stop
@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
docker-compose up
# Stop testnet
localnet-stop:
docker-compose down
###########################################################
### Remote full-nodes (sentry) using terraform and ansible
# Server management
sentry-start:
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
@if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi
cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
@echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make sentry-config\". (Public key of validator, chain ID, peer IP and node ID.)"
# Configuration management
sentry-config:
cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build
sentry-stop:
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
# meant for the CI, inspect script & adapt accordingly
build-slate:
bash scripts/slate.sh
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all
.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps revision tools
README.md
@@ -8,140 +8,69 @@ Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.10.4-blue.svg)](https://github.com/moovweb/gvm)
[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org)
[![Rocket.Chat](https://demo.rocket.chat/images/join-chat.svg)](https://cosmos.rocket.chat/)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
Branch | Tests | Coverage
----------|-------|----------
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
Branch | Tests | Coverage | Report Card
----------|-------|----------|-------------
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/develop)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/develop)
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/master)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/master)
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
_NOTE: This is alpha software. Please contact us if you intend to run it in production._
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language,
and securely replicates it on many machines.
For protocol details, see [the specification](/docs/spec).
For more background, see the [introduction](https://tendermint.com/intro).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
## A Note on Production Readiness
While Tendermint is being used in production in private, permissioned
environments, we are still working actively to harden and audit it in preparation
for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/).
We are also still making breaking changes to the protocol and the APIs.
Thus, we tag the releases as *alpha software*.
In any case, if you intend to run Tendermint in production,
please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :)
## Security
To report a security vulnerability, see our [bug bounty
program](https://tendermint.com/security).
For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md)
## Minimum requirements
Requirement|Notes
---|---
Go version | Go1.10 or higher
### Code of Conduct
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
## Install
See the [install instructions](/docs/introduction/install.md)
To download pre-built binaries, see our [downloads page](https://tendermint.com/downloads).
## Quick Start
To install from source, you should be able to:
- [Single node](/docs/tendermint-core/using-tendermint.md)
- [Local cluster using docker-compose](/networks/local)
- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
## Resources
### Tendermint Core
For details about the blockchain data structures and the p2p protocols, see
the [Tendermint specification](/docs/spec).
For details on using the software, see the [documentation](/docs/), which is also
hosted at: https://tendermint.com/docs/
### Tools
Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools) and these binaries need to be built separately.
Additional documentation is found [here](/docs/tools).
### Sub-projects
* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
### Applications
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
* [Many more](https://tendermint.com/ecosystem)
### Research
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
* [Blog](https://blog.cosmos.network/tendermint/home)
For more details (or if it fails), see the [install guide](https://tendermint.com/docs/guides/install-from-source).
## Contributing
Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md).
## Versioning
## Resources
### SemVer
### Tendermint Core
Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0.
- [Introduction](https://tendermint.com/intro)
- [Docs](https://tendermint.com/docs)
- [Software using Tendermint](https://tendermint.com/ecosystem)
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the in-process Go APIs.
### Sub-projects
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:
* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface
* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library
* [Go-Crypto](http://github.com/tendermint/go-crypto), an elliptic curve cryptography library
* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries
* [Merkleeyes](http://github.com/tendermint/merkleeyes), a balanced, binary Merkle tree for ABCI apps
- types
- rpc/client
- config
- node
- libs
- bech32
- common
- db
- errors
- log
### Tools
* [Deployment, Benchmarking, and Monitoring](https://github.com/tendermint/tools)
Exported objects in these packages that are not covered by the versioning scheme
are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any
time without notice. Functions, types, and values in any other package may also change at any time.
### Applications
### Upgrades
* [Ethermint](http://github.com/tendermint/ethermint): Ethereum on Tendermint
* [Basecoin](http://github.com/tendermint/basecoin), a cryptocurrency application framework
In an effort to avoid accumulating technical debt prior to 1.0.0,
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
will work with existing tendermint blockchains. In these cases you will
have to start a new blockchain, or write something custom to get the old
data into the new chain.
### More
However, any bump in the PATCH version should be compatible with existing histories
(if not please open an [issue](https://github.com/tendermint/tendermint/issues)).
For more information on upgrading, see [here](./UPGRADING.md)
## Code of Conduct
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
* [Tendermint Blog](https://blog.cosmos.network/tendermint/home)
* [Cosmos Blog](https://blog.cosmos.network)
* [Original Whitepaper (out-of-date)](http://www.the-blockchain.com/docs/Tendermint%20Consensus%20without%20Mining.pdf)
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
@@ -1,23 +0,0 @@
# Roadmap
BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence
FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limiting to the RPC
IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
- cache block parts for faster agreement after round changes
- propagate block parts rarest first
- Better testing of the consensus state machine (ie. use a DSL)
- Auto-compiled serialization/deserialization code instead of go-wire reflection
BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness
@@ -1,71 +0,0 @@
# Security
As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a bug bounty.
See the policy for more details on submissions and rewards.
Here is a list of examples of the kinds of bugs we're most interested in:
## Specification
- Conceptual flaws
- Ambiguities, inconsistencies, or incorrect statements
- Mismatch between the specification and implementation of any component
## Consensus
Assuming less than 1/3 of the voting power is Byzantine (malicious):
- Validation of blockchain data structures, including blocks, block parts,
votes, and so on
- Execution of blocks
- Validator set changes
- Proposer round robin
- Two nodes committing conflicting blocks for the same height (safety failure)
- A correct node signing conflicting votes
- A node halting (liveness failure)
- Syncing new and old nodes
## Networking
- Authenticated encryption (MITM, information leakage)
- Eclipse attacks
- Sybil attacks
- Long-range attacks
- Denial-of-Service
## RPC
- Write-access to anything besides sending transactions
- Denial-of-Service
- Leakage of secrets
## Denial-of-Service
Attacks may come through the P2P network or the RPC:
- Amplification attacks
- Resource abuse
- Deadlocks and race conditions
- Panics and unhandled errors
## Libraries
- Serialization (Amino)
- Reading/Writing files and databases
- Logging and monitoring
## Cryptography
- Elliptic curves for validator signatures
- Hash algorithms and Merkle trees for block validation
- Authenticated encryption for P2P connections
## Light Client
- Validation of blockchain data structures
- Correctly validating an incorrect proof
- Incorrectly validating a correct proof
- Syncing validator set changes
@@ -1,148 +0,0 @@
# Upgrading Tendermint Core
This guide provides steps to be followed when you upgrade your applications to
a newer version of Tendermint Core.
## v0.26.0
The new 0.26.0 release contains a lot of changes to core data types. It is not
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
To reset the state do:
```
$ tendermint unsafe_reset_all
```
Here we summarize some other notable changes to be mindful of.
### Config Changes
All timeouts must be changed from integers to duration strings. For
instance, `flush_throttle_timeout = 100` would be changed to
`flush_throttle_timeout = "100ms"` and `timeout_propose = 3000` would be changed
to `timeout_propose = "3s"`.
### RPC Changes
The default behaviour of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
Various version fields like `amino_version`, `p2p_version`, `consensus_version`,
and `rpc_version` have been removed from `node_info.other` and are
consolidated under the tendermint semantic version (ie. `node_info.version`) and
the new `block` and `p2p` protocol versions under `node_info.protocol_version`.
### ABCI Changes
Field numbers were bumped in the `Header` and `ResponseInfo` messages to make
room for new `version` fields. It should be straightforward to recompile the
protobuf file for these changes.
#### Proofs
The `ResponseQuery.Proof` field is now structured as a `[]ProofOp` to support
generalized Merkle tree constructions where the leaves of one Merkle tree are
the root of another. If you don't need this functionality, and you used to
return `<proof bytes>` here, you should instead return a single `ProofOp` with
just the `Data` field set:
```
[]ProofOp{
ProofOp{
Data: <proof bytes>,
}
}
```
For more information, see:
- [ADR-026](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/architecture/adr-026-general-merkle-proof.md)
- [Relevant ABCI
documentation](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/spec/abci/apps.md#query-proofs)
- [Description of
keys](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/crypto/merkle/proof_key_path.go#L14)
### Go API Changes
#### crypto.merkle
The `merkle.Hasher` interface was removed. Functions which used to take `Hasher`
now simply take `[]byte`. This means that any objects being Merklized should be
serialized before they are passed in.
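A hedged sketch of the new calling convention (the serialization format and hashing below are only illustrative; substitute the Merkle helper you actually use):

```golang
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

type account struct {
	Name    string `json:"name"`
	Balance int64  `json:"balance"`
}

func main() {
	// Serialize first; Merkle functions now take raw []byte leaves, not a Hasher.
	leaf, err := json.Marshal(account{Name: "alice", Balance: 10})
	if err != nil {
		panic(err)
	}
	// Stand-in for handing the serialized bytes to a real Merkle-tree function.
	fmt.Printf("%X\n", sha256.Sum256(leaf))
}
```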
#### node
The `node.RunForever` function was removed. Signal handling and running forever
should instead be explicitly configured by the caller. See how we do it
[here](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/cmd/tendermint/commands/run_node.go#L60).
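A minimal sketch of what a caller might do instead, using plain `os/signal` (the linked code may differ in details):

```golang
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// stop stands in for stopping a running node, e.g. node.Stop().
	stop := func() { fmt.Println("stopping node") }

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs // block "forever" until a shutdown signal arrives
	stop()
}
```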
## v0.25.0
This release has minimal impact.
If you use GasWanted in ABCI and want to enforce it, set the MaxGas in the genesis file (default is no max).
## v0.24.0
The new 0.24.0 release contains a lot of changes to the state and types. It is not
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
To reset the state do:
```
$ tendermint unsafe_reset_all
```
Here we summarize some other notable changes to be mindful of.
### Config changes
`p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and
`p2p.max_num_outbound_peers`.
```
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
```
As you can see, the default ratio of inbound to outbound peers is 4:1. The reason
is that we want it to be easier for new nodes to connect to the network. You can
tweak these parameters to alter the network topology.
### RPC Changes
The result of `/commit` used to contain `header` and `commit` fields at the top level. These are now contained under the `signed_header` field.
### ABCI Changes
The header has been upgraded and contains new fields, but none of the existing
fields were changed, except their order.
The `Validator` type was split into two, one containing an `Address` and one
containing a `PubKey`. When processing `RequestBeginBlock`, use the `Validator`
type, which contains just the `Address`. When returning `ResponseEndBlock`, use
the `ValidatorUpdate` type, which contains just the `PubKey`.
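As a hedged sketch (field names below are assumptions based on the description above, not a copy of the generated protobuf types), an app can keep its own address-to-pubkey map to bridge the two types:

```golang
package app

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// valBook remembers the pubkey behind each validator address, so the app can
// relate the Address-only Validator seen in RequestBeginBlock to the
// PubKey-carrying ValidatorUpdate it returns in ResponseEndBlock.
type valBook struct {
	pubKeys map[string]abci.PubKey // hex address -> pubkey
}

func newValBook() *valBook {
	return &valBook{pubKeys: make(map[string]abci.PubKey)}
}

func (b *valBook) remember(addr []byte, pk abci.PubKey) {
	b.pubKeys[fmt.Sprintf("%X", addr)] = pk
}

// update builds a ValidatorUpdate for a known address (assumed fields: PubKey, Power).
func (b *valBook) update(addr []byte, power int64) abci.ValidatorUpdate {
	return abci.ValidatorUpdate{PubKey: b.pubKeys[fmt.Sprintf("%X", addr)], Power: power}
}
```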
### Validator Set Updates
Validator set updates returned in ResponseEndBlock for height `H` used to take
effect immediately at height `H+1`. Now they will be delayed one block, to take
effect at height `H+2`. Note this means that the change will be seen by the ABCI
app in the `RequestBeginBlock.LastCommitInfo` at block `H+3`. Apps were already
required to maintain a map from validator addresses to pubkeys since v0.23 (when
pubkeys were removed from RequestBeginBlock), but now they may need to track
multiple validator sets at once to accommodate this delay.
### Block Size
The `ConsensusParams.BlockSize.MaxTxs` was removed in favor of
`ConsensusParams.BlockSize.MaxBytes`, which is now enforced. This means blocks
are limited only by byte size, not by number of transactions.
Vagrantfile
@@ -2,7 +2,7 @@
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/xenial64"
config.vm.box = "ubuntu/trusty64"
config.vm.provider "virtualbox" do |v|
v.memory = 4096
@@ -11,48 +11,28 @@ Vagrant.configure("2") do |config|
config.vm.provision "shell", inline: <<-SHELL
apt-get update
apt-get install -y --no-install-recommends wget curl jq shellcheck bsdmainutils psmisc
# install base requirements
apt-get install -y --no-install-recommends wget curl jq zip \
make shellcheck bsdmainutils psmisc
apt-get install -y language-pack-en
# install docker
apt-get install -y --no-install-recommends apt-transport-https \
ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
apt-get install -y docker-ce
wget -qO- https://get.docker.com/ | sh
usermod -a -G docker vagrant
# install go
wget -q https://dl.google.com/go/go1.11.linux-amd64.tar.gz
tar -xvf go1.11.linux-amd64.tar.gz
mv go /usr/local
rm -f go1.11.linux-amd64.tar.gz
# cleanup
apt-get autoremove -y
# set env variables
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
curl -O https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz
tar -xvf go1.8.linux-amd64.tar.gz
rm -rf /usr/local/go
mv go /usr/local
rm -f go1.8.linux-amd64.tar.gz
mkdir -p /home/vagrant/go/bin
echo 'export PATH=$PATH:/usr/local/go/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
mkdir -p /home/vagrant/go/src/github.com/tendermint
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
chown -R vagrant:vagrant /home/vagrant/go
chown vagrant:vagrant /home/vagrant/.bash_profile
# get all deps and tools, ready to install/test
su - vagrant -c 'source /home/vagrant/.bash_profile'
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps'
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_vendor_deps'
SHELL
end
@@ -1,171 +0,0 @@
# Application BlockChain Interface (ABCI)
[![CircleCI](https://circleci.com/gh/tendermint/abci.svg?style=svg)](https://circleci.com/gh/tendermint/abci)
Blockchains are systems for multi-master state machine replication.
**ABCI** is an interface that defines the boundary between the replication engine (the blockchain),
and the state machine (the application).
Using a socket protocol, a consensus engine running in one process
can manage an application state running in another.
Previously, the ABCI was referred to as TMSP.
The community has provided a number of additional implementations; see the [Tendermint Ecosystem](https://tendermint.com/ecosystem).
## Specification
A detailed description of the ABCI methods and message types is contained in:
- [A prose specification](specification.md)
- [A protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto)
- [A Go interface](https://github.com/tendermint/tendermint/blob/master/abci/types/application.go).
For more background information on ABCI, motivations, and tendermint, please visit [the documentation](https://tendermint.com/docs/).
The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.
## Protocol Buffers
To compile the protobuf file, run:
```
cd $GOPATH/src/github.com/tendermint/tendermint/; make protoc_abci
```
See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
for details on compiling for other languages. Note we also include a [GRPC](http://www.grpc.io/docs)
service definition.
## Install ABCI-CLI
The `abci-cli` is a simple tool for debugging ABCI servers and running some
example apps. To install it:
```
mkdir -p $GOPATH/src/github.com/tendermint
cd $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint.git
cd tendermint
make get_tools
make get_vendor_deps
make install_abci
```
## Implementation
We provide three implementations of the ABCI in Go:
- Golang in-process
- ABCI-socket
- GRPC
Note the GRPC version is maintained primarily to simplify onboarding and prototyping and is not receiving the same
attention to security and performance as the others.
### In Process
The simplest implementation just uses function calls within Go.
This means ABCI applications written in Golang can be compiled with Tendermint Core and run as a single binary.
See the [examples](#examples) below for more information.
### Socket (TSP)
ABCI is best implemented as a streaming protocol.
The socket implementation provides for asynchronous, ordered message passing over unix or tcp.
Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers).
For example, if the Protobuf3 encoded ABCI message is `0xDEADBEEF` (4 bytes), the length-prefixed message is `0x08DEADBEEF`, since `0x08` is the signed varint
encoding of `4`. If the Protobuf3 encoded ABCI message is 65535 bytes long, the length-prefixed message would be like `0xFEFF07...`.
Note the benefit of using this `varint` encoding over the old version (where integers were encoded as `<len of len><big endian len>`) is that
it is the standard way to encode integers in Protobuf. It is also generally shorter.
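A small sketch of this framing using Go's signed-varint encoder (a stand-in for the library's own read/write helpers):

```golang
package main

import (
	"encoding/binary"
	"fmt"
)

// frame length-prefixes msg with a signed (zigzag) varint, as described above.
func frame(msg []byte) []byte {
	prefix := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(prefix, int64(len(msg)))
	return append(prefix[:n], msg...)
}

func main() {
	fmt.Printf("% X\n", frame([]byte{0xDE, 0xAD, 0xBE, 0xEF})) // 08 DE AD BE EF
}
```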
### GRPC
GRPC is an RPC framework native to Protocol Buffers with support in many languages.
Implementing the ABCI using GRPC can allow for faster prototyping, but is expected to be much slower than
the ordered, asynchronous socket protocol. The implementation has also not received as much testing or review.
Note the length-prefixing used in the socket implementation does not apply for GRPC.
## Usage
The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server.
For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below).
It can also be used to run some example applications.
See [the documentation](https://tendermint.com/docs/) for more details.
### Examples
Check out the variety of example applications in the [example directory](example/).
It also contains the code referred to by the `counter` and `kvstore` apps; these apps come
built into the `abci-cli` binary.
#### Counter
The `abci-cli counter` application illustrates nonce checking in transactions. Its code looks like:
```golang
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewCounterApplication(flagSerial)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Start the listener
srv, err := server.NewServer(flagAddrC, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})
return nil
}
```
and can be found in [this file](cmd/abci-cli/abci-cli.go).
#### kvstore
The `abci-cli kvstore` application illustrates a simple key-value Merkle tree:
```golang
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
app = kvstore.NewKVStoreApplication()
} else {
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
}
// Start the listener
srv, err := server.NewServer(flagAddrD, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})
return nil
}
```
@@ -1,129 +0,0 @@
package abcicli
import (
"fmt"
"sync"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
const (
dialRetryIntervalSeconds = 3
echoRetryIntervalSeconds = 1
)
// Client defines an interface for an ABCI client.
// All `Async` methods return a `ReqRes` object.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
// Note these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes and logs.
type Client interface {
cmn.Service
SetResponseCallback(Callback)
Error() error
FlushAsync() *ReqRes
EchoAsync(msg string) *ReqRes
InfoAsync(types.RequestInfo) *ReqRes
SetOptionAsync(types.RequestSetOption) *ReqRes
DeliverTxAsync(tx []byte) *ReqRes
CheckTxAsync(tx []byte) *ReqRes
QueryAsync(types.RequestQuery) *ReqRes
CommitAsync() *ReqRes
InitChainAsync(types.RequestInitChain) *ReqRes
BeginBlockAsync(types.RequestBeginBlock) *ReqRes
EndBlockAsync(types.RequestEndBlock) *ReqRes
FlushSync() error
EchoSync(msg string) (*types.ResponseEcho, error)
InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error)
CheckTxSync(tx []byte) (*types.ResponseCheckTx, error)
QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
CommitSync() (*types.ResponseCommit, error)
InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
}
//----------------------------------------
// NewClient returns a new ABCI client of the specified transport type.
// It returns an error if the transport is not "socket" or "grpc"
func NewClient(addr, transport string, mustConnect bool) (client Client, err error) {
switch transport {
case "socket":
client = NewSocketClient(addr, mustConnect)
case "grpc":
client = NewGRPCClient(addr, mustConnect)
default:
err = fmt.Errorf("Unknown abci transport %s", transport)
}
return
}
//----------------------------------------
type Callback func(*types.Request, *types.Response)
//----------------------------------------
type ReqRes struct {
*types.Request
*sync.WaitGroup
*types.Response // Not set atomically, so be sure to use WaitGroup.
mtx sync.Mutex
done bool // Gets set to true once *after* WaitGroup.Done().
cb func(*types.Response) // A single callback that may be set.
}
func NewReqRes(req *types.Request) *ReqRes {
return &ReqRes{
Request: req,
WaitGroup: waitGroup1(),
Response: nil,
done: false,
cb: nil,
}
}
// Sets the callback for this ReqRes atomically.
// If reqRes is already done, calls cb immediately.
// NOTE: reqRes.cb should not change if reqRes.done.
// NOTE: only one callback is supported.
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
reqRes.mtx.Lock()
if reqRes.done {
reqRes.mtx.Unlock()
cb(reqRes.Response)
return
}
defer reqRes.mtx.Unlock()
reqRes.cb = cb
}
func (reqRes *ReqRes) GetCallback() func(*types.Response) {
reqRes.mtx.Lock()
defer reqRes.mtx.Unlock()
return reqRes.cb
}
// NOTE: it should be safe to read reqRes.cb without locks after this.
func (reqRes *ReqRes) SetDone() {
reqRes.mtx.Lock()
reqRes.done = true
reqRes.mtx.Unlock()
}
func waitGroup1() (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{}
wg.Add(1)
return
}
@@ -1,301 +0,0 @@
package abcicli
import (
"fmt"
"net"
"sync"
"time"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
var _ Client = (*grpcClient)(nil)
// A stripped copy of the remoteClient that makes
// synchronous calls using grpc
type grpcClient struct {
cmn.BaseService
mustConnect bool
client types.ABCIApplicationClient
conn *grpc.ClientConn
mtx sync.Mutex
addr string
err error
resCb func(*types.Request, *types.Response) // listens to all callbacks
}
func NewGRPCClient(addr string, mustConnect bool) *grpcClient {
cli := &grpcClient{
addr: addr,
mustConnect: mustConnect,
}
cli.BaseService = *cmn.NewBaseService(nil, "grpcClient", cli)
return cli
}
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
return cmn.Connect(addr)
}
func (cli *grpcClient) OnStart() error {
if err := cli.BaseService.OnStart(); err != nil {
return err
}
RETRY_LOOP:
for {
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
if err != nil {
if cli.mustConnect {
return err
}
cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr))
time.Sleep(time.Second * dialRetryIntervalSeconds)
continue RETRY_LOOP
}
cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
client := types.NewABCIApplicationClient(conn)
cli.conn = conn
ENSURE_CONNECTED:
for {
_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
if err == nil {
break ENSURE_CONNECTED
}
cli.Logger.Error("Echo failed", "err", err)
time.Sleep(time.Second * echoRetryIntervalSeconds)
}
cli.client = client
return nil
}
}
func (cli *grpcClient) OnStop() {
cli.BaseService.OnStop()
if cli.conn != nil {
cli.conn.Close()
}
}
func (cli *grpcClient) StopForError(err error) {
cli.mtx.Lock()
if !cli.IsRunning() {
return
}
if cli.err == nil {
cli.err = err
}
cli.mtx.Unlock()
cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
cli.Stop()
}
func (cli *grpcClient) Error() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
return cli.err
}
// Set listener for all responses
// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.resCb = resCb
}
//----------------------------------------
// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
// (eg. the mempool expects to be able to lock to remove bad txs from cache).
// To accommodate, we finish each call in its own go-routine,
// which is expensive, but easy - if you want something better, use the socket protocol!
// maybe one day, if people really want it, we use grpc streams,
// but hopefully not :D
func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
req := types.ToRequestEcho(msg)
res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
}
func (cli *grpcClient) FlushAsync() *ReqRes {
req := types.ToRequestFlush()
res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
}
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
req := types.ToRequestInfo(params)
res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
}
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
req := types.ToRequestSetOption(params)
res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
}
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
req := types.ToRequestDeliverTx(tx)
res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
}
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
req := types.ToRequestCheckTx(tx)
res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
}
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
req := types.ToRequestQuery(params)
res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
}
func (cli *grpcClient) CommitAsync() *ReqRes {
req := types.ToRequestCommit()
res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
}
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
req := types.ToRequestInitChain(params)
res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
}
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
req := types.ToRequestBeginBlock(params)
res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
}
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
req := types.ToRequestEndBlock(params)
res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
}
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
reqres := NewReqRes(req)
reqres.Response = res // Set response
reqres.Done() // Release waiters
reqres.SetDone() // so reqRes.SetCallback will run the callback
// go routine for callbacks
go func() {
// Notify reqRes listener if set
if cb := reqres.GetCallback(); cb != nil {
cb(res)
}
// Notify client listener if set
if cli.resCb != nil {
cli.resCb(reqres.Request, res)
}
}()
return reqres
}
//----------------------------------------
func (cli *grpcClient) FlushSync() error {
return nil
}
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
reqres := cli.EchoAsync(msg)
// StopForError should already have been called if error is set
return reqres.Response.GetEcho(), cli.Error()
}
func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
reqres := cli.InfoAsync(req)
return reqres.Response.GetInfo(), cli.Error()
}
func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
reqres := cli.SetOptionAsync(req)
return reqres.Response.GetSetOption(), cli.Error()
}
func (cli *grpcClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
reqres := cli.DeliverTxAsync(tx)
return reqres.Response.GetDeliverTx(), cli.Error()
}
func (cli *grpcClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
reqres := cli.CheckTxAsync(tx)
return reqres.Response.GetCheckTx(), cli.Error()
}
func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
reqres := cli.QueryAsync(req)
return reqres.Response.GetQuery(), cli.Error()
}
func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) {
reqres := cli.CommitAsync()
return reqres.Response.GetCommit(), cli.Error()
}
func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) {
reqres := cli.InitChainAsync(params)
return reqres.Response.GetInitChain(), cli.Error()
}
func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
reqres := cli.BeginBlockAsync(params)
return reqres.Response.GetBeginBlock(), cli.Error()
}
func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) {
reqres := cli.EndBlockAsync(params)
return reqres.Response.GetEndBlock(), cli.Error()
}
@@ -1,230 +0,0 @@
package abcicli
import (
"sync"
types "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
var _ Client = (*localClient)(nil)
type localClient struct {
cmn.BaseService
mtx *sync.Mutex
types.Application
Callback
}
func NewLocalClient(mtx *sync.Mutex, app types.Application) *localClient {
if mtx == nil {
mtx = new(sync.Mutex)
}
cli := &localClient{
mtx: mtx,
Application: app,
}
cli.BaseService = *cmn.NewBaseService(nil, "localClient", cli)
return cli
}
func (app *localClient) SetResponseCallback(cb Callback) {
app.mtx.Lock()
defer app.mtx.Unlock()
app.Callback = cb
}
// TODO: change types.Application to include Error()?
func (app *localClient) Error() error {
return nil
}
func (app *localClient) FlushAsync() *ReqRes {
// Do nothing
return newLocalReqRes(types.ToRequestFlush(), nil)
}
func (app *localClient) EchoAsync(msg string) *ReqRes {
return app.callback(
types.ToRequestEcho(msg),
types.ToResponseEcho(msg),
)
}
func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
app.mtx.Lock()
res := app.Application.Info(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestInfo(req),
types.ToResponseInfo(res),
)
}
func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
app.mtx.Lock()
res := app.Application.SetOption(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestSetOption(req),
types.ToResponseSetOption(res),
)
}
func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
res := app.Application.DeliverTx(tx)
app.mtx.Unlock()
return app.callback(
types.ToRequestDeliverTx(tx),
types.ToResponseDeliverTx(res),
)
}
func (app *localClient) CheckTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
res := app.Application.CheckTx(tx)
app.mtx.Unlock()
return app.callback(
types.ToRequestCheckTx(tx),
types.ToResponseCheckTx(res),
)
}
func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
app.mtx.Lock()
res := app.Application.Query(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestQuery(req),
types.ToResponseQuery(res),
)
}
func (app *localClient) CommitAsync() *ReqRes {
app.mtx.Lock()
res := app.Application.Commit()
app.mtx.Unlock()
return app.callback(
types.ToRequestCommit(),
types.ToResponseCommit(res),
)
}
func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
app.mtx.Lock()
res := app.Application.InitChain(req)
reqRes := app.callback(
types.ToRequestInitChain(req),
types.ToResponseInitChain(res),
)
app.mtx.Unlock()
return reqRes
}
func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
app.mtx.Lock()
res := app.Application.BeginBlock(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
)
}
func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
app.mtx.Lock()
res := app.Application.EndBlock(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
)
}
//-------------------------------------------------------
func (app *localClient) FlushSync() error {
return nil
}
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
app.mtx.Lock()
res := app.Application.Info(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
app.mtx.Lock()
res := app.Application.SetOption(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
app.mtx.Lock()
res := app.Application.DeliverTx(tx)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
app.mtx.Lock()
res := app.Application.CheckTx(tx)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
app.mtx.Lock()
res := app.Application.Query(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
app.mtx.Lock()
res := app.Application.Commit()
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
app.mtx.Lock()
res := app.Application.InitChain(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
app.mtx.Lock()
res := app.Application.BeginBlock(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
app.mtx.Lock()
res := app.Application.EndBlock(req)
app.mtx.Unlock()
return &res, nil
}
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
app.Callback(req, res)
return newLocalReqRes(req, res)
}
func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
reqRes := NewReqRes(req)
reqRes.Response = res
reqRes.SetDone()
return reqRes
}
@@ -1,406 +0,0 @@
package abcicli
import (
"bufio"
"container/list"
"errors"
"fmt"
"net"
"reflect"
"sync"
"time"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
const reqQueueSize = 256 // TODO make configurable
// const maxResponseSize = 1048576 // 1MB TODO make configurable
const flushThrottleMS = 20 // Don't wait longer than...
var _ Client = (*socketClient)(nil)
// This is goroutine-safe, but users should beware that
// the application in general is not meant to be interfaced
// with concurrent callers.
type socketClient struct {
cmn.BaseService
reqQueue chan *ReqRes
flushTimer *cmn.ThrottleTimer
mustConnect bool
mtx sync.Mutex
addr string
conn net.Conn
err error
reqSent *list.List
resCb func(*types.Request, *types.Response) // listens to all callbacks
}
func NewSocketClient(addr string, mustConnect bool) *socketClient {
cli := &socketClient{
reqQueue: make(chan *ReqRes, reqQueueSize),
flushTimer: cmn.NewThrottleTimer("socketClient", flushThrottleMS),
mustConnect: mustConnect,
addr: addr,
reqSent: list.New(),
resCb: nil,
}
cli.BaseService = *cmn.NewBaseService(nil, "socketClient", cli)
return cli
}
func (cli *socketClient) OnStart() error {
if err := cli.BaseService.OnStart(); err != nil {
return err
}
var err error
var conn net.Conn
RETRY_LOOP:
for {
conn, err = cmn.Connect(cli.addr)
if err != nil {
if cli.mustConnect {
return err
}
cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying...", cli.addr))
time.Sleep(time.Second * dialRetryIntervalSeconds)
continue RETRY_LOOP
}
cli.conn = conn
go cli.sendRequestsRoutine(conn)
go cli.recvResponseRoutine(conn)
return nil
}
}
func (cli *socketClient) OnStop() {
cli.BaseService.OnStop()
cli.mtx.Lock()
defer cli.mtx.Unlock()
if cli.conn != nil {
cli.conn.Close()
}
cli.flushQueue()
}
// Stop the client and set the error
func (cli *socketClient) StopForError(err error) {
if !cli.IsRunning() {
return
}
cli.mtx.Lock()
if cli.err == nil {
cli.err = err
}
cli.mtx.Unlock()
cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
cli.Stop()
}
func (cli *socketClient) Error() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
return cli.err
}
// Set listener for all responses
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.resCb = resCb
}
//----------------------------------------
func (cli *socketClient) sendRequestsRoutine(conn net.Conn) {
w := bufio.NewWriter(conn)
for {
select {
case <-cli.flushTimer.Ch:
select {
case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
default:
// Probably will fill the buffer, or retry later.
}
case <-cli.Quit():
return
case reqres := <-cli.reqQueue:
cli.willSendReq(reqres)
err := types.WriteMessage(reqres.Request, w)
if err != nil {
cli.StopForError(fmt.Errorf("Error writing msg: %v", err))
return
}
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
err = w.Flush()
if err != nil {
cli.StopForError(fmt.Errorf("Error flushing writer: %v", err))
return
}
}
}
}
}
func (cli *socketClient) recvResponseRoutine(conn net.Conn) {
r := bufio.NewReader(conn) // Buffer reads
for {
var res = &types.Response{}
err := types.ReadMessage(r, res)
if err != nil {
cli.StopForError(err)
return
}
switch r := res.Value.(type) {
case *types.Response_Exception:
// XXX After setting cli.err, release waiters (e.g. reqres.Done())
cli.StopForError(errors.New(r.Exception.Error))
return
default:
// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
err := cli.didRecvResponse(res)
if err != nil {
cli.StopForError(err)
return
}
}
}
}
func (cli *socketClient) willSendReq(reqres *ReqRes) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.reqSent.PushBack(reqres)
}
func (cli *socketClient) didRecvResponse(res *types.Response) error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
// Get the first ReqRes
next := cli.reqSent.Front()
if next == nil {
return fmt.Errorf("Unexpected result type %v when nothing expected", reflect.TypeOf(res.Value))
}
reqres := next.Value.(*ReqRes)
if !resMatchesReq(reqres.Request, res) {
return fmt.Errorf("Unexpected result type %v when response to %v expected",
reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
}
reqres.Response = res // Set response
reqres.Done() // Release waiters
cli.reqSent.Remove(next) // Pop first item from linked list
// Notify reqRes listener if set
if cb := reqres.GetCallback(); cb != nil {
cb(res)
}
// Notify client listener if set
if cli.resCb != nil {
cli.resCb(reqres.Request, res)
}
return nil
}
//----------------------------------------
func (cli *socketClient) EchoAsync(msg string) *ReqRes {
return cli.queueRequest(types.ToRequestEcho(msg))
}
func (cli *socketClient) FlushAsync() *ReqRes {
return cli.queueRequest(types.ToRequestFlush())
}
func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
return cli.queueRequest(types.ToRequestInfo(req))
}
func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
return cli.queueRequest(types.ToRequestSetOption(req))
}
func (cli *socketClient) DeliverTxAsync(tx []byte) *ReqRes {
return cli.queueRequest(types.ToRequestDeliverTx(tx))
}
func (cli *socketClient) CheckTxAsync(tx []byte) *ReqRes {
return cli.queueRequest(types.ToRequestCheckTx(tx))
}
func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
return cli.queueRequest(types.ToRequestQuery(req))
}
func (cli *socketClient) CommitAsync() *ReqRes {
return cli.queueRequest(types.ToRequestCommit())
}
func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
return cli.queueRequest(types.ToRequestInitChain(req))
}
func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
return cli.queueRequest(types.ToRequestBeginBlock(req))
}
func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
return cli.queueRequest(types.ToRequestEndBlock(req))
}
//----------------------------------------
func (cli *socketClient) FlushSync() error {
reqRes := cli.queueRequest(types.ToRequestFlush())
if err := cli.Error(); err != nil {
return err
}
reqRes.Wait() // NOTE: if we don't flush the queue, it's possible to get stuck here
return cli.Error()
}
func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
reqres := cli.queueRequest(types.ToRequestEcho(msg))
cli.FlushSync()
return reqres.Response.GetEcho(), cli.Error()
}
func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
reqres := cli.queueRequest(types.ToRequestInfo(req))
cli.FlushSync()
return reqres.Response.GetInfo(), cli.Error()
}
func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
reqres := cli.queueRequest(types.ToRequestSetOption(req))
cli.FlushSync()
return reqres.Response.GetSetOption(), cli.Error()
}
func (cli *socketClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
reqres := cli.queueRequest(types.ToRequestDeliverTx(tx))
cli.FlushSync()
return reqres.Response.GetDeliverTx(), cli.Error()
}
func (cli *socketClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
reqres := cli.queueRequest(types.ToRequestCheckTx(tx))
cli.FlushSync()
return reqres.Response.GetCheckTx(), cli.Error()
}
func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
reqres := cli.queueRequest(types.ToRequestQuery(req))
cli.FlushSync()
return reqres.Response.GetQuery(), cli.Error()
}
func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
reqres := cli.queueRequest(types.ToRequestCommit())
cli.FlushSync()
return reqres.Response.GetCommit(), cli.Error()
}
func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
reqres := cli.queueRequest(types.ToRequestInitChain(req))
cli.FlushSync()
return reqres.Response.GetInitChain(), cli.Error()
}
func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
cli.FlushSync()
return reqres.Response.GetBeginBlock(), cli.Error()
}
func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
reqres := cli.queueRequest(types.ToRequestEndBlock(req))
cli.FlushSync()
return reqres.Response.GetEndBlock(), cli.Error()
}
//----------------------------------------
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
reqres := NewReqRes(req)
// TODO: set cli.err if reqQueue times out
cli.reqQueue <- reqres
// Maybe auto-flush, or unset auto-flush
switch req.Value.(type) {
case *types.Request_Flush:
cli.flushTimer.Unset()
default:
cli.flushTimer.Set()
}
return reqres
}
func (cli *socketClient) flushQueue() {
// mark all in-flight messages as resolved (they will get cli.Error())
for req := cli.reqSent.Front(); req != nil; req = req.Next() {
reqres := req.Value.(*ReqRes)
reqres.Done()
}
// mark all queued messages as resolved
LOOP:
for {
select {
case reqres := <-cli.reqQueue:
reqres.Done()
default:
break LOOP
}
}
}
//----------------------------------------
func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
switch req.Value.(type) {
case *types.Request_Echo:
_, ok = res.Value.(*types.Response_Echo)
case *types.Request_Flush:
_, ok = res.Value.(*types.Response_Flush)
case *types.Request_Info:
_, ok = res.Value.(*types.Response_Info)
case *types.Request_SetOption:
_, ok = res.Value.(*types.Response_SetOption)
case *types.Request_DeliverTx:
_, ok = res.Value.(*types.Response_DeliverTx)
case *types.Request_CheckTx:
_, ok = res.Value.(*types.Response_CheckTx)
case *types.Request_Commit:
_, ok = res.Value.(*types.Response_Commit)
case *types.Request_Query:
_, ok = res.Value.(*types.Response_Query)
case *types.Request_InitChain:
_, ok = res.Value.(*types.Response_InitChain)
case *types.Request_BeginBlock:
_, ok = res.Value.(*types.Response_BeginBlock)
case *types.Request_EndBlock:
_, ok = res.Value.(*types.Response_EndBlock)
}
return ok
}

View File

@@ -1,121 +0,0 @@
package abcicli_test
import (
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestSocketClientStopForErrorDeadlock(t *testing.T) {
c := abcicli.NewSocketClient(":80", false)
err := errors.New("foo-tendermint")
// See Issue https://github.com/tendermint/abci/issues/114
doneChan := make(chan bool)
go func() {
defer close(doneChan)
c.StopForError(err)
c.StopForError(err)
}()
select {
case <-doneChan:
case <-time.After(time.Second * 4):
t.Fatalf("Test took too long, potential deadlock still exists")
}
}
func TestProperSyncCalls(t *testing.T) {
app := slowApp{}
s, c := setupClientServer(t, app)
defer s.Stop()
defer c.Stop()
resp := make(chan error, 1)
go func() {
// This is BeginBlockSync unrolled....
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
c.FlushSync()
res := reqres.Response.GetBeginBlock()
require.NotNil(t, res)
resp <- c.Error()
}()
select {
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
assert.NoError(t, err, "This should return success")
}
}
func TestHangingSyncCalls(t *testing.T) {
app := slowApp{}
s, c := setupClientServer(t, app)
defer s.Stop()
defer c.Stop()
resp := make(chan error, 1)
go func() {
// Start BeginBlock and flush it
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
flush := c.FlushAsync()
// wait 20 ms for all events to travel over the socket, but
// no response yet from the server
time.Sleep(20 * time.Millisecond)
// kill the server, so the connections break
s.Stop()
// wait for the response from BeginBlock
reqres.Wait()
flush.Wait()
resp <- c.Error()
}()
select {
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
assert.Error(t, err, "We should get EOF error")
}
}
func setupClientServer(t *testing.T, app types.Application) (
cmn.Service, abcicli.Client) {
// some port between 20k and 30k
port := 20000 + cmn.RandInt32()%10000
addr := fmt.Sprintf("localhost:%d", port)
s, err := server.NewServer(addr, "socket", app)
require.NoError(t, err)
err = s.Start()
require.NoError(t, err)
c := abcicli.NewSocketClient(addr, true)
err = c.Start()
require.NoError(t, err)
return s, c
}
type slowApp struct {
types.BaseApplication
}
func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseBeginBlock{}
}

View File

@@ -1,774 +0,0 @@
package main
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
servertest "github.com/tendermint/tendermint/abci/tests/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/abci/version"
"github.com/tendermint/tendermint/crypto/merkle"
)
// client is a global variable so it can be reused by the console
var (
client abcicli.Client
logger log.Logger
)
// flags
var (
// global
flagAddress string
flagAbci string
flagVerbose bool // for the println output
flagLogLevel string // for the logger
// query
flagPath string
flagHeight int
flagProve bool
// counter
flagSerial bool
// kvstore
flagPersist string
)
var RootCmd = &cobra.Command{
Use: "abci-cli",
Short: "the ABCI CLI tool wraps an ABCI client",
Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
switch cmd.Use {
case "counter", "kvstore", "dummy": // for the examples apps, don't pre-run
return nil
case "version": // skip running for version command
return nil
}
if logger == nil {
allowLevel, err := log.AllowLevel(flagLogLevel)
if err != nil {
return err
}
logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
}
if client == nil {
var err error
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
if err != nil {
return err
}
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
return err
}
}
return nil
},
}
// Structure for data passed to print response.
type response struct {
// generic abci response
Data []byte
Code uint32
Info string
Log string
Query *queryResponse
}
type queryResponse struct {
Key []byte
Value []byte
Height int64
Proof *merkle.Proof
}
func Execute() error {
addGlobalFlags()
addCommands()
return RootCmd.Execute()
}
func addGlobalFlags() {
RootCmd.PersistentFlags().StringVarP(&flagAddress, "address", "", "tcp://0.0.0.0:26658", "address of application socket")
RootCmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc")
RootCmd.PersistentFlags().BoolVarP(&flagVerbose, "verbose", "v", false, "print the command and results as if it were a console session")
RootCmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level")
}
func addQueryFlags() {
queryCmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with")
queryCmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at")
queryCmd.PersistentFlags().BoolVarP(&flagProve, "prove", "", false, "whether or not to return a merkle proof of the query result")
}
func addCounterFlags() {
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}
func addDummyFlags() {
dummyCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
func addKVStoreFlags() {
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
func addCommands() {
RootCmd.AddCommand(batchCmd)
RootCmd.AddCommand(consoleCmd)
RootCmd.AddCommand(echoCmd)
RootCmd.AddCommand(infoCmd)
RootCmd.AddCommand(setOptionCmd)
RootCmd.AddCommand(deliverTxCmd)
RootCmd.AddCommand(checkTxCmd)
RootCmd.AddCommand(commitCmd)
RootCmd.AddCommand(versionCmd)
RootCmd.AddCommand(testCmd)
addQueryFlags()
RootCmd.AddCommand(queryCmd)
// examples
addCounterFlags()
RootCmd.AddCommand(counterCmd)
// deprecated, left for backwards compatibility
addDummyFlags()
RootCmd.AddCommand(dummyCmd)
// replaces dummy, see issue #196
addKVStoreFlags()
RootCmd.AddCommand(kvstoreCmd)
}
var batchCmd = &cobra.Command{
Use: "batch",
Short: "run a batch of abci commands against an application",
Long: `run a batch of abci commands against an application
This command is run by piping in a file containing a series of commands
you'd like to run:
abci-cli batch < example.file
where example.file looks something like:
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
info
`,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdBatch(cmd, args)
},
}
var consoleCmd = &cobra.Command{
Use: "console",
Short: "start an interactive ABCI console for multiple commands",
Long: `start an interactive ABCI console for multiple commands
This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
RunE: func(cmd *cobra.Command, args []string) error {
return cmdConsole(cmd, args)
},
}
var echoCmd = &cobra.Command{
Use: "echo",
Short: "have the application echo a message",
Long: "have the application echo a message",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdEcho(cmd, args)
},
}
var infoCmd = &cobra.Command{
Use: "info",
Short: "get some info about the application",
Long: "get some info about the application",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdInfo(cmd, args)
},
}
var setOptionCmd = &cobra.Command{
Use: "set_option",
Short: "set an option on the application",
Long: "set an option on the application",
Args: cobra.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdSetOption(cmd, args)
},
}
var deliverTxCmd = &cobra.Command{
Use: "deliver_tx",
Short: "deliver a new transaction to the application",
Long: "deliver a new transaction to the application",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdDeliverTx(cmd, args)
},
}
var checkTxCmd = &cobra.Command{
Use: "check_tx",
Short: "validate a transaction",
Long: "validate a transaction",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdCheckTx(cmd, args)
},
}
var commitCmd = &cobra.Command{
Use: "commit",
Short: "commit the application state and return the Merkle root hash",
Long: "commit the application state and return the Merkle root hash",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdCommit(cmd, args)
},
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "print ABCI console version",
Long: "print ABCI console version",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println(version.Version)
return nil
},
}
var queryCmd = &cobra.Command{
Use: "query",
Short: "query the application state",
Long: "query the application state",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdQuery(cmd, args)
},
}
var counterCmd = &cobra.Command{
Use: "counter",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdCounter(cmd, args)
},
}
// deprecated, left for backwards compatibility
var dummyCmd = &cobra.Command{
Use: "dummy",
Deprecated: "use: [abci-cli kvstore] instead",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdKVStore(cmd, args)
},
}
var kvstoreCmd = &cobra.Command{
Use: "kvstore",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdKVStore(cmd, args)
},
}
var testCmd = &cobra.Command{
Use: "test",
Short: "run integration tests",
Long: "run integration tests",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdTest(cmd, args)
},
}
// Generates a new args array based on the previous call's args to maintain flag persistence
func persistentArgs(line []byte) []string {
// generate the arguments to run from original os.Args
// to maintain flag arguments
args := os.Args
args = args[:len(args)-1] // remove the previous command argument
if len(line) > 0 { // prevents introduction of extra space leading to argument parse errors
args = append(args, strings.Split(string(line), " ")...)
}
return args
}
//--------------------------------------------------------------------------------
func compose(fs []func() error) error {
if len(fs) == 0 {
return nil
} else {
err := fs[0]()
if err == nil {
return compose(fs[1:])
} else {
return err
}
}
}
func cmdTest(cmd *cobra.Command, args []string) error {
return compose(
[]func() error{
func() error { return servertest.InitChain(client) },
func() error { return servertest.SetOption(client, "serial", "on") },
func() error { return servertest.Commit(client, nil) },
func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
func() error { return servertest.Commit(client, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) },
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
func() error {
return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
},
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
})
}
func cmdBatch(cmd *cobra.Command, args []string) error {
bufReader := bufio.NewReader(os.Stdin)
for {
line, more, err := bufReader.ReadLine()
if more {
return errors.New("Input line is too long")
} else if err == io.EOF {
break
} else if err != nil {
return err
} else if len(line) == 0 {
continue
}
cmdArgs := persistentArgs(line)
if err := muxOnCommands(cmd, cmdArgs); err != nil {
return err
}
fmt.Println()
}
return nil
}
func cmdConsole(cmd *cobra.Command, args []string) error {
for {
fmt.Printf("> ")
bufReader := bufio.NewReader(os.Stdin)
line, more, err := bufReader.ReadLine()
if more {
return errors.New("Input is too long")
} else if err != nil {
return err
}
pArgs := persistentArgs(line)
if err := muxOnCommands(cmd, pArgs); err != nil {
return err
}
}
return nil
}
func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
if len(pArgs) < 2 {
return errors.New("expecting persistent args of the form: abci-cli [command] <...>")
}
// TODO: this parsing is fragile
args := []string{}
for i := 0; i < len(pArgs); i++ {
arg := pArgs[i]
// check for flags
if strings.HasPrefix(arg, "-") {
// if it has an equal, we can just skip
if strings.Contains(arg, "=") {
continue
}
// if its a boolean, we can just skip
_, err := cmd.Flags().GetBool(strings.TrimLeft(arg, "-"))
if err == nil {
continue
}
// otherwise, we need to skip the next one too
i += 1
continue
}
// append the actual arg
args = append(args, arg)
}
var subCommand string
var actualArgs []string
if len(args) > 1 {
subCommand = args[1]
}
if len(args) > 2 {
actualArgs = args[2:]
}
cmd.Use = subCommand // for later print statements ...
switch strings.ToLower(subCommand) {
case "check_tx":
return cmdCheckTx(cmd, actualArgs)
case "commit":
return cmdCommit(cmd, actualArgs)
case "deliver_tx":
return cmdDeliverTx(cmd, actualArgs)
case "echo":
return cmdEcho(cmd, actualArgs)
case "info":
return cmdInfo(cmd, actualArgs)
case "query":
return cmdQuery(cmd, actualArgs)
case "set_option":
return cmdSetOption(cmd, actualArgs)
default:
return cmdUnimplemented(cmd, pArgs)
}
}
func cmdUnimplemented(cmd *cobra.Command, args []string) error {
msg := "unimplemented command"
if len(args) > 0 {
msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " "))
}
printResponse(cmd, args, response{
Code: codeBad,
Log: msg,
})
fmt.Println("Available commands:")
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
fmt.Println("Use \"[command] --help\" for more information about a command.")
return nil
}
// Have the application echo a message
func cmdEcho(cmd *cobra.Command, args []string) error {
msg := ""
if len(args) > 0 {
msg = args[0]
}
res, err := client.EchoSync(msg)
if err != nil {
return err
}
printResponse(cmd, args, response{
Data: []byte(res.Message),
})
return nil
}
// Get some info from the application
func cmdInfo(cmd *cobra.Command, args []string) error {
var version string
if len(args) == 1 {
version = args[0]
}
res, err := client.InfoSync(types.RequestInfo{Version: version})
if err != nil {
return err
}
printResponse(cmd, args, response{
Data: []byte(res.Data),
})
return nil
}
const codeBad uint32 = 10
// Set an option on the application
func cmdSetOption(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "want at least arguments of the form: <key> <value>",
})
return nil
}
key, val := args[0], args[1]
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
if err != nil {
return err
}
printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
return nil
}
// Append a new tx to application
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "want the tx",
})
return nil
}
txBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
}
res, err := client.DeliverTxSync(txBytes)
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
return nil
}
// Validate a tx
func cmdCheckTx(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Info: "want the tx",
})
return nil
}
txBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
}
res, err := client.CheckTxSync(txBytes)
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
return nil
}
// Get application Merkle root hash
func cmdCommit(cmd *cobra.Command, args []string) error {
res, err := client.CommitSync()
if err != nil {
return err
}
printResponse(cmd, args, response{
Data: res.Data,
})
return nil
}
// Query application state
func cmdQuery(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Info: "want the query",
Log: "",
})
return nil
}
queryBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
}
resQuery, err := client.QuerySync(types.RequestQuery{
Data: queryBytes,
Path: flagPath,
Height: int64(flagHeight),
Prove: flagProve,
})
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: resQuery.Code,
Info: resQuery.Info,
Log: resQuery.Log,
Query: &queryResponse{
Key: resQuery.Key,
Value: resQuery.Value,
Height: resQuery.Height,
Proof: resQuery.Proof,
},
})
return nil
}
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewCounterApplication(flagSerial)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})
return nil
}
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
app = kvstore.NewKVStoreApplication()
} else {
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
}
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})
return nil
}
//--------------------------------------------------------------------------------
func printResponse(cmd *cobra.Command, args []string, rsp response) {
if flagVerbose {
fmt.Println(">", cmd.Use, strings.Join(args, " "))
}
// Always print the status code.
if rsp.Code == types.CodeTypeOK {
fmt.Printf("-> code: OK\n")
} else {
fmt.Printf("-> code: %d\n", rsp.Code)
}
if len(rsp.Data) != 0 {
// Do not print this line when using the commit command
// because the string comes out as gibberish
if cmd.Use != "commit" {
fmt.Printf("-> data: %s\n", rsp.Data)
}
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
}
if rsp.Log != "" {
fmt.Printf("-> log: %s\n", rsp.Log)
}
if rsp.Query != nil {
fmt.Printf("-> height: %d\n", rsp.Query.Height)
if rsp.Query.Key != nil {
fmt.Printf("-> key: %s\n", rsp.Query.Key)
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
}
if rsp.Query.Value != nil {
fmt.Printf("-> value: %s\n", rsp.Query.Value)
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
}
if rsp.Query.Proof != nil {
fmt.Printf("-> proof: %#v\n", rsp.Query.Proof)
}
}
}
// NOTE: s is interpreted as a string unless prefixed with 0x
func stringOrHexToBytes(s string) ([]byte, error) {
if len(s) > 2 && strings.ToLower(s[:2]) == "0x" {
b, err := hex.DecodeString(s[2:])
if err != nil {
err = fmt.Errorf("Error decoding hex argument: %s", err.Error())
return nil, err
}
return b, nil
}
if !strings.HasPrefix(s, "\"") || !strings.HasSuffix(s, "\"") {
err := fmt.Errorf("Invalid string arg: \"%s\". Must be quoted or a \"0x\"-prefixed hex string", s)
return nil, err
}
return []byte(s[1 : len(s)-1]), nil
}

View File

@@ -1,14 +0,0 @@
package main
import (
"fmt"
"os"
)
func main() {
err := Execute()
if err != nil {
fmt.Print(err)
os.Exit(1)
}
}

View File

@@ -1,10 +0,0 @@
package code
// Return codes for the examples
const (
CodeTypeOK uint32 = 0
CodeTypeEncodingError uint32 = 1
CodeTypeBadNonce uint32 = 2
CodeTypeUnauthorized uint32 = 3
CodeTypeUnknownError uint32 = 4
)

View File

@@ -1,103 +0,0 @@
package counter
import (
"encoding/binary"
"fmt"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
type CounterApplication struct {
types.BaseApplication
hashCount int
txCount int
serial bool
}
func NewCounterApplication(serial bool) *CounterApplication {
return &CounterApplication{serial: serial}
}
func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
key, value := req.Key, req.Value
if key == "serial" && value == "on" {
app.serial = true
} else {
/*
TODO Panic and have the ABCI server pass an exception.
The client can call SetOptionSync() and get an `error`.
return types.ResponseSetOption{
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
}
*/
return types.ResponseSetOption{}
}
return types.ResponseSetOption{}
}
func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
if app.serial {
if len(tx) > 8 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
}
}
app.txCount++
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx {
if app.serial {
if len(tx) > 8 {
return types.ResponseCheckTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue < uint64(app.txCount) {
return types.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
}
}
return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
app.hashCount++
if app.txCount == 0 {
return types.ResponseCommit{}
}
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return types.ResponseCommit{Data: hash}
}
func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
case "tx":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
default:
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
}
}

View File

@@ -1,3 +0,0 @@
package example
// so the go tool doesn't return errors about no buildable go files ...

View File

@@ -1,156 +0,0 @@
package example
import (
"fmt"
"net"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"golang.org/x/net/context"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
)
func TestKVStore(t *testing.T) {
fmt.Println("### Testing KVStore")
testStream(t, kvstore.NewKVStoreApplication())
}
func TestBaseApp(t *testing.T) {
fmt.Println("### Testing BaseApp")
testStream(t, types.NewBaseApplication())
}
func TestGRPC(t *testing.T) {
fmt.Println("### Testing GRPC")
testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication()))
}
func testStream(t *testing.T, app types.Application) {
numDeliverTxs := 20000
// Start the listener
server := abciserver.NewSocketServer("unix://test.sock", app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := server.Start(); err != nil {
require.NoError(t, err, "Error starting socket server")
}
defer server.Stop()
// Connect to the socket
client := abcicli.NewSocketClient("unix://test.sock", false)
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
if err := client.Start(); err != nil {
t.Fatalf("Error starting socket client: %v", err.Error())
}
defer client.Stop()
done := make(chan struct{})
counter := 0
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
// Process response
switch r := res.Value.(type) {
case *types.Response_DeliverTx:
counter++
if r.DeliverTx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
}
case *types.Response_Flush:
// ignore
default:
t.Error("Unexpected response type", reflect.TypeOf(res.Value))
}
})
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
reqRes := client.DeliverTxAsync([]byte("test"))
_ = reqRes
// check err ?
// Sometimes send flush messages
if counter%123 == 0 {
client.FlushAsync()
// check err ?
}
}
// Send final flush message
client.FlushAsync()
<-done
}
//-------------------------
// test grpc
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
return cmn.Connect(addr)
}
func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
numDeliverTxs := 2000
// Start the listener
server := abciserver.NewGRPCServer("unix://test.sock", app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := server.Start(); err != nil {
t.Fatalf("Error starting GRPC server: %v", err.Error())
}
defer server.Stop()
// Connect to the socket
conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
if err != nil {
t.Fatalf("Error dialing GRPC server: %v", err.Error())
}
defer conn.Close()
client := types.NewABCIApplicationClient(conn)
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
counter++
if response.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", response.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}
}
}

View File

@@ -1 +0,0 @@
node_modules

View File

@@ -1 +0,0 @@
This example has been moved here: https://github.com/tendermint/js-abci/tree/master/example

View File

@@ -1,31 +0,0 @@
# KVStore
There are two apps here: the KVStoreApplication and the PersistentKVStoreApplication.
## KVStoreApplication
The KVStoreApplication is a simple merkle key-value store.
Transactions of the form `key=value` are stored as key-value pairs in the tree.
Transactions without an `=` sign set the value to the key.
The app has no replay protection (other than what the mempool provides).
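As a rough illustration (not part of the original README), the snippet below exercises both transaction forms directly against the in-memory application defined in `kvstore.go` later in this diff; the key and value strings are made up for the example:
```
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// In-memory KVStore application (state backed by a MemDB).
	app := kvstore.NewKVStoreApplication()

	// "key=value" stores value under key.
	app.DeliverTx([]byte("name=satoshi"))
	// A tx without "=" stores the key as its own value.
	app.DeliverTx([]byte("ping"))

	// Read the value back; this app ignores Path and looks up Data directly.
	res := app.Query(types.RequestQuery{Path: "/store", Data: []byte("name")})
	fmt.Printf("%s\n", res.Value) // prints "satoshi"
}
```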
## PersistentKVStoreApplication
The PersistentKVStoreApplication wraps the KVStoreApplication
and provides two additional features:
1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
2) validator set changes
The state is persisted in leveldb along with the last block committed,
and the Handshake allows any necessary blocks to be replayed.
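As an informal sketch (again, not from the original README), a persistent instance is constructed from a database directory, mirroring what the tests in `kvstore_test.go` below do; the directory name and tx contents are made up for the example:
```
package main

import (
	"io/ioutil"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	// A temporary directory stands in for a real data dir; the leveldb
	// database named "kvstore" is created inside it.
	dir, err := ioutil.TempDir("", "abci-kvstore-example")
	if err != nil {
		panic(err)
	}

	app := kvstore.NewPersistentKVStoreApplication(dir)
	app.SetLogger(log.NewNopLogger())

	// Initialize with one random validator via the InitKVStore helper from
	// helpers.go, then apply a tx and commit; the resulting state (size,
	// height, app hash) is persisted to leveldb in dir.
	kvstore.InitKVStore(app)
	app.DeliverTx([]byte("name=satoshi"))
	app.Commit()
}
```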
Validator set changes are effected using transactions of the following format:
```
val:pubkey/power
```
where `pubkey` is the hex-encoded 32-byte ed25519 public key of the validator (possibly a new one) and `power` is its new voting power.
There is no sybil protection against new validators joining.
Validators can be removed by setting their power to `0`.
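For a concrete sketch (not in the original README), the helpers defined in `helpers.go` and `persistent_kvstore.go` further down in this diff can be combined to build such a transaction:
```
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func main() {
	// One random validator update (random 32-byte ed25519 key, random power)
	// from the RandVal helper in helpers.go.
	v := kvstore.RandVal(0)

	// Build the "val:pubkey/power" transaction; delivering it through
	// DeliverTx updates the validator set returned at the next EndBlock.
	// Passing power 0 instead would remove the validator.
	tx := kvstore.MakeValSetChangeTx(v.PubKey, v.Power)
	fmt.Printf("%s\n", tx) // e.g. val:<64 hex characters>/<power>
}
```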

View File

@@ -1,36 +0,0 @@
package kvstore
import (
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
// RandVal creates one random validator with a random 32-byte ed25519 key
// and a random power (the index argument is currently unused)
func RandVal(i int) types.ValidatorUpdate {
pubkey := cmn.RandBytes(32)
power := cmn.RandUint16() + 1
v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
return v
}
// RandVals returns a list of cnt validators for initializing
// the application. Note that both the keys and the powers are random
// (the index passed to RandVal is currently unused; change this if
// deterministic values are desired)
func RandVals(cnt int) []types.ValidatorUpdate {
res := make([]types.ValidatorUpdate, cnt)
for i := 0; i < cnt; i++ {
res[i] = RandVal(i)
}
return res
}
// InitKVStore initializes the kvstore app with some data,
// which allows tests to pass and is fine as long as you
// don't make any tx that modifies the validator state
func InitKVStore(app *PersistentKVStoreApplication) {
app.InitChain(types.RequestInitChain{
Validators: RandVals(1),
})
}

View File

@@ -1,134 +0,0 @@
package kvstore
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/version"
)
var (
stateKey = []byte("stateKey")
kvPairPrefixKey = []byte("kvPairKey:")
ProtocolVersion version.Protocol = 0x1
)
type State struct {
db dbm.DB
Size int64 `json:"size"`
Height int64 `json:"height"`
AppHash []byte `json:"app_hash"`
}
func loadState(db dbm.DB) State {
stateBytes := db.Get(stateKey)
var state State
if len(stateBytes) != 0 {
err := json.Unmarshal(stateBytes, &state)
if err != nil {
panic(err)
}
}
state.db = db
return state
}
func saveState(state State) {
stateBytes, err := json.Marshal(state)
if err != nil {
panic(err)
}
state.db.Set(stateKey, stateBytes)
}
func prefixKey(key []byte) []byte {
return append(kvPairPrefixKey, key...)
}
//---------------------------------------------------
var _ types.Application = (*KVStoreApplication)(nil)
type KVStoreApplication struct {
types.BaseApplication
state State
}
func NewKVStoreApplication() *KVStoreApplication {
state := loadState(dbm.NewMemDB())
return &KVStoreApplication{state: state}
}
func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
return types.ResponseInfo{
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
Version: version.ABCIVersion,
AppVersion: ProtocolVersion.Uint64(),
}
}
// tx is either "key=value" or just arbitrary bytes
func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
var key, value []byte
parts := bytes.Split(tx, []byte("="))
if len(parts) == 2 {
key, value = parts[0], parts[1]
} else {
key, value = tx, tx
}
app.state.db.Set(prefixKey(key), value)
app.state.Size += 1
tags := []cmn.KVPair{
{Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")},
{Key: []byte("app.key"), Value: key},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
}
func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
func (app *KVStoreApplication) Commit() types.ResponseCommit {
// Using a memdb - just return the big endian size of the db
appHash := make([]byte, 8)
binary.PutVarint(appHash, app.state.Size)
app.state.AppHash = appHash
app.state.Height += 1
saveState(app.state)
return types.ResponseCommit{Data: appHash}
}
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
if reqQuery.Prove {
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Index = -1 // TODO make Proof return index
resQuery.Key = reqQuery.Data
resQuery.Value = value
if value != nil {
resQuery.Log = "exists"
} else {
resQuery.Log = "does not exist"
}
return
} else {
resQuery.Key = reqQuery.Data
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Value = value
if value != nil {
resQuery.Log = "exists"
} else {
resQuery.Log = "does not exist"
}
return
}
}

View File

@@ -1,311 +0,0 @@
package kvstore
import (
"bytes"
"fmt"
"io/ioutil"
"sort"
"testing"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
)
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
ar := app.DeliverTx(tx)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar = app.DeliverTx(tx)
require.False(t, ar.IsErr(), ar)
// make sure query is fine
resQuery := app.Query(types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
// make sure proof is fine
resQuery = app.Query(types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,
})
require.EqualValues(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
}
func TestKVStoreKV(t *testing.T) {
kvstore := NewKVStoreApplication()
key := "abc"
value := key
tx := []byte(key)
testKVStore(t, kvstore, tx, key, value)
value = "def"
tx = []byte(key + "=" + value)
testKVStore(t, kvstore, tx, key, value)
}
func TestPersistentKVStoreKV(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
key := "abc"
value := key
tx := []byte(key)
testKVStore(t, kvstore, tx, key, value)
value = "def"
tx = []byte(key + "=" + value)
testKVStore(t, kvstore, tx, key, value)
}
func TestPersistentKVStoreInfo(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
InitKVStore(kvstore)
height := int64(0)
resInfo := kvstore.Info(types.RequestInfo{})
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}
// make and apply block
height = int64(1)
hash := []byte("foo")
header := types.Header{
Height: int64(height),
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}
}
// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
// init with some validators
total := 10
nInit := 5
vals := RandVals(total)
// initialize with the first nInit
kvstore.InitChain(types.RequestInitChain{
Validators: vals[:nInit],
})
vals1, vals2 := vals[:nInit], kvstore.Validators()
valsEqual(t, vals1, vals2)
var v1, v2, v3 types.ValidatorUpdate
// add some validators
v1, v2 = vals[nInit], vals[nInit+1]
diff := []types.ValidatorUpdate{v1, v2}
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
vals1, vals2 = vals[:nInit+2], kvstore.Validators()
valsEqual(t, vals1, vals2)
// remove some validators
v1, v2, v3 = vals[nInit-2], vals[nInit-1], vals[nInit]
v1.Power = 0
v2.Power = 0
v3.Power = 0
diff = []types.ValidatorUpdate{v1, v2, v3}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
vals1 = append(vals[:nInit-2], vals[nInit+1])
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
// update some validators
v1 = vals[0]
if v1.Power == 5 {
v1.Power = 6
} else {
v1.Power = 5
}
diff = []types.ValidatorUpdate{v1}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
makeApplyBlock(t, kvstore, 3, diff, tx1)
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
}
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
header := types.Header{
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(tx); r.IsErr() {
t.Fatal(r)
}
}
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
}
// order doesn't matter
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
if len(vals1) != len(vals2) {
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
}
sort.Sort(types.ValidatorUpdates(vals1))
sort.Sort(types.ValidatorUpdates(vals2))
for i, v1 := range vals1 {
v2 := vals2[i]
if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) ||
v1.Power != v2.Power {
t.Fatalf("vals dont match at index %d. got %X/%d , expected %X/%d", i, v2.PubKey, v2.Power, v1.PubKey, v1.Power)
}
}
}
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
server := abciserver.NewSocketServer(socket, app)
server.SetLogger(logger.With("module", "abci-server"))
if err := server.Start(); err != nil {
return nil, nil, err
}
// Connect to the socket
client := abcicli.NewSocketClient(socket, false)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
server.Stop()
return nil, nil, err
}
return client, server, nil
}
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
gapp := types.NewGRPCApplication(app)
server := abciserver.NewGRPCServer(socket, gapp)
server.SetLogger(logger.With("module", "abci-server"))
if err := server.Start(); err != nil {
return nil, nil, err
}
client := abcicli.NewGRPCClient(socket, true)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
server.Stop()
return nil, nil, err
}
return client, server, nil
}
func TestClientServer(t *testing.T) {
// set up socket app
kvstore := NewKVStoreApplication()
client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
require.Nil(t, err)
defer server.Stop()
defer client.Stop()
runClientTests(t, client)
// set up grpc app
kvstore = NewKVStoreApplication()
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
require.Nil(t, err)
defer gserver.Stop()
defer gclient.Stop()
runClientTests(t, gclient)
}
func runClientTests(t *testing.T, client abcicli.Client) {
// run some tests....
key := "abc"
value := key
tx := []byte(key)
testClient(t, client, tx, key, value)
value = "def"
tx = []byte(key + "=" + value)
testClient(t, client, tx, key, value)
}
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
ar, err := app.DeliverTxSync(tx)
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTxSync(tx)
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// make sure query is fine
resQuery, err := app.QuerySync(types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
require.Nil(t, err)
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
// make sure proof is fine
resQuery, err = app.QuerySync(types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,
})
require.Nil(t, err)
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
}

View File

@@ -1,199 +0,0 @@
package kvstore
import (
"bytes"
"encoding/hex"
"fmt"
"strconv"
"strings"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
)
const (
ValidatorSetChangePrefix string = "val:"
)
//-----------------------------------------
var _ types.Application = (*PersistentKVStoreApplication)(nil)
type PersistentKVStoreApplication struct {
app *KVStoreApplication
// validator set
ValUpdates []types.ValidatorUpdate
logger log.Logger
}
func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
name := "kvstore"
db, err := dbm.NewGoLevelDB(name, dbDir)
if err != nil {
panic(err)
}
state := loadState(db)
return &PersistentKVStoreApplication{
app: &KVStoreApplication{state: state},
logger: log.NewNopLogger(),
}
}
func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
app.logger = l
}
func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
res := app.app.Info(req)
res.LastBlockHeight = app.app.state.Height
res.LastBlockAppHash = app.app.state.AppHash
return res
}
func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
return app.app.SetOption(req)
}
// tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey/power"
if isValidatorTx(tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(tx)
}
// otherwise, update the key-value store
return app.app.DeliverTx(tx)
}
func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return app.app.CheckTx(tx)
}
// Commit will panic if InitChain was not called
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
return app.app.Commit()
}
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
return app.app.Query(reqQuery)
}
// Save the validators in the merkle tree
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
for _, v := range req.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("Error updating validators", "r", r)
}
}
return types.ResponseInitChain{}
}
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
return types.ResponseBeginBlock{}
}
// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
}
//---------------------------------------------
// update validators
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
itr := app.app.state.db.Iterator(nil, nil)
for ; itr.Valid(); itr.Next() {
if isValidatorTx(itr.Key()) {
validator := new(types.ValidatorUpdate)
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
if err != nil {
panic(err)
}
validators = append(validators, *validator)
}
}
return
}
func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
}
func isValidatorTx(tx []byte) bool {
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}
// format is "val:pubkey/power"
// pubkey is raw 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
tx = tx[len(ValidatorSetChangePrefix):]
// get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "/")
if len(pubKeyAndPower) != 2 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey/power'. Got %v", pubKeyAndPower)}
}
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
// decode the pubkey
pubkey, err := hex.DecodeString(pubkeyS)
if err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)}
}
// decode the power
power, err := strconv.ParseInt(powerS, 10, 64)
if err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
}
// update
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power)))
}
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
key := []byte("val:" + string(v.PubKey.Data))
if v.Power == 0 {
// remove validator
if !app.app.state.db.Has(key) {
return types.ResponseDeliverTx{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %X", key)}
}
app.app.state.db.Delete(key)
} else {
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
if err := types.WriteMessage(&v, value); err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Error encoding validator: %v", err)}
}
app.app.state.db.Set(key, value.Bytes())
}
// we only update the changes array if we successfully updated the tree
app.ValUpdates = append(app.ValUpdates, v)
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

View File

@@ -1,50 +0,0 @@
from wire import decode_string
# map type_byte to message name
message_types = {
0x01: "echo",
0x02: "flush",
0x03: "info",
0x04: "set_option",
0x21: "deliver_tx",
0x22: "check_tx",
0x23: "commit",
0x24: "add_listener",
0x25: "rm_listener",
}
# return the decoded arguments of abci messages
class RequestDecoder():
def __init__(self, reader):
self.reader = reader
def echo(self):
return decode_string(self.reader)
def flush(self):
return
def info(self):
return
def set_option(self):
return decode_string(self.reader), decode_string(self.reader)
def deliver_tx(self):
return decode_string(self.reader)
def check_tx(self):
return decode_string(self.reader)
def commit(self):
return
def add_listener(self):
# TODO
return
def rm_listener(self):
# TODO
return

View File

@@ -1,56 +0,0 @@
# Simple read() method around a bytearray
class BytesBuffer():
def __init__(self, b):
self.buf = b
self.readCount = 0
def count(self):
return self.readCount
def reset_count(self):
self.readCount = 0
def size(self):
return len(self.buf)
def peek(self):
return self.buf[0]
def write(self, b):
# b should be castable to byte array
self.buf += bytearray(b)
def read(self, n):
if len(self.buf) < n:
print "reader err: buf less than n"
# TODO: exception
return
self.readCount += n
r = self.buf[:n]
self.buf = self.buf[n:]
return r
# Buffer bytes off a tcp connection and read them off in chunks
class ConnReader():
def __init__(self, conn):
self.conn = conn
self.buf = bytearray()
# blocking
def read(self, n):
while n > len(self.buf):
moreBuf = self.conn.recv(1024)
if not moreBuf:
raise IOError("dead connection")
self.buf = self.buf + bytearray(moreBuf)
r = self.buf[:n]
self.buf = self.buf[n:]
return r

View File

@@ -1,202 +0,0 @@
import socket
import select
import sys
from wire import decode_varint, encode
from reader import BytesBuffer
from msg import RequestDecoder, message_types
# hold the asynchronous state of a connection
# i.e. we may not get enough bytes in one read to decode the message
class Connection():
def __init__(self, fd, app):
self.fd = fd
self.app = app
self.recBuf = BytesBuffer(bytearray())
self.resBuf = BytesBuffer(bytearray())
self.msgLength = 0
self.decoder = RequestDecoder(self.recBuf)
self.inProgress = False # are we in the middle of a message
def recv(this):
data = this.fd.recv(1024)
if not data: # what about len(data) == 0
raise IOError("dead connection")
this.recBuf.write(data)
# ABCI server responds to messages by calling methods on the app
class ABCIServer():
def __init__(self, app, port=5410):
self.app = app
# map conn file descriptors to (app, reqBuf, resBuf, msgDecoder)
self.appMap = {}
self.port = port
self.listen_backlog = 10
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.setblocking(0)
self.listener.bind(('', port))
self.listener.listen(self.listen_backlog)
self.shutdown = False
self.read_list = [self.listener]
self.write_list = []
def handle_new_connection(self, r):
new_fd, new_addr = r.accept()
new_fd.setblocking(0) # non-blocking
self.read_list.append(new_fd)
self.write_list.append(new_fd)
print 'new connection to', new_addr
self.appMap[new_fd] = Connection(new_fd, self.app)
def handle_conn_closed(self, r):
self.read_list.remove(r)
self.write_list.remove(r)
r.close()
print "connection closed"
def handle_recv(self, r):
# app, recBuf, resBuf, conn
conn = self.appMap[r]
while True:
try:
print "recv loop"
# check if we need more data first
if conn.inProgress:
if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength):
conn.recv()
else:
if conn.recBuf.size() == 0:
conn.recv()
conn.inProgress = True
# see if we have enough to get the message length
if conn.msgLength == 0:
ll = conn.recBuf.peek()
if conn.recBuf.size() < 1 + ll:
# we don't have enough bytes to read the length yet
return
print "decoding msg length"
conn.msgLength = decode_varint(conn.recBuf)
# see if we have enough to decode the message
if conn.recBuf.size() < conn.msgLength:
return
# now we can decode the message
# first read the request type and get the particular msg
# decoder
typeByte = conn.recBuf.read(1)
typeByte = int(typeByte[0])
resTypeByte = typeByte + 0x10
req_type = message_types[typeByte]
if req_type == "flush":
# messages are length prefixed
conn.resBuf.write(encode(1))
conn.resBuf.write([resTypeByte])
conn.fd.send(str(conn.resBuf.buf))
conn.msgLength = 0
conn.inProgress = False
conn.resBuf = BytesBuffer(bytearray())
return
decoder = getattr(conn.decoder, req_type)
print "decoding args"
req_args = decoder()
print "got args", req_args
# done decoding message
conn.msgLength = 0
conn.inProgress = False
req_f = getattr(conn.app, req_type)
if req_args is None:
res = req_f()
elif isinstance(req_args, tuple):
res = req_f(*req_args)
else:
res = req_f(req_args)
if isinstance(res, tuple):
res, ret_code = res
else:
ret_code = res
res = None
print "called", req_type, "ret code:", ret_code
if ret_code != 0:
print "non-zero retcode:", ret_code
if req_type in ("echo", "info"): # these don't return a ret code
enc = encode(res)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(enc)
else:
enc, encRet = encode(res), encode(ret_code)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + len(encRet) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(encRet)
conn.resBuf.write(enc)
except TypeError as e:
print "TypeError on reading from connection:", e
self.handle_conn_closed(r)
return
except ValueError as e:
print "ValueError on reading from connection:", e
self.handle_conn_closed(r)
return
except IOError as e:
print "IOError on reading from connection:", e
self.handle_conn_closed(r)
return
except Exception as e:
# sys.exc_info()[0] # TODO better
print "error reading from connection", str(e)
self.handle_conn_closed(r)
return
def main_loop(self):
while not self.shutdown:
r_list, w_list, _ = select.select(
self.read_list, self.write_list, [], 2.5)
for r in r_list:
if (r == self.listener):
try:
self.handle_new_connection(r)
# undo adding to read list ...
except NameError as e:
print "Could not connect due to NameError:", e
except TypeError as e:
print "Could not connect due to TypeError:", e
except:
print "Could not connect due to unexpected error:", sys.exc_info()[0]
else:
self.handle_recv(r)
def handle_shutdown(self):
for r in self.read_list:
r.close()
for w in self.write_list:
try:
w.close()
except Exception as e:
print(e) # TODO: add logging
self.shutdown = True

View File

@@ -1,115 +0,0 @@
# the decoder works off a reader
# the encoder returns bytearray
def hex2bytes(h):
return bytearray(h.decode('hex'))
def bytes2hex(b):
if type(b) in (str, unicode):
return "".join([hex(ord(c))[2:].zfill(2) for c in b])
else:
return bytes2hex(b.decode())
# expects uvarint64 (no crazy big nums!)
def uvarint_size(i):
if i == 0:
return 0
for j in xrange(1, 8):
if i < 1 << j * 8:
return j
return 8
# expects i < 2**size
def encode_big_endian(i, size):
if size == 0:
return bytearray()
return encode_big_endian(i / 256, size - 1) + bytearray([i % 256])
def decode_big_endian(reader, size):
if size == 0:
return 0
firstByte = reader.read(1)[0]
return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1)
# ints are max 16 bytes long
def encode_varint(i):
negate = False
if i < 0:
negate = True
i = -i
size = uvarint_size(i)
if size == 0:
return bytearray([0])
big_end = encode_big_endian(i, size)
if negate:
size += 0xF0
return bytearray([size]) + big_end
# returns the int and what's left of the byte array
def decode_varint(reader):
size = reader.read(1)[0]
if size == 0:
return 0
negate = True if size > int(0xF0) else False
if negate:
size = size - 0xF0
i = decode_big_endian(reader, size)
if negate:
i = i * (-1)
return i
def encode_string(s):
size = encode_varint(len(s))
return size + bytearray(s)
def decode_string(reader):
length = decode_varint(reader)
return str(reader.read(length))
def encode_list(s):
b = bytearray()
map(b.extend, map(encode, s))
return encode_varint(len(s)) + b
def encode(s):
if s is None:
return bytearray()
if isinstance(s, int):
return encode_varint(s)
elif isinstance(s, str):
return encode_string(s)
elif isinstance(s, list):
return encode_list(s)
else:
print "UNSUPPORTED TYPE!", type(s), s
if __name__ == '__main__':
ns = [100, 100, 1000, 256]
ss = [2, 5, 5, 2]
bs = map(encode_big_endian, ns, ss)
ds = map(decode_big_endian, bs, ss)
print ns
print [i[0] for i in ds]
ss = ["abc", "hi there jim", "ok now what"]
e = map(encode_string, ss)
d = map(decode_string, e)
print ss
print [i[0] for i in d]

View File

@@ -1,82 +0,0 @@
import sys
from abci.wire import hex2bytes, decode_big_endian, encode_big_endian
from abci.server import ABCIServer
from abci.reader import BytesBuffer
class CounterApplication():
def __init__(self):
sys.exit("The python example is out of date. Upgrading the Python examples is currently left as an exercise to you.")
self.hashCount = 0
self.txCount = 0
self.serial = False
def echo(self, msg):
return msg, 0
def info(self):
return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0
def set_option(self, key, value):
if key == "serial" and value == "on":
self.serial = True
return 0
def deliver_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue != self.txCount:
return None, 6
self.txCount += 1
return None, 0
def check_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue < self.txCount:
return 6
return 0
def commit(self):
self.hashCount += 1
if self.txCount == 0:
return "", 0
h = encode_big_endian(self.txCount, 8)
h.reverse()
return str(h), 0
def add_listener(self):
return 0
def rm_listener(self):
return 0
def event(self):
return
if __name__ == '__main__':
l = len(sys.argv)
if l == 1:
port = 26658
elif l == 2:
port = int(sys.argv[1])
else:
print "too many arguments"
quit()
print 'ABCI Demo APP (Python)'
app = CounterApplication()
server = ABCIServer(app, port)
server.main_loop()

View File

@@ -1,50 +0,0 @@
from .wire import decode_string
# map type_byte to message name
message_types = {
0x01: "echo",
0x02: "flush",
0x03: "info",
0x04: "set_option",
0x21: "deliver_tx",
0x22: "check_tx",
0x23: "commit",
0x24: "add_listener",
0x25: "rm_listener",
}
# return the decoded arguments of abci messages
class RequestDecoder():
def __init__(self, reader):
self.reader = reader
def echo(self):
return decode_string(self.reader)
def flush(self):
return
def info(self):
return
def set_option(self):
return decode_string(self.reader), decode_string(self.reader)
def deliver_tx(self):
return decode_string(self.reader)
def check_tx(self):
return decode_string(self.reader)
def commit(self):
return
def add_listener(self):
# TODO
return
def rm_listener(self):
# TODO
return

View File

@@ -1,56 +0,0 @@
# Simple read() method around a bytearray
class BytesBuffer():
def __init__(self, b):
self.buf = b
self.readCount = 0
def count(self):
return self.readCount
def reset_count(self):
self.readCount = 0
def size(self):
return len(self.buf)
def peek(self):
return self.buf[0]
def write(self, b):
# b should be castable to byte array
self.buf += bytearray(b)
def read(self, n):
if len(self.buf) < n:
print("reader err: buf less than n")
# TODO: exception
return
self.readCount += n
r = self.buf[:n]
self.buf = self.buf[n:]
return r
# Buffer bytes off a tcp connection and read them off in chunks
class ConnReader():
def __init__(self, conn):
self.conn = conn
self.buf = bytearray()
# blocking
def read(self, n):
while n > len(self.buf):
moreBuf = self.conn.recv(1024)
if not moreBuf:
raise IOError("dead connection")
self.buf = self.buf + bytearray(moreBuf)
r = self.buf[:n]
self.buf = self.buf[n:]
return r

View File

@@ -1,196 +0,0 @@
import socket
import select
import sys
import logging
from .wire import decode_varint, encode
from .reader import BytesBuffer
from .msg import RequestDecoder, message_types
# hold the asynchronous state of a connection
# i.e. we may not get enough bytes on one read to decode the message
logger = logging.getLogger(__name__)
class Connection():
def __init__(self, fd, app):
self.fd = fd
self.app = app
self.recBuf = BytesBuffer(bytearray())
self.resBuf = BytesBuffer(bytearray())
self.msgLength = 0
self.decoder = RequestDecoder(self.recBuf)
self.inProgress = False # are we in the middle of a message
def recv(this):
data = this.fd.recv(1024)
if not data: # what about len(data) == 0
raise IOError("dead connection")
this.recBuf.write(data)
# ABCI server responds to messages by calling methods on the app
class ABCIServer():
def __init__(self, app, port=5410):
self.app = app
# map conn file descriptors to (app, reqBuf, resBuf, msgDecoder)
self.appMap = {}
self.port = port
self.listen_backlog = 10
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.setblocking(0)
self.listener.bind(('', port))
self.listener.listen(self.listen_backlog)
self.shutdown = False
self.read_list = [self.listener]
self.write_list = []
def handle_new_connection(self, r):
new_fd, new_addr = r.accept()
new_fd.setblocking(0) # non-blocking
self.read_list.append(new_fd)
self.write_list.append(new_fd)
print('new connection to', new_addr)
self.appMap[new_fd] = Connection(new_fd, self.app)
def handle_conn_closed(self, r):
self.read_list.remove(r)
self.write_list.remove(r)
r.close()
print("connection closed")
def handle_recv(self, r):
# app, recBuf, resBuf, conn
conn = self.appMap[r]
while True:
try:
print("recv loop")
# check if we need more data first
if conn.inProgress:
if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength):
conn.recv()
else:
if conn.recBuf.size() == 0:
conn.recv()
conn.inProgress = True
# see if we have enough to get the message length
if conn.msgLength == 0:
ll = conn.recBuf.peek()
if conn.recBuf.size() < 1 + ll:
# we don't have enough bytes to read the length yet
return
print("decoding msg length")
conn.msgLength = decode_varint(conn.recBuf)
# see if we have enough to decode the message
if conn.recBuf.size() < conn.msgLength:
return
# now we can decode the message
# first read the request type and get the particular msg
# decoder
typeByte = conn.recBuf.read(1)
typeByte = int(typeByte[0])
resTypeByte = typeByte + 0x10
req_type = message_types[typeByte]
if req_type == "flush":
# messages are length prefixed
conn.resBuf.write(encode(1))
conn.resBuf.write([resTypeByte])
conn.fd.send(conn.resBuf.buf)
conn.msgLength = 0
conn.inProgress = False
conn.resBuf = BytesBuffer(bytearray())
return
decoder = getattr(conn.decoder, req_type)
print("decoding args")
req_args = decoder()
print("got args", req_args)
# done decoding message
conn.msgLength = 0
conn.inProgress = False
req_f = getattr(conn.app, req_type)
if req_args is None:
res = req_f()
elif isinstance(req_args, tuple):
res = req_f(*req_args)
else:
res = req_f(req_args)
if isinstance(res, tuple):
res, ret_code = res
else:
ret_code = res
res = None
print("called", req_type, "ret code:", ret_code, 'res:', res)
if ret_code != 0:
print("non-zero retcode:", ret_code)
if req_type in ("echo", "info"): # these dont return a ret code
enc = encode(res)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(enc)
else:
enc, encRet = encode(res), encode(ret_code)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + len(encRet) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(encRet)
conn.resBuf.write(enc)
except IOError as e:
print("IOError on reading from connection:", e)
self.handle_conn_closed(r)
return
except Exception as e:
logger.exception("error reading from connection")
self.handle_conn_closed(r)
return
def main_loop(self):
while not self.shutdown:
r_list, w_list, _ = select.select(
self.read_list, self.write_list, [], 2.5)
for r in r_list:
if (r == self.listener):
try:
self.handle_new_connection(r)
# undo adding to read list ...
except NameError as e:
print("Could not connect due to NameError:", e)
except TypeError as e:
print("Could not connect due to TypeError:", e)
except:
print("Could not connect due to unexpected error:", sys.exc_info()[0])
else:
self.handle_recv(r)
def handle_shutdown(self):
for r in self.read_list:
r.close()
for w in self.write_list:
try:
w.close()
except Exception as e:
print(e) # TODO: add logging
self.shutdown = True
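
A note on the framing used above: every response is written as a wire.py-style varint length prefix, then a response type byte equal to the request type byte plus 0x10, then the encoded fields. The Go sketch below only illustrates that byte layout for a flush response; the type-byte constants are copied from msg.py and the snippet is not part of this repository.

package main

import "fmt"

// Request type bytes copied from msg.py; the matching response byte is the
// request byte plus 0x10 (so flush 0x02 -> 0x12).
var messageTypes = map[string]byte{
	"echo":       0x01,
	"flush":      0x02,
	"info":       0x03,
	"set_option": 0x04,
	"deliver_tx": 0x21,
	"check_tx":   0x22,
	"commit":     0x23,
}

func main() {
	resType := messageTypes["flush"] + 0x10 // 0x12
	// wire.py encodes the integer 1 as two bytes: a size byte 0x01 and one
	// big-endian byte 0x01. A flush response is just that length prefix
	// followed by the response type byte.
	frame := []byte{0x01, 0x01, resType}
	fmt.Printf("% X\n", frame) // 01 01 12
}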

View File

@@ -1,119 +0,0 @@
# the decoder works off a reader
# the encoder returns bytearray
def hex2bytes(h):
return bytearray(h.decode('hex'))
def bytes2hex(b):
if isinstance(b, str):
return "".join([hex(ord(c))[2:].zfill(2) for c in b])
else:
return bytes2hex(b.decode())
# expects uvarint64 (no crazy big nums!)
def uvarint_size(i):
if i == 0:
return 0
for j in range(1, 8):
if i < 1 << j * 8:
return j
return 8
# expects i < 2**size
def encode_big_endian(i, size):
if size == 0:
return bytearray()
return encode_big_endian(i // 256, size - 1) + bytearray([i % 256])
def decode_big_endian(reader, size):
if size == 0:
return 0
firstByte = reader.read(1)[0]
return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1)
# ints are max 16 bytes long
def encode_varint(i):
negate = False
if i < 0:
negate = True
i = -i
size = uvarint_size(i)
if size == 0:
return bytearray([0])
big_end = encode_big_endian(i, size)
if negate:
size += 0xF0
return bytearray([size]) + big_end
# returns the int and what's left of the byte array
def decode_varint(reader):
size = reader.read(1)[0]
if size == 0:
return 0
negate = True if size > int(0xF0) else False
if negate:
size = size - 0xF0
i = decode_big_endian(reader, size)
if negate:
i = i * (-1)
return i
def encode_string(s):
size = encode_varint(len(s))
return size + bytearray(s, 'utf8')
def decode_string(reader):
length = decode_varint(reader)
raw_data = reader.read(length)
return raw_data.decode()
def encode_list(s):
b = bytearray()
for item in s:
    b.extend(encode(item))
return encode_varint(len(s)) + b
def encode(s):
print('encoding', repr(s))
if s is None:
return bytearray()
if isinstance(s, int):
return encode_varint(s)
elif isinstance(s, str):
return encode_string(s)
elif isinstance(s, list):
return encode_list(s)
elif isinstance(s, bytearray):
return encode_string(s)
else:
print("UNSUPPORTED TYPE!", type(s), s)
if __name__ == '__main__':
ns = [100, 100, 1000, 256]
ss = [2, 5, 5, 2]
bs = list(map(encode_big_endian, ns, ss))
ds = list(map(decode_big_endian, bs, ss))
print(ns)
print([i[0] for i in ds])
ss = ["abc", "hi there jim", "ok now what"]
e = list(map(encode_string, ss))
d = list(map(decode_string, e))
print(ss)
print([i[0] for i in d])
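
The integer encoding in wire.py is not protobuf's varint: it is one size byte (with 0xF0 added when the value is negative) followed by that many big-endian bytes. A minimal Go sketch of the same scheme, included only to make the byte layout concrete; it mirrors the Python functions above and is not an API from this repository.

package main

import "fmt"

// uvarintSize returns how many big-endian bytes are needed for i (0 for zero).
func uvarintSize(i uint64) int {
	if i == 0 {
		return 0
	}
	for j := 1; j < 8; j++ {
		if i < uint64(1)<<uint(8*j) {
			return j
		}
	}
	return 8
}

// encodeBigEndian writes i as exactly size big-endian bytes.
func encodeBigEndian(i uint64, size int) []byte {
	buf := make([]byte, size)
	for k := size - 1; k >= 0; k-- {
		buf[k] = byte(i % 256)
		i /= 256
	}
	return buf
}

// encodeVarint mirrors wire.py: a size byte (plus 0xF0 if negative), then big-endian bytes.
func encodeVarint(i int64) []byte {
	negate := false
	if i < 0 {
		negate = true
		i = -i
	}
	size := uvarintSize(uint64(i))
	if size == 0 {
		return []byte{0}
	}
	prefix := byte(size)
	if negate {
		prefix += 0xF0
	}
	return append([]byte{prefix}, encodeBigEndian(uint64(i), size)...)
}

func main() {
	fmt.Printf("% X\n", encodeVarint(256)) // 02 01 00
	fmt.Printf("% X\n", encodeVarint(-5))  // F1 05
	fmt.Printf("% X\n", encodeVarint(0))   // 00
}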

View File

@@ -1,82 +0,0 @@
import sys
from abci.wire import hex2bytes, decode_big_endian, encode_big_endian
from abci.server import ABCIServer
from abci.reader import BytesBuffer
class CounterApplication():
def __init__(self):
sys.exit("The python example is out of date. Upgrading the Python examples is currently left as an exercise to you.")
self.hashCount = 0
self.txCount = 0
self.serial = False
def echo(self, msg):
return msg, 0
def info(self):
return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0
def set_option(self, key, value):
if key == "serial" and value == "on":
self.serial = True
return 0
def deliver_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue != self.txCount:
return None, 6
self.txCount += 1
return None, 0
def check_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue < self.txCount:
return 6
return 0
def commit(self):
self.hashCount += 1
if self.txCount == 0:
return "", 0
h = encode_big_endian(self.txCount, 8)
h.reverse()
return h.decode(), 0
def add_listener(self):
return 0
def rm_listener(self):
return 0
def event(self):
return
if __name__ == '__main__':
l = len(sys.argv)
if l == 1:
port = 26658
elif l == 2:
port = int(sys.argv[1])
else:
print("too many arguments")
quit()
print('ABCI Demo APP (Python)')
app = CounterApplication()
server = ABCIServer(app, port)
server.main_loop()

View File

@@ -1,57 +0,0 @@
package server
import (
"net"
"google.golang.org/grpc"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
type GRPCServer struct {
cmn.BaseService
proto string
addr string
listener net.Listener
server *grpc.Server
app types.ABCIApplicationServer
}
// NewGRPCServer returns a new gRPC ABCI server
func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) cmn.Service {
proto, addr := cmn.ProtocolAndAddress(protoAddr)
s := &GRPCServer{
proto: proto,
addr: addr,
listener: nil,
app: app,
}
s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s)
return s
}
// OnStart starts the gRPC service
func (s *GRPCServer) OnStart() error {
if err := s.BaseService.OnStart(); err != nil {
return err
}
ln, err := net.Listen(s.proto, s.addr)
if err != nil {
return err
}
s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
s.listener = ln
s.server = grpc.NewServer()
types.RegisterABCIApplicationServer(s.server, s.app)
go s.server.Serve(s.listener)
return nil
}
// OnStop stops the gRPC server
func (s *GRPCServer) OnStop() {
s.BaseService.OnStop()
s.server.Stop()
}
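
For completeness, a client can talk to this gRPC server through the stub that gogo/protobuf generates for the ABCIApplication service. The sketch below assumes the generated constructor is named types.NewABCIApplicationClient (the usual protoc naming convention); treat it as an illustration rather than code from this repository.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:26658", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewABCIApplicationClient is the assumed name of the generated gRPC stub.
	client := types.NewABCIApplicationClient(conn)
	res, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Message) // "hello"
}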

View File

@@ -1,31 +0,0 @@
/*
Package server is used to start a new ABCI server.
It contains two server implementations:
* gRPC server
* socket server
*/
package server
import (
"fmt"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func NewServer(protoAddr, transport string, app types.Application) (cmn.Service, error) {
var s cmn.Service
var err error
switch transport {
case "socket":
s = NewSocketServer(protoAddr, app)
case "grpc":
s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
default:
err = fmt.Errorf("Unknown server type %s", transport)
}
return s, err
}
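
Typical usage of this factory mirrors the client/server test later in this diff: pick a transport ("socket" or "grpc"), construct the service, and start it. A short sketch, assuming the kvstore example application:

package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
)

func main() {
	app := kvstore.NewKVStoreApplication()

	// The address may carry a protocol prefix such as tcp:// or unix://.
	srv, err := abciserver.NewServer("tcp://0.0.0.0:26658", "socket", app)
	if err != nil {
		log.Fatal(err)
	}
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}

	// Block until interrupted, then shut the service down.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt)
	<-quit
	srv.Stop()
}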

View File

@@ -1,226 +0,0 @@
package server
import (
"bufio"
"fmt"
"io"
"net"
"sync"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
// var maxNumberConnections = 2
type SocketServer struct {
cmn.BaseService
proto string
addr string
listener net.Listener
connsMtx sync.Mutex
conns map[int]net.Conn
nextConnID int
appMtx sync.Mutex
app types.Application
}
func NewSocketServer(protoAddr string, app types.Application) cmn.Service {
proto, addr := cmn.ProtocolAndAddress(protoAddr)
s := &SocketServer{
proto: proto,
addr: addr,
listener: nil,
app: app,
conns: make(map[int]net.Conn),
}
s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s)
return s
}
func (s *SocketServer) OnStart() error {
if err := s.BaseService.OnStart(); err != nil {
return err
}
ln, err := net.Listen(s.proto, s.addr)
if err != nil {
return err
}
s.listener = ln
go s.acceptConnectionsRoutine()
return nil
}
func (s *SocketServer) OnStop() {
s.BaseService.OnStop()
if err := s.listener.Close(); err != nil {
s.Logger.Error("Error closing listener", "err", err)
}
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
for id, conn := range s.conns {
delete(s.conns, id)
if err := conn.Close(); err != nil {
s.Logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
}
}
}
func (s *SocketServer) addConn(conn net.Conn) int {
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
connID := s.nextConnID
s.nextConnID++
s.conns[connID] = conn
return connID
}
// deletes conn even if close errs
func (s *SocketServer) rmConn(connID int) error {
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
conn, ok := s.conns[connID]
if !ok {
return fmt.Errorf("Connection %d does not exist", connID)
}
delete(s.conns, connID)
return conn.Close()
}
func (s *SocketServer) acceptConnectionsRoutine() {
for {
// Accept a connection
s.Logger.Info("Waiting for new connection...")
conn, err := s.listener.Accept()
if err != nil {
if !s.IsRunning() {
return // Ignore error from listener closing.
}
s.Logger.Error("Failed to accept connection: " + err.Error())
continue
}
s.Logger.Info("Accepted a new connection")
connID := s.addConn(conn)
closeConn := make(chan error, 2) // Push to signal connection closed
responses := make(chan *types.Response, 1000) // A channel to buffer responses
// Read requests from conn and deal with them
go s.handleRequests(closeConn, conn, responses)
// Pull responses from 'responses' and write them to conn.
go s.handleResponses(closeConn, conn, responses)
// Wait until signal to close connection
go s.waitForClose(closeConn, connID)
}
}
func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
err := <-closeConn
if err == io.EOF {
s.Logger.Error("Connection was closed by client")
} else if err != nil {
s.Logger.Error("Connection error", "error", err)
} else {
// never happens
s.Logger.Error("Connection was closed.")
}
// Close the connection
if err := s.rmConn(connID); err != nil {
s.Logger.Error("Error in closing connection", "error", err)
}
}
// Read requests from conn and deal with them
func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, responses chan<- *types.Response) {
var count int
var bufReader = bufio.NewReader(conn)
for {
var req = &types.Request{}
err := types.ReadMessage(bufReader, req)
if err != nil {
if err == io.EOF {
closeConn <- err
} else {
closeConn <- fmt.Errorf("Error reading message: %v", err.Error())
}
return
}
s.appMtx.Lock()
count++
s.handleRequest(req, responses)
s.appMtx.Unlock()
}
}
func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
switch r := req.Value.(type) {
case *types.Request_Echo:
responses <- types.ToResponseEcho(r.Echo.Message)
case *types.Request_Flush:
responses <- types.ToResponseFlush()
case *types.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_SetOption:
res := s.app.SetOption(*r.SetOption)
responses <- types.ToResponseSetOption(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(r.DeliverTx.Tx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(r.CheckTx.Tx)
responses <- types.ToResponseCheckTx(res)
case *types.Request_Commit:
res := s.app.Commit()
responses <- types.ToResponseCommit(res)
case *types.Request_Query:
res := s.app.Query(*r.Query)
responses <- types.ToResponseQuery(res)
case *types.Request_InitChain:
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_BeginBlock:
res := s.app.BeginBlock(*r.BeginBlock)
responses <- types.ToResponseBeginBlock(res)
case *types.Request_EndBlock:
res := s.app.EndBlock(*r.EndBlock)
responses <- types.ToResponseEndBlock(res)
default:
responses <- types.ToResponseException("Unknown request")
}
}
// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(closeConn chan error, conn net.Conn, responses <-chan *types.Response) {
var count int
var bufWriter = bufio.NewWriter(conn)
for {
var res = <-responses
err := types.WriteMessage(res, bufWriter)
if err != nil {
closeConn <- fmt.Errorf("Error writing message: %v", err.Error())
return
}
if _, ok := res.Value.(*types.Response_Flush); ok {
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("Error flushing write buffer: %v", err.Error())
return
}
}
count++
}
}

View File

@@ -1 +0,0 @@
package benchmarks

View File

@@ -1,55 +0,0 @@
package main
import (
"bufio"
"fmt"
"log"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func main() {
conn, err := cmn.Connect("unix://test.sock")
if err != nil {
log.Fatal(err.Error())
}
// Read a bunch of responses
go func() {
counter := 0
for {
var res = &types.Response{}
err := types.ReadMessage(conn, res)
if err != nil {
log.Fatal(err.Error())
}
counter++
if counter%1000 == 0 {
fmt.Println("Read", counter)
}
}
}()
// Write a bunch of requests
counter := 0
for i := 0; ; i++ {
var bufWriter = bufio.NewWriter(conn)
var req = types.ToRequestEcho("foobar")
err := types.WriteMessage(req, bufWriter)
if err != nil {
log.Fatal(err.Error())
}
err = bufWriter.Flush()
if err != nil {
log.Fatal(err.Error())
}
counter++
if counter%1000 == 0 {
fmt.Println("Write", counter)
}
}
}

View File

@@ -1,69 +0,0 @@
package main
import (
"bufio"
"fmt"
"log"
"net"
"reflect"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func main() {
conn, err := cmn.Connect("unix://test.sock")
if err != nil {
log.Fatal(err.Error())
}
// Make a bunch of requests
counter := 0
for i := 0; ; i++ {
req := types.ToRequestEcho("foobar")
_, err := makeRequest(conn, req)
if err != nil {
log.Fatal(err.Error())
}
counter++
if counter%1000 == 0 {
fmt.Println(counter)
}
}
}
func makeRequest(conn net.Conn, req *types.Request) (*types.Response, error) {
var bufWriter = bufio.NewWriter(conn)
// Write desired request
err := types.WriteMessage(req, bufWriter)
if err != nil {
return nil, err
}
err = types.WriteMessage(types.ToRequestFlush(), bufWriter)
if err != nil {
return nil, err
}
err = bufWriter.Flush()
if err != nil {
return nil, err
}
// Read desired response
var res = &types.Response{}
err = types.ReadMessage(conn, res)
if err != nil {
return nil, err
}
var resFlush = &types.Response{}
err = types.ReadMessage(conn, resFlush)
if err != nil {
return nil, err
}
if _, ok := resFlush.Value.(*types.Response_Flush); !ok {
return nil, fmt.Errorf("Expected flush response but got something else: %v", reflect.TypeOf(resFlush))
}
return res, nil
}

View File

@@ -1,27 +0,0 @@
package tests
import (
"testing"
"github.com/stretchr/testify/assert"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
)
func TestClientServerNoAddrPrefix(t *testing.T) {
addr := "localhost:26658"
transport := "socket"
app := kvstore.NewKVStoreApplication()
server, err := abciserver.NewServer(addr, transport, app)
assert.NoError(t, err, "expected no error on NewServer")
err = server.Start()
assert.NoError(t, err, "expected no error on server.Start")
client, err := abciclient.NewClient(addr, transport, true)
assert.NoError(t, err, "expected no error on NewClient")
err = client.Start()
assert.NoError(t, err, "expected no error on client.Start")
}

View File

@@ -1,96 +0,0 @@
package testsuite
import (
"bytes"
"errors"
"fmt"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func InitChain(client abcicli.Client) error {
total := 10
vals := make([]types.ValidatorUpdate, total)
for i := 0; i < total; i++ {
pubkey := cmn.RandBytes(33)
power := cmn.RandInt()
vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
}
_, err := client.InitChainSync(types.RequestInitChain{
Validators: vals,
})
if err != nil {
fmt.Printf("Failed test: InitChain - %v\n", err)
return err
}
fmt.Println("Passed test: InitChain")
return nil
}
func SetOption(client abcicli.Client, key, value string) error {
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
fmt.Println("Failed test: SetOption")
fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
return err
}
fmt.Println("Passed test: SetOption")
return nil
}
func Commit(client abcicli.Client, hashExp []byte) error {
res, err := client.CommitSync()
data := res.Data
if err != nil {
fmt.Println("Failed test: Commit")
fmt.Printf("error while committing: %v\n", err)
return err
}
if !bytes.Equal(data, hashExp) {
fmt.Println("Failed test: Commit")
fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp)
return errors.New("CommitTx failed")
}
fmt.Println("Passed test: Commit")
return nil
}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(txBytes)
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("DeliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("DeliverTx error")
}
fmt.Println("Passed test: DeliverTx")
return nil
}
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTxSync(txBytes)
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: CheckTx")
fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("CheckTx")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: CheckTx")
fmt.Printf("CheckTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("CheckTx")
}
fmt.Println("Passed test: CheckTx")
return nil
}

View File

@@ -1,78 +0,0 @@
package main
import (
"bytes"
"fmt"
"os"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
func startClient(abciType string) abcicli.Client {
// Start client
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
if err != nil {
panic(err.Error())
}
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
client.SetLogger(logger.With("module", "abcicli"))
if err := client.Start(); err != nil {
panicf("connecting to abci_app: %v", err.Error())
}
return client
}
func setOption(client abcicli.Client, key, value string) {
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
panicf("setting %v=%v: \nerr: %v", key, value, err)
}
}
func commit(client abcicli.Client, hashExp []byte) {
res, err := client.CommitSync()
if err != nil {
panicf("client error: %v", err)
}
if !bytes.Equal(res.Data, hashExp) {
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
}
}
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.DeliverTxSync(txBytes)
if err != nil {
panicf("client error: %v", err)
}
if res.Code != codeExp {
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
}
if !bytes.Equal(res.Data, dataExp) {
panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
}
}
/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.CheckTxSync(txBytes)
if err != nil {
panicf("client error: %v", err)
}
if res.IsErr() {
panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log)
}
if res.Code != codeExp {
panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v",
res.Code, codeExp, res.Log)
}
if !bytes.Equal(res.Data, dataExp) {
panicf("CheckTx response data was unexpected. Got %X expected %X",
res.Data, dataExp)
}
}*/
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}

View File

@@ -1,84 +0,0 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
var abciType string
func init() {
abciType = os.Getenv("ABCI")
if abciType == "" {
abciType = "socket"
}
}
func main() {
testCounter()
}
const (
maxABCIConnectTries = 10
)
func ensureABCIIsUp(typ string, n int) error {
var err error
cmdString := "abci-cli echo hello"
if typ == "grpc" {
cmdString = "abci-cli --abci grpc echo hello"
}
for i := 0; i < n; i++ {
cmd := exec.Command("bash", "-c", cmdString) // nolint: gas
_, err = cmd.CombinedOutput()
if err == nil {
break
}
<-time.After(500 * time.Millisecond)
}
return err
}
func testCounter() {
abciApp := os.Getenv("ABCI_APP")
if abciApp == "" {
panic("No ABCI_APP specified")
}
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
cmd := exec.Command("bash", "-c", fmt.Sprintf("abci-cli %s", abciApp)) // nolint: gas
cmd.Stdout = os.Stdout
if err := cmd.Start(); err != nil {
log.Fatalf("starting %q err: %v", abciApp, err)
}
defer cmd.Wait()
defer cmd.Process.Kill()
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
log.Fatalf("echo failed: %v", err)
}
client := startClient(abciType)
defer client.Stop()
setOption(client, "serial", "on")
commit(client, nil)
deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
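
The expected commit values in this test are simply the counter's transaction count serialized as eight big-endian bytes: one valid tx before the second commit gives {0,...,0,1}, and five valid txs give {0,...,0,5}. A one-line sanity check of that encoding (a sketch, not part of the test):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, 5) // five successful DeliverTx calls in the test above
	fmt.Println(buf)                   // [0 0 0 0 0 0 0 5]
}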

View File

@@ -1,27 +0,0 @@
#! /bin/bash
set -e
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
# Get the directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# Change into that dir because we expect that.
cd "$DIR"
echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run ./*.go
echo "----------------------"
echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run ./*.go
echo "----------------------"
# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter

View File

@@ -1,10 +0,0 @@
echo hello
info
commit
deliver_tx "abc"
info
commit
query "abc"
deliver_tx "def=xyz"
commit
query "def"

View File

@@ -1,51 +0,0 @@
> echo hello
-> code: OK
-> data: hello
-> data.hex: 0x68656C6C6F
> info
-> code: OK
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D
> commit
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
-> code: OK
> info
-> code: OK
-> data: {"size":1}
-> data.hex: 0x7B2273697A65223A317D
> commit
-> code: OK
-> data.hex: 0x0200000000000000
> query "abc"
-> code: OK
-> log: exists
-> height: 0
-> key: abc
-> key.hex: 616263
-> value: abc
-> value.hex: 616263
> deliver_tx "def=xyz"
-> code: OK
> commit
-> code: OK
-> data.hex: 0x0400000000000000
> query "def"
-> code: OK
-> log: exists
-> height: 0
-> key: def
-> key.hex: 646566
-> value: xyz
-> value.hex: 78797A

View File

@@ -1,8 +0,0 @@
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
info

View File

@@ -1,29 +0,0 @@
> set_option serial on
-> code: OK
-> log: OK (SetOption doesn't return anything.)
> check_tx 0x00
-> code: OK
> check_tx 0xff
-> code: OK
> deliver_tx 0x00
-> code: OK
> check_tx 0x00
-> code: 2
-> log: Invalid nonce. Expected >= 1, got 0
> deliver_tx 0x01
-> code: OK
> deliver_tx 0x04
-> code: 2
-> log: Invalid nonce. Expected 2, got 4
> info
-> code: OK
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D

View File

@@ -1,42 +0,0 @@
#! /bin/bash
set -e
# Get the root directory.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"
# Change into that dir because we expect that.
cd "$DIR" || exit
function testExample() {
N=$1
INPUT=$2
APP="$3 $4"
echo "Example $N: $APP"
$APP &> /dev/null &
sleep 2
abci-cli --log_level=error --verbose batch < "$INPUT" > "${INPUT}.out.new"
killall "$3"
pre=$(shasum < "${INPUT}.out")
post=$(shasum < "${INPUT}.out.new")
if [[ "$pre" != "$post" ]]; then
echo "You broke the tutorial"
echo "Got:"
cat "${INPUT}.out.new"
echo "Expected:"
cat "${INPUT}.out"
exit 1
fi
rm "${INPUT}".out.new
}
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter
echo ""
echo "PASS"

View File

@@ -1 +0,0 @@
package tests

View File

@@ -1,138 +0,0 @@
package types // nolint: goimports
import (
context "golang.org/x/net/context"
)
// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
// Info/Query Connection
Info(RequestInfo) ResponseInfo // Return application info
SetOption(RequestSetOption) ResponseSetOption // Set application option
Query(RequestQuery) ResponseQuery // Query for state
// Mempool Connection
CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
}
//-------------------------------------------------------
// BaseApplication is a base form of Application
var _ Application = (*BaseApplication)(nil)
type BaseApplication struct {
}
func NewBaseApplication() *BaseApplication {
return &BaseApplication{}
}
func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
return ResponseSetOption{}
}
func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
func (BaseApplication) Commit() ResponseCommit {
return ResponseCommit{}
}
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
return ResponseQuery{Code: CodeTypeOK}
}
func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
return ResponseInitChain{}
}
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
return ResponseBeginBlock{}
}
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
return ResponseEndBlock{}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
type GRPCApplication struct {
app Application
}
func NewGRPCApplication(app Application) *GRPCApplication {
return &GRPCApplication{app}
}
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
return &ResponseEcho{Message: req.Message}, nil
}
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
return &ResponseFlush{}, nil
}
func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
res := app.app.Info(*req)
return &res, nil
}
func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
res := app.app.SetOption(*req)
return &res, nil
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(req.Tx)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(req.Tx)
return &res, nil
}
func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) {
res := app.app.Query(*req)
return &res, nil
}
func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) {
res := app.app.Commit()
return &res, nil
}
func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
res := app.app.InitChain(*req)
return &res, nil
}
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
res := app.app.BeginBlock(*req)
return &res, nil
}
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
res := app.app.EndBlock(*req)
return &res, nil
}
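
Embedding BaseApplication is the intended way to write an application that only cares about a few messages: override what you need and inherit no-op defaults for the rest. A minimal sketch; the CountingApp name and its field are made up for illustration:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

// CountingApp overrides only DeliverTx and Info; every other ABCI method
// falls through to the embedded BaseApplication defaults.
type CountingApp struct {
	types.BaseApplication
	txs int64
}

func (app *CountingApp) DeliverTx(tx []byte) types.ResponseDeliverTx {
	app.txs++
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}

func (app *CountingApp) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf("txs:%d", app.txs)}
}

// Compile-time check that the app still satisfies the Application interface.
var _ types.Application = (*CountingApp)(nil)

func main() {
	app := &CountingApp{}
	app.DeliverTx([]byte("tx1"))
	fmt.Println(app.Info(types.RequestInfo{}).Data) // txs:1
}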

View File

@@ -1,210 +0,0 @@
package types
import (
"bufio"
"encoding/binary"
"io"
"github.com/gogo/protobuf/proto"
)
const (
maxMsgSize = 104857600 // 100MB
)
// WriteMessage writes a varint length-delimited protobuf message.
func WriteMessage(msg proto.Message, w io.Writer) error {
bz, err := proto.Marshal(msg)
if err != nil {
return err
}
return encodeByteSlice(w, bz)
}
// ReadMessage reads a varint length-delimited protobuf message.
func ReadMessage(r io.Reader, msg proto.Message) error {
return readProtoMsg(r, msg, maxMsgSize)
}
func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error {
// binary.ReadVarint takes an io.ByteReader, eg. a bufio.Reader
reader, ok := r.(*bufio.Reader)
if !ok {
reader = bufio.NewReader(r)
}
length64, err := binary.ReadVarint(reader)
if err != nil {
return err
}
length := int(length64)
if length < 0 || length > maxSize {
return io.ErrShortBuffer
}
buf := make([]byte, length)
if _, err := io.ReadFull(reader, buf); err != nil {
return err
}
return proto.Unmarshal(buf, msg)
}
//-----------------------------------------------------------------------
// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keeping
// go-wire as a dependency
func encodeByteSlice(w io.Writer, bz []byte) (err error) {
err = encodeVarint(w, int64(len(bz)))
if err != nil {
return
}
_, err = w.Write(bz)
return
}
func encodeVarint(w io.Writer, i int64) (err error) {
var buf [10]byte
n := binary.PutVarint(buf[:], i)
_, err = w.Write(buf[0:n])
return
}
//----------------------------------------
func ToRequestEcho(message string) *Request {
return &Request{
Value: &Request_Echo{&RequestEcho{Message: message}},
}
}
func ToRequestFlush() *Request {
return &Request{
Value: &Request_Flush{&RequestFlush{}},
}
}
func ToRequestInfo(req RequestInfo) *Request {
return &Request{
Value: &Request_Info{&req},
}
}
func ToRequestSetOption(req RequestSetOption) *Request {
return &Request{
Value: &Request_SetOption{&req},
}
}
func ToRequestDeliverTx(tx []byte) *Request {
return &Request{
Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}},
}
}
func ToRequestCheckTx(tx []byte) *Request {
return &Request{
Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}},
}
}
func ToRequestCommit() *Request {
return &Request{
Value: &Request_Commit{&RequestCommit{}},
}
}
func ToRequestQuery(req RequestQuery) *Request {
return &Request{
Value: &Request_Query{&req},
}
}
func ToRequestInitChain(req RequestInitChain) *Request {
return &Request{
Value: &Request_InitChain{&req},
}
}
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
return &Request{
Value: &Request_BeginBlock{&req},
}
}
func ToRequestEndBlock(req RequestEndBlock) *Request {
return &Request{
Value: &Request_EndBlock{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
return &Response{
Value: &Response_Exception{&ResponseException{Error: errStr}},
}
}
func ToResponseEcho(message string) *Response {
return &Response{
Value: &Response_Echo{&ResponseEcho{Message: message}},
}
}
func ToResponseFlush() *Response {
return &Response{
Value: &Response_Flush{&ResponseFlush{}},
}
}
func ToResponseInfo(res ResponseInfo) *Response {
return &Response{
Value: &Response_Info{&res},
}
}
func ToResponseSetOption(res ResponseSetOption) *Response {
return &Response{
Value: &Response_SetOption{&res},
}
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
return &Response{
Value: &Response_DeliverTx{&res},
}
}
func ToResponseCheckTx(res ResponseCheckTx) *Response {
return &Response{
Value: &Response_CheckTx{&res},
}
}
func ToResponseCommit(res ResponseCommit) *Response {
return &Response{
Value: &Response_Commit{&res},
}
}
func ToResponseQuery(res ResponseQuery) *Response {
return &Response{
Value: &Response_Query{&res},
}
}
func ToResponseInitChain(res ResponseInitChain) *Response {
return &Response{
Value: &Response_InitChain{&res},
}
}
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
return &Response{
Value: &Response_BeginBlock{&res},
}
}
func ToResponseEndBlock(res ResponseEndBlock) *Response {
return &Response{
Value: &Response_EndBlock{&res},
}
}
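
WriteMessage and ReadMessage give the socket protocol its framing: a signed varint length (binary.PutVarint) followed by the marshalled protobuf. A quick round-trip sketch; GetEcho is the usual gogo-generated oneof getter and is assumed here:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	var buf bytes.Buffer
	if err := types.WriteMessage(types.ToRequestEcho("hi"), &buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("framed: % X\n", buf.Bytes()) // first byte(s): varint length, rest: RequestEcho proto

	var req types.Request
	if err := types.ReadMessage(&buf, &req); err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.GetEcho().Message) // "hi"
}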

View File

@@ -1,103 +0,0 @@
package types
import (
"bytes"
"encoding/json"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestMarshalJSON(t *testing.T) {
b, err := json.Marshal(&ResponseDeliverTx{})
assert.Nil(t, err)
// Do not include empty fields.
assert.False(t, strings.Contains(string(b), "code"))
r1 := ResponseCheckTx{
Code: 1,
Data: []byte("hello"),
GasWanted: 43,
Tags: []cmn.KVPair{
{Key: []byte("pho"), Value: []byte("bo")},
},
}
b, err = json.Marshal(&r1)
assert.Nil(t, err)
var r2 ResponseCheckTx
err = json.Unmarshal(b, &r2)
assert.Nil(t, err)
assert.Equal(t, r1, r2)
}
func TestWriteReadMessageSimple(t *testing.T) {
cases := []proto.Message{
&RequestEcho{
Message: "Hello",
},
}
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.Nil(t, err)
msg := new(RequestEcho)
err = ReadMessage(buf, msg)
assert.Nil(t, err)
assert.Equal(t, c, msg)
}
}
func TestWriteReadMessage(t *testing.T) {
cases := []proto.Message{
&Header{
NumTxs: 4,
},
// TODO: add the rest
}
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.Nil(t, err)
msg := new(Header)
err = ReadMessage(buf, msg)
assert.Nil(t, err)
assert.Equal(t, c, msg)
}
}
func TestWriteReadMessage2(t *testing.T) {
phrase := "hello-world"
cases := []proto.Message{
&ResponseCheckTx{
Data: []byte(phrase),
Log: phrase,
GasWanted: 10,
Tags: []cmn.KVPair{
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
},
},
// TODO: add the rest
}
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.Nil(t, err)
msg := new(ResponseCheckTx)
err = ReadMessage(buf, msg)
assert.Nil(t, err)
assert.Equal(t, c, msg)
}
}

View File

@@ -1,55 +0,0 @@
// +build ignore
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
)
// This script replaces most `[]byte` with `data.Bytes` in a `.pb.go` file.
// It was written before we realized we could use `gogo/protobuf` to achieve
// this more natively. So it's here for safekeeping in case we ever need to
// abandon `gogo/protobuf`.
func main() {
bytePattern := regexp.MustCompile("[[][]]byte")
const oldPath = "types/types.pb.go"
const tmpPath = "types/types.pb.new"
content, err := ioutil.ReadFile(oldPath)
if err != nil {
panic("cannot read " + oldPath)
}
lines := bytes.Split(content, []byte("\n"))
outFile, _ := os.Create(tmpPath)
wroteImport := false
for _, line_bytes := range lines {
line := string(line_bytes)
gotPackageLine := strings.HasPrefix(line, "package ")
writeImportTime := strings.HasPrefix(line, "import ")
containsDescriptor := strings.Contains(line, "Descriptor")
containsByteArray := strings.Contains(line, "[]byte")
if containsByteArray && !containsDescriptor {
line = string(bytePattern.ReplaceAll([]byte(line), []byte("data.Bytes")))
}
if writeImportTime && !wroteImport {
wroteImport = true
fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n")
}
if gotPackageLine {
fmt.Fprintf(outFile, "%s\n", "//nolint: gas")
}
fmt.Fprintf(outFile, "%s\n", line)
}
outFile.Close()
os.Remove(oldPath)
os.Rename(tmpPath, oldPath)
exec.Command("goimports", "-w", oldPath)
}

View File

@@ -1,16 +0,0 @@
package types
const (
PubKeyEd25519 = "ed25519"
)
func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate {
return ValidatorUpdate{
// Address:
PubKey: PubKey{
Type: PubKeyEd25519,
Data: pubkey,
},
Power: power,
}
}

View File

@@ -1,121 +0,0 @@
package types
import (
"bytes"
"encoding/json"
"github.com/gogo/protobuf/jsonpb"
)
const (
CodeTypeOK uint32 = 0
)
// IsOK returns true if Code is OK.
func (r ResponseCheckTx) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ResponseCheckTx) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ResponseDeliverTx) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ResponseDeliverTx) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ResponseQuery) IsErr() bool {
return r.Code != CodeTypeOK
}
//---------------------------------------------------------------------------
// override JSON marshalling so we don't emit defaults (i.e. disable omitempty)
// note we also need Unmarshal functions because proto3 JSON marshals int64
// as a string: https://developers.google.com/protocol-buffers/docs/proto3#json
var (
jsonpbMarshaller = jsonpb.Marshaler{
EnumsAsInts: true,
EmitDefaults: false,
}
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)
func (r *ResponseSetOption) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseSetOption) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseDeliverTx) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseDeliverTx) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseQuery) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseQuery) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseCommit) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseCommit) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
// Some compile-time assertions to ensure we don't
// have accidental runtime surprises later on.
// jsonRoundTripper ensures that the asserted
// types implement both MarshalJSON and UnmarshalJSON
type jsonRoundTripper interface {
json.Marshaler
json.Unmarshaler
}
var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
var _ jsonRoundTripper = (*ResponseSetOption)(nil)
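
Two practical consequences of the code above: the IsOK/IsErr helpers compare against CodeTypeOK, and the custom jsonpb marshalers drop zero-valued fields, so an empty response serializes as {} instead of emitting "code":0. A small sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	res := types.ResponseCheckTx{Code: types.CodeTypeOK}
	fmt.Println(res.IsOK()) // true

	// Marshal through a pointer so the custom MarshalJSON above is used.
	bz, err := json.Marshal(&types.ResponseDeliverTx{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // {}  (empty fields are not emitted)
}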

File diff suppressed because it is too large Load Diff

View File

@@ -1,328 +0,0 @@
syntax = "proto3";
package types;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/tendermint/tendermint/libs/common/types.proto";
import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto";
// This file is copied from http://github.com/tendermint/abci
// NOTE: When using custom types, mind the warnings.
// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.goproto_registration) = true;
// Generate tests
option (gogoproto.populate_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.testgen_all) = true;
//----------------------------------------
// Request types
message Request {
oneof value {
RequestEcho echo = 2;
RequestFlush flush = 3;
RequestInfo info = 4;
RequestSetOption set_option = 5;
RequestInitChain init_chain = 6;
RequestQuery query = 7;
RequestBeginBlock begin_block = 8;
RequestCheckTx check_tx = 9;
RequestDeliverTx deliver_tx = 19;
RequestEndBlock end_block = 11;
RequestCommit commit = 12;
}
}
message RequestEcho {
string message = 1;
}
message RequestFlush {
}
message RequestInfo {
string version = 1;
uint64 block_version = 2;
uint64 p2p_version = 3;
}
// nondeterministic
message RequestSetOption {
string key = 1;
string value = 2;
}
message RequestInitChain {
google.protobuf.Timestamp time = 1 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
string chain_id = 2;
ConsensusParams consensus_params = 3;
repeated ValidatorUpdate validators = 4 [(gogoproto.nullable)=false];
bytes app_state_bytes = 5;
}
message RequestQuery {
bytes data = 1;
string path = 2;
int64 height = 3;
bool prove = 4;
}
// NOTE: validators here have empty pubkeys.
message RequestBeginBlock {
bytes hash = 1;
Header header = 2 [(gogoproto.nullable)=false];
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable)=false];
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false];
}
message RequestCheckTx {
bytes tx = 1;
}
message RequestDeliverTx {
bytes tx = 1;
}
message RequestEndBlock {
int64 height = 1;
}
message RequestCommit {
}
//----------------------------------------
// Response types
message Response {
oneof value {
ResponseException exception = 1;
ResponseEcho echo = 2;
ResponseFlush flush = 3;
ResponseInfo info = 4;
ResponseSetOption set_option = 5;
ResponseInitChain init_chain = 6;
ResponseQuery query = 7;
ResponseBeginBlock begin_block = 8;
ResponseCheckTx check_tx = 9;
ResponseDeliverTx deliver_tx = 10;
ResponseEndBlock end_block = 11;
ResponseCommit commit = 12;
}
}
// nondeterministic
message ResponseException {
string error = 1;
}
message ResponseEcho {
string message = 1;
}
message ResponseFlush {
}
message ResponseInfo {
string data = 1;
string version = 2;
uint64 app_version = 3;
int64 last_block_height = 4;
bytes last_block_app_hash = 5;
}
// nondeterministic
message ResponseSetOption {
uint32 code = 1;
// bytes data = 2;
string log = 3;
string info = 4;
}
message ResponseInitChain {
ConsensusParams consensus_params = 1;
repeated ValidatorUpdate validators = 2 [(gogoproto.nullable)=false];
}
message ResponseQuery {
uint32 code = 1;
// bytes data = 2; // use "value" instead.
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 index = 5;
bytes key = 6;
bytes value = 7;
merkle.Proof proof = 8;
int64 height = 9;
string codespace = 10;
}
message ResponseBeginBlock {
repeated common.KVPair tags = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}
message ResponseCheckTx {
uint32 code = 1;
bytes data = 2;
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
string codespace = 8;
}
message ResponseDeliverTx {
uint32 code = 1;
bytes data = 2;
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
string codespace = 8;
}
message ResponseEndBlock {
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
ConsensusParams consensus_param_updates = 2;
repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}
message ResponseCommit {
// reserve 1
bytes data = 2;
}
//----------------------------------------
// Misc.
// ConsensusParams contains all consensus-relevant parameters
// that can be adjusted by the abci app
message ConsensusParams {
BlockSize block_size = 1;
EvidenceParams evidence_params = 2;
}
// BlockSize contains limits on the block size.
message BlockSize {
// Note: must be greater than 0
int64 max_bytes = 1;
// Note: must be greater or equal to -1
int64 max_gas = 2;
}
// EvidenceParams contains limits on the evidence.
message EvidenceParams {
// Note: must be greater than 0
int64 max_age = 1;
}
message LastCommitInfo {
int32 round = 1;
repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
}
//----------------------------------------
// Blockchain Types
message Header {
// basic block info
Version version = 1 [(gogoproto.nullable)=false];
string chain_id = 2 [(gogoproto.customname)="ChainID"];
int64 height = 3;
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 num_txs = 5;
int64 total_txs = 6;
// prev block info
BlockID last_block_id = 7 [(gogoproto.nullable)=false];
// hashes of block data
bytes last_commit_hash = 8; // commit from validators from the last block
bytes data_hash = 9; // transactions
// hashes from the app output from the prev block
bytes validators_hash = 10; // validators for the current block
bytes next_validators_hash = 11; // validators for the next block
bytes consensus_hash = 12; // consensus params for current block
bytes app_hash = 13; // state after txs from the previous block
bytes last_results_hash = 14;// root hash of all results from the txs from the previous block
// consensus info
bytes evidence_hash = 15; // evidence included in the block
bytes proposer_address = 16; // original proposer of the block
}
message Version {
uint64 Block = 1;
uint64 App = 2;
}
message BlockID {
bytes hash = 1;
PartSetHeader parts_header = 2 [(gogoproto.nullable)=false];
}
message PartSetHeader {
int32 total = 1;
bytes hash = 2;
}
// Validator
message Validator {
bytes address = 1;
//PubKey pub_key = 2 [(gogoproto.nullable)=false];
int64 power = 3;
}
// ValidatorUpdate
message ValidatorUpdate {
PubKey pub_key = 1 [(gogoproto.nullable)=false];
int64 power = 2;
}
// VoteInfo
message VoteInfo {
Validator validator = 1 [(gogoproto.nullable)=false];
bool signed_last_block = 2;
}
message PubKey {
string type = 1;
bytes data = 2;
}
message Evidence {
string type = 1;
Validator validator = 2 [(gogoproto.nullable)=false];
int64 height = 3;
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 total_voting_power = 5;
}
//----------------------------------------
// Service Definition
service ABCIApplication {
rpc Echo(RequestEcho) returns (ResponseEcho);
rpc Flush(RequestFlush) returns (ResponseFlush);
rpc Info(RequestInfo) returns (ResponseInfo);
rpc SetOption(RequestSetOption) returns (ResponseSetOption);
rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx);
rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx);
rpc Query(RequestQuery) returns (ResponseQuery);
rpc Commit(RequestCommit) returns (ResponseCommit);
rpc InitChain(RequestInitChain) returns (ResponseInitChain);
rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock);
rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock);
}
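For orientation, the sketch below shows how an application might populate one of the response messages defined above once the proto is compiled to Go. The import paths and the KVPair tag type are assumptions inferred from the repository layout visible elsewhere in this diff; field names follow the usual gogoproto camel-casing.

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/abci/types"      // assumed import path of the generated types
    cmn "github.com/tendermint/tendermint/libs/common" // assumed home of KVPair
)

func main() {
    // Code 0 conventionally means the tx is accepted; tags are indexable metadata.
    res := types.ResponseCheckTx{
        Code:      0,
        GasWanted: 1,
        Log:       "ok", // nondeterministic, per the proto comment
        Tags: []cmn.KVPair{
            {Key: []byte("tx.kind"), Value: []byte("transfer")}, // hypothetical tag
        },
    }
    fmt.Println(res.Code, res.GasWanted, len(res.Tags))
}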

File diff suppressed because it is too large

View File

@@ -1,34 +0,0 @@
package types
import (
"bytes"
"sort"
)
//------------------------------------------------------------------------------
// ValidatorUpdates is a list of validators that implements the Sort interface
type ValidatorUpdates []ValidatorUpdate
var _ sort.Interface = (ValidatorUpdates)(nil)
// Len, Less and Swap implement sort.Interface for ValidatorUpdates,
// so the sort package can order a list of validator updates.
// See issue https://github.com/tendermint/abci/issues/212
func (v ValidatorUpdates) Len() int {
return len(v)
}
// XXX: doesn't distinguish same validator with different power
func (v ValidatorUpdates) Less(i, j int) bool {
return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0
}
func (v ValidatorUpdates) Swap(i, j int) {
v1 := v[i]
v[i] = v[j]
v[j] = v1
}
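A minimal usage sketch of the sort.Interface implementation above; the generated ValidatorUpdate and PubKey types and their import path are assumptions based on the proto definitions earlier in this diff.

package main

import (
    "fmt"
    "sort"

    "github.com/tendermint/tendermint/abci/types" // assumed import path of the generated types
)

func main() {
    updates := types.ValidatorUpdates{
        {PubKey: types.PubKey{Type: "ed25519", Data: []byte{0xBB}}, Power: 10},
        {PubKey: types.PubKey{Type: "ed25519", Data: []byte{0xAA}}, Power: 5},
    }
    sort.Sort(updates)            // orders by the raw pubkey bytes, as in Less above
    fmt.Println(updates[0].Power) // 5: the 0xAA key sorts first
    // Per the XXX above, two entries with the same pubkey but different
    // power compare as equal, so their relative order is unspecified.
}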

View File

@@ -1,9 +0,0 @@
package version
import (
"github.com/tendermint/tendermint/version"
)
// TODO: eliminate this after some version refactor
const Version = version.ABCIVersion

View File

@@ -1,13 +0,0 @@
version: 1.0.{build}
configuration: Release
platform:
- x64
- x86
clone_folder: c:\go\path\src\github.com\tendermint\tendermint
before_build:
- cmd: set GOPATH=%GOROOT%\path
- cmd: set PATH=%GOPATH%\bin;%PATH%
- cmd: make get_vendor_deps
build_script:
- cmd: make test
test: off

View File

@@ -1,2 +0,0 @@
data

View File

@@ -1,80 +0,0 @@
#!/bin/bash
DATA=$GOPATH/src/github.com/tendermint/tendermint/benchmarks/blockchain/data
if [ ! -d $DATA ]; then
echo "no data found, generating a chain... (this only has to happen once)"
tendermint init --home $DATA
cp $DATA/config.toml $DATA/config2.toml
echo "
[consensus]
timeout_commit = 0
" >> $DATA/config.toml
echo "starting node"
tendermint node \
--home $DATA \
--proxy_app kvstore \
--p2p.laddr tcp://127.0.0.1:56656 \
--rpc.laddr tcp://127.0.0.1:56657 \
--log_level error &
echo "making blocks for 60s"
sleep 60
mv $DATA/config2.toml $DATA/config.toml
kill %1
echo "done generating chain."
fi
# validator node
HOME1=$TMPDIR$RANDOM$RANDOM
cp -R $DATA $HOME1
echo "starting validator node"
tendermint node \
--home $HOME1 \
--proxy_app kvstore \
--p2p.laddr tcp://127.0.0.1:56656 \
--rpc.laddr tcp://127.0.0.1:56657 \
--log_level error &
sleep 1
# downloader node
HOME2=$TMPDIR$RANDOM$RANDOM
tendermint init --home $HOME2
cp $HOME1/genesis.json $HOME2
printf "starting downloader node"
tendermint node \
--home $HOME2 \
--proxy_app kvstore \
--p2p.laddr tcp://127.0.0.1:56666 \
--rpc.laddr tcp://127.0.0.1:56667 \
--p2p.persistent_peers 127.0.0.1:56656 \
--log_level error &
# wait for node to start up so we only count time where we are actually syncing
sleep 0.5
while curl localhost:56667/status 2> /dev/null | grep "\"latest_block_height\": 0," > /dev/null
do
printf '.'
sleep 0.2
done
echo
echo "syncing blockchain for 10s"
for i in {1..10}
do
sleep 1
HEIGHT="$(curl localhost:56667/status 2> /dev/null \
| grep 'latest_block_height' \
| grep -o ' [0-9]*' \
| xargs)"
let 'RATE = HEIGHT / i'
echo "height: $HEIGHT, blocks/sec: $RATE"
done
kill %1
kill %2
rm -rf $HOME1 $HOME2
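The measurement loop at the end of the script polls the downloader's /status endpoint once per second and divides the reported height by the elapsed seconds. A rough Go equivalent is sketched below; the RPC address comes from the script, and the response body is scanned for latest_block_height with a regular expression because the exact JSON nesting varies between Tendermint versions.

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "regexp"
    "strconv"
    "time"
)

func main() {
    re := regexp.MustCompile(`"latest_block_height":\s*"?(\d+)"?`)
    for i := 1; i <= 10; i++ {
        time.Sleep(time.Second)
        resp, err := http.Get("http://127.0.0.1:56667/status")
        if err != nil {
            fmt.Println("status not reachable:", err)
            continue
        }
        body, _ := ioutil.ReadAll(resp.Body)
        resp.Body.Close()
        m := re.FindSubmatch(body)
        if m == nil {
            continue
        }
        height, _ := strconv.Atoi(string(m[1]))
        fmt.Printf("height: %d, blocks/sec: %d\n", height, height/i)
    }
}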

View File

@@ -2,55 +2,37 @@ package benchmarks
import (
"testing"
"time"
"github.com/tendermint/go-amino"
proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/go-wire"
proto "github.com/tendermint/tendermint/benchmarks/proto"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo {
return p2p.DefaultNodeInfo{
ProtocolVersion: p2p.ProtocolVersion{1, 2, 3},
ID_: id,
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: p2p.DefaultNodeInfoOther{
TxIndex: "on",
RPCAddress: "0.0.0.0:26657",
},
}
}
func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
pubKey := crypto.GenPrivKeyEd25519().PubKey()
status := &ctypes.ResultStatus{
NodeInfo: testNodeInfo(nodeKey.ID()),
SyncInfo: ctypes.SyncInfo{
LatestBlockHash: []byte("SOMEBYTES"),
LatestBlockHeight: 123,
LatestBlockTime: time.Unix(0, 1234),
},
ValidatorInfo: ctypes.ValidatorInfo{
PubKey: nodeKey.PubKey(),
NodeInfo: &p2p.NodeInfo{
PubKey: pubKey.Unwrap().(crypto.PubKeyEd25519),
Moniker: "SOMENAME",
Network: "SOMENAME",
RemoteAddr: "SOMEADDR",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
},
PubKey: pubKey,
LatestBlockHash: []byte("SOMEBYTES"),
LatestBlockHeight: 123,
LatestBlockTime: 1234,
}
b.StartTimer()
counter := 0
for i := 0; i < b.N; i++ {
jsonBytes, err := cdc.MarshalJSON(status)
if err != nil {
panic(err)
}
jsonBytes := wire.JSONBytes(status)
counter += len(jsonBytes)
}
@@ -58,33 +40,42 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
func BenchmarkEncodeNodeInfoWire(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := testNodeInfo(nodeKey.ID())
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
nodeInfo := &p2p.NodeInfo{
PubKey: pubKey,
Moniker: "SOMENAME",
Network: "SOMENAME",
RemoteAddr: "SOMEADDR",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
}
b.StartTimer()
counter := 0
for i := 0; i < b.N; i++ {
jsonBytes, err := cdc.MarshalJSON(nodeInfo)
if err != nil {
panic(err)
}
jsonBytes := wire.JSONBytes(nodeInfo)
counter += len(jsonBytes)
}
}
func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := testNodeInfo(nodeKey.ID())
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
nodeInfo := &p2p.NodeInfo{
PubKey: pubKey,
Moniker: "SOMENAME",
Network: "SOMENAME",
RemoteAddr: "SOMEADDR",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
}
b.StartTimer()
counter := 0
for i := 0; i < b.N; i++ {
jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo)
jsonBytes := wire.BinaryBytes(nodeInfo)
counter += len(jsonBytes)
}
@@ -92,20 +83,16 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
func BenchmarkEncodeNodeInfoProto(b *testing.B) {
b.StopTimer()
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeID := string(nodeKey.ID())
someName := "SOMENAME"
someAddr := "SOMEADDR"
someVer := "SOMEVER"
someString := "SOMESTRING"
otherString := "OTHERSTRING"
nodeInfo := proto.NodeInfo{
Id: &proto.ID{Id: &nodeID},
Moniker: &someName,
Network: &someName,
ListenAddr: &someAddr,
Version: &someVer,
Other: []string{someString, otherString},
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
pubKey2 := &proto.PubKey{Ed25519: &proto.PubKeyEd25519{Bytes: pubKey[:]}}
nodeInfo := &proto.NodeInfo{
PubKey: pubKey2,
Moniker: "SOMENAME",
Network: "SOMENAME",
RemoteAddr: "SOMEADDR",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
}
b.StartTimer()

View File

@@ -1,9 +1,8 @@
package benchmarks
import (
. "github.com/tendermint/tmlibs/common"
"testing"
cmn "github.com/tendermint/tendermint/libs/common"
)
func BenchmarkSomething(b *testing.B) {
@@ -12,11 +11,11 @@ func BenchmarkSomething(b *testing.B) {
numChecks := 100000
keys := make([]string, numItems)
for i := 0; i < numItems; i++ {
keys[i] = cmn.RandStr(100)
keys[i] = RandStr(100)
}
txs := make([]string, numChecks)
for i := 0; i < numChecks; i++ {
txs[i] = cmn.RandStr(100)
txs[i] = RandStr(100)
}
b.StartTimer()

View File

@@ -4,7 +4,7 @@ import (
"os"
"testing"
cmn "github.com/tendermint/tendermint/libs/common"
. "github.com/tendermint/tmlibs/common"
)
func BenchmarkFileWrite(b *testing.B) {
@@ -14,20 +14,16 @@ func BenchmarkFileWrite(b *testing.B) {
if err != nil {
b.Error(err)
}
testString := cmn.RandStr(200) + "\n"
testString := RandStr(200) + "\n"
b.StartTimer()
for i := 0; i < b.N; i++ {
_, err := file.Write([]byte(testString))
if err != nil {
b.Error(err)
}
file.Write([]byte(testString))
}
if err := file.Close(); err != nil {
b.Error(err)
}
if err := os.Remove("benchmark_file_write.out"); err != nil {
file.Close()
err = os.Remove("benchmark_file_write.out")
if err != nil {
b.Error(err)
}
}

File diff suppressed because it is too large

View File

@@ -7,7 +7,7 @@ message ResultStatus {
}
message NodeInfo {
required ID id = 1;
required PubKey pubKey = 1;
required string moniker = 2;
required string network = 3;
required string remoteAddr = 4;
@@ -16,10 +16,6 @@ message NodeInfo {
repeated string other = 7;
}
message ID {
required string id = 1;
}
message PubKey {
optional PubKeyEd25519 ed25519 = 1;
}

View File

@@ -3,16 +3,17 @@ package main
import (
"context"
"encoding/binary"
"fmt"
"time"
//"encoding/hex"
"fmt"
cmn "github.com/tendermint/tendermint/libs/common"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
cmn "github.com/tendermint/tmlibs/common"
)
func main() {
wsc := rpcclient.NewWSClient("127.0.0.1:26657", "/websocket")
err := wsc.Start()
wsc := rpcclient.NewWSClient("127.0.0.1:46657", "/websocket")
_, err := wsc.Start()
if err != nil {
cmn.Exit(err.Error())
}
@@ -21,7 +22,7 @@ func main() {
// Read a bunch of responses
go func() {
for {
_, ok := <-wsc.ResponsesCh
_, ok := <-wsc.ResultsCh
if !ok {
break
}

View File

@@ -1,52 +1,25 @@
package blockchain
import (
"errors"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
cmn "github.com/tendermint/tendermint/libs/common"
flow "github.com/tendermint/tendermint/libs/flowrate"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate"
"github.com/tendermint/tmlibs/log"
)
/*
eg, L = latency = 0.1s
P = num peers = 10
FN = num full nodes
BS = 1kB block size
CB = 1 Mbit/s = 128 kB/s
CB/P = 12.8 kB/s
B/S = CB/P/BS = 12.8 blocks/s
12.8 * 0.1 = 1.28 blocks on conn
*/
const (
requestIntervalMS = 2
maxTotalRequesters = 600
requestIntervalMS = 250
maxTotalRequesters = 300
maxPendingRequests = maxTotalRequesters
maxPendingRequestsPerPeer = 20
// Minimum recv rate to ensure we're receiving blocks from a peer fast
// enough. If a peer is not sending us data at least at that rate, we
// consider them to have timed out and we disconnect.
//
// Assuming a DSL connection (not a good choice), 128 Kbps (upload) ~ 15 KB/s,
// sending data across the Atlantic ~ 7.5 KB/s.
minRecvRate = 7680
// Maximum difference between current and new block's height.
maxDiffBetweenCurrentAndReceivedBlockHeight = 100
maxPendingRequestsPerPeer = 75
minRecvRate = 10240 // 10Kb/s
)
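As a quick sanity check of the back-of-the-envelope estimate in the comment above the const block, the arithmetic works out as in the short program below; the numbers are the example values from that comment, not measurements.

package main

import "fmt"

func main() {
    latency := 0.1     // s    (L in the comment above)
    peers := 10.0      //      (P)
    blockSize := 1.0   // kB   (BS)
    bandwidth := 128.0 // kB/s (CB, i.e. 1 Mbit/s)

    perPeer := bandwidth / peers        // 12.8 kB/s of bandwidth per peer
    blocksPerSec := perPeer / blockSize // 12.8 blocks/s
    inFlight := blocksPerSec * latency  // ~1.28 blocks outstanding on a connection
    fmt.Printf("%.1f kB/s per peer, %.1f blocks/s, %.2f blocks in flight\n",
        perPeer, blocksPerSec, inFlight)
}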
var peerTimeout = 15 * time.Second // not const so we can override with tests
var peerTimeoutSeconds = time.Duration(15) // not const so we can override with tests
/*
Peers self report their heights when we join the block pool.
@@ -60,36 +33,33 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
*/
type BlockPool struct {
cmn.BaseService
BaseService
startTime time.Time
mtx sync.Mutex
// block requests
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
peers map[p2p.ID]*bpPeer
maxPeerHeight int64
// atomic
requesters map[int]*bpRequester
height int // the lowest key in requesters.
numPending int32 // number of requests pending assignment or block response
// peers
peers map[string]*bpPeer
requestsCh chan<- BlockRequest
errorsCh chan<- peerError
timeoutsCh chan<- string
}
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool {
bp := &BlockPool{
peers: make(map[p2p.ID]*bpPeer),
peers: make(map[string]*bpPeer),
requesters: make(map[int64]*bpRequester),
requesters: make(map[int]*bpRequester),
height: start,
numPending: 0,
requestsCh: requestsCh,
errorsCh: errorsCh,
timeoutsCh: timeoutsCh,
}
bp.BaseService = *cmn.NewBaseService(nil, "BlockPool", bp)
bp.BaseService = *NewBaseService(nil, "BlockPool", bp)
return bp
}
@@ -99,7 +69,9 @@ func (pool *BlockPool) OnStart() error {
return nil
}
func (pool *BlockPool) OnStop() {}
func (pool *BlockPool) OnStop() {
pool.BaseService.OnStop()
}
// Run spawns requesters as needed.
func (pool *BlockPool) makeRequestersRoutine() {
@@ -107,7 +79,6 @@ func (pool *BlockPool) makeRequestersRoutine() {
if !pool.IsRunning() {
break
}
_, numPending, lenRequesters := pool.GetStatus()
if numPending >= maxPendingRequests {
// sleep for a bit.
@@ -133,14 +104,10 @@ func (pool *BlockPool) removeTimedoutPeers() {
for _, peer := range pool.peers {
if !peer.didTimeout && peer.numPending > 0 {
curRate := peer.recvMonitor.Status().CurRate
// curRate can be 0 on start
// XXX remove curRate != 0
if curRate != 0 && curRate < minRecvRate {
err := errors.New("peer is not sending us data fast enough")
pool.sendError(err, peer.id)
pool.Logger.Error("SendTimeout", "peer", peer.id,
"reason", err,
"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
"minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024))
pool.sendTimeout(peer.id)
pool.Logger.Error("SendTimeout", "peer", peer.id, "reason", "curRate too low")
peer.didTimeout = true
}
}
@@ -150,11 +117,11 @@ func (pool *BlockPool) removeTimedoutPeers() {
}
}
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
func (pool *BlockPool) GetStatus() (height int, numPending int32, lenRequesters int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
return pool.height, pool.numPending, len(pool.requesters)
}
// TODO: relax conditions, prevent abuse.
@@ -168,10 +135,16 @@ func (pool *BlockPool) IsCaughtUp() bool {
return false
}
maxPeerHeight := 0
for _, peer := range pool.peers {
maxPeerHeight = MaxInt(maxPeerHeight, peer.height)
}
// some conditions to determine if we're caught up
receivedBlockOrTimedOut := (pool.height > 0 || time.Since(pool.startTime) > 5*time.Second)
ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
ourChainIsLongestAmongPeers := maxPeerHeight == 0 || pool.height >= maxPeerHeight
isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
pool.Logger.Info(Fmt("IsCaughtUp: %v", isCaughtUp), "height", pool.height, "maxPeerHeight", maxPeerHeight)
return isCaughtUp
}
@@ -207,64 +180,46 @@ func (pool *BlockPool) PopRequest() {
delete(pool.requesters, pool.height)
pool.height++
} else {
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
PanicSanity(Fmt("Expected requester to pop, got nothing at height %v", pool.height))
}
}
// Invalidates the block at pool.height,
// Remove the peer and redo request from others.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
func (pool *BlockPool) RedoRequest(height int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
request := pool.requesters[height]
peerID := request.getPeerID()
if peerID != p2p.ID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
pool.mtx.Unlock()
if request.block == nil {
PanicSanity("Expected block to be non-nil")
}
return peerID
// RemovePeer will redo all requesters associated with this peer.
// TODO: record this malfeasance
pool.RemovePeer(request.peerID)
}
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
requester := pool.requesters[block.Height]
if requester == nil {
pool.Logger.Info("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
diff := pool.height - block.Height
if diff < 0 {
diff *= -1
}
if diff > maxDiffBetweenCurrentAndReceivedBlockHeight {
pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID)
}
return
}
if requester.setBlock(block, peerID) {
atomic.AddInt32(&pool.numPending, -1)
pool.numPending--
peer := pool.peers[peerID]
if peer != nil {
peer.decrPending(blockSize)
}
peer.decrPending(blockSize)
} else {
// Bad peer?
}
}
// MaxPeerHeight returns the highest height reported by a peer.
func (pool *BlockPool) MaxPeerHeight() int64 {
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.maxPeerHeight
}
// Sets the peer's alleged blockchain height.
func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
func (pool *BlockPool) SetPeerHeight(peerID string, height int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -276,23 +231,22 @@ func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
peer.setLogger(pool.Logger.With("peer", peerID))
pool.peers[peerID] = peer
}
if height > pool.maxPeerHeight {
pool.maxPeerHeight = height
}
}
func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
func (pool *BlockPool) RemovePeer(peerID string) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
pool.removePeer(peerID)
}
func (pool *BlockPool) removePeer(peerID p2p.ID) {
func (pool *BlockPool) removePeer(peerID string) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo()
if requester.getBlock() != nil {
pool.numPending++
}
go requester.redo() // pick another peer and ...
}
}
delete(pool.peers, peerID)
@@ -300,7 +254,7 @@ func (pool *BlockPool) removePeer(peerID p2p.ID) {
// Pick an available peer with at least the given minHeight.
// If no peers are available, returns nil.
func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer {
func (pool *BlockPool) pickIncrAvailablePeer(minHeight int) *bpPeer {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -325,50 +279,42 @@ func (pool *BlockPool) makeNextRequester() {
pool.mtx.Lock()
defer pool.mtx.Unlock()
nextHeight := pool.height + pool.requestersLen()
nextHeight := pool.height + len(pool.requesters)
request := newBPRequester(pool, nextHeight)
// request.SetLogger(pool.Logger.With("height", nextHeight))
request.SetLogger(pool.Logger.With("height", nextHeight))
pool.requesters[nextHeight] = request
atomic.AddInt32(&pool.numPending, 1)
pool.numPending++
err := request.Start()
if err != nil {
request.Logger.Error("Error starting request", "err", err)
}
request.Start()
}
func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}
func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) {
func (pool *BlockPool) sendRequest(height int, peerID string) {
if !pool.IsRunning() {
return
}
pool.requestsCh <- BlockRequest{height, peerID}
}
func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
func (pool *BlockPool) sendTimeout(peerID string) {
if !pool.IsRunning() {
return
}
pool.errorsCh <- peerError{err, peerID}
pool.timeoutsCh <- peerID
}
// unused by tendermint; left for debugging purposes
func (pool *BlockPool) debug() string {
pool.mtx.Lock()
pool.mtx.Lock() // Lock
defer pool.mtx.Unlock()
str := ""
nextHeight := pool.height + pool.requestersLen()
for h := pool.height; h < nextHeight; h++ {
for h := pool.height; h < pool.height+len(pool.requesters); h++ {
if pool.requesters[h] == nil {
str += fmt.Sprintf("H(%v):X ", h)
str += Fmt("H(%v):X ", h)
} else {
str += fmt.Sprintf("H(%v):", h)
str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
str += Fmt("H(%v):", h)
str += Fmt("B?(%v) ", pool.requesters[h].block != nil)
}
}
return str
@@ -378,10 +324,10 @@ func (pool *BlockPool) debug() string {
type bpPeer struct {
pool *BlockPool
id p2p.ID
id string
recvMonitor *flow.Monitor
height int64
height int
numPending int32
timeout *time.Timer
didTimeout bool
@@ -389,7 +335,7 @@ type bpPeer struct {
logger log.Logger
}
func newBPPeer(pool *BlockPool, peerID p2p.ID, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID string, height int) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@@ -406,15 +352,15 @@ func (peer *bpPeer) setLogger(l log.Logger) {
func (peer *bpPeer) resetMonitor() {
peer.recvMonitor = flow.New(time.Second, time.Second*40)
initialValue := float64(minRecvRate) * math.E
var initialValue = float64(minRecvRate) * math.E
peer.recvMonitor.SetREMA(initialValue)
}
func (peer *bpPeer) resetTimeout() {
if peer.timeout == nil {
peer.timeout = time.AfterFunc(peerTimeout, peer.onTimeout)
peer.timeout = time.AfterFunc(time.Second*peerTimeoutSeconds, peer.onTimeout)
} else {
peer.timeout.Reset(peerTimeout)
peer.timeout.Reset(time.Second * peerTimeoutSeconds)
}
}
@@ -440,37 +386,36 @@ func (peer *bpPeer) onTimeout() {
peer.pool.mtx.Lock()
defer peer.pool.mtx.Unlock()
err := errors.New("peer did not send us anything")
peer.pool.sendError(err, peer.id)
peer.logger.Error("SendTimeout", "reason", err, "timeout", peerTimeout)
peer.pool.sendTimeout(peer.id)
peer.logger.Error("SendTimeout", "reason", "onTimeout")
peer.didTimeout = true
}
//-------------------------------------
type bpRequester struct {
cmn.BaseService
BaseService
pool *BlockPool
height int64
height int
gotBlockCh chan struct{}
redoCh chan struct{}
mtx sync.Mutex
peerID p2p.ID
peerID string
block *types.Block
}
func newBPRequester(pool *BlockPool, height int64) *bpRequester {
func newBPRequester(pool *BlockPool, height int) *bpRequester {
bpr := &bpRequester{
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
redoCh: make(chan struct{}, 1),
gotBlockCh: make(chan struct{}),
redoCh: make(chan struct{}),
peerID: "",
block: nil,
}
bpr.BaseService = *cmn.NewBaseService(nil, "bpRequester", bpr)
bpr.BaseService = *NewBaseService(nil, "bpRequester", bpr)
return bpr
}
@@ -479,8 +424,8 @@ func (bpr *bpRequester) OnStart() error {
return nil
}
// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
// Returns true if the peer matches
func (bpr *bpRequester) setBlock(block *types.Block, peerID string) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
bpr.mtx.Unlock()
@@ -489,10 +434,7 @@ func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
bpr.block = block
bpr.mtx.Unlock()
select {
case bpr.gotBlockCh <- struct{}{}:
default:
}
bpr.gotBlockCh <- struct{}{}
return true
}
@@ -502,33 +444,23 @@ func (bpr *bpRequester) getBlock() *types.Block {
return bpr.block
}
func (bpr *bpRequester) getPeerID() p2p.ID {
func (bpr *bpRequester) getPeerID() string {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.peerID
}
// This is called from the requestRoutine, upon redo().
func (bpr *bpRequester) reset() {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
if bpr.block != nil {
atomic.AddInt32(&bpr.pool.numPending, 1)
}
bpr.peerID = ""
bpr.block = nil
bpr.mtx.Unlock()
}
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
// NOTE: blocking
func (bpr *bpRequester) redo() {
select {
case bpr.redoCh <- struct{}{}:
default:
}
bpr.redoCh <- struct{}{}
}
// Responsible for making more requests as necessary
@@ -537,7 +469,7 @@ func (bpr *bpRequester) requestRoutine() {
OUTER_LOOP:
for {
// Pick a peer to send request to.
var peer *bpPeer
var peer *bpPeer = nil
PICK_PEER_LOOP:
for {
if !bpr.IsRunning() || !bpr.pool.IsRunning() {
@@ -557,21 +489,26 @@ OUTER_LOOP:
// Send request and wait.
bpr.pool.sendRequest(bpr.height, peer.id)
WAIT_LOOP:
for {
select {
case <-bpr.pool.Quit:
bpr.Stop()
return
case <-bpr.Quit:
return
case <-bpr.redoCh:
bpr.reset()
continue OUTER_LOOP // When peer is removed
case <-bpr.gotBlockCh:
// We got the block, now see if it's good.
select {
case <-bpr.pool.Quit():
case <-bpr.pool.Quit:
bpr.Stop()
return
case <-bpr.Quit():
case <-bpr.Quit:
return
case <-bpr.redoCh:
bpr.reset()
continue OUTER_LOOP
case <-bpr.gotBlockCh:
// We got a block!
// Continue the for-loop and wait til Quit.
continue WAIT_LOOP
}
}
}
@@ -580,6 +517,6 @@ OUTER_LOOP:
//-------------------------------------
type BlockRequest struct {
Height int64
PeerID p2p.ID
Height int
PeerID string
}

View File

@@ -1,48 +1,42 @@
package blockchain
import (
"math/rand"
"testing"
"time"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
func init() {
peerTimeout = 2 * time.Second
peerTimeoutSeconds = time.Duration(2)
}
type testPeer struct {
id p2p.ID
height int64
id string
height int
}
func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
peers := make(map[p2p.ID]testPeer, numPeers)
func makePeers(numPeers int, minHeight, maxHeight int) map[string]testPeer {
peers := make(map[string]testPeer, numPeers)
for i := 0; i < numPeers; i++ {
peerID := p2p.ID(cmn.RandStr(12))
height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
peerID := RandStr(12)
height := minHeight + rand.Intn(maxHeight-minHeight)
peers[peerID] = testPeer{peerID, height}
}
return peers
}
func TestBasic(t *testing.T) {
start := int64(42)
start := 42
peers := makePeers(10, start+1, 1000)
errorsCh := make(chan peerError, 1000)
requestsCh := make(chan BlockRequest, 1000)
pool := NewBlockPool(start, requestsCh, errorsCh)
timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.SetLogger(log.TestingLogger())
err := pool.Start()
if err != nil {
t.Error(err)
}
pool.Start()
defer pool.Stop()
// Introduce each peer.
@@ -70,8 +64,8 @@ func TestBasic(t *testing.T) {
// Pull from channels
for {
select {
case err := <-errorsCh:
t.Error(err)
case peerID := <-timeoutsCh:
t.Errorf("timeout: %v", peerID)
case request := <-requestsCh:
t.Logf("Pulled new BlockRequest %v", request)
if request.Height == 300 {
@@ -79,7 +73,7 @@ func TestBasic(t *testing.T) {
}
// Request desired, pretend like we got the block immediately.
go func() {
block := &types.Block{Header: types.Header{Height: request.Height}}
block := &types.Block{Header: &types.Header{Height: request.Height}}
pool.AddBlock(request.PeerID, block, 123)
t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
}()
@@ -88,16 +82,13 @@ func TestBasic(t *testing.T) {
}
func TestTimeout(t *testing.T) {
start := int64(42)
start := 42
peers := makePeers(10, start+1, 1000)
errorsCh := make(chan peerError, 1000)
requestsCh := make(chan BlockRequest, 1000)
pool := NewBlockPool(start, requestsCh, errorsCh)
timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.SetLogger(log.TestingLogger())
err := pool.Start()
if err != nil {
t.Error(err)
}
pool.Start()
defer pool.Stop()
for _, peer := range peers {
@@ -128,13 +119,12 @@ func TestTimeout(t *testing.T) {
// Pull from channels
counter := 0
timedOut := map[p2p.ID]struct{}{}
timedOut := map[string]struct{}{}
for {
select {
case err := <-errorsCh:
t.Log(err)
// consider error to be always timeout here
if _, ok := timedOut[err.peerID]; !ok {
case peerID := <-timeoutsCh:
t.Logf("Peer %v timeouted", peerID)
if _, ok := timedOut[peerID]; !ok {
counter++
if counter == len(peers) {
return // Done!

View File

@@ -1,25 +1,25 @@
package blockchain
import (
"fmt"
"bytes"
"errors"
"reflect"
"time"
amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)
const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
defaultChannelCapacity = 100
trySyncIntervalMS = 100
// stop syncing when last block's time is
// within this much of the system time.
// stopSyncingDurationMinutes = 10
@@ -28,89 +28,63 @@ const (
statusUpdateIntervalSeconds = 10
// check if we should switch to consensus reactor
switchToConsensusIntervalSeconds = 1
// NOTE: keep up to date with bcBlockResponseMessage
bcBlockResponseMessagePrefixSize = 4
bcBlockResponseMessageFieldKeySize = 1
maxMsgSize = types.MaxBlockSizeBytes +
bcBlockResponseMessagePrefixSize +
bcBlockResponseMessageFieldKeySize
maxBlockchainResponseSize = types.MaxBlockSize + 2
)
type consensusReactor interface {
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(sm.State, int)
}
type peerError struct {
err error
peerID p2p.ID
}
func (e peerError) Error() string {
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
SwitchToConsensus(*sm.State)
}
// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
p2p.BaseReactor
// immutable
initialState sm.State
state *sm.State
proxyAppConn proxy.AppConnConsensus // same as consensus.proxyAppConn
store *BlockStore
pool *BlockPool
fastSync bool
requestsCh chan BlockRequest
timeoutsCh chan string
blockExec *sm.BlockExecutor
store *BlockStore
pool *BlockPool
fastSync bool
requestsCh <-chan BlockRequest
errorsCh <-chan peerError
evsw types.EventSwitch
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
if state.LastBlockHeight == store.Height()-1 {
store.height-- // XXX HACK, make this better
}
requestsCh := make(chan BlockRequest, maxTotalRequesters)
const capacity = 1000 // must be bigger than peers count
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
if state.LastBlockHeight != store.Height() {
cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
}
requestsCh := make(chan BlockRequest, defaultChannelCapacity)
timeoutsCh := make(chan string, defaultChannelCapacity)
pool := NewBlockPool(
store.Height()+1,
requestsCh,
errorsCh,
timeoutsCh,
)
bcR := &BlockchainReactor{
initialState: state,
blockExec: blockExec,
state: state,
proxyAppConn: proxyAppConn,
store: store,
pool: pool,
fastSync: fastSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
timeoutsCh: timeoutsCh,
}
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
return bcR
}
// SetLogger implements cmn.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.pool.Logger = l
}
// OnStart implements cmn.Service.
// OnStart implements BaseService
func (bcR *BlockchainReactor) OnStart() error {
bcR.BaseReactor.OnStart()
if bcR.fastSync {
err := bcR.pool.Start()
_, err := bcR.pool.Start()
if err != nil {
return err
}
@@ -119,223 +93,164 @@ func (bcR *BlockchainReactor) OnStart() error {
return nil
}
// OnStop implements cmn.Service.
// OnStop implements BaseService
func (bcR *BlockchainReactor) OnStop() {
bcR.BaseReactor.OnStop()
bcR.pool.Stop()
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
&p2p.ChannelDescriptor{
ID: BlockchainChannel,
Priority: 5,
SendQueueCapacity: 100,
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
if !peer.Send(BlockchainChannel, msgBytes) {
func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) {
if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
// doing nothing, will try later in `poolRoutine`
}
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.SetPeerHeight
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
bcR.pool.RemovePeer(peer.ID())
}
// respondToPeer loads a block and sends it to the requesting peer,
// if we have it. Otherwise, we'll respond saying we don't have it.
// According to the Tendermint spec, if all nodes are honest,
// no node should be requesting a block that does not exist.
func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
return src.TrySend(BlockchainChannel, msgBytes)
}
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
return src.TrySend(BlockchainChannel, msgBytes)
func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
bcR.pool.RemovePeer(peer.Key)
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
bcR.Switch.StopPeerForError(src, err)
bcR.Logger.Error("Error decoding message", "err", err)
return
}
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
// TODO: improve logic to satisfy megacheck
switch msg := msg.(type) {
case *bcBlockRequestMessage:
if queued := bcR.respondToPeer(msg, src); !queued {
// Unfortunately not queued since the queue is full.
// Got a request for a block. Respond with block if we have it.
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
msg := &bcBlockResponseMessage{Block: block}
queued := src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
if !queued {
// queue is full, just ignore.
}
} else {
// TODO peer is asking for things we don't have.
}
case *bcBlockResponseMessage:
// Got a block.
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
bcR.pool.AddBlock(src.Key, msg.Block, len(msgBytes))
case *bcStatusRequestMessage:
// Send peer our state.
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
queued := src.TrySend(BlockchainChannel, msgBytes)
queued := src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
if !queued {
// sorry
}
case *bcStatusResponseMessage:
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.ID(), msg.Height)
bcR.pool.SetPeerHeight(src.Key, msg.Height)
default:
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
func (bcR *BlockchainReactor) poolRoutine() {
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
blocksSynced := 0
chainID := bcR.initialState.ChainID
state := bcR.initialState
lastHundred := time.Now()
lastRate := 0.0
didProcessCh := make(chan struct{}, 1)
FOR_LOOP:
for {
select {
case request := <-bcR.requestsCh:
case request := <-bcR.requestsCh: // chan BlockRequest
peer := bcR.Switch.Peers().Get(request.PeerID)
if peer == nil {
continue FOR_LOOP // Peer has since been disconnected.
}
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
queued := peer.TrySend(BlockchainChannel, msgBytes)
msg := &bcBlockRequestMessage{request.Height}
queued := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
if !queued {
// We couldn't make the request, send-queue full.
// The pool handles timeouts, just let it go.
continue FOR_LOOP
}
case err := <-bcR.errorsCh:
peer := bcR.Switch.Peers().Get(err.peerID)
case peerID := <-bcR.timeoutsCh: // chan string
// Peer timed out.
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, err)
bcR.Switch.StopPeerForError(peer, errors.New("BlockchainReactor Timeout"))
}
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck
go bcR.BroadcastStatusRequest()
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
height, numPending, _ := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
bcR.Logger.Info("Consensus ticker", "numPending", numPending, "total", len(bcR.pool.requesters),
"outbound", outbound, "inbound", inbound)
if bcR.pool.IsCaughtUp() {
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
bcR.pool.Stop()
conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
conR.SwitchToConsensus(state, blocksSynced)
conR.SwitchToConsensus(bcR.state)
break FOR_LOOP
}
case <-trySyncTicker.C: // chan time
select {
case didProcessCh <- struct{}{}:
default:
}
case <-didProcessCh:
// NOTE: It is a subtle mistake to process more than a single block
// at a time (e.g. 10) here, because we only TrySend 1 request per
// loop. The ratio mismatch can result in starvation of blocks, then a
// sudden burst of requests and responses, and the cycle repeats.
// Consequently, it is better to split these routines rather than
// coupling them as it's written here. TODO uncouple from request
// routine.
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
continue FOR_LOOP
} else {
// Try again quickly next loop.
didProcessCh <- struct{}{}
}
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommit(
chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
// NOTE: we've already removed the peer's request, but we
// still need to clean up the rest.
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
// This loop can be slow as long as it's doing syncing work.
SYNC_LOOP:
for i := 0; i < 10; i++ {
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
break SYNC_LOOP
}
continue FOR_LOOP
} else {
bcR.pool.PopRequest()
// TODO: batch saves so we dont persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
firstParts := first.MakePartSet(types.DefaultBlockPartSize)
firstPartsHeader := firstParts.Header()
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := bcR.state.Validators.VerifyCommit(
bcR.state.ChainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit)
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
}
blocksSynced++
bcR.Logger.Info("error in validation", "err", err)
bcR.pool.RedoRequest(first.Height)
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: should we be firing events? need to fire NewBlock events manually ...
// NOTE: we could improve performance if we
// didn't make the app commit to disk every block
// ... but we would need a way to get the hash without it persisting
err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, types.MockMempool{})
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
}
}
continue FOR_LOOP
case <-bcR.Quit():
case <-bcR.Quit:
break FOR_LOOP
}
}
@@ -343,78 +258,86 @@ FOR_LOOP:
// BroadcastStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
bcR.Switch.Broadcast(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}})
return nil
}
// SetEventSwitch implements events.Eventable
func (bcR *BlockchainReactor) SetEventSwitch(evsw types.EventSwitch) {
bcR.evsw = evsw
}
//-----------------------------------------------------------------------------
// Messages
const (
msgTypeBlockRequest = byte(0x10)
msgTypeBlockResponse = byte(0x11)
msgTypeStatusResponse = byte(0x20)
msgTypeStatusRequest = byte(0x21)
)
// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface{}
func RegisterBlockchainMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
}
var _ = wire.RegisterInterface(
struct{ BlockchainMessage }{},
wire.ConcreteType{&bcBlockRequestMessage{}, msgTypeBlockRequest},
wire.ConcreteType{&bcBlockResponseMessage{}, msgTypeBlockResponse},
wire.ConcreteType{&bcStatusResponseMessage{}, msgTypeStatusResponse},
wire.ConcreteType{&bcStatusRequestMessage{}, msgTypeStatusRequest},
)
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
// DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read.
func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
msgType = bz[0]
n := int(0)
r := bytes.NewReader(bz)
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxBlockchainResponseSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
if err != nil && n != len(bz) {
err = errors.New("DecodeMessage() had bytes left over")
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}
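A hypothetical round-trip test for the newer decode path shown above (the amino-based decodeMsg, not the removed wire-based DecodeMessage); it assumes the package-level codec `cdc` that the new code marshals with elsewhere in this diff, and the bcStatusRequestMessage definition below.

package blockchain

import "testing"

// TestStatusRequestRoundTrip is an illustrative sketch, not part of this diff.
func TestStatusRequestRoundTrip(t *testing.T) {
    bz := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{Height: 1})
    msg, err := decodeMsg(bz)
    if err != nil {
        t.Fatal(err)
    }
    m, ok := msg.(*bcStatusRequestMessage)
    if !ok || m.Height != 1 {
        t.Fatalf("unexpected decode result: %#v", msg)
    }
}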
//-------------------------------------
type bcBlockRequestMessage struct {
Height int64
Height int
}
func (m *bcBlockRequestMessage) String() string {
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}
type bcNoBlockResponseMessage struct {
Height int64
}
func (brm *bcNoBlockResponseMessage) String() string {
return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
}
//-------------------------------------
// NOTE: keep up-to-date with maxBlockchainResponseSize
type bcBlockResponseMessage struct {
Block *types.Block
}
func (m *bcBlockResponseMessage) String() string {
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
}
//-------------------------------------
type bcStatusRequestMessage struct {
Height int64
Height int
}
func (m *bcStatusRequestMessage) String() string {
return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
}
//-------------------------------------
type bcStatusResponseMessage struct {
Height int64
Height int
}
func (m *bcStatusResponseMessage) String() string {
return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
}

Some files were not shown because too many files have changed in this diff.