Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-13 00:02:52 +00:00)

Compare commits: v0.34.22 ... sam/abci++ (371 commits)
| Author | SHA1 | Date |
|---|---|---|
| (not captured) | 28a7cbe97e ... 9d864da353 (371 commits, listed by abbreviated SHA1 only) | (not captured) |
168 .circleci/config.yml
@@ -1,168 +0,0 @@
|
||||
version: 2.1
|
||||
|
||||
executors:
|
||||
golang:
|
||||
docker:
|
||||
- image: tendermintdev/docker-tendermint-build
|
||||
working_directory: /go/src/github.com/tendermint/tendermint
|
||||
environment:
|
||||
GOBIN: /tmp/bin
|
||||
release:
|
||||
machine: true
|
||||
docs:
|
||||
docker:
|
||||
- image: tendermintdev/docker-website-deployment
|
||||
environment:
|
||||
AWS_REGION: us-east-1
|
||||
|
||||
commands:
|
||||
run_test:
|
||||
parameters:
|
||||
script_path:
|
||||
type: string
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/bin
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- restore_cache:
|
||||
name: "Restore go modules cache"
|
||||
keys:
|
||||
- go-mod-v1-{{ checksum "go.sum" }}
|
||||
- run:
|
||||
name: "Running test"
|
||||
command: |
|
||||
bash << parameters.script_path >>
|
||||
jobs:
|
||||
setup_dependencies:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- restore_cache:
|
||||
name: "Restore go modules cache"
|
||||
keys:
|
||||
- go-mod-v1-{{ checksum "go.sum" }}
|
||||
- run:
|
||||
command: |
|
||||
mkdir -p /tmp/bin
|
||||
- run:
|
||||
name: Cache go modules
|
||||
command: make go-mod-cache
|
||||
- run:
|
||||
name: tools
|
||||
command: make tools
|
||||
- run:
|
||||
name: "Build binaries"
|
||||
command: make install install_abci
|
||||
- save_cache:
|
||||
name: "Save go modules cache"
|
||||
key: go-mod-v1-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
- save_cache:
|
||||
name: "Save source code cache"
|
||||
key: go-src-v1-{{ .Revision }}
|
||||
paths:
|
||||
- ".git"
|
||||
- persist_to_workspace:
|
||||
root: "/tmp/bin"
|
||||
paths:
|
||||
- "."
|
||||
|
||||
deploy_docs:
|
||||
executor: docs
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: "Pull versions"
|
||||
command: git fetch origin v0.32 v0.33
|
||||
- run:
|
||||
name: "Build docs"
|
||||
command: make build-docs
|
||||
- run:
|
||||
name: "Sync to S3"
|
||||
command: make sync-docs
|
||||
|
||||
prepare_build:
|
||||
executor: golang
|
||||
steps:
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- run:
|
||||
name: Get next release number
|
||||
command: |
|
||||
export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
|
||||
echo "Last tag: ${LAST_TAG}"
|
||||
if [ -z "${LAST_TAG}" ]; then
|
||||
export LAST_TAG="${CIRCLE_BRANCH}"
|
||||
echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
|
||||
fi
|
||||
export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
|
||||
echo "Next tag: ${NEXT_TAG}"
|
||||
echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
|
||||
- run:
|
||||
name: Build dependencies
|
||||
command: make tools
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- "release-version.source"
|
||||
- save_cache:
|
||||
key: v2-release-deps-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
|
||||
# # Test RPC implementation against the swagger documented specs
|
||||
# contract_tests:
|
||||
# working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
|
||||
# machine:
|
||||
# image: circleci/classic:latest
|
||||
# environment:
|
||||
# GOBIN: /home/circleci/.go_workspace/bin
|
||||
# GOPATH: /home/circleci/.go_workspace/
|
||||
# GOOS: linux
|
||||
# GOARCH: amd64
|
||||
# parallelism: 1
|
||||
# steps:
|
||||
# - checkout
|
||||
# - run:
|
||||
# name: Test RPC endpoints against swagger documentation
|
||||
# command: |
|
||||
# set -x
|
||||
# export PATH=~/.local/bin:$PATH
|
||||
# # install node and dredd
|
||||
# ./scripts/get_nodejs.sh
|
||||
# # build the binaries with a proper version of Go
|
||||
# docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks
|
||||
# # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
|
||||
# go get github.com/snikch/goodman/cmd/goodman
|
||||
# make contract-tests
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
docs:
|
||||
jobs:
|
||||
- deploy_docs:
|
||||
context: tendermint-docs
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
tags:
|
||||
only:
|
||||
- /^v.*/
|
||||
- deploy_docs:
|
||||
context: tendermint-docs-staging
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- docs-staging
|
||||
# - contract_tests:
|
||||
# requires:
|
||||
# - setup_dependencies
|
||||
4 .github/CODEOWNERS (vendored)
@@ -7,4 +7,6 @@
|
||||
# global owners are only requested if there isn't a more specific
|
||||
# codeowner specified below. For this reason, the global codeowners
|
||||
# are often repeated in package-level definitions.
|
||||
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair
|
||||
* @ebuchman @tendermint/tendermint-engineering
|
||||
|
||||
/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering
|
||||
|
||||
11 .github/ISSUE_TEMPLATE/bug-report.md (vendored)
@@ -1,13 +1,18 @@
|
||||
---
|
||||
name: Bug Report
|
||||
name: Bug report
|
||||
about: Create a report to help us squash bugs!
|
||||
|
||||
---
|
||||
<!--
|
||||
|
||||
Please fill in as much of the template below as you can.
|
||||
|
||||
Be ready for followup questions, and please respond in a timely
|
||||
manner. We might ask you to provide additional logs and data (tendermint & app).
|
||||
If you have general questions, please create a new discussion:
|
||||
https://github.com/tendermint/tendermint/discussions
|
||||
|
||||
Be ready for followup questions, and please respond in a timely manner. We might
|
||||
ask you to provide additional logs and data (tendermint & app).
|
||||
|
||||
-->
|
||||
|
||||
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
|
||||
|
||||
5 .github/ISSUE_TEMPLATE/config.yml (vendored, Normal file)
@@ -0,0 +1,5 @@
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: Ask a question
|
||||
url: https://github.com/tendermint/tendermint/discussions
|
||||
about: Please ask and answer questions here
|
||||
11 .github/ISSUE_TEMPLATE/feature-request.md (vendored)
@@ -1,5 +1,5 @@
|
||||
---
|
||||
name: Feature Request
|
||||
name: Feature request
|
||||
about: Create a proposal to request a feature
|
||||
|
||||
---
|
||||
@@ -25,12 +25,3 @@ Are there any disadvantages of including this feature? -->
|
||||
## Proposal
|
||||
|
||||
<!-- Detailed description of requirements of implementation -->
|
||||
|
||||
____
|
||||
|
||||
#### For Admin Use
|
||||
|
||||
- [ ] Not duplicate issue
|
||||
- [ ] Appropriate labels applied
|
||||
- [ ] Appropriate contributors tagged
|
||||
- [ ] Contributor assigned/self-assigned
|
||||
|
||||
28 .github/ISSUE_TEMPLATE/proposal.md (vendored, Normal file)
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: Protocol change proposal
|
||||
about: Create a proposal to request a change to the protocol
|
||||
|
||||
---
|
||||
|
||||
<!-- < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < ☺
|
||||
v ✰ Thanks for opening an issue! ✰
|
||||
v Before smashing the submit button please review the template.
|
||||
v Word of caution: Under-specified proposals may be rejected summarily
|
||||
☺ > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > -->
|
||||
|
||||
# Protocol Change Proposal
|
||||
|
||||
## Summary
|
||||
|
||||
<!-- Short, concise description of the proposed change -->
|
||||
|
||||
## Problem Definition
|
||||
|
||||
<!-- Why do we need this change?
|
||||
What problems may be addressed by introducing this change?
|
||||
What benefits does Tendermint stand to gain by including this change?
|
||||
Are there any disadvantages of including this change? -->
|
||||
|
||||
## Proposal
|
||||
|
||||
<!-- Detailed description of requirements of implementation -->
|
||||
33 .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -1,7 +1,32 @@
|
||||
## Description
|
||||
<!--
|
||||
|
||||
_Please add a description of the changes that this PR introduces and the files that
|
||||
are the most critical to review._
|
||||
Please add a reference to the issue that this PR addresses and indicate which
|
||||
files are most critical to review. If it fully addresses a particular issue,
|
||||
please include "Closes #XXX" (where "XXX" is the issue number).
|
||||
|
||||
Closes: #XXX
|
||||
If this PR is non-trivial/large/complex, please ensure that you have either
|
||||
created an issue that the team's had a chance to respond to, or had some
|
||||
discussion with the team prior to submitting substantial pull requests. The team
|
||||
can be reached via GitHub Discussions or the Cosmos Network Discord server in
|
||||
the #tendermint-core channel. GitHub Discussions is preferred over Discord as it
|
||||
allows us to keep track of conversations topically.
|
||||
https://github.com/tendermint/tendermint/discussions
|
||||
|
||||
If the work in this PR is not aligned with the team's current priorities, please
|
||||
be advised that it may take some time before it is merged - especially if it has
|
||||
not yet been discussed with the team.
|
||||
|
||||
See the project board for the team's current priorities:
|
||||
https://github.com/orgs/tendermint/projects/15/views/5
|
||||
|
||||
-->
|
||||
|
||||
---
|
||||
|
||||
#### PR checklist
|
||||
|
||||
- [ ] Tests written/updated, or no tests needed
|
||||
- [ ] `CHANGELOG_PENDING.md` updated, or no changelog entry needed
|
||||
- [ ] Updated relevant documentation (`docs/`) and code comments, or no
|
||||
documentation updates needed
|
||||
|
||||
|
||||
16 .github/auto-comment.yml (vendored)
@@ -1,16 +0,0 @@
|
||||
pullRequestOpened: |
|
||||
:wave: Thanks for creating a PR!
|
||||
|
||||
Before we can merge this PR, please make sure that all the following items have been
|
||||
checked off. If any of the checklist items are not applicable, please leave them but
|
||||
write a little note why.
|
||||
|
||||
- [ ] Wrote tests
|
||||
- [ ] Updated CHANGELOG_PENDING.md
|
||||
- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
|
||||
- [ ] Updated relevant documentation (`docs/`) and code comments
|
||||
- [ ] Re-reviewed `Files changed` in the Github PR explorer
|
||||
- [ ] Applied Appropriate Labels
|
||||
|
||||
|
||||
Thank you for your contribution to Tendermint! :rocket:
|
||||
25 .github/codecov.yml (vendored, Normal file)
@@ -0,0 +1,25 @@
|
||||
coverage:
|
||||
precision: 2
|
||||
round: down
|
||||
range: "70...100"
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
threshold: 20%
|
||||
patch: off
|
||||
changes: off
|
||||
|
||||
github_checks:
|
||||
annotations: false
|
||||
|
||||
comment: false
|
||||
|
||||
ignore:
|
||||
- "docs"
|
||||
- "DOCKER"
|
||||
- "scripts"
|
||||
- "**/*.pb.go"
|
||||
- "libs/pubsub/query/query.peg.go"
|
||||
- "*.md"
|
||||
- "*.rst"
|
||||
- "*.yml"
|
||||
73 .github/dependabot.yml (vendored, Normal file)
@@ -0,0 +1,73 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "main"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.37.x"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.34.x"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: npm
|
||||
directory: "/docs"
|
||||
schedule:
|
||||
interval: weekly
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
###################################
|
||||
##
|
||||
## Update All Go Dependencies
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "main"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.37.x"
|
||||
# Only allow automated security-related dependency updates until we cut the
|
||||
# final v0.37.0 release.
|
||||
open-pull-requests-limit: 0
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.34.x"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
6 .github/issue_template.md (vendored, Normal file)
@@ -0,0 +1,6 @@
|
||||
<!--
|
||||
|
||||
If you want to ask a general question, please create a new discussion instead of
|
||||
an issue: https://github.com/tendermint/tendermint/discussions
|
||||
|
||||
-->
|
||||
15 .github/linters/markdownlint.yml (vendored, Normal file)
@@ -0,0 +1,15 @@
|
||||
# markdownlint configuration for Super-Linter
|
||||
# - https://github.com/DavidAnson/markdownlint
|
||||
# - https://github.com/github/super-linter
|
||||
|
||||
# Default state for all rules
|
||||
default: true
|
||||
|
||||
# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
|
||||
MD007: {"indent": 4}
|
||||
MD013: false
|
||||
MD024: {siblings_only: true}
|
||||
MD025: false
|
||||
MD033: {no-inline-html: false}
|
||||
no-hard-tabs: false
|
||||
whitespace: false
|
||||
9 .github/linters/yaml-lint.yml (vendored, Normal file)
@@ -0,0 +1,9 @@
|
||||
---
|
||||
# Default rules for YAML linting from super-linter.
|
||||
# See: See https://yamllint.readthedocs.io/en/stable/rules.html
|
||||
extends: default
|
||||
rules:
|
||||
document-end: disable
|
||||
document-start: disable
|
||||
line-length: disable
|
||||
truthy: disable
|
||||
22 .github/mergify.yml (vendored)
@@ -1,13 +1,13 @@
|
||||
queue_rules:
|
||||
- name: default
|
||||
conditions:
|
||||
- base=v0.34.x
|
||||
- base=main
|
||||
- label=S:automerge
|
||||
|
||||
pull_request_rules:
|
||||
- name: Automerge to v0.34.x
|
||||
- name: Automerge to main
|
||||
conditions:
|
||||
- base=v0.34.x
|
||||
- base=main
|
||||
- label=S:automerge
|
||||
actions:
|
||||
queue:
|
||||
@@ -17,3 +17,19 @@ pull_request_rules:
|
||||
{{ title }} (#{{ number }})
|
||||
|
||||
{{ body }}
|
||||
- name: backport patches to v0.37.x branch
|
||||
conditions:
|
||||
- base=main
|
||||
- label=S:backport-to-v0.37.x
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.37.x
|
||||
- name: backport patches to v0.34.x branch
|
||||
conditions:
|
||||
- base=main
|
||||
- label=S:backport-to-v0.34.x
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.34.x
|
||||
|
||||
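Under these Mergify rules, merging and backporting are driven by labels: `S:automerge` queues a `main`-based pull request for automatic merge, while `S:backport-to-v0.37.x` and `S:backport-to-v0.34.x` trigger backport pull requests against the release branches. As an illustrative sketch only (assuming the GitHub CLI `gh` is installed and authenticated; the PR number is a placeholder), a maintainer could apply the labels like this:

```bash
# Queue PR 1234 for automatic merge once its checks pass.
gh pr edit 1234 --add-label "S:automerge"

# Additionally request a backport of the same PR to the v0.34.x branch.
gh pr edit 1234 --add-label "S:backport-to-v0.34.x"
```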
82 .github/workflows/build.yml (vendored, Normal file)
@@ -0,0 +1,82 @@
|
||||
name: Build
|
||||
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
|
||||
# This workflow runs on every push to main or release branch and every pull requests
|
||||
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
goarch: ["arm", "amd64"]
|
||||
goos: ["linux"]
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
"!test/"
|
||||
go.mod
|
||||
go.sum
|
||||
Makefile
|
||||
- name: install
|
||||
run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test_abci_cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- run: abci/tests/test_cli/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- name: test_apps
|
||||
run: test/app/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
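The build workflow above gates every job on `env.GIT_DIFF`, which `technote-space/get-diff-action` populates only when files matching the listed patterns (Go sources, `go.mod`, `go.sum`, `Makefile`) have changed. A rough local approximation of that gate, assuming you are comparing a feature branch against `main`:

```bash
# Collect changed files versus main and keep only the ones the workflow cares about.
CHANGED="$(git diff --name-only main...HEAD | grep -E '(\.go|go\.mod|go\.sum|^Makefile)$' || true)"

if [ -z "${CHANGED}" ]; then
  echo "No Go-related changes; nothing to build."
else
  echo "Changed files:"
  echo "${CHANGED}"
  GOOS=linux GOARCH=amd64 make build
fi
```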
12 .github/workflows/check-generated.yml (vendored)
@@ -7,7 +7,7 @@ name: Check generated code
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- v0.34.x
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -18,7 +18,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.17'
|
||||
go-version: "1.18"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
@@ -26,8 +26,6 @@ jobs:
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
readonly MOCKERY=2.12.3 # N.B. no leading "v"
|
||||
curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf -
|
||||
make mockery 2>/dev/null
|
||||
|
||||
if ! git diff --stat --exit-code ; then
|
||||
@@ -44,11 +42,11 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.17'
|
||||
go-version: "1.18"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1 # we need a .git directory to run git diff
|
||||
fetch-depth: 1 # we need a .git directory to run git diff
|
||||
|
||||
- name: "Check protobuf generated code"
|
||||
run: |
|
||||
@@ -61,7 +59,7 @@ jobs:
|
||||
export GOBIN="${tools}/bin"
|
||||
|
||||
go install github.com/bufbuild/buf/cmd/buf
|
||||
go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
|
||||
go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest
|
||||
|
||||
make proto-gen
|
||||
|
||||
|
||||
123 .github/workflows/coverage.yml (vendored)
@@ -1,123 +0,0 @@
|
||||
name: Test Coverage
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
split-test-files:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Create a file with all the pkgs
|
||||
run: go list ./... > pkgs.txt
|
||||
- name: Split pkgs into 4 files
|
||||
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
|
||||
# cache multiple
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-00"
|
||||
path: ./pkgs.txt.part.00
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-01"
|
||||
path: ./pkgs.txt.part.01
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-02"
|
||||
path: ./pkgs.txt.part.02
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-03"
|
||||
path: ./pkgs.txt.part.03
|
||||
|
||||
build-linux:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
goarch: ["arm", "amd64"]
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "^1.15.4"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
needs: split-test-files
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
part: ["00", "01", "02", "03"]
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "^1.15.4"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-${{ matrix.part }}"
|
||||
if: env.GIT_DIFF
|
||||
- name: test & coverage report creation
|
||||
run: |
|
||||
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
|
||||
path: ./${{ matrix.part }}profile.out
|
||||
|
||||
upload-coverage-report:
|
||||
runs-on: ubuntu-latest
|
||||
needs: tests
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-00-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-01-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-02-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-03-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- run: |
|
||||
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
|
||||
if: env.GIT_DIFF
|
||||
- uses: codecov/codecov-action@v3
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
if: env.GIT_DIFF
|
||||
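The removed coverage workflow above fans the test run out over four matrix jobs: `go list ./...` builds the package list, `split -d -n l/4` cuts it into four parts, and each job pipes its part through `xargs go test` with its own coverage profile before the profiles are merged and uploaded. The same flow can be reproduced locally for one part, e.g. part `00`:

```bash
# Build the package list and split it into four numbered parts:
# pkgs.txt.part.00 ... pkgs.txt.part.03
go list ./... > pkgs.txt
split -d -n l/4 pkgs.txt pkgs.txt.part.

# Run the tests for one part with the race detector and an atomic coverage profile.
PART=00
cat "pkgs.txt.part.${PART}" | xargs go test -mod=readonly -timeout 8m -race \
  -coverprofile="${PART}profile.out" -covermode=atomic

# Merging the per-part profiles afterwards mirrors the upload job:
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
```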
23 .github/workflows/docker.yml (vendored)
@@ -1,14 +1,15 @@
|
||||
name: Build & Push
|
||||
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
|
||||
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
|
||||
name: Docker
|
||||
# Build & Push rebuilds the Tendermint docker image on every push to main and creation of tags
|
||||
# and pushes the image to https://hub.docker.com/r/tendermint/tendermint
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -39,18 +40,18 @@ jobs:
|
||||
with:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Set up Docker Build
|
||||
uses: docker/setup-buildx-action@v2.0.0
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v2.0.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v3
|
||||
uses: docker/build-push-action@v3.1.1
|
||||
with:
|
||||
context: .
|
||||
file: ./DOCKER/Dockerfile
|
||||
|
||||
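With this change the Docker image is published only for pushes to `main` and for tags matching the release, `-alpha.N`, `-beta.N`, or `-rcN` patterns above. As a small sketch (regexes transcribed from the `tags:` filters and anchored/escaped for `grep -E`), you can check whether a candidate tag would trigger the publish:

```bash
TAG="v0.37.0-rc2"   # candidate tag (placeholder)

if echo "${TAG}" | grep -Eq \
    -e '^v[0-9]+\.[0-9]+\.[0-9]+$' \
    -e '^v[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$' \
    -e '^v[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$' \
    -e '^v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+$'; then
  echo "${TAG} matches the Docker publish tag filters"
else
  echo "${TAG} does not match the tag filters"
fi
```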
63 .github/workflows/docs-deployment.yml (vendored, Normal file)
@@ -0,0 +1,63 @@
|
||||
# Build and deploy the docs.tendermint.com website content.
|
||||
# The static content is published to GitHub Pages.
|
||||
#
|
||||
# For documentation build info, see docs/DOCS_README.md.
|
||||
name: Build static documentation site
|
||||
on:
|
||||
workflow_dispatch: # allow manual updates
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- v0.34.x
|
||||
paths:
|
||||
- docs/**
|
||||
- spec/**
|
||||
|
||||
jobs:
|
||||
# This is split into two jobs so that the build, which runs npm, does not
|
||||
# have write access to anything. The deploy requires write access to publish
|
||||
# to the branch used by GitHub Pages, however, so we can't just make the
|
||||
# whole workflow read-only.
|
||||
build:
|
||||
name: VuePress build
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: alpine:latest
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Install generator dependencies
|
||||
run: |
|
||||
apk add --no-cache make bash git npm
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
# We need to fetch full history so the backport branches for previous
|
||||
# versions will be available for the build.
|
||||
fetch-depth: 0
|
||||
- name: Build documentation
|
||||
run: |
|
||||
git config --global --add safe.directory "$PWD"
|
||||
make build-docs
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: /tmp/tendermint-core-docs
|
||||
|
||||
deploy:
|
||||
name: Deploy to GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: ~/output
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
branch: 'docs-tendermint-com'
|
||||
folder: ~/output
|
||||
single-commit: true
|
||||
20 .github/workflows/docs-toc.yml (vendored, Normal file)
@@ -0,0 +1,20 @@
|
||||
# Verify that important design docs have ToC entries.
|
||||
name: Check documentation ToC
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
docs/architecture/**
|
||||
docs/rfc/**
|
||||
- run: make check-docs-toc
|
||||
if: env.GIT_DIFF
|
||||
7 .github/workflows/e2e-manual.yml (vendored)
@@ -1,4 +1,5 @@
|
||||
# Manually run randomly generated E2E testnets (as nightly).
|
||||
# Runs randomly generated E2E testnets nightly on main
|
||||
# manually run e2e tests
|
||||
name: e2e-manual
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@@ -16,7 +17,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.17'
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
@@ -28,7 +29,7 @@ jobs:
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 4 -d networks/nightly/
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
|
||||
67 .github/workflows/e2e-nightly-34x.yml (vendored)
@@ -1,12 +1,10 @@
|
||||
# Runs randomly generated E2E testnets nightly
|
||||
# on the 0.34.x release branch
|
||||
# Runs randomly generated E2E testnets nightly on the 0.34.x branch.
|
||||
|
||||
# !! If you change something in this file, you probably want
|
||||
# to update the e2e-nightly-master workflow as well!
|
||||
# !! This file should be kept in sync with the e2e-nightly-main.yml file,
|
||||
# modulo changes to the version labels.
|
||||
|
||||
name: e2e-nightly-34x
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually, in theory
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
@@ -17,18 +15,24 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03']
|
||||
group: ['00', '01']
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '^1.15.4'
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: 'v0.34.x'
|
||||
|
||||
- name: Capture git repo info
|
||||
id: git-info
|
||||
run: |
|
||||
echo "::set-output name=branch::`git branch --show-current`"
|
||||
echo "::set-output name=commit::`git rev-parse HEAD`"
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
@@ -37,40 +41,39 @@ jobs:
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 4 -d networks/nightly
|
||||
run: ./build/generator -g 2 -d networks/nightly
|
||||
|
||||
- name: Run testnets in group ${{ matrix.group }}
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
outputs:
|
||||
git-branch: ${{ steps.git-info.outputs.branch }}
|
||||
git-commit: ${{ steps.git-info.outputs.commit }}
|
||||
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
uses: slackapi/slack-github-action@v1.22.0
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':skull:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
e2e-nightly-success: # may turn this off once they seem to pass consistently
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on success
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':white_check_mark:'
|
||||
SLACK_COLOR: good
|
||||
SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
|
||||
SLACK_FOOTER: ''
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
79 .github/workflows/e2e-nightly-37x.yml (vendored, Normal file)
@@ -0,0 +1,79 @@
|
||||
# Runs randomly generated E2E testnets nightly on the v0.37.x branch.
|
||||
|
||||
# !! This file should be kept in sync with the e2e-nightly-main.yml file,
|
||||
# modulo changes to the version labels.
|
||||
|
||||
name: e2e-nightly-37x
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03', "04"]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: 'v0.37.x'
|
||||
|
||||
- name: Capture git repo info
|
||||
id: git-info
|
||||
run: |
|
||||
echo "::set-output name=branch::`git branch --show-current`"
|
||||
echo "::set-output name=commit::`git rev-parse HEAD`"
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner tests
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
outputs:
|
||||
git-branch: ${{ steps.git-info.outputs.branch }}
|
||||
git-commit: ${{ steps.git-info.outputs.commit }}
|
||||
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: slackapi/slack-github-action@v1.22.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
68 .github/workflows/e2e-nightly-main.yml (vendored, Normal file)
@@ -0,0 +1,68 @@
|
||||
# Runs randomly generated E2E testnets nightly on main
|
||||
|
||||
# !! Relevant changes to this file should be propagated to the e2e-nightly-<V>x
|
||||
# files for the supported backport branches, when appropriate, modulo version
|
||||
# markers.
|
||||
|
||||
name: e2e-nightly-main
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03', "04"]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner tests
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: slackapi/slack-github-action@v1.22.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ github.ref_name }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
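In the nightly E2E workflows the matrix `group` values must line up with the `-g` argument to the testnet generator: `-g 5` emits manifests whose names embed groups `00` through `04`, and each matrix job runs only its own group through `run-multiple.sh`. A local sketch of the same sequence, run from `test/e2e` as the workflow does (Docker is required):

```bash
cd test/e2e

# Build the tooling and Docker image, then generate five groups of testnets.
make -j2 docker generator runner tests
./build/generator -g 5 -d networks/nightly/

# Each CI matrix job picks one group; here we run group 00 only.
ls networks/nightly/                                # e.g. *-group00-*.toml ... *-group04-*.toml
./run-multiple.sh networks/nightly/*-group00-*.toml
```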
73 .github/workflows/e2e-nightly-master.yml (vendored)
@@ -1,73 +0,0 @@
|
||||
# Runs randomly generated E2E testnets nightly on master
|
||||
|
||||
# !! If you change something in this file, you probably want
|
||||
# to update the e2e-nightly-34x workflow as well!
|
||||
|
||||
name: e2e-nightly-master
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test-2:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03']
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.15'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 4 -d networks/nightly
|
||||
|
||||
- name: Run testnets in group ${{ matrix.group }}
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
e2e-nightly-fail-2:
|
||||
needs: e2e-nightly-test-2
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':skull:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Nightly E2E tests failed on master
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
e2e-nightly-success: # may turn this off once they seem to pass consistently
|
||||
needs: e2e-nightly-test-2
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on success
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':white_check_mark:'
|
||||
SLACK_COLOR: good
|
||||
SLACK_MESSAGE: Nightly E2E tests passed on master
|
||||
SLACK_FOOTER: ''
|
||||
16 .github/workflows/e2e.yml (vendored)
@@ -1,11 +1,12 @@
|
||||
name: e2e
|
||||
# Runs the CI end-to-end test network on all pushes to master or release branches
|
||||
# Runs the CI end-to-end test network on all pushes to main or release branches
|
||||
# and every pull request, but only if any Go files have been changed.
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
@@ -15,7 +16,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '^1.15.4'
|
||||
go-version: '1.18'
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
@@ -27,15 +28,10 @@ jobs:
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run two make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker runner
|
||||
run: make -j2 docker runner tests
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Run CI testnet
|
||||
working-directory: test/e2e
|
||||
run: ./build/runner -f networks/ci.toml
|
||||
run: ./run-multiple.sh networks/ci.toml
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Emit logs on failure
|
||||
if: ${{ failure() }}
|
||||
working-directory: test/e2e
|
||||
run: ./build/runner -f networks/ci.toml logs
|
||||
|
||||
45 .github/workflows/fuzz-nightly.yml (vendored)
@@ -1,9 +1,13 @@
|
||||
# Runs fuzzing nightly.
|
||||
name: fuzz-nightly
|
||||
name: Fuzz Tests
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 3 * * *'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths:
|
||||
- "test/fuzz/**/*.go"
|
||||
|
||||
jobs:
|
||||
fuzz-nightly-test:
|
||||
@@ -11,13 +15,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.15'
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install go-fuzz
|
||||
working-directory: test/fuzz
|
||||
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest
|
||||
|
||||
- name: Fuzz mempool
|
||||
working-directory: test/fuzz
|
||||
@@ -49,14 +53,14 @@ jobs:
|
||||
with:
|
||||
name: crashers
|
||||
path: test/fuzz/**/crashers
|
||||
retention-days: 1
|
||||
retention-days: 3
|
||||
|
||||
- name: Archive suppressions
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: suppressions
|
||||
path: test/fuzz/**/suppressions
|
||||
retention-days: 1
|
||||
retention-days: 3
|
||||
|
||||
- name: Set crashers count
|
||||
working-directory: test/fuzz
|
||||
@@ -71,13 +75,24 @@ jobs:
|
||||
if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack if any crashers
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
- name: Notify Slack on failure
|
||||
uses: slackapi/slack-github-action@v1.22.0
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly Fuzz Tests
|
||||
SLACK_ICON_EMOJI: ':firecracker:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Crashers found in Nightly Fuzz tests
|
||||
SLACK_FOOTER: ''
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ github.ref_name }}
|
||||
CRASHERS: ${{ needs.fuzz-nightly-test.outputs.crashers-count }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly fuzz tests for `${{ env.BRANCH }}` failed with ${{ env.CRASHERS }} crasher(s). See the <${{ env.RUN_URL }}|run details>."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
41 .github/workflows/gosec.yml (vendored, Normal file)
@@ -0,0 +1,41 @@
|
||||
name: Run Gosec
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- '**/*.go'
|
||||
- 'go.mod'
|
||||
- 'go.sum'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'feature/*'
|
||||
- 'v0.37.x'
|
||||
- 'v0.34.x'
|
||||
paths:
|
||||
- '**/*.go'
|
||||
- 'go.mod'
|
||||
- 'go.sum'
|
||||
|
||||
jobs:
|
||||
Gosec:
|
||||
permissions:
|
||||
security-events: write
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GO111MODULE: on
|
||||
steps:
|
||||
- name: Checkout Source
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Run Gosec Security Scanner
|
||||
uses: cosmos/gosec@master
|
||||
with:
|
||||
# Let the report trigger a failure with the Github Security scanner features.
|
||||
args: "-no-fail -fmt sarif -out results.sarif ./..."
|
||||
|
||||
- name: Upload SARIF file
|
||||
uses: github/codeql-action/upload-sarif@v2
|
||||
with:
|
||||
# Path to SARIF file relative to the root of the repository
|
||||
sarif_file: results.sarif
|
||||
16 .github/workflows/janitor.yml (vendored, Normal file)
@@ -0,0 +1,16 @@
|
||||
name: Janitor
|
||||
# Janitor cleans up previous runs of various workflows
|
||||
# To add more workflows to cancel visit https://api.github.com/repos/tendermint/tendermint/actions/workflows and find the actions name
|
||||
on:
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
cancel:
|
||||
name: "Cancel Previous Runs"
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 3
|
||||
steps:
|
||||
- uses: styfle/cancel-workflow-action@0.10.1
|
||||
with:
|
||||
workflow_id: 1041851,1401230,2837803
|
||||
access_token: ${{ github.token }}
|
||||
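The janitor workflow cancels in-progress runs of the workflows whose numeric IDs appear in `workflow_id`. As the comment notes, those IDs come from the GitHub Actions API; a quick way to look them up (assuming `curl` and `jq` are available):

```bash
# List workflow names and their numeric IDs for this repository.
curl -s https://api.github.com/repos/tendermint/tendermint/actions/workflows \
  | jq -r '.workflows[] | "\(.id)\t\(.name)"'
```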
12 .github/workflows/linkchecker.yml (vendored)
@@ -1,12 +0,0 @@
|
||||
name: Check Markdown links
|
||||
on:
|
||||
schedule:
|
||||
- cron: '* */24 * * *'
|
||||
jobs:
|
||||
markdown-link-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: creachadair/github-action-markdown-link-check@master
|
||||
with:
|
||||
folder-path: "docs"
|
||||
24 .github/workflows/lint.yml (vendored)
@@ -1,12 +1,18 @@
|
||||
name: Lint
|
||||
# Lint runs golangci-lint over the entire Tendermint repository
|
||||
# This workflow is run on every pull request and push to master
|
||||
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
|
||||
name: Golang Linter
|
||||
# Lint runs golangci-lint over the entire Tendermint repository.
|
||||
#
|
||||
# This workflow is run on every pull request and push to main.
|
||||
#
|
||||
# The `golangci` job will pass without running if no *.{go, mod, sum}
|
||||
# files have been modified.
|
||||
#
|
||||
# To run this locally, simply run `make lint` from the root of the repo.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
jobs:
|
||||
golangci:
|
||||
name: golangci-lint
|
||||
@@ -16,7 +22,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '^1.16'
|
||||
go-version: '1.18'
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
@@ -25,8 +31,10 @@ jobs:
|
||||
go.sum
|
||||
- uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
|
||||
version: v1.45
|
||||
# Required: the version of golangci-lint is required and
|
||||
# must be specified without patch version: we always use the
|
||||
# latest patch version.
|
||||
version: v1.47.3
|
||||
args: --timeout 10m
|
||||
github-token: ${{ secrets.github_token }}
|
||||
if: env.GIT_DIFF
|
||||
|
||||
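The lint workflow pins golangci-lint to v1.47.3 (with the patch version floating) and, as its comment says, `make lint` runs the same checks locally. If you prefer to mirror the CI invocation directly rather than going through the Makefile, a sketch (the install path below is the upstream golangci-lint module):

```bash
# Install the same golangci-lint version used in CI.
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.47.3

# Run it with the same timeout the workflow passes.
golangci-lint run --timeout 10m
```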
12 .github/workflows/linter.yml (vendored)
@@ -1,14 +1,14 @@
|
||||
name: Lint
|
||||
name: Markdown Linter
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
paths:
|
||||
- "**.md"
|
||||
- "**.yml"
|
||||
- "**.yaml"
|
||||
pull_request:
|
||||
branches: [master]
|
||||
branches: [main]
|
||||
paths:
|
||||
- "**.md"
|
||||
- "**.yml"
|
||||
@@ -21,12 +21,12 @@ jobs:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v3
|
||||
- name: Lint Code Base
|
||||
uses: docker://github/super-linter:v3
|
||||
uses: docker://github/super-linter:v4
|
||||
env:
|
||||
LINTER_RULES_PATH: .
|
||||
VALIDATE_ALL_CODEBASE: true
|
||||
DEFAULT_BRANCH: master
|
||||
DEFAULT_BRANCH: main
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
VALIDATE_MD: true
|
||||
VALIDATE_OPENAPI: true
|
||||
VALIDATE_YAML: true
|
||||
YAML_CONFIG_FILE: yaml-lint.yml
|
||||
|
||||

23 .github/workflows/markdown-links.yml vendored Normal file
@@ -0,0 +1,23 @@
name: Check Markdown links

on:
push:
branches:
- main
pull_request:
branches: [main]

jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.md
- uses: creachadair/github-action-markdown-link-check@master
with:
check-modified-files-only: 'yes'
config-file: '.md-link-check.json'
if: env.GIT_DIFF

40 .github/workflows/pre-release.yml vendored Normal file
@@ -0,0 +1,40 @@
name: "Pre-release"

on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10

jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0

- uses: actions/setup-go@v3
with:
go-version: '1.18'

- name: Build
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

# Link to CHANGELOG_PENDING.md as release notes.
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md

- name: Release
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

4 .github/workflows/proto-lint.yml vendored
@@ -5,7 +5,7 @@ on:
- 'proto/**'
push:
branches:
- v0.34.x
- main
paths:
- 'proto/**'

@@ -15,7 +15,7 @@ jobs:
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-setup-action@v1.8.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'

22 .github/workflows/proto.yml vendored
@@ -1,22 +0,0 @@
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
pull_request:
paths:
- "**.proto"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v3
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v3
- name: check-breakage
run: make proto-check-breaking-ci

18 .github/workflows/release.yml vendored
@@ -3,7 +3,7 @@ name: "Release"
on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

jobs:
goreleaser:
@@ -16,12 +16,20 @@ jobs:

- uses: actions/setup-go@v3
with:
go-version: '^1.15.4'
go-version: '1.18'

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

- name: Run GoReleaser
- name: Build
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

- name: Release
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md

8 .github/workflows/stale.yml vendored
@@ -7,12 +7,14 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v5
- uses: actions/stale@v6
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: 10
days-before-close: 4
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
exempt-pr-labels: "S:wip"

142 .github/workflows/tests.yml vendored
@@ -1,146 +1,34 @@
name: Tests
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or release branch and every pull requests
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
name: Test
on:
pull_request:
push:
paths:
- "**.go"
branches:
- master
- main
- release/**

jobs:
cleanup-runs:
tests:
runs-on: ubuntu-latest
steps:
- uses: rokroskar/workflow-run-cleanup-action@master
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"

build:
name: Build
runs-on: ubuntu-latest
timeout-minutes: 5
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03", "04", "05"]
steps:
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
go-version: "1.18"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF

test_abci_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_abci_apps
run: abci/tests/test_app/test.sh
shell: bash
if: env.GIT_DIFF

test_abci_cli:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- run: abci/tests/test_cli/test.sh
shell: bash
if: env.GIT_DIFF

test_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_apps
run: test/app/test.sh
shell: bash
Makefile
- name: Run Go Tests
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
if: env.GIT_DIFF

69 .gitignore vendored
@@ -1,58 +1,57 @@
*.swp
*.swo
.bak
*.bak
*.iml
*.log
*.swo
*.swp
*/.glide
*/vendor
.DS_Store
build/*
rpc/test/.tendermint
.tendermint
remote_dump
.bak
.idea/
.revision
vendor
.tendermint
.tendermint-lite
.terraform
.vagrant
test/e2e/build
test/maverick/maverick
test/e2e/networks/*/
test/p2p/data/
test/logs
.vendor-new/
.vscode/
abci/abci-cli
addrbook.json
artifacts/*
build/*
coverage.txt
docs/.vuepress/dist
docs/_build
docs/dist
docs/.vuepress/dist
*.log
abci-cli
docs/node_modules/
docs/spec
docs/.vuepress/public/rpc
index.html.md

scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil

.idea/
*.iml

.vscode/

libs/pubsub/query/fuzz_test/output
profile\.out
remote_dump
rpc/test/.tendermint
scripts/cutWALUntil/cutWALUntil
scripts/wal2json/wal2json
shunit2

.tendermint-lite
addrbook.json

*/vendor
.vendor-new/
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
profile\.out
test/app/grpc_client
test/loadtime/build
test/e2e/build
test/e2e/networks/*/
test/logs
test/maverick/maverick
test/p2p/data/
vendor
test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
proto/spec/**/*.pb.go
*.aux
*.bbl
*.blg
*.pdf
*.gz
*.dvi

.golangci.yml
@@ -8,14 +8,7 @@ linters:
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
# - gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- revive
@@ -23,48 +16,31 @@ linters:
- gosimple
- govet
- ineffassign
# - interfacer
- lll
# - maligned
# - misspell
- misspell
- nakedret
- nolintlint
- prealloc
- staticcheck
- structcheck
# - structcheck // to be fixed by golangci-lint
- stylecheck
# - typecheck
- typecheck
- unconvert
# - unparam
- unused
- varcheck
# - whitespace
# - wsl

issues:
exclude-rules:
- path: _test\.go
linters:
- gosec
- linters:
- lll
source: "https://"
max-same-issues: 50

linters-settings:
dogsled:
max-blank-identifiers: 3
maligned:
suggest-new: true
# govet:
# check-shadowing: true
revive:
golint:
min-confidence: 0
maligned:
suggest-new: true
misspell:
locale: US
ignore-words:
- behaviour

.goreleaser.yml
@@ -5,7 +5,7 @@ env:
- GO111MODULE=on

builds:
- id: "Tendermint"
- id: "tendermint"
main: ./cmd/tendermint/main.go
ldflags:
- -s -w -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Version }}
@@ -25,4 +25,13 @@ checksum:
algorithm: sha256

release:
name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"
prerelease: auto
name_template: "{{.Version}}"

archives:
- files:
- LICENSE
- README.md
- UPGRADING.md
- SECURITY.md
- CHANGELOG.md

.markdownlint.yml
@@ -1,8 +1,17 @@
default: true,
MD007: { "indent": 4 }
# markdownlint configuration
# https://github.com/DavidAnson/markdownlint

# Default state for all rules
default: true

# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
MD001: false
MD007: {indent: 4}
MD013: false
MD024: { siblings_only: true }
MD024: {siblings_only: true}
MD025: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false
MD033: false
MD036: false
MD010: false
MD012: false
MD028: false

6 .md-link-check.json Normal file
@@ -0,0 +1,6 @@
{
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s",
"aliveStatusCodes": [200, 206, 503]
}

10 .mergify.yml
@@ -1,10 +0,0 @@
pull_request_rules:
- name: Automerge to master
conditions:
- base=master
- label=S:automerge
actions:
merge:
method: squash
strict: true
commit_message: title+body

8 .vscode/settings.json vendored
@@ -1,8 +0,0 @@
{
"protoc": {
"options": [
"--proto_path=${workspaceRoot}/proto",
"--proto_path=${workspaceRoot}/third_party/proto"
]
}
}

82 CHANGELOG.md
@@ -2,6 +2,38 @@

Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

## v0.34.21

Release highlights include:

- A new `[storage]` configuration section and flag `discard_abci_responses`,
which, if enabled, discards all ABCI responses except the latest one in order
to reduce disk space usage in the state store. When enabled, the
`block_results` RPC endpoint can no longer function and will return an error.
- A new CLI command, `reindex-event`, to re-index block and tx events to the
event sinks. You can run this command when the event store backend
dropped/disconnected or you want to replace the backend. When
`discard_abci_responses` is enabled, you will not be able to use this command.
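
For operators, the flag described above lives in the node's `config.toml`. A minimal sketch of enabling it, using the section and key names quoted in these release notes (verify against the config file generated by your own version before relying on it):

```toml
# config.toml (sketch; names taken from the v0.34.21 release notes above)
[storage]
# Discard all ABCI responses except the latest one to save disk space.
# Note: block_results and the reindex-event command stop working once enabled.
discard_abci_responses = true
```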

Special thanks to external contributors on this release: @rootwarp & @amimart

### FEATURES

- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)

### IMPROVEMENTS

- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
responses except the last in order to save on storage space in the state
store (@samricotta)

### BUG FIXES

- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
priority mempool
- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix unsafe-reset-all for working with home path (@rootwarp)

## v0.34.20

Special thanks to external contributors on this release: @joeabbey @yihuang
@@ -438,7 +470,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
@@ -452,7 +484,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)

### IMPROVEMENTS

@@ -532,7 +564,7 @@ This security release fixes:
Tendermint 0.33.0 and above allow block proposers to include signatures for the
wrong block. This may happen naturally if you start a network, have it run for
some time and restart it **without changing the chainID**. (It is a
[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html)
[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html)
to reuse chainIDs.) Correct block proposers will accidentally include signatures
for the wrong block if they see these signatures, and then commits won't validate,
making all proposed blocks invalid. A malicious validator (even with a minimal
@@ -831,7 +863,7 @@ and a validator address plus a timestamp. Note we may remove the validator
address & timestamp fields in the future (see ADR-25).

`lite2` package has been added to solve `lite` issues and introduce weak
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus/light-client) for complete details.
`lite` package is now deprecated and will be removed in v0.34 release.

### BREAKING CHANGES:
@@ -1191,8 +1223,8 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah

*August 28, 2019*

@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://docs.tendermint.com/v0.34/tutorials/java.html)
guide.

Special thanks to external contributors on this release:
@@ -1225,7 +1257,7 @@ Special thanks to external contributors on this release:

### FEATURES:

- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/main/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md)
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele)
- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
@@ -1283,7 +1315,7 @@ This release contains a minor enhancement to the ABCI and some breaking changes

- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Prevent "sent next PEX request too soon" errors by not calling
ensurePeers outside of ensurePeersRoutine
- [behaviour] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
- [behavior] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
- [config] [\#3723](https://github.com/tendermint/tendermint/issues/3723) Add consensus_params to testnet config generation; document time_iota_ms (@ashleyvega)

@@ -1542,7 +1574,7 @@ Special thanks to external contributors on this release:
- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
* Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support)
<https://docs.tendermint.com>)
* Use `boltdb` tag to compile Tendermint with bolt db
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
when IP lookup fails)
@@ -1603,7 +1635,7 @@ It brings back `NetAddress()` to `NodeInfo` and uses it instead of `SocketAddr`
Additionally, it improves response time on the `/validators` or `/status` RPC endpoints.
As a side-effect it makes these RPC endpoint more difficult to DoS and fixes a performance degradation in `ExecCommitBlock`.
Also, it contains an [ADR](https://github.com/tendermint/tendermint/pull/3539) that proposes decoupling the
responsibility for peer behaviour from the `p2p.Switch` (by @brapse).
responsibility for peer behavior from the `p2p.Switch` (by @brapse).

Special thanks to external contributors on this release:
@brapse, @guagualvcha, @mydring
@@ -1766,7 +1798,7 @@ more details.
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.

* Apps
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -1819,7 +1851,7 @@ more details.
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
(@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -2271,8 +2303,8 @@ Special thanks to external contributors on this release:
- [blockchain] [\#2731](https://github.com/tendermint/tendermint/issues/2731) Retry both blocks if either is bad to avoid getting stuck during fast sync (@goolAdapter)
- [consensus] [\#2893](https://github.com/tendermint/tendermint/issues/2893) Use genDoc.Validators instead of state.NextValidators on replay when appHeight==0 (@james-ray)
- [log] [\#2868](https://github.com/tendermint/tendermint/issues/2868) Fix `module=main` setting overriding all others
- NOTE: this changes the default logging behaviour to be much less verbose.
Set `log_level="info"` to restore the previous behaviour.
- NOTE: this changes the default logging behavior to be much less verbose.
Set `log_level="info"` to restore the previous behavior.
- [rpc] [\#2808](https://github.com/tendermint/tendermint/issues/2808) Fix `accum` field in `/validators` by calling `IncrementAccum` if necessary
- [rpc] [\#2811](https://github.com/tendermint/tendermint/issues/2811) Allow integer IDs in JSON-RPC requests (@tomtau)
- [txindex/kv] [\#2759](https://github.com/tendermint/tendermint/issues/2759) Fix tx.height range queries
@@ -2403,7 +2435,7 @@ increasing attention to backwards compatibility. Thanks for bearing with us!
* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as
encoded on disk.
* [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default
behaviour to `prove=false`
behavior to `prove=false`
* [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and
`/net_info`
* [rpc] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Remove
@@ -2517,7 +2549,7 @@ Special thanks to external contributors on this release:
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md).

BREAKING CHANGES:

@@ -2548,7 +2580,7 @@ FEATURES:
- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.

IMPROVEMENTS:
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialised (@bradyjoestar)
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialized (@bradyjoestar)
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted
@@ -2582,7 +2614,7 @@ are affected by a change.

A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
[here](https://github.com/tendermint/tendermint/tree/main/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
@@ -2598,7 +2630,7 @@ BREAKING CHANGES:
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
[ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
@@ -2620,7 +2652,7 @@ BREAKING CHANGES:
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
[ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
@@ -2631,7 +2663,7 @@ BREAKING CHANGES:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
[ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.

@@ -2861,7 +2893,7 @@ BREAKING CHANGES:
FEATURES
- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
disabled by default). See the new `instrumentation` section in the config and
[metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html)
[metrics](https://github.com/tendermint/tendermint/blob/main/docs/tendermint-core/metrics.md)
guide.
- [p2p] Add IPv6 support to peering.
- [p2p] Add `external_address` to config to allow specifying the address for
@@ -2975,7 +3007,7 @@ BREAKING:

FEATURES

- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
- [rpc] the RPC documentation is now published to https://github.com/tendermint/tendermint/tree/main/spec/rpc
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
- true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub
@@ -3431,7 +3463,7 @@ Also includes the Grand Repo-Merge of 2017.
BREAKING CHANGES:

- Config and Flags:
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/main/config/config.go#L11),
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
- This affects the following flags:
- `--seeds` is now `--p2p.seeds`
@@ -3454,7 +3486,7 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon

- Logger
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
See our new [logging library](https://github.com/tendermint/tendermint/blob/main/libs/log/logger.go) and [blog post](https://blog.cosmos.network/abstracting-the-logger-interface-in-go-4cf96bf90bb7) for more details
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger

CHANGELOG_PENDING.md
@@ -1,10 +1,6 @@
# Unreleased Changes

## v0.34.21

Special thanks to external contributors on this release:

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
## v0.38.0

### BREAKING CHANGES

@@ -18,10 +14,85 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

- Blockchain Protocol

### FEATURES
- Data Storage
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)

- Tooling
- [tools/tm-signer-harness] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)

- [#9083] backport cli command to reindex missed events (@cmwaters)
### FEATURES

### IMPROVEMENTS

- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)

### BUG FIXES

- [docker] \#9462 ensure Docker image uses consistent version of Go

## v0.37.0

Special thanks to external contributors on this release:

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config
- [config] \#9259 Rename the fastsync section and the fast_sync key to blocksync and block_sync respectively

- Apps
- [abci/counter] \#6684 Delete counter example app
- [abci] \#5783 Make length delimiter encoding consistent (`uint64`) between ABCI and P2P wire-level protocols
- [abci] \#9145 Removes unused Response/Request `SetOption` from ABCI (@samricotta)
- [abci/params] \#9287 Deduplicate `ConsensusParams` and `BlockParams` so only `types` proto definitions are used (@cmwaters)
- Remove `TimeIotaMs` and use a hard-coded 1 millisecond value to ensure monotonically increasing block times.
- Rename `AppVersion` to `App` so as to not stutter.
- [types] \#9287 Reduce the use of protobuf types in core logic. (@cmwaters)
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams` have become native types.
They still utilize protobuf when being sent over the wire or written to disk.
- Moved `ValidateConsensusParams` inside (now native type) `ConsensusParams`, and renamed it to `ValidateBasic`.
- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal` which give the app control over transactions proposed and allow for verification of proposed blocks.
- [abci] \#8216 Renamed `EvidenceType` to `MisbehaviorType` and `Evidence` to `Misbehavior` as a more accurate label of their contents. (@williambanfield, @sergio-mena)
- [abci] \#9122 Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote extensions. (@cmwaters)
- [abci] \#8656, \#8901 Added cli commands for `PrepareProposal` and `ProcessProposal`. (@jmalicevic, @hvanz)
- [abci] \#6403 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)

- P2P Protocol

- Go API
- [all] \#9144 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)

- Blockchain Protocol

### FEATURES

- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal` which give the app control over transactions proposed and allow for verification of proposed blocks.
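
For a rough feel of what these two hooks look like from the application side, here is a minimal sketch against the v0.37-style Go ABCI types. The type, field, and constant names are assumptions based on that interface and should be verified against `abci/types` on this branch before use.

```go
// Hypothetical sketch only: shapes follow the v0.37-style ABCI Go interface.
package app

import (
	abcitypes "github.com/tendermint/tendermint/abci/types"
)

// ProposalApp embeds BaseApplication so every other ABCI method keeps its
// default behavior, overriding only the two new proposal hooks.
type ProposalApp struct {
	abcitypes.BaseApplication
}

// PrepareProposal lets the proposer's application reorder, add, or drop the
// transactions Tendermint suggests for the next block. This sketch keeps them
// unchanged.
func (app *ProposalApp) PrepareProposal(req abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal {
	return abcitypes.ResponsePrepareProposal{Txs: req.Txs}
}

// ProcessProposal lets every validator's application inspect a proposed block
// and accept or reject it before voting. This sketch accepts everything.
func (app *ProposalApp) ProcessProposal(req abcitypes.RequestProcessProposal) abcitypes.ResponseProcessProposal {
	return abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}
}
```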

### IMPROVEMENTS
- [crypto] \#9250 Update to use btcec v2 and the latest btcutil. (@wcsiu)

- [cli] \#9171 add `--hard` flag to rollback command (and a boolean to the `RollbackState` method). This will rollback
state and remove the last block. This command can be triggered multiple times. The application must also rollback
state to the same height. (@tsutsu, @cmwaters)
- [proto] \#9356 Migrate from `gogo/protobuf` to `cosmos/gogoproto` (@julienrbrt)
- [rpc] \#9276 Added `header` and `header_by_hash` queries to the RPC client (@samricotta)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)

- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778 & @Yawning)
- [types] \#6120 use batch verification for verifying commits signatures. (@marbar3778 & @cmwaters & @Yawning)
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
- [state] \#9505 Added logic so when pruning, the evidence period is taken into consideration and only deletes unnecessary data (@samricotta)

### BUG FIXES

- [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
- [docker] \#9073 enable cross platform build using docker buildx

CODE_OF_CONDUCT.md
@@ -1,59 +1,109 @@
# The Tendermint Code of Conduct

This code of conduct applies to all projects run by the Tendermint/COSMOS team and hence to tendermint.

This code of conduct applies to all projects run by the Tendermint/COSMOS team
and hence to Tendermint.

----

# Conduct

## Contact: conduct@tendermint.com

* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
* We are committed to providing a friendly, safe and welcoming environment for
all, regardless of level of experience, gender, gender identity and
expression, sexual orientation, disability, personal appearance, body size,
race, ethnicity, age, religion, nationality, or other similar characteristic.

* On Slack, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
* On Slack, please avoid using overtly sexual nicknames or other nicknames that
might detract from a friendly, safe and welcoming environment for all.

* Please be kind and courteous. There’s no need to be mean or rude.

* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
* Respect that people have differences of opinion and that every design or
implementation choice carries a trade-off and numerous costs. There is seldom
a right answer.

* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
* Please keep unstructured critique to a minimum. If you have solid ideas you
want to experiment with, make a fork and see how it works.

* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
* We will exclude you from interaction if you insult, demean or harass anyone.
That is not welcome behavior. We interpret the term “harassment” as including
the definition in the [Citizen Code of Conduct][ccoc]; if you have any lack of
clarity about what might be included in that concept, please read their
definition. In particular, we don’t tolerate behavior that excludes people in
socially marginalized groups.

* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.

* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome.
* Private harassment is also unacceptable. No matter who you are, if you feel
you have been or are being harassed or made uncomfortable by a community
member, please contact one of the channel admins or the person mentioned above
immediately. Whether you’re a regular contributor or a newcomer, we care about
making this community a safe place for you and we’ve got your back.

* Likewise any spamming, trolling, flaming, baiting or other attention-stealing
behavior is not welcome.

----

# Moderation

These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the above mentioned person.
These are the policies for upholding our community’s standards of conduct. If
you feel that a thread needs moderation, please contact the above mentioned
person.

1. Remarks that violate the Tendermint/COSMOS standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
1. Remarks that violate the Tendermint/COSMOS standards of conduct, including
hateful, hurtful, oppressive, or exclusionary remarks, are not allowed.
(Cursing is allowed, but never targeting another user, and never in a hateful
manner.)

2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
2. Remarks that moderators find inappropriate, whether listed in the code of
conduct or not, are also not allowed.

3. Moderators will first respond to such remarks with a warning.

4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of the communication channel to cool off.
4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of
the communication channel to cool off.

5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
5. If the user comes back and continues to make trouble, they will be banned,
i.e., indefinitely excluded.

6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
6. Moderators may choose at their discretion to un-ban the user if it was a
first offense and they offer the offended party a genuine apology.

7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, in private. Complaints about bans in-channel are not allowed.
7. If a moderator bans someone and you think it was unjustified, please take it
up with that moderator, or with a different moderator, in private. Complaints
about bans in-channel are not allowed.

8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
8. Moderators are held to a higher standard than other community members. If a
moderator creates an inappropriate situation, they should expect less leeway
than others.

In the Tendermint/COSMOS community we strive to go the extra step to look out for each other. Don’t just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they’re off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
In the Tendermint/COSMOS community we strive to go the extra step to look out
for each other. Don’t just aim to be technically unimpeachable, try to be your
best self. In particular, avoid flirting with offensive or sensitive issues,
particularly if they’re off-topic; this all too often leads to unnecessary
fights, hurt feelings, and damaged trust; worse, it can drive people away
from the community entirely.

And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could’ve communicated better — remember that it’s your responsibility to make your fellow Cosmonauts comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
And if someone takes issue with something you said or did, resist the urge to be
defensive. Just stop doing what it was they complained about and apologize. Even
if you feel you were misinterpreted or unfairly accused, chances are good there
was something you could’ve communicated better — remember that it’s your
responsibility to make your fellow Cosmonauts comfortable. Everyone wants to
get along and we are all here first and foremost because we want to talk
about cool technology. You will find that people will be eager to assume
good intent and forgive as long as you earn their trust.

The enforcement policies listed above apply to all official Tendermint/COSMOS venues.For other projects adopting the Tendermint/COSMOS Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion.
The enforcement policies listed above apply to all official Tendermint/COSMOS
venues. For other projects adopting the Tendermint/COSMOS Code of Conduct,
please contact the maintainers of those projects for enforcement. If you wish to
use this code of conduct for your own project, consider explicitly mentioning
your moderation policy or making a copy with your own moderation policy so as to
avoid confusion.

*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling), the [Contributor Covenant v1.3.0](http://contributor-covenant.org/version/1/3/0/) and the [Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html).
\*Adapted from the [Node.js Policy on Trolling][node-trolling-policy], the
[Contributor Covenant v1.3.0][ccov] and the [Rust Code of Conduct][rust-coc].

[ccoc]: https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md
[node-trolling-policy]: http://blog.izs.me/post/30036893703/policy-on-trolling
[ccov]: http://contributor-covenant.org/version/1/3/0/
[rust-coc]: https://www.rust-lang.org/en-US/conduct.html
227
CONTRIBUTING.md
227
CONTRIBUTING.md
@@ -7,7 +7,7 @@ support permissionless value-carrying networks. While all contributions are
welcome, contributors should bear this goal in mind in deciding if they should
target the main Tendermint project or a potential fork. When targeting the
main Tendermint project, the following process leads to the best chance of
-landing changes in master.
+landing changes in `main`.

All work on the code base should be motivated by a [Github
Issue](https://github.com/tendermint/tendermint/issues).

@@ -26,7 +26,8 @@ will indicate their support with a heartfelt emoji.

If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
-Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
+Comment](https://github.com/tendermint/tendermint/tree/main/docs/rfc)
+in the Tendermint spec repo. Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.

@@ -45,7 +46,7 @@ Find the largest existing ADR number and bump it by 1.
When the problem as well as proposed solution are well understood,
changes should start with a [draft
pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/)
-against master. The draft signals that work is underway. When the work
+against `main`. The draft signals that work is underway. When the work
is ready for feedback, hitting "Ready for Review" will signal to the
maintainers to take a look.

@@ -53,7 +54,7 @@ maintainers to take a look.

Each stage of the process is aimed at creating feedback cycles which align contributors and maintainers to make sure:

-- Contributors don’t waste their time implementing/proposing features which won’t land in master.
+- Contributors don’t waste their time implementing/proposing features which won’t land in `main`.
- Maintainers have the necessary context in order to support and review contributions.

## Forking
@@ -72,19 +73,19 @@ For instance, to create a fork and work on a branch of it, I would:
- `git remote add origin git@github.com:ebuchman/basecoin.git`

Now `origin` refers to my fork and `upstream` refers to the Tendermint version.
-So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there.
+So I can `git push -u origin main` to update my fork, and make pull requests to tendermint from there.
Of course, replace `ebuchman` with your git handle.

To pull in updates from the origin repo, run

- `git fetch upstream`
-- `git rebase upstream/master` (or whatever branch you want)
+- `git rebase upstream/main` (or whatever branch you want)

## Dependencies

We use [go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.

-That said, the master branch of every Tendermint repository should just build
+That said, the `main` branch of every Tendermint repository should just build
with `go get`, which means they should be kept up-to-date with their
dependencies so we can get away with telling people they can just `go get` our
software.
@@ -104,24 +105,46 @@ specify exactly the dependency you want to update, eg.

## Protobuf

-We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
+We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
+with [`gogoproto`](https://github.com/cosmos/gogoproto) to generate code for use
+across Tendermint Core.

-For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
+To generate proto stubs, lint, and check protos for breaking changes, you will
+need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
+of the repository, run:

-We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.
+```bash
+# Lint all of the .proto files in proto/tendermint
+make proto-lint

-## Vagrant
+# Check if any of your local changes (prior to committing to the Git repository)
+# are breaking
+make proto-check-breaking

-If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
-hacking Tendermint with the commands below.
+# Generate Go code from the .proto files in proto/tendermint
+make proto-gen
+```

-NOTE: In case you installed Vagrant in 2017, you might need to run
-`vagrant box update` to upgrade to the latest `ubuntu/xenial64`.
+To automatically format `.proto` files, you will need
+[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
+installed, you can run:

-```sh
-vagrant up
-vagrant ssh
-make test
+```bash
+make proto-format
```

### Visual Studio Code

If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:

```json
{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto",
    ]
  }
}
```

## Changelog
@@ -129,10 +152,47 @@ make test
Every fix, improvement, feature, or breaking change should be made in a
pull-request that includes an update to the `CHANGELOG_PENDING.md` file.

A feature can also be worked on in a feature branch, if its size and/or risk
justifies it (see #branching-model-and-release below).

### What does a good changelog entry look like?

Changelog entries should answer the question: "what is important about this
change for users to know?" or "what problem does this solve for users?". It
should not simply be a reiteration of the title of the associated PR, unless the
title of the PR _very_ clearly explains the benefit of a change to a user.

Some good examples of changelog entry descriptions:

```md
- [consensus] \#1111 Small transaction throughput improvement (approximately
  3-5\% from preliminary tests) through refactoring the way we use channels
- [mempool] \#1112 Refactor Go API to be able to easily swap out the current
  mempool implementation in Tendermint forks
- [p2p] \#1113 Automatically ban peers when their messages are unsolicited or
  are received too frequently
```

Some bad examples of changelog entry descriptions:

```md
- [consensus] \#1111 Refactor channel usage
- [mempool] \#1112 Make API generic
- [p2p] \#1113 Ban for PEX message abuse
```

For more on how to write good changelog entries, see:

- <https://keepachangelog.com>
- <https://docs.gitlab.com/ee/development/changelog.html#writing-good-changelog-entries>
- <https://depfu.com/blog/what-makes-a-good-changelog>

### Changelog entry format

Changelog entries should be formatted as follows:

```md
-- [module] \#xxx Some description about the change (@contributor)
+- [module] \#xxx Some description of the change (@contributor)
```

Here, `module` is the part of the code that changed (typically a

@@ -160,22 +220,31 @@ removed from the header in RPC responses as well.

## Branching Model and Release

-The main development branch is master.
+The main development branch is `main`.

Every release is maintained in a release branch named `vX.Y.Z`.

-Pending minor releases have long-lived release candidate ("RC") branches. Minor release changes should be merged to these long-lived RC branches at the same time that the changes are merged to master.
+Pending minor releases have long-lived release candidate ("RC") branches. Minor release changes should be merged to these long-lived RC branches at the same time that the changes are merged to `main`.

If a feature's size is big and/or its risk is high, it can be implemented in a feature branch.
While the feature work is in progress,
pull requests are opened and squash-merged against the feature branch.
Branch `main` is periodically merged (merge commit) into the feature branch,
to reduce branch divergence.
When the feature is complete, the feature branch is merged back (merge commit) into `main`.
The moment of the final merge can be carefully chosen
so as to land different features in different releases.

Note that all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it
easy to reference the pull request where a change was introduced.
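
A minimal sketch of that feature-branch flow, assuming a hypothetical branch
named `feature/my-feature` (in practice these merges usually happen through
GitHub pull requests rather than local commands):

```sh
# Periodically bring main into the feature branch to reduce divergence
git checkout feature/my-feature
git merge --no-ff main

# When the feature is complete, merge the feature branch back into main
git checkout main
git merge --no-ff feature/my-feature
```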

### Development Procedure

-The latest state of development is on `master`, which must never fail `make test`. _Never_ force push `master`, unless fixing broken git history (which we rarely do anyways).
+The latest state of development is on `main`, which must never fail `make test`. _Never_ force push `main`, unless fixing broken git history (which we rarely do anyways).

To begin contributing, create a development branch either on `github.com/tendermint/tendermint`, or your fork (using `git remote add origin`).

-Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `master`. (Since pull requests are squash-merged, either is fine!)
+Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!)
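
For example, to update your branch against the latest `main` before opening the
pull request (remote names follow the fork setup described above, where
`upstream` points at the Tendermint repository):

```sh
git fetch upstream
git rebase upstream/main   # or: git merge upstream/main
```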

Update the `UPGRADING.md` if the change you've made is breaking and the
instructions should be in place for a user on how they can upgrade their
@@ -183,7 +252,7 @@ software (ABCI application, Tendermint-based blockchain, light client, wallet).

Once you have submitted a pull request, label it with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release.

-Sometimes (often!) pull requests get out-of-date with master, as other people merge different pull requests to master. It is our convention that pull request authors are responsible for updating their branches with master. (This also means that you shouldn't update someone else's branch for them; even if it seems like you're doing them a favor, you may be interfering with their git flow in some way!)
+Sometimes (often!) pull requests get out-of-date with `main`, as other people merge different pull requests to `main`. It is our convention that pull request authors are responsible for updating their branches with `main`. (This also means that you shouldn't update someone else's branch for them; even if it seems like you're doing them a favor, you may be interfering with their git flow in some way!)

#### Merging Pull Requests

@@ -191,20 +260,20 @@ It is also our convention that authors merge their own pull requests, when possi

Before merging a pull request:

-- Ensure pull branch is up-to-date with a recent `master` (GitHub won't let you merge without this!)
+- Ensure pull branch is up-to-date with a recent `main` (GitHub won't let you merge without this!)
- Run `make test` to ensure that all tests pass
- [Squash](https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git) merge pull request

#### Pull Requests for Minor Releases

-If your change should be included in a minor release, please also open a PR against the long-lived minor release candidate branch (e.g., `rc1/v0.33.5`) _immediately after your change has been merged to master_.
+If your change should be included in a minor release, please also open a PR against the long-lived minor release candidate branch (e.g., `rc1/v0.33.5`) _immediately after your change has been merged to main_.

-You can do this by cherry-picking your commit off master:
+You can do this by cherry-picking your commit off `main`:

```sh
$ git checkout rc1/v0.33.5
$ git checkout -b {new branch name}
-$ git cherry-pick {commit SHA from master}
+$ git cherry-pick {commit SHA from main}
# may need to fix conflicts, and then use git add and git cherry-pick --continue
$ git push origin {new branch name}
```

@@ -223,90 +292,7 @@ cmd/debug: execute p.Signal only when p is not nil
Fixes #nnnn
```

-Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
-
-### Release Procedure
-
-#### Major Release
-
-1. Start on `master`
-2. Run integration tests (see `test_integrations` in Makefile)
-3. Prepare release in a pull request against `master` (to be squash merged):
-   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
-     had release candidates, squash all the RC updates into one
-   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
-     all issues
-   - run `bash ./scripts/authors.sh` to get a list of authors since the latest
-     release, and add the github aliases of external contributors to the top of
-     the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
-   - Reset the `CHANGELOG_PENDING.md`
-   - Bump TMVersionDefault version in `version.go`
-   - Bump P2P and block protocol versions in `version.go`, if necessary
-   - Bump ABCI protocol version in `version.go`, if necessary
-   - Make sure all significant breaking changes are covered in `UPGRADING.md`
-   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
-4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
-   - `git tag -a vX.X.x -m 'Release vX.X.x'`
-   - `git push origin vX.X.x`
-5. Update the changelog.md file on master with the releases changelog.
-6. Delete any RC branches and tags for this release (if applicable)
-
-#### Minor Release
-
-Minor releases are done differently from major releases: They are built off of long-lived release candidate branches, rather than from master.
-
-1. Checkout the long-lived release candidate branch: `git checkout rcX/vX.X.X`
-2. Run integration tests: `make test_integrations`
-3. Prepare the release:
-   - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
-   - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
-   - run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
-   - reset the `CHANGELOG_PENDING.md`
-   - bump P2P and block protocol versions in `version.go`, if necessary
-   - bump ABCI protocol version in `version.go`, if necessary
-   - make sure all significant breaking changes are covered in `UPGRADING.md`
-   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
-4. Create a release branch `release/vX.X.x` off the release candidate branch:
-   - `git checkout -b release/vX.X.x`
-   - `git push -u origin release/vX.X.x`
-   - Note that all branches prefixed with `release` are protected once pushed. You will need admin help to make any changes to the branch.
-5. Once the release branch has been approved, make sure to pull it locally, then push a tag.
-   - `git tag -a vX.X.x -m 'Release vX.X.x'`
-   - `git push origin vX.X.x`
-6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
-   - Remove all `R:minor` labels from the pull requests that were included in the release.
-   - Do not merge the release branch into master.
-7. Delete the former long lived release candidate branch once the release has been made.
-8. Create a new release candidate branch to be used for the next release.
-
-#### Backport Release
-
-1. start from the existing release branch you want to backport changes to (e.g. v0.30)
-   Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
-2. Cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
-3. Follow steps 2 and 3 from [Major Release](#major-release)
-4. Push changes to release/vX.X.X branch
-5. Open a PR against the existing vX.X branch
-
-#### Release Candidates
-
-Before creating an official release, especially a major release, we may want to create a
-release candidate (RC) for our friends and partners to test out. We use git tags to
-create RCs, and we build them off of RC branches. RC branches typically have names formatted
-like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
-the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
-
-(Note that branches and tags _cannot_ have the same names, so it's important that these branches
-have distinct names from the tags/release names.)
-
-1. Start from the RC branch (e.g. `RC0/v0.34.0`).
-2. Create the new tag, specifying a name and a tag "message":
-   `git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
-3. Push the tag back up to origin:
-   `git push origin v0.34.0-rc4`
-   Now the tag should be available on the repo's releases page.
-4. Create a new release candidate branch for any possible updates to the RC:
-   `git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
+Each PR should have one commit once it lands on `main`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!

## Testing

@@ -340,15 +326,6 @@ cd test/e2e && \
./build/runner -f networks/ci.toml
```

-### Maverick
-
-**If you're changing the code in `consensus` package, please make sure to
-replicate all the changes in `./test/maverick/consensus`**. Maverick is a
-byzantine node used to assert that the validator gets punished for malicious
-behavior.
-
-See [README](./test/maverick/README.md) for details.

### Model-based tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you won't need to touch these

@@ -393,8 +370,10 @@ information.

### RPC Testing

-If you contribute to the RPC endpoints it's important to document your changes in the [Openapi file](./rpc/openapi/openapi.yaml)
-To test your changes you should install `nodejs` and run:
+**If you contribute to the RPC endpoints it's important to document your
+changes in the [Openapi file](./rpc/openapi/openapi.yaml)**.
+
+To test your changes you must install `nodejs` and run:

```bash
npm i -g dredd
DOCKER/Dockerfile

@@ -1,14 +1,18 @@
+# Use a build arg to ensure that both stages use the same,
+# hopefully current, go version.
+ARG GOLANG_BASE_IMAGE=golang:1.18-alpine
+
# stage 1 Generate Tendermint Binary
-FROM golang:1.15-alpine as builder
+FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make
COPY / /tendermint
WORKDIR /tendermint
-RUN make build-linux
+RUN TARGETPLATFORM=$TARGETPLATFORM make build-linux

# stage 2
-FROM golang:1.15-alpine
+FROM $GOLANG_BASE_IMAGE
LABEL maintainer="hello@tendermint.com"

# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
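
As a hedged illustration of how this multi-platform Dockerfile can be exercised
locally (the image tag `tendermint/tendermint:dev` is hypothetical, and `docker
buildx` must be available; buildx supplies the `BUILDPLATFORM` and
`TARGETPLATFORM` build arguments automatically):

```sh
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg GOLANG_BASE_IMAGE=golang:1.18-alpine \
  -f DOCKER/Dockerfile \
  -t tendermint/tendermint:dev .
```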

DOCKER/README.md

@@ -6,9 +6,9 @@ DockerHub tags for official releases are [here](https://hub.docker.com/r/tenderm

Official releases can be found [here](https://github.com/tendermint/tendermint/releases).

-The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
+The Dockerfile for Tendermint is not expected to change in the near future. The main file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).

-Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
+Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).

## Quick reference

@@ -20,9 +20,9 @@ Respective versioned files can be found <https://raw.githubusercontent.com/tende

Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.

-For more background, see the [the docs](https://docs.tendermint.com/master/introduction/#quick-start).
+For more background, see [the docs](https://docs.tendermint.com/main/introduction/#quick-start).

-To get started developing applications, see the [application developers guide](https://docs.tendermint.com/master/introduction/quick-start.html).
+To get started developing applications, see the [application developers guide](https://docs.tendermint.com/main/introduction/quick-start.html).

## How to use this image

@@ -37,7 +37,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app

## Local cluster

-To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
+To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/main/Makefile) and run:

```sh
make build-linux

@@ -49,8 +49,8 @@ Note that this will build and use a different image than the ones provided here.

## License

-- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
+- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/main/LICENSE).

## Contributing

-Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
+Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/main/CONTRIBUTING.md) for more information.

LICENSE (6 changed lines)
@@ -1,5 +1,3 @@
-Tendermint Core
-License: Apache2.0

Apache License
Version 2.0, January 2004

@@ -181,7 +179,7 @@ License: Apache2.0
APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
-boilerplate notice, with the fields enclosed by brackets "{}"
+boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a

@@ -189,7 +187,7 @@ License: Apache2.0
same "printed page" as the copyright notice for easier
identification within third-party archives.

-Copyright 2016 All in Bits, Inc
+Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Makefile (147 changed lines)
@@ -1,16 +1,11 @@
PACKAGES=$(shell go list ./...)
-OUTPUT?=build/tendermint
+BUILDDIR?=$(CURDIR)/build
+OUTPUT?=$(BUILDDIR)/tendermint

BUILD_TAGS?=tendermint

-# If building a release, please checkout the version tag to get the correct version setting
-ifneq ($(shell git symbolic-ref -q --short HEAD),)
-VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD)
-else
-VERSION := $(shell git describe)
-endif

-LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
+COMMIT_HASH := $(shell git rev-parse --short HEAD)
+LD_FLAGS = -X github.com/tendermint/tendermint/version.TMGitCommitHash=$(COMMIT_HASH)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
CGO_ENABLED ?= 0

@@ -52,6 +47,67 @@ endif
# allow users to pass additional flags via the conventional LDFLAGS variable
LD_FLAGS += $(LDFLAGS)

# Process the Docker environment variable TARGETPLATFORM
# in order to build the binary with the corresponding ARCH;
# by default it will always build for linux/amd64.
TARGETPLATFORM ?=
GOOS ?= linux
GOARCH ?= amd64
GOARM ?=

ifeq (linux/arm,$(findstring linux/arm,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=arm
GOARM=7
endif

ifeq (linux/arm/v6,$(findstring linux/arm/v6,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=arm
GOARM=6
endif

ifeq (linux/arm64,$(findstring linux/arm64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=arm64
GOARM=7
endif

ifeq (linux/386,$(findstring linux/386,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=386
endif

ifeq (linux/amd64,$(findstring linux/amd64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=amd64
endif

ifeq (linux/mips,$(findstring linux/mips,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mips
endif

ifeq (linux/mipsle,$(findstring linux/mipsle,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mipsle
endif

ifeq (linux/mips64,$(findstring linux/mips64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mips64
endif

ifeq (linux/mips64le,$(findstring linux/mips64le,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mips64le
endif

ifeq (linux/riscv64,$(findstring linux/riscv64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=riscv64
endif

all: check build test install
.PHONY: all

@@ -69,14 +125,28 @@ install:
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
.PHONY: install

###############################################################################
### Metrics ###
###############################################################################

metrics: testdata-metrics
go generate -run="scripts/metricsgen" ./...
.PHONY: metrics

# By convention, the go tool ignores subdirectories of directories named
# 'testdata'. This command invokes the generate command on the folder directly
# to avoid this.
testdata-metrics:
ls ./scripts/metricsgen/testdata | xargs -I{} go generate -v -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
.PHONY: testdata-metrics

###############################################################################
### Mocks ###
###############################################################################

mockery:
go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery

###############################################################################
### Protobuf ###
@@ -84,7 +154,7 @@ mockery:

check-proto-deps:
ifeq (,$(shell which protoc-gen-gogofaster))
-@go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
+@go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest
endif
.PHONY: check-proto-deps

@@ -98,6 +168,7 @@ proto-gen: check-proto-deps
@echo "Generating Protobuf files"
@go run github.com/bufbuild/buf/cmd/buf generate
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
+@cp ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc
.PHONY: proto-gen

# These targets are provided for convenience and are intended for local

@@ -209,16 +280,33 @@ DESTINATION = ./index.html.md
### Documentation ###
###############################################################################

DOCS_OUTPUT?=/tmp/tendermint-core-docs

# This builds a docs site for each branch/tag in `./docs/versions` and copies
# each site to a version prefixed path. The last entry inside the `versions`
# file will be the default root index.html. Only redirects that are built into
# the "redirects" folder of each of the branches will be copied out to the root
# of the build at the end.
build-docs:
@cd docs && \
while read -r branch path_prefix; do \
(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
-mkdir -p ~/output/$${path_prefix} ; \
-cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
-cp ~/output/$${path_prefix}/index.html ~/output ; \
+mkdir -p $(DOCS_OUTPUT)/$${path_prefix} ; \
+cp -r .vuepress/dist/* $(DOCS_OUTPUT)/$${path_prefix}/ ; \
+cp $(DOCS_OUTPUT)/$${path_prefix}/index.html $(DOCS_OUTPUT) ; \
+cp $(DOCS_OUTPUT)/$${path_prefix}/404.html $(DOCS_OUTPUT) ; \
+cp -r $(DOCS_OUTPUT)/$${path_prefix}/redirects/* $(DOCS_OUTPUT) || true ; \
done < versions ;
.PHONY: build-docs
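
# Example (a sketch, not part of the Makefile): build the versioned docs site
# into a custom directory instead of the default /tmp/tendermint-core-docs:
#
#   make build-docs DOCS_OUTPUT=/tmp/tendermint-docs-preview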

# Build and serve the local version of the docs on the current branch from
# http://0.0.0.0:8080
serve-docs:
@cd docs && \
npm ci && \
npm run serve
.PHONY: serve-docs

sync-docs:
cd ~/output && \
echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
@@ -227,6 +315,11 @@ sync-docs:
aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
.PHONY: sync-docs

# Verify that important design docs have ToC entries.
check-docs-toc:
@./docs/presubmit.sh
.PHONY: check-docs-toc

###############################################################################
### Docker image ###
###############################################################################

@@ -243,7 +336,7 @@ build-docker: build-linux

# Build linux binary on other platforms
build-linux:
-GOOS=linux GOARCH=amd64 $(MAKE) build
+GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) $(MAKE) build
.PHONY: build-linux
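
# Example (a sketch, not part of the Makefile): TARGETPLATFORM is normally set
# by Docker buildx, but the same variables can be passed manually on the make
# command line to cross-compile, e.g.:
#
#   make build-linux TARGETPLATFORM=linux/arm64
#   make build-linux GOOS=linux GOARCH=arm64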

build-docker-localnode:

@@ -286,3 +379,25 @@ endif
contract-tests:
dredd
.PHONY: contract-tests

# Implements test splitting and running. This is pulled directly from
# the github action workflows for better local reproducibility.

GO_TEST_FILES != find $(CURDIR) -name "*_test.go"

# default to four splits
NUM_SPLIT ?= 4

$(BUILDDIR):
mkdir -p $@

# The format statement filters out all packages that don't have tests.
# Note we need to check for both in-package tests (.TestGoFiles) and
# out-of-package tests (.XTestGoFiles).
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@

split-test-packages:$(BUILDDIR)/packages.txt
split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out
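
# Example (a sketch, not part of the Makefile): run the first of four test
# groups locally, mirroring the CI split. With the default NUM_SPLIT=4,
# `split -d` numbers the groups 00 through 03:
#
#   make test-group-00 NUM_SPLIT=4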

README.md (214 changed lines)
@@ -2,160 +2,174 @@



-[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
-[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
-Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
+[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
+[Blockchain], for short.

-[](https://github.com/tendermint/tendermint/releases/latest)
-[](https://pkg.go.dev/github.com/tendermint/tendermint)
-[](https://github.com/moovweb/gvm)
-[](https://discord.gg/AzefAFd)
-[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
-[](https://github.com/tendermint/tendermint)
-[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
+[![Version][version-badge]][version-url]
+[![API Reference][api-badge]][api-url]
+[![Go version][go-badge]][go-url]
+[![Discord chat][discord-badge]][discord-url]
+[![License][license-badge]][license-url]
+[![Sourcegraph][sg-badge]][sg-url]

-| Branch | Tests | Coverage | Linting |
-| ------ | ------ | ------ | ------ |
-| master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) </br>  | [](https://codecov.io/gh/tendermint/tendermint) |  |
+| Branch | Tests                              | Linting                         |
+|--------|------------------------------------|---------------------------------|
+| main   | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] |

-Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
-and securely replicates it on many machines.
+Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
+state transition machine - written in any programming language - and securely
+replicates it on many machines.

-For protocol details, see [the specification](https://github.com/tendermint/spec).
+For protocol details, refer to the [Tendermint Specification](./spec/README.md).

-For detailed analysis of the consensus protocol, including safety and liveness proofs,
-see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
+For detailed analysis of the consensus protocol, including safety and liveness
+proofs, read our paper, "[The latest gossip on BFT
+consensus](https://arxiv.org/abs/1807.04938)".

## Documentation

Complete documentation can be found on the
[website](https://docs.tendermint.com/).

## Releases

-Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
+Please do not depend on `main` as your production branch. Use
+[releases](https://github.com/tendermint/tendermint/releases) instead.

-Tendermint is being used in production in both private and public environments,
-most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
-However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
-See below for more details about [versioning](#versioning).
+Tendermint has been used in production in private and public environments, most
+notably the blockchains of the Cosmos Network. We haven't released v1.0 yet
+since we are still making breaking changes to the protocol and the APIs. See
+below for more details about [versioning](#versioning).

-In any case, if you intend to run Tendermint in production, we're happy to help. You can
-contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd).
+In any case, if you intend to run Tendermint in production, we're happy to help.
+You can contact us [over email](mailto:hello@interchain.io) or [join the
+chat](https://discord.gg/cosmosnetwork).

More on how releases are conducted can be found [here](./RELEASES.md).

## Security

To report a security vulnerability, see our [bug bounty
-program](https://hackerone.com/tendermint).
-For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md)
+program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're
+looking for, see [our security policy](SECURITY.md).

-We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
-to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
+We also maintain a dedicated mailing list for security updates. We will only
+ever use this mailing list to notify you of vulnerabilities and fixes in
+Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).

## Minimum requirements

-| Requirement | Notes            |
-| ----------- | ---------------- |
-| Go version  | Go1.15 or higher |
-
-## Documentation
-
-Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
+| Requirement | Notes             |
+|-------------|-------------------|
+| Go version  | Go 1.18 or higher |

### Install

-See the [install instructions](/docs/introduction/install.md).
+See the [install instructions](./docs/introduction/install.md).

### Quick Start

-- [Single node](/docs/introduction/quick-start.md)
-- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
-- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
-- [Join the Cosmos testnet](https://cosmos.network/testnet)
+- [Single node](./docs/introduction/quick-start.md)
+- [Local cluster using docker-compose](./docs/networks/docker-compose.md)
+- [Remote cluster using Terraform and Ansible](./docs/networks/terraform-and-ansible.md)

## Contributing

Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.

-Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
-and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
-[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
-and familiarize yourself with our
-[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
+Before contributing to the project, please take a look at the [contributing
+guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also
+find it helpful to read the [specifications](./spec/README.md), and familiarize
+yourself with our [Architectural Decision Records
+(ADRs)](./docs/architecture/README.md) and
+[Request For Comments (RFCs)](./docs/rfc/README.md).

## Versioning

### Semantic Versioning

-Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
-According to SemVer, anything in the public API can change at any time before version 1.0.0
+Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and
+how the version changes. According to SemVer, anything in the public API can
+change at any time before version 1.0.0.

-To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
-to signal breaking changes across a subset of the total public API. This subset includes all
-interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
-include the Go APIs.
+To provide some stability to users of 0.X.X versions of Tendermint, the MINOR
+version is used to signal breaking changes across Tendermint's API. This API
+includes all publicly exposed types, functions, and methods in non-internal Go
+packages as well as the types and methods accessible via the Tendermint RPC
+interface.

-That said, breaking changes in the following packages will be documented in the
-CHANGELOG even if they don't lead to MINOR version bumps:
-
-- crypto
-- config
-- libs
-- bech32
-- bits
-- bytes
-- json
-- log
-- math
-- net
-- os
-- protoio
-- rand
-- sync
-- strings
-- service
-- node
-- rpc/client
-- types
+Breaking changes to these public APIs will be documented in the CHANGELOG.

### Upgrades

-In an effort to avoid accumulating technical debt prior to 1.0.0,
-we do not guarantee that breaking changes (ie. bumps in the MINOR version)
-will work with existing Tendermint blockchains. In these cases you will
-have to start a new blockchain, or write something custom to get the old
-data into the new chain. However, any bump in the PATCH version should be
-compatible with existing blockchain histories.
+In an effort to avoid accumulating technical debt prior to 1.0.0, we do not
+guarantee that breaking changes (ie. bumps in the MINOR version) will work with
+existing Tendermint blockchains. In these cases you will have to start a new
+blockchain, or write something custom to get the old data into the new chain.
+However, any bump in the PATCH version should be compatible with existing
+blockchain histories.

For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).

### Supported Versions

-Because we are a small core team, we only ship patch updates, including security updates,
-to the most recent minor release and the second-most recent minor release. Consequently,
-we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found
-in [UPGRADING.md](./UPGRADING.md).
+Because we are a small core team, we only ship patch updates, including security
+updates, to the most recent minor release and the second-most recent minor
+release. Consequently, we strongly recommend keeping Tendermint up-to-date.
+Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).

## Resources

-### Tendermint Core
+### Libraries

-For details about the blockchain data structures and the p2p protocols, see the
-[Tendermint specification](https://docs.tendermint.com/master/spec/).
-
-For details on using the software, see the [documentation](/docs/) which is also
-hosted at: <https://docs.tendermint.com/master/>
-
-### Tools
-
-Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
-Additional tooling can be found in [/docs/tools](/docs/tools).
+- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building
+  applications in Golang
+- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
+- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)

### Applications

-- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
-- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
-- [Many more](https://tendermint.com/ecosystem)
+- [Cosmos Hub](https://hub.cosmos.network/)
+- [Terra](https://www.terra.money/)
+- [Celestia](https://celestia.org/)
+- [Anoma](https://anoma.network/)
+- [Vocdoni](https://docs.vocdoni.io/)

### Research

- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf)
-- [Blog](https://blog.cosmos.network/tendermint/home)
+- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
+- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)

## Join us!

Tendermint Core is maintained by [Interchain GmbH](https://interchain.io).
If you'd like to work full-time on Tendermint Core,
[we're hiring](https://interchain-gmbh.breezy.hr/)!

Funding for Tendermint Core development comes primarily from the
[Interchain Foundation](https://interchain.io), a Swiss non-profit. The
Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the
for-profit entity that also maintains [tendermint.com](https://tendermint.com).

[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
[version-badge]: https://img.shields.io/github/v/release/tendermint/tendermint.svg
[version-url]: https://github.com/tendermint/tendermint/releases/latest
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
[go-badge]: https://img.shields.io/badge/go-1.18-blue.svg
[go-url]: https://github.com/moovweb/gvm
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
[discord-url]: https://discord.gg/cosmosnetwork
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
[tests-url]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml
[tests-badge]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml/badge.svg?branch=main
[lint-badge]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml/badge.svg
[lint-url]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml
RELEASES.md (new file, 371 lines)
@@ -0,0 +1,371 @@
# Releases

Tendermint uses modified [semantic versioning](https://semver.org/) with each
release following a `vX.Y.Z` format. Tendermint is currently on major version 0
and uses the minor version to signal breaking changes. The `main` branch is
used for active development and thus it is not advisable to build against it.

The latest changes are always initially merged into `main`. Releases are
specified using tags and are built from long-lived "backport" branches that are
cut from `main` when the release process begins. Each release "line" (e.g.
0.34 or 0.33) has its own long-lived backport branch, and the backport branches
have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder
in this case). Tendermint only maintains the last two releases at a time (the
oldest release is predominantly just security patches).

## Backporting

As non-breaking changes land on `main`, they should also be backported to
these backport branches.

We use Mergify's [backport feature](https://mergify.io/features/backports) to
automatically backport to the needed branch. There should be a label for any
backport branch that you'll be targeting. To notify the bot to backport a pull
request, mark the pull request with the label corresponding to the correct
backport branch. For example, to backport to v0.38.x, add the label
`S:backport-to-v0.38.x`. Once the original pull request is merged, the bot will
try to cherry-pick the pull request to the backport branch. If the bot fails to
backport, it will open a pull request. The author of the original pull request
is responsible for solving the conflicts and merging the pull request.
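
If the bot cannot backport cleanly, the cherry-pick can also be done by hand. A
minimal sketch, assuming the `v0.38.x` backport branch and a hypothetical commit
`abc1234` that was squash-merged to `main`:

```sh
git checkout v0.38.x
git checkout -b backport-my-change        # hypothetical branch name
git cherry-pick abc1234                   # resolve conflicts if any
git push -u origin backport-my-change     # then open a PR against v0.38.x
```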

### Creating a backport branch

If this is the first release candidate for a minor version release, e.g.
v0.25.0, you get to have the honor of creating the backport branch!

Note that, after creating the backport branch, you'll also need to update the
tags on `main` so that `go mod` is able to order the branches correctly. You
should tag `main` with a "dev" tag that is "greater than" the backport
branches' tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
for more context.

In the following example, we'll assume that we're making a backport branch for
the 0.38.x line.

1. Start on `main`

2. Ensure that there is a [branch protection
   rule](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/managing-a-branch-protection-rule) for the
   branch you are about to create (you will need admin access to the repository
   in order to do this).

3. Create and push the backport branch:

   ```sh
   git checkout -b v0.38.x
   git push origin v0.38.x
   ```

4. Create a PR to update the documentation directory for the backport branch.

   We rewrite any URLs pointing to `main` to point to the backport branch,
   so that generated documentation will link to the correct versions of files
   elsewhere in the repository. The following files are to be excluded from this
   search:

   * [`README.md`](./README.md)
   * [`CHANGELOG.md`](./CHANGELOG.md)
   * [`UPGRADING.md`](./UPGRADING.md)

   The following links are to always point to `main`, regardless of where they
   occur in the codebase:

   * `https://github.com/tendermint/tendermint/blob/main/LICENSE`

   Be sure to search for all of the following links and replace `main` with your
   corresponding branch label or version (e.g. `v0.38.x` or `v0.38`); a scripted
   sketch of this search-and-replace follows this step:

   * `github.com/tendermint/tendermint/blob/main` ->
     `github.com/tendermint/tendermint/blob/v0.38.x`
   * `github.com/tendermint/tendermint/tree/main` ->
     `github.com/tendermint/tendermint/tree/v0.38.x`
   * `docs.tendermint.com/main` -> `docs.tendermint.com/v0.38`

   Once you have updated all of the relevant documentation:

   ```sh
   # Create and push the PR.
   git checkout -b update-docs-v038x
   git commit -m "Update docs for v0.38.x backport branch."
   git push -u origin update-docs-v038x
   ```

   Be sure to merge this PR before making other changes on the newly-created
   backport branch.
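
   A minimal sketch of that search-and-replace, assuming GNU `sed` and the
   `v0.38.x` example above (adjust the version strings for your branch; it skips
   the excluded files via git pathspecs):

   ```sh
   git grep -lE 'tendermint/tendermint/(blob|tree)/main|docs\.tendermint\.com/main' \
       -- ':!README.md' ':!CHANGELOG.md' ':!UPGRADING.md' |
     xargs sed -i \
       -e 's@tendermint/tendermint/blob/main@tendermint/tendermint/blob/v0.38.x@g' \
       -e 's@tendermint/tendermint/tree/main@tendermint/tendermint/tree/v0.38.x@g' \
       -e 's@docs\.tendermint\.com/main@docs.tendermint.com/v0.38@g'
   ```

   Note that this sketch does not special-case the LICENSE link, which must keep
   pointing to `main`, so double-check the result before committing.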
|
||||
|
||||
After doing these steps, go back to `main` and do the following:
|
||||
|
||||
1. Create a new workflow to run e2e nightlies for the new backport branch. (See
|
||||
[e2e-nightly-main.yml][e2e] for an example.)
|
||||
|
||||
2. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
|
||||
backport bot to work on this branch, and add a corresponding `S:backport-to-v0.38.x`
|
||||
[label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.
|
||||
|
||||
3. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
|
||||
enable automatic update of Go dependencies on this branch. Copy and edit one
|
||||
of the existing branch configurations to set the correct `target-branch`.
|
||||
|
||||
[e2e]: https://github.com/tendermint/tendermint/blob/main/.github/workflows/e2e-nightly-main.yml
|
||||
|
||||
## Pre-releases
|
||||
|
||||
Before creating an official release, especially a minor release, we may want to
|
||||
create an alpha or beta version, or release candidate (RC) for our friends and
|
||||
partners to test out. We use git tags to create pre-releases, and we build them
|
||||
off of backport branches, for example:
|
||||
|
||||
- `v0.38.0-alpha.1` - The first alpha release of `v0.38.0`. Subsequent alpha
|
||||
releases will be numbered `v0.38.0-alpha.2`, `v0.38.0-alpha.3`, etc.
|
||||
|
||||
Alpha releases are to be considered the _most_ unstable of pre-releases, and
|
||||
are most likely not yet properly QA'd. These are made available to allow early
|
||||
adopters to start integrating and testing new functionality before we're done
|
||||
with QA.
|
||||
|
||||
- `v0.38.0-beta.1` - The first beta release of `v0.38.0`. Subsequent beta
|
||||
releases will be numbered `v0.38.0-beta.2`, `v0.38.0-beta.3`, etc.
|
||||
|
||||
Beta releases can be considered more stable than alpha releases in that we
|
||||
will have QA'd them better than alpha releases, but there still may be
|
||||
minor breaking API changes if users have strong demands for such changes.
|
||||
|
||||
- `v0.38.0-rc1` - The first release candidate (RC) of `v0.38.0`. Subsequent RCs
|
||||
will be numbered `v0.38.0-rc2`, `v0.38.0-rc3`, etc.
|
||||
|
||||
RCs are considered more stable than beta releases in that we will have
|
||||
completed our QA on them. APIs will most likely be stable at this point. The
|
||||
difference between an RC and a release is that there may still be small
|
||||
changes (bug fixes, features) that may make their way into the series before
|
||||
cutting a final release.
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important
|
||||
that these branches have distinct names from the tags/release names.)
|
||||
|
||||
If this is the first pre-release for a minor release, you'll have to make a new
|
||||
backport branch (see above). Otherwise:
|
||||
|
||||
1. Start from the backport branch (e.g. `v0.38.x`).
|
||||
2. Run the integration tests and the E2E nightlies
|
||||
(which can be triggered from the GitHub UI;
|
||||
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-37x.yml).
|
||||
3. Prepare the pre-release documentation:
|
||||
- Ensure that all relevant changes are in the `CHANGELOG_PENDING.md` file.
|
||||
This file's contents must only be included in the `CHANGELOG.md` when we
|
||||
cut final releases.
|
||||
- Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
4. Prepare the versioning:
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary.
|
||||
Check the changelog for breaking changes in these components.
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
5. Open a PR with these changes against the backport branch.
|
||||
6. Once these changes have landed on the backport branch, be sure to pull them back down locally.
|
||||
7. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.38.0-rc1 -m "Release Candidate v0.38.0-rc1`
|
||||
8. Push the tag back up to origin:
|
||||
`git push origin v0.38.0-rc1`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
9. Future pre-releases will continue to be built off of this branch.
|
||||
|
||||
## Minor release

This minor release process assumes that this release was preceded by release
candidates. If there were no release candidates, begin by creating a backport
branch, as described above.

Before performing these steps, be sure the
[Minor Release Checklist](#minor-release-checklist) has been completed.

1. Start on the backport branch (e.g. `v0.38.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
   - "Squash" changes from the changelog entries for the pre-releases into a
     single entry, and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or
     simplifying any intra-pre-release changes. It may also help to alphabetize
     the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
   This will trigger the actual release `v0.38.0`.
   - `git tag -a v0.38.0 -m 'Release v0.38.0'`
   - `git push origin v0.38.0`
6. Make sure that `main` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS\_README.md](./docs/DOCS_README.md) for more details). In summary:
   - Start on branch `main`.
   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   - Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.
   - Commit these changes to `main` and backport them into the backport
     branch for this release.

## Patch release

Patch releases are done differently from minor releases: they are built off of
long-lived backport branches, rather than from `main`. As non-breaking changes
land on `main`, they should also be backported into these backport branches.

Patch releases don't have release candidates by default, although any tricky
changes may merit a release candidate.

To create a patch release:

1. Check out the long-lived backport branch: `git checkout v0.38.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and
     add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up
     an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump the TMDefaultVersion in `version.go`
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, that ABCI versions are the only versions which can
     change during patch releases, and that only field additions are valid patch changes.)
4. Open a PR with these changes that will land them back on `v0.38.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.38.1 -m 'Release v0.38.1'`
   - `git push origin v0.38.1`
6. Create a pull request back to `main` with the CHANGELOG & version changes from the latest release.
   - Remove all `R:patch` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into `main`.

## Minor Release Checklist

The following set of steps is performed on all releases that increment the
_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is well
tested, stable, and suitable for adoption by the various diverse projects that
rely on Tendermint.

### Feature Freeze

Ahead of any minor version release of Tendermint, the software enters 'Feature
Freeze' for at least two weeks. A feature freeze means that _no_ new features
are added to the code being prepared for release. No code changes should be made
to the code being released that do not directly improve pressing issues of code
quality. The following must not be merged during a feature freeze:

* Refactors that are not related to specific bug fixes.
* Dependency upgrades.
* New test code that does not test a discovered regression.
* New features of any kind.
* Documentation or spec improvements that are not related to the newly developed
  code.

This period directly follows the creation of the [backport
branch](#creating-a-backport-branch). The Tendermint team instead directs all
attention to ensuring that the existing code is stable and reliable. Broken
tests are fixed, flaky tests are remedied, end-to-end test failures are
thoroughly diagnosed, and all efforts of the team are aimed at improving the
quality of the code. During this period, the upgrade harness tests are run
repeatedly and a variety of in-house testnets are run to ensure Tendermint
functions at the scale at which it will be used by application developers and
node operators.

### Nightly End-To-End Tests

The Tendermint team maintains [a set of end-to-end
tests](https://github.com/tendermint/tendermint/blob/main/test/e2e/README.md#L1)
that run each night on the latest commit of the project and on the code in the
tip of each supported backport branch. These tests start a network of
containerized Tendermint processes and run automated checks that the network
functions as expected in both stable and unstable conditions. During the feature
freeze, these tests are run nightly and must pass consistently for a release of
Tendermint to be considered stable.

### Upgrade Harness

> TODO(williambanfield): Change to past tense and clarify this section once
> upgrade harness is complete.

The Tendermint team is creating an upgrade test harness to exercise the workflow
of stopping an instance of Tendermint running one version of the software and
starting up the same application running the next version. To support upgrade
testing, we will add the ability to terminate the Tendermint process at specific
pre-defined points in its execution so that we can verify upgrades work in a
representative sample of stop conditions.

### Large Scale Testnets

The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise
basic consensus interactions. Real world deployments of Tendermint often have
over a hundred nodes just in the validator set, with many others acting as full
nodes and sentry nodes. To gain more assurance before a release, we will also
run larger-scale test networks to shake out emergent behaviors at scale.

Large-scale test networks are run on a set of virtual machines (VMs). Each VM is
equipped with 4 gigabytes of RAM and 2 CPU cores. The network runs a very simple
key-value store application. The application adds artificial delays to different
ABCI calls to simulate a slow application. Each testnet is briefly run with no
load being generated to collect a performance baseline. Once the baseline is
captured, a consistent load is applied across the network. This load takes the
form of 10% of the running nodes each receiving a consistent stream of two
hundred transactions per minute.

During each testnet, the following metrics are monitored and collected on each
node:

* Consensus rounds per height
* Maximum connected peers, minimum connected peers, rate of change of peer connections
* Memory resident set size
* CPU utilization
* Blocks produced per minute
* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit)
* Latency to receive block proposals

For these tests we intentionally target low-powered host machines (with low core
counts and limited memory) to ensure we observe the same kinds of resource
contention and limitation that real-world deployments of Tendermint experience
in production.

#### 200 Node Testnet

To test the stability and performance of Tendermint in a real world scenario,
a 200 node test network is run. The network comprises 5 seed nodes, 100
validators and 95 non-validating full nodes. All nodes begin by dialing
a subset of the seed nodes to discover peers. The network is run for several
days, with metrics being collected continuously. In cases of changes to
performance-critical systems, testnets of larger sizes should be considered.

#### Rotating Node Testnet

Real-world deployments of Tendermint frequently see new nodes arrive and old
nodes exit the network. The rotating node testnet ensures that Tendermint is
able to handle this reliably. In this test, a network with 10 validators and
3 seed nodes is started. A rolling set of 25 full nodes are started and each
connects to the network by dialing one of the seed nodes. Once a node is able
to blocksync to the head of the chain and begins producing blocks using
Tendermint consensus, it is stopped. Once stopped, a new node is started and
takes its place. This network is run for several days.

#### Network Partition Testnet

Tendermint is expected to recover from network partitions. A partition where no
subset of the nodes is left with the super-majority of the stake is expected to
stop making blocks. Upon alleviation of the partition, the network is expected
to once again become fully connected and capable of producing blocks. The
network partition testnet ensures that Tendermint is able to handle this
reliably at scale. In this test, a network with 100 validators and 95 full
nodes is started. All validators have equal stake. Once the network is
producing blocks, a set of firewall rules is deployed to create a partitioned
network with 50% of the stake on one side and 50% on the other. Once the
network stops producing blocks, the firewall rules are removed and the nodes
are monitored to ensure they reconnect and that the network again begins
producing blocks.

#### Absent Stake Testnet

Tendermint networks often run with _some_ portion of the voting power offline.
The absent stake testnet ensures that large networks are able to handle this
reliably. A set of 150 validator nodes and three seed nodes is started. The set
of 150 validators is configured to only possess a cumulative stake of 67% of
the total stake. The remaining 33% of the stake is configured to belong to
a validator that is never actually run in the test network. The network is run
for multiple days, ensuring that it is able to produce blocks without issue.
SECURITY.md
@@ -2,98 +2,146 @@

## Reporting a Bug

As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security),
we operate a [bug bounty][hackerone]. See the policy for more
details on submissions and rewards, and see "Example Vulnerabilities" (below)
for examples of the kinds of bugs we're most interested in.

### Guidelines

We require that all researchers:

* Use the bug bounty to disclose all vulnerabilities, and avoid posting
  vulnerability information in public places, including Github Issues, Discord
  channels, and Telegram groups
* Make every effort to avoid privacy violations, degradation of user experience,
  disruption to production systems (including but not limited to the Cosmos
  Hub), and destruction of data
* Keep any information about vulnerabilities that you’ve discovered confidential
  between yourself and the Tendermint Core engineering team until the issue has
  been resolved and disclosed
* Avoid posting personally identifiable information, privately or publicly

If you follow these guidelines when reporting an issue to us, we commit to:

* Not pursue or support any legal action related to your research on this
  vulnerability
* Work with you to understand, resolve and ultimately disclose the issue in a
  timely fashion

## Disclosure Process

Tendermint Core uses the following disclosure process:

1. Once a security report is received, the Tendermint Core team works to verify
   the issue and confirm its severity level using CVSS.
2. The Tendermint Core team collaborates with the Gaia team to determine the
   vulnerability’s potential impact on the Cosmos Hub.
3. Patches are prepared for eligible releases of Tendermint in private
   repositories. See “Supported Releases” below for more information on which
   releases are considered eligible.
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE
   Numbering Authority.
5. We notify the community that a security release is coming, to give users time
   to prepare their systems for the update. Notifications can include forum
   posts, tweets, and emails to partners and validators, including emails sent
   to the [Tendermint Security Mailing List][tmsec-mailing].
6. 24 hours following this notification, the fixes are applied publicly and new
   releases are issued.
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these
   releases, and then themselves issue new releases.
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we
   notify the community, again, through the same channels as above. We also
   publish a Security Advisory on GitHub and publish the CVE, as long as neither
   the Security Advisory nor the CVE include any information on how to exploit
   these vulnerabilities beyond what information is already available in the
   patch itself.
9. Once the community is notified, we will pay out any relevant bug bounties to
   submitters.
10. One week after the releases go out, we will publish a post with further
    details on the vulnerability as well as our response to it.

This process can take some time. Every effort will be made to handle the bug in
as timely a manner as possible; however, it's important that we follow the
process described above to ensure that disclosures are handled consistently and
to keep Tendermint Core and its downstream dependent projects--including but not
limited to Gaia and the Cosmos Hub--as secure as possible.

### Example Timeline

The following is an example timeline for the triage and response. The required
roles and team members are described in parentheses after each task; however,
multiple people can play each role and each person may play multiple roles.

#### 24+ Hours Before Release Time

1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Create patches in a private security repo, and ensure that PRs are open
   targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
5. Write “Security Advisory” for forum (TENDERMINT LEAD)

#### 24 Hours Before Release Time

1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
2. Post Tweet linking to forum post (COMMS LEAD)
3. Announce security advisory/link to post in various other social channels
   (Telegram, Discord) (COMMS LEAD)
4. Send emails to validators or other users (PARTNERSHIPS LEAD)

#### Release Time

1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT
   LEAD)
2. Cut Cosmos SDK release for eligible versions (COSMOS ENG)
3. Cut Gaia release for eligible versions (GAIA ENG)
4. Post “Security releases” on forum (TENDERMINT LEAD)
5. Post new Tweet linking to forum post (COMMS LEAD)
6. Remind everyone via social channels (Telegram, Discord) that the release is
   out (COMMS LEAD)
7. Send emails to validators or other users (COMMS LEAD)
8. Publish Security Advisory and CVE, if CVE has no sensitive information
   (ADMIN)

#### After Release Time

1. Write forum post with exploit details (TENDERMINT LEAD)
2. Approve pay-out on HackerOne for submitter (ADMIN)

#### 7 Days After Release Time

1. Publish CVE if it has not yet been published (ADMIN)
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)

## Supported Releases

The Tendermint Core team commits to releasing security patch releases for both
the latest minor release as well as for the major/minor release that the Cosmos
Hub is running.

If you are running older versions of Tendermint Core, we encourage you to
upgrade at your earliest opportunity so that you can receive security patches
directly from the Tendermint repo. While you are welcome to backport security
patches to older versions for your own use, we will not publish or promote these
backports.

## Scope

The full scope of our bug bounty program is outlined on our
[Hacker One program page][hackerone]. Please also note that, in the interest of
the safety of our users and staff, a few things are explicitly excluded from
scope:

* Any third-party services
* Findings from physical testing, such as office access
* Findings derived from social engineering (e.g., phishing)

## Example Vulnerabilities

The following is a list of examples of the kinds of vulnerabilities that we’re
most interested in. It is not exhaustive: there are other kinds of issues we may
also be interested in!

### Specification

@@ -105,7 +153,8 @@ The following is a list of examples of the kinds of vulnerabilities that we’re

Assuming less than 1/3 of the voting power is Byzantine (malicious):

* Validation of blockchain data structures, including blocks, block parts,
  votes, and so on
* Execution of blocks
* Validator set changes
* Proposer round robin

@@ -114,6 +163,9 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):

* A node halting (liveness failure)
* Syncing new and old nodes

Assuming more than 1/3 the voting power is Byzantine:

* Attacks that go unpunished (unhandled evidence)

### Networking

@@ -139,7 +191,7 @@ Attacks may come through the P2P network or the RPC layer:

### Libraries

* Serialization
* Reading/Writing files and databases

### Cryptography

@@ -150,5 +202,8 @@ Attacks may come through the P2P network or the RPC layer:

### Light Client

* Core verification
* Bisection/sequential algorithms

[hackerone]: https://hackerone.com/cosmos
[tmsec-mailing]: https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc

UPGRADING.md
@@ -1,6 +1,36 @@

# Upgrading Tendermint Core

This guide provides instructions for upgrading to specific versions of
Tendermint Core.

## Unreleased

### ABCI Changes

* The `ABCIVersion` is now `1.0.0`.

* Added new ABCI methods `PrepareProposal` and `ProcessProposal`. For details,
  please see the [spec](spec/abci/README.md). Applications upgrading to
  v0.37.0 must implement these methods, at the very minimum, as described
  [here](./spec/abci/abci++_app_requirements.md) (a minimal sketch follows
  after this list).
* Deduplicated `ConsensusParams` and `BlockParams`.
  In the v0.34 branch they are defined both in `abci/types.proto` and `types/params.proto`.
  The definitions in `abci/types.proto` have been removed.
  In-process applications should make sure they are not using the deleted
  version of those structures.
* In v0.34, messages on the wire used to be length-delimited with `int64` varint
  values, which was inconsistent with the `uint64` varint length delimiters used
  in the P2P layer. Both now consistently use `uint64` varint length delimiters.
* Added `AbciVersion` to `RequestInfo`.
  Applications should check that Tendermint's ABCI version matches the one they expect
  in order to ensure compatibility.
* The `SetOption` method has been removed from the ABCI `Client` interface.
  The corresponding Protobuf types have been deprecated.
* The `key` and `value` fields in the `EventAttribute` type have been changed
  from type `bytes` to `string`. As per the [Protocol Buffers updating
  guidelines](https://developers.google.com/protocol-buffers/docs/proto3#updating),
  this should have no effect on the wire-level encoding for UTF8-encoded
  strings.

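For applications that do not need to customize block proposals, the two new
methods can be close to no-ops. The following is only a minimal sketch: it
assumes the Go types from `abci/types` as they appear in the v0.37 line
(`RequestPrepareProposal.Txs`/`MaxTxBytes`, `ResponsePrepareProposal.Txs`,
`ResponseProcessProposal_ACCEPT`), and exact names may differ slightly on this
branch.

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// MinimalApp embeds BaseApplication and adds only the two new ABCI++ methods
// with pass-through behavior.
type MinimalApp struct {
	abci.BaseApplication
}

// PrepareProposal returns the transactions suggested by Tendermint, in order,
// dropping any tail that would exceed the size budget in the request.
func (app *MinimalApp) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
	txs := make([][]byte, 0, len(req.Txs))
	var total int64
	for _, tx := range req.Txs {
		total += int64(len(tx))
		if total > req.MaxTxBytes {
			break
		}
		txs = append(txs, tx)
	}
	return abci.ResponsePrepareProposal{Txs: txs}
}

// ProcessProposal accepts every proposed block.
func (app *MinimalApp) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal {
	return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}
}
```

Applications that care about proposal contents (e.g. enforcing ordering or
rejecting invalid blocks early) would replace these bodies with real logic.
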
## v0.34.20

@@ -15,7 +45,7 @@ and gas cost).

Operators can enable the priority mempool by setting `mempool.version` to
`"v1"` in the `config.toml`. For more technical details about the priority
mempool, see [ADR 067: Mempool
Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md).

## v0.34.0

@@ -23,7 +53,7 @@ Refactor](https://github.com/tendermint/tendermint/blob/master/docs/architecture

This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").

Note also that Tendermint 0.34 also requires Go 1.16 or higher.

### ABCI Changes

@@ -33,7 +63,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.

were added to support the new State Sync feature.
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
new nodes are able to join a network in a matter of seconds.
Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync)
if you want to learn more about State Sync, or if you'd like your application to use it.
(If you don't want to support State Sync in your application, you can just implement these new
ABCI methods as no-ops, leaving them empty.)
@@ -49,7 +79,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.

Applications should be able to handle these evidence types
(i.e., through slashing or other accountability measures).

* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/crypto/keys.proto#L13-L15)
  (used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
  Note that since Tendermint only supports ed25519 validator keys, there's only one
  option in the `oneof`. For more, see "Protocol Buffers," below.

@@ -64,12 +94,9 @@ directory. For more, see "Protobuf," below.

### Blockchain Protocol

* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
  `ResponseDeliverTx(Code, Data)`, as of v0.34 also includes `GasWanted` and `GasUsed`
  fields.

* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
  to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).

@@ -159,7 +186,7 @@ The `bech32` package has moved to the Cosmos SDK:

### CLI

The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details.

### Light Client

@@ -173,6 +200,7 @@ Other user-relevant changes include:

* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers and validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.

@@ -314,7 +342,7 @@ Evidence Params has been changed to include duration.

### RPC Changes

* `/validators` is now paginated (default: 30 vals per page)
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results)
* Event suffix has been removed from the ID in event responses
* IDs are now integers not `json-client-XYZ`

@@ -433,11 +461,11 @@ the compilation tag:

Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
<https://docs.tendermint.com/v0.33/introduction/install.html#compile-with-cleveldb-support>)

## v0.31.0

This release contains a breaking change to the behavior of the pubsub system.
It also contains some minor breaking changes in the Go API and ABCI.
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
with blockchains created from the v0.30 series.

@@ -455,7 +483,7 @@ In this case, the WS client will receive an error with description:

    "error": {
        "code": -32000,
        "msg": "Server error",
        "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
    }
}

@@ -508,14 +536,14 @@ due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:

* [Merkle tree](https://github.com/tendermint/tendermint/blob/main/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/main/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:

* [Vote](https://github.com/tendermint/tendermint/blob/main/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress

@@ -636,7 +664,7 @@ to `timeout_propose = "3s"`.

### RPC Changes

The default behavior of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true`.

@@ -19,8 +19,8 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

A detailed description of the ABCI methods and message types is contained in:

- [The main spec](https://github.com/tendermint/tendermint/blob/main/spec/abci/README.md)
- [A protobuf file](../proto/tendermint/types/types.proto)
- [A Go interface](./types/application.go)

## Protocol Buffers

@@ -14,6 +14,8 @@ const (
    echoRetryIntervalSeconds = 1
)

//go:generate ../../scripts/mockery_generate.sh Client

// Client defines an interface for an ABCI client.
// All `Async` methods return a `ReqRes` object.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
@@ -28,34 +30,36 @@ type Client interface {
    FlushAsync() *ReqRes
    EchoAsync(msg string) *ReqRes
    InfoAsync(types.RequestInfo) *ReqRes
    SetOptionAsync(types.RequestSetOption) *ReqRes
    DeliverTxAsync(types.RequestDeliverTx) *ReqRes
    CheckTxAsync(types.RequestCheckTx) *ReqRes
    QueryAsync(types.RequestQuery) *ReqRes
    CommitAsync() *ReqRes
    InitChainAsync(types.RequestInitChain) *ReqRes
    PrepareProposalAsync(types.RequestPrepareProposal) *ReqRes
    BeginBlockAsync(types.RequestBeginBlock) *ReqRes
    EndBlockAsync(types.RequestEndBlock) *ReqRes
    ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
    OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
    LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
    ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
    ProcessProposalAsync(types.RequestProcessProposal) *ReqRes

    FlushSync() error
    EchoSync(msg string) (*types.ResponseEcho, error)
    InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
    SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
    DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
    CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
    QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
    CommitSync() (*types.ResponseCommit, error)
    InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
    PrepareProposalSync(types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
    BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
    EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
    ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
    OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
    LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
    ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
    ProcessProposalSync(types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
}

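// Illustrative sketch (not part of the original file): a caller holding a
// Client could exercise the new ABCI++ methods synchronously, for example:
//
//     resPrep, err := client.PrepareProposalSync(types.RequestPrepareProposal{Txs: txs})
//     if err != nil { /* handle error */ }
//     resProc, err := client.ProcessProposalSync(types.RequestProcessProposal{Txs: resPrep.Txs})
//
// The `Txs` field names above are assumptions based on the ABCI++ request and
// response types and may differ slightly on this branch.
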
//----------------------------------------

@@ -192,15 +192,6 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
}

func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
    req := types.ToRequestSetOption(params)
    res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
}

func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
    req := types.ToRequestDeliverTx(params)
    res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
@@ -300,6 +291,25 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshot
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
}

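// PrepareProposalAsync and ProcessProposalAsync (below) forward the new ABCI++
// proposal requests to the gRPC server and wrap the responses in ReqRes
// objects, mirroring the other *Async methods on grpcClient.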
func (cli *grpcClient) PrepareProposalAsync(params types.RequestPrepareProposal) *ReqRes {
    req := types.ToRequestPrepareProposal(params)
    res, err := cli.client.PrepareProposal(context.Background(), req.GetPrepareProposal(), grpc.WaitForReady(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_PrepareProposal{PrepareProposal: res}})
}

func (cli *grpcClient) ProcessProposalAsync(params types.RequestProcessProposal) *ReqRes {
    req := types.ToRequestProcessProposal(params)
    res, err := cli.client.ProcessProposal(context.Background(), req.GetProcessProposal(), grpc.WaitForReady(true))
    if err != nil {
        cli.StopForError(err)
    }

    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ProcessProposal{ProcessProposal: res}})
}

// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
@@ -356,11 +366,6 @@ func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, err
    return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}

func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
    reqres := cli.SetOptionAsync(req)
    return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
    reqres := cli.DeliverTxAsync(params)
    return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
@@ -417,3 +422,14 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
    reqres := cli.ApplySnapshotChunkAsync(params)
    return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}

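// PrepareProposalSync and ProcessProposalSync (below) are thin wrappers that
// issue the corresponding Async call and then read the result out of the
// completed ReqRes, as the other *Sync methods do.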
func (cli *grpcClient) PrepareProposalSync(
    params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
    reqres := cli.PrepareProposalAsync(params)
    return cli.finishSyncCall(reqres).GetPrepareProposal(), cli.Error()
}

func (cli *grpcClient) ProcessProposalSync(params types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
    reqres := cli.ProcessProposalAsync(params)
    return cli.finishSyncCall(reqres).GetProcessProposal(), cli.Error()
}

@@ -75,17 +75,6 @@ func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
    )
}

func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.SetOption(req)
    return app.callback(
        types.ToRequestSetOption(req),
        types.ToResponseSetOption(res),
    )
}

func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()
@@ -207,6 +196,28 @@ func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotCh
    )
}

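// PrepareProposalAsync and ProcessProposalAsync (below) call straight into the
// in-process Application under the client mutex and deliver the result through
// the usual callback, like the other *Async methods of localClient.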
func (app *localClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.PrepareProposal(req)
    return app.callback(
        types.ToRequestPrepareProposal(req),
        types.ToResponsePrepareProposal(res),
    )
}

func (app *localClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.ProcessProposal(req)
    return app.callback(
        types.ToRequestProcessProposal(req),
        types.ToResponseProcessProposal(res),
    )
}

//-------------------------------------------------------

func (app *localClient) FlushSync() error {
@@ -225,14 +236,6 @@ func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, er
    return &res, nil
}

func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.SetOption(req)
    return &res, nil
}

func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()
@@ -323,6 +326,22 @@ func (app *localClient) ApplySnapshotChunkSync(
    return &res, nil
}

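// PrepareProposalSync and ProcessProposalSync (below) are the blocking
// counterparts: they invoke the in-process Application directly and return its
// response, without going through a ReqRes.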
func (app *localClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.PrepareProposal(req)
    return &res, nil
}

func (app *localClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.ProcessProposal(req)
    return &res, nil
}

//-------------------------------------------------------

func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {

@@ -1,4 +1,4 @@
// Code generated by mockery. DO NOT EDIT.

package mocks

@@ -575,6 +575,84 @@ func (_m *Client) OnStop() {
    _m.Called()
}

// PrepareProposalAsync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalAsync(_a0 types.RequestPrepareProposal) *abcicli.ReqRes {
    ret := _m.Called(_a0)

    var r0 *abcicli.ReqRes
    if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *abcicli.ReqRes); ok {
        r0 = rf(_a0)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*abcicli.ReqRes)
        }
    }

    return r0
}

// PrepareProposalSync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
    ret := _m.Called(_a0)

    var r0 *types.ResponsePrepareProposal
    if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
        r0 = rf(_a0)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponsePrepareProposal)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(types.RequestPrepareProposal) error); ok {
        r1 = rf(_a0)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// ProcessProposalAsync provides a mock function with given fields: _a0
func (_m *Client) ProcessProposalAsync(_a0 types.RequestProcessProposal) *abcicli.ReqRes {
    ret := _m.Called(_a0)

    var r0 *abcicli.ReqRes
    if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *abcicli.ReqRes); ok {
        r0 = rf(_a0)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*abcicli.ReqRes)
        }
    }

    return r0
}

// ProcessProposalSync provides a mock function with given fields: _a0
func (_m *Client) ProcessProposalSync(_a0 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
    ret := _m.Called(_a0)

    var r0 *types.ResponseProcessProposal
    if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
        r0 = rf(_a0)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseProcessProposal)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(types.RequestProcessProposal) error); ok {
        r1 = rf(_a0)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// QueryAsync provides a mock function with given fields: _a0
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
    ret := _m.Called(_a0)
@@ -649,45 +727,6 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
    _m.Called(_a0)
}

// SetOptionAsync provides a mock function with given fields: _a0
func (_m *Client) SetOptionAsync(_a0 types.RequestSetOption) *abcicli.ReqRes {
    ret := _m.Called(_a0)

    var r0 *abcicli.ReqRes
    if rf, ok := ret.Get(0).(func(types.RequestSetOption) *abcicli.ReqRes); ok {
        r0 = rf(_a0)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*abcicli.ReqRes)
        }
    }

    return r0
}

// SetOptionSync provides a mock function with given fields: _a0
func (_m *Client) SetOptionSync(_a0 types.RequestSetOption) (*types.ResponseSetOption, error) {
    ret := _m.Called(_a0)

    var r0 *types.ResponseSetOption
    if rf, ok := ret.Get(0).(func(types.RequestSetOption) *types.ResponseSetOption); ok {
        r0 = rf(_a0)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseSetOption)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(types.RequestSetOption) error); ok {
        r1 = rf(_a0)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// SetResponseCallback provides a mock function with given fields: _a0
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
    _m.Called(_a0)
@@ -734,3 +773,18 @@ func (_m *Client) String() string {

    return r0
}

type mockConstructorTestingTNewClient interface {
    mock.TestingT
    Cleanup(func())
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t mockConstructorTestingTNewClient) *Client {
    mock := &Client{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -231,10 +231,6 @@ func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
    return cli.queueRequest(types.ToRequestInfo(req))
}

func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
    return cli.queueRequest(types.ToRequestSetOption(req))
}

func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
    return cli.queueRequest(types.ToRequestDeliverTx(req))
}
@@ -279,6 +275,14 @@ func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotC
    return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
}

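// PrepareProposalAsync and ProcessProposalAsync (below) simply enqueue the new
// ABCI++ proposal requests on the socket client's request queue, like every
// other *Async method here.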
func (cli *socketClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
    return cli.queueRequest(types.ToRequestPrepareProposal(req))
}

func (cli *socketClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
    return cli.queueRequest(types.ToRequestProcessProposal(req))
}

//----------------------------------------

func (cli *socketClient) FlushSync() error {
@@ -308,15 +312,6 @@ func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, e
    return reqres.Response.GetInfo(), cli.Error()
}

func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
    reqres := cli.queueRequest(types.ToRequestSetOption(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }

    return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
    reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
    if err := cli.FlushSync(); err != nil {
@@ -417,6 +412,24 @@ func (cli *socketClient) ApplySnapshotChunkSync(
    return reqres.Response.GetApplySnapshotChunk(), cli.Error()
}

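// PrepareProposalSync and ProcessProposalSync (below) queue the request, flush
// the connection, and then read the matching response, following the pattern
// of the other *Sync methods on socketClient.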
func (cli *socketClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
    reqres := cli.queueRequest(types.ToRequestPrepareProposal(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }

    return reqres.Response.GetPrepareProposal(), cli.Error()
}

func (cli *socketClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
    reqres := cli.queueRequest(types.ToRequestProcessProposal(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }

    return reqres.Response.GetProcessProposal(), cli.Error()
}

//----------------------------------------

func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
@@ -468,8 +481,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
        _, ok = res.Value.(*types.Response_Flush)
    case *types.Request_Info:
        _, ok = res.Value.(*types.Response_Info)
    case *types.Request_SetOption:
        _, ok = res.Value.(*types.Response_SetOption)
    case *types.Request_DeliverTx:
        _, ok = res.Value.(*types.Response_DeliverTx)
    case *types.Request_CheckTx:
@@ -492,6 +503,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
        _, ok = res.Value.(*types.Response_ListSnapshots)
    case *types.Request_OfferSnapshot:
        _, ok = res.Value.(*types.Response_OfferSnapshot)
    case *types.Request_PrepareProposal:
        _, ok = res.Value.(*types.Response_PrepareProposal)
    case *types.Request_ProcessProposal:
        _, ok = res.Value.(*types.Response_ProcessProposal)
    }
    return ok
}

@@ -16,7 +16,6 @@ import (

    abcicli "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/example/code"
    "github.com/tendermint/tendermint/abci/example/counter"
    "github.com/tendermint/tendermint/abci/example/kvstore"
    "github.com/tendermint/tendermint/abci/server"
    servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -44,9 +43,6 @@ var (
    flagHeight int
    flagProve  bool

    // counter
    flagSerial bool

    // kvstore
    flagPersist string
)
@@ -58,9 +54,7 @@ var RootCmd = &cobra.Command{
    PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

        switch cmd.Use {
        case "kvstore", "version":
            return nil
        }

@@ -89,10 +83,11 @@
// Structure for data passed to print response.
type response struct {
    // generic abci response
    Data   []byte
    Code   uint32
    Info   string
    Log    string
    Status int32

    Query *queryResponse
}
@@ -135,10 +130,6 @@ func addQueryFlags() {
        "whether or not to return a merkle proof of the query result")
}

func addCounterFlags() {
    counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}

func addKVStoreFlags() {
    kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
@@ -148,18 +139,17 @@ func addCommands() {
    RootCmd.AddCommand(consoleCmd)
    RootCmd.AddCommand(echoCmd)
    RootCmd.AddCommand(infoCmd)
    RootCmd.AddCommand(setOptionCmd)
    RootCmd.AddCommand(deliverTxCmd)
    RootCmd.AddCommand(checkTxCmd)
    RootCmd.AddCommand(commitCmd)
    RootCmd.AddCommand(versionCmd)
    RootCmd.AddCommand(testCmd)
    RootCmd.AddCommand(prepareProposalCmd)
    RootCmd.AddCommand(processProposalCmd)
    addQueryFlags()
    RootCmd.AddCommand(queryCmd)

    // examples
    addCounterFlags()
    RootCmd.AddCommand(counterCmd)
    addKVStoreFlags()
    RootCmd.AddCommand(kvstoreCmd)
}
@@ -176,7 +166,6 @@ you'd like to run:

where example.file looks something like:

    set_option serial on
    check_tx 0x00
    check_tx 0xff
    deliver_tx 0x00
@@ -198,7 +187,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
    Args:      cobra.ExactArgs(0),
    ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "prepare_proposal", "process_proposal", "commit", "query"},
    RunE:      cmdConsole,
}

@@ -216,13 +205,6 @@ var infoCmd = &cobra.Command{
    Args:  cobra.ExactArgs(0),
    RunE:  cmdInfo,
}

var setOptionCmd = &cobra.Command{
    Use:   "set_option",
    Short: "set an option on the application",
    Long:  "set an option on the application",
    Args:  cobra.ExactArgs(2),
    RunE:  cmdSetOption,
}

var deliverTxCmd = &cobra.Command{
    Use: "deliver_tx",
@@ -259,6 +241,22 @@ var versionCmd = &cobra.Command{
    },
}

var prepareProposalCmd = &cobra.Command{
    Use:   "prepare_proposal",
    Short: "prepare proposal",
    Long:  "prepare proposal",
    Args:  cobra.MinimumNArgs(0),
    RunE:  cmdPrepareProposal,
}

var processProposalCmd = &cobra.Command{
    Use:   "process_proposal",
    Short: "process proposal",
    Long:  "process proposal",
    Args:  cobra.MinimumNArgs(0),
    RunE:  cmdProcessProposal,
}

var queryCmd = &cobra.Command{
    Use:   "query",
    Short: "query the application state",
@@ -267,14 +265,6 @@ var queryCmd = &cobra.Command{
    RunE: cmdQuery,
}

var counterCmd = &cobra.Command{
    Use:   "counter",
    Short: "ABCI demo example",
    Long:  "ABCI demo example",
    Args:  cobra.ExactArgs(0),
    RunE:  cmdCounter,
}

var kvstoreCmd = &cobra.Command{
    Use:   "kvstore",
    Short: "ABCI demo example",
@@ -324,7 +314,6 @@ func cmdTest(cmd *cobra.Command, args []string) error {
    return compose(
        []func() error{
            func() error { return servertest.InitChain(client) },
            func() error { return servertest.SetOption(client, "serial", "on") },
            func() error { return servertest.Commit(client, nil) },
            func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
@@ -339,6 +328,16 @@ func cmdTest(cmd *cobra.Command, args []string) error {
|
||||
return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
},
|
||||
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
|
||||
func() error {
|
||||
return servertest.PrepareProposal(client, [][]byte{
|
||||
{0x01},
|
||||
}, [][]byte{{0x01}}, nil)
|
||||
},
|
||||
func() error {
|
||||
return servertest.ProcessProposal(client, [][]byte{
|
||||
{0x01},
|
||||
}, types.ResponseProcessProposal_ACCEPT)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -439,8 +438,10 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
return cmdInfo(cmd, actualArgs)
|
||||
case "query":
|
||||
return cmdQuery(cmd, actualArgs)
|
||||
case "set_option":
|
||||
return cmdSetOption(cmd, actualArgs)
|
||||
case "prepare_proposal":
|
||||
return cmdPrepareProposal(cmd, actualArgs)
|
||||
case "process_proposal":
|
||||
return cmdProcessProposal(cmd, actualArgs)
|
||||
default:
|
||||
return cmdUnimplemented(cmd, pArgs)
|
||||
}
|
||||
@@ -464,7 +465,6 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
|
||||
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
|
||||
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
|
||||
fmt.Println("Use \"[command] --help\" for more information about a command.")
|
||||
|
||||
return nil
|
||||
@@ -504,25 +504,6 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
|
||||
const codeBad uint32 = 10
|
||||
|
||||
// Set an option on the application
|
||||
func cmdSetOption(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 2 {
|
||||
printResponse(cmd, args, response{
|
||||
Code: codeBad,
|
||||
Log: "want at least arguments of the form: <key> <value>",
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
key, val := args[0], args[1]
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append a new tx to application
|
||||
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
@@ -625,30 +606,59 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
||||
app := counter.NewApplication(flagSerial)
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
|
||||
txsBytesArray := make([][]byte, len(args))
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txsBytesArray[i] = txBytes
|
||||
}
|
||||
|
||||
res, err := client.PrepareProposalSync(types.RequestPrepareProposal{
|
||||
Txs: txsBytesArray,
|
||||
// kvstore needs this parameter set, otherwise it rejects the tx because the default value is 0
|
||||
MaxTxBytes: 65536,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
resps := make([]response, 0, len(res.Txs))
|
||||
for _, tx := range res.Txs {
|
||||
resps = append(resps, response{
|
||||
Code: code.CodeTypeOK,
|
||||
Log: "Succeeded. Tx: " + string(tx),
|
||||
})
|
||||
}
|
||||
|
||||
printResponse(cmd, args, resps...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdProcessProposal(cmd *cobra.Command, args []string) error {
|
||||
txsBytesArray := make([][]byte, len(args))
|
||||
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txsBytesArray[i] = txBytes
|
||||
}
|
||||
|
||||
res, err := client.ProcessProposalSync(types.RequestProcessProposal{
|
||||
Txs: txsBytesArray,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
if err := srv.Stop(); err != nil {
|
||||
logger.Error("Error while stopping server", "err", err)
|
||||
}
|
||||
printResponse(cmd, args, response{
|
||||
Status: int32(res.Status),
|
||||
})
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
@@ -657,11 +667,14 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
app = kvstore.NewApplication()
|
||||
} else {
|
||||
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
|
||||
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
|
||||
var err error
|
||||
flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
|
||||
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
@@ -687,44 +700,49 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
func printResponse(cmd *cobra.Command, args []string, rsp response) {
|
||||
func printResponse(cmd *cobra.Command, args []string, rsps ...response) {
|
||||
|
||||
if flagVerbose {
|
||||
fmt.Println(">", cmd.Use, strings.Join(args, " "))
|
||||
}
|
||||
|
||||
// Always print the status code.
|
||||
if rsp.Code == types.CodeTypeOK {
|
||||
fmt.Printf("-> code: OK\n")
|
||||
} else {
|
||||
fmt.Printf("-> code: %d\n", rsp.Code)
|
||||
for _, rsp := range rsps {
|
||||
// Always print the status code.
|
||||
if rsp.Code == types.CodeTypeOK {
|
||||
fmt.Printf("-> code: OK\n")
|
||||
} else {
|
||||
fmt.Printf("-> code: %d\n", rsp.Code)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if len(rsp.Data) != 0 {
|
||||
// Do not print this line when using the commit command
|
||||
// because the string comes out as gibberish
|
||||
if cmd.Use != "commit" {
|
||||
fmt.Printf("-> data: %s\n", rsp.Data)
|
||||
if len(rsp.Data) != 0 {
|
||||
// Do not print this line when using the commit command
|
||||
// because the string comes out as gibberish
|
||||
if cmd.Use != "commit" {
|
||||
fmt.Printf("-> data: %s\n", rsp.Data)
|
||||
}
|
||||
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
|
||||
}
|
||||
if rsp.Log != "" {
|
||||
fmt.Printf("-> log: %s\n", rsp.Log)
|
||||
}
|
||||
if cmd.Use == "process_proposal" {
|
||||
fmt.Printf("-> status: %s\n", types.ResponseProcessProposal_ProposalStatus_name[rsp.Status])
|
||||
}
|
||||
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
|
||||
}
|
||||
if rsp.Log != "" {
|
||||
fmt.Printf("-> log: %s\n", rsp.Log)
|
||||
}
|
||||
|
||||
if rsp.Query != nil {
|
||||
fmt.Printf("-> height: %d\n", rsp.Query.Height)
|
||||
if rsp.Query.Key != nil {
|
||||
fmt.Printf("-> key: %s\n", rsp.Query.Key)
|
||||
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
|
||||
}
|
||||
if rsp.Query.Value != nil {
|
||||
fmt.Printf("-> value: %s\n", rsp.Query.Value)
|
||||
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
|
||||
}
|
||||
if rsp.Query.ProofOps != nil {
|
||||
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
|
||||
if rsp.Query != nil {
|
||||
fmt.Printf("-> height: %d\n", rsp.Query.Height)
|
||||
if rsp.Query.Key != nil {
|
||||
fmt.Printf("-> key: %s\n", rsp.Query.Key)
|
||||
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
|
||||
}
|
||||
if rsp.Query.Value != nil {
|
||||
fmt.Printf("-> value: %s\n", rsp.Query.Value)
|
||||
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
|
||||
}
|
||||
if rsp.Query.ProofOps != nil {
|
||||
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,4 +7,5 @@ const (
|
||||
CodeTypeBadNonce uint32 = 2
|
||||
CodeTypeUnauthorized uint32 = 3
|
||||
CodeTypeUnknownError uint32 = 4
|
||||
CodeTypeExecuted uint32 = 5
|
||||
)
|
||||
|
||||
@@ -1,103 +0,0 @@
|
||||
package counter
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
|
||||
hashCount int
|
||||
txCount int
|
||||
serial bool
|
||||
}
|
||||
|
||||
func NewApplication(serial bool) *Application {
|
||||
return &Application{serial: serial}
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
||||
}
|
||||
|
||||
func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
|
||||
key, value := req.Key, req.Value
|
||||
if key == "serial" && value == "on" {
|
||||
app.serial = true
|
||||
} else {
|
||||
/*
|
||||
TODO Panic and have the ABCI server pass an exception.
|
||||
The client can call SetOptionSync() and get an `error`.
|
||||
return types.ResponseSetOption{
|
||||
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
|
||||
}
|
||||
*/
|
||||
return types.ResponseSetOption{}
|
||||
}
|
||||
|
||||
return types.ResponseSetOption{}
|
||||
}
|
||||
|
||||
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue != uint64(app.txCount) {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
}
|
||||
app.txCount++
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue < uint64(app.txCount) {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
}
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
func (app *Application) Commit() (resp types.ResponseCommit) {
|
||||
app.hashCount++
|
||||
if app.txCount == 0 {
|
||||
return types.ResponseCommit{}
|
||||
}
|
||||
hash := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
|
||||
return types.ResponseCommit{Data: hash}
|
||||
}
|
||||
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
switch reqQuery.Path {
|
||||
case "hash":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
|
||||
case "tx":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
|
||||
default:
|
||||
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
|
||||
}
|
||||
}
|
||||
@@ -68,6 +68,7 @@ type Application struct {
|
||||
|
||||
state State
|
||||
RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
|
||||
txToRemove map[string]struct{}
|
||||
}
|
||||
|
||||
func NewApplication() *Application {
|
||||
@@ -78,7 +79,7 @@ func NewApplication() *Application {
|
||||
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
|
||||
return types.ResponseInfo{
|
||||
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
|
||||
Version: version.ABCIVersion,
|
||||
Version: version.ABCISemVer,
|
||||
AppVersion: ProtocolVersion,
|
||||
LastBlockHeight: app.state.Height,
|
||||
LastBlockAppHash: app.state.AppHash,
|
||||
@@ -87,15 +88,19 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
|
||||
|
||||
// tx is either "key=value" or just arbitrary bytes
|
||||
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
var key, value []byte
|
||||
if isReplacedTx(req.Tx) {
|
||||
app.txToRemove[string(req.Tx)] = struct{}{}
|
||||
}
|
||||
var key, value string
|
||||
|
||||
parts := bytes.Split(req.Tx, []byte("="))
|
||||
if len(parts) == 2 {
|
||||
key, value = parts[0], parts[1]
|
||||
key, value = string(parts[0]), string(parts[1])
|
||||
} else {
|
||||
key, value = req.Tx, req.Tx
|
||||
key, value = string(req.Tx), string(req.Tx)
|
||||
}
|
||||
|
||||
err := app.state.db.Set(prefixKey(key), value)
|
||||
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -105,10 +110,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
|
||||
{
|
||||
Type: "app",
|
||||
Attributes: []types.EventAttribute{
|
||||
{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true},
|
||||
{Key: []byte("key"), Value: key, Index: true},
|
||||
{Key: []byte("index_key"), Value: []byte("index is working"), Index: true},
|
||||
{Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
|
||||
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
||||
{Key: "key", Value: key, Index: true},
|
||||
{Key: "index_key", Value: "index is working", Index: true},
|
||||
{Key: "noindex_key", Value: "index is working", Index: false},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -117,6 +122,11 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
|
||||
}
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
if req.Type == types.CheckTxType_Recheck {
|
||||
if _, ok := app.txToRemove[string(req.Tx)]; ok {
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeExecuted, GasWanted: 1}
|
||||
}
|
||||
}
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
|
||||
}
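A rough sketch (illustrative, not part of this diff) of how the recheck branch above pairs with the txToRemove bookkeeping done in DeliverTx and BeginBlock; it assumes the kvstore Application and its existing fmt and code imports:
func sketchRecheckEviction() {
	app := NewApplication()
	app.BeginBlock(types.RequestBeginBlock{}) // initializes/resets txToRemove
	tx := []byte("replace-me")                // carries the "replace" prefix
	app.DeliverTx(types.RequestDeliverTx{Tx: tx})
	// On the next mempool recheck the tx is reported as already executed,
	// so the mempool can evict it.
	res := app.CheckTx(types.RequestCheckTx{Tx: tx, Type: types.CheckTxType_Recheck})
	fmt.Println(res.Code == code.CodeTypeExecuted) // true
}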
|
||||
|
||||
@@ -126,6 +136,8 @@ func (app *Application) Commit() types.ResponseCommit {
|
||||
binary.PutVarint(appHash, app.state.Size)
|
||||
app.state.AppHash = appHash
|
||||
app.state.Height++
|
||||
|
||||
// empty out the set of transactions to remove via rechecktx
|
||||
saveState(app.state)
|
||||
|
||||
resp := types.ResponseCommit{Data: appHash}
|
||||
@@ -170,3 +182,18 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo
|
||||
|
||||
return resQuery
|
||||
}
|
||||
|
||||
func (app *Application) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||
app.txToRemove = map[string]struct{}{}
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
func (app *Application) ProcessProposal(
|
||||
req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
}
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ package kvstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -87,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -119,7 +119,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -162,7 +162,7 @@ func TestValUpdates(t *testing.T) {
|
||||
|
||||
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
|
||||
@@ -45,7 +45,10 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
|
||||
state := loadState(db)
|
||||
|
||||
return &PersistentKVStoreApplication{
|
||||
app: &Application{state: state},
|
||||
app: &Application{
|
||||
state: state,
|
||||
txToRemove: map[string]struct{}{},
|
||||
},
|
||||
valAddrToPubKeyMap: make(map[string]pc.PublicKey),
|
||||
logger: log.NewNopLogger(),
|
||||
}
|
||||
@@ -62,10 +65,6 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
|
||||
return res
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
|
||||
return app.app.SetOption(req)
|
||||
}
|
||||
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
// if it starts with "val:", update the validator set
|
||||
@@ -76,6 +75,10 @@ func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) t
|
||||
return app.execValidatorTx(req.Tx)
|
||||
}
|
||||
|
||||
if isPrepareTx(req.Tx) {
|
||||
return app.execPrepareTx(req.Tx)
|
||||
}
|
||||
|
||||
// otherwise, update the key-value store
|
||||
return app.app.DeliverTx(req)
|
||||
}
|
||||
@@ -126,7 +129,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.ByzantineValidators {
|
||||
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
|
||||
if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.updateValidator(types.ValidatorUpdate{
|
||||
@@ -142,7 +145,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
|
||||
}
|
||||
}
|
||||
|
||||
return types.ResponseBeginBlock{}
|
||||
return app.app.BeginBlock(req)
|
||||
}
|
||||
|
||||
// Update the validator set
|
||||
@@ -170,6 +173,21 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
|
||||
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) PrepareProposal(
|
||||
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
return types.ResponsePrepareProposal{Txs: app.substPrepareTx(req.Txs, req.MaxTxBytes)}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ProcessProposal(
|
||||
req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 || isPrepareTx(tx) {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
}
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
@@ -284,3 +302,42 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
|
||||
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
|
||||
const (
|
||||
PreparePrefix = "prepare"
|
||||
ReplacePrefix = "replace"
|
||||
)
|
||||
|
||||
func isPrepareTx(tx []byte) bool { return bytes.HasPrefix(tx, []byte(PreparePrefix)) }
|
||||
|
||||
func isReplacedTx(tx []byte) bool {
|
||||
return bytes.HasPrefix(tx, []byte(ReplacePrefix))
|
||||
}
|
||||
|
||||
// execPrepareTx is a noop. The tx data is considered a placeholder
// and is substituted at PrepareProposal.
|
||||
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx {
|
||||
// noop
|
||||
return types.ResponseDeliverTx{}
|
||||
}
|
||||
|
||||
// substPrepareTx rewrites every transaction prefixed with 'prepare' in the
// proposal into one carrying the 'replace' prefix, up to maxTxBytes.
|
||||
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) [][]byte {
|
||||
txs := make([][]byte, 0, len(blockData))
|
||||
var totalBytes int64
|
||||
for _, tx := range blockData {
|
||||
txMod := tx
|
||||
if isPrepareTx(tx) {
|
||||
txMod = bytes.Replace(tx, []byte(PreparePrefix), []byte(ReplacePrefix), 1)
|
||||
}
|
||||
totalBytes += int64(len(txMod))
|
||||
if totalBytes > maxTxBytes {
|
||||
break
|
||||
}
|
||||
txs = append(txs, txMod)
|
||||
}
|
||||
return txs
|
||||
}
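To make the substitution above concrete, a small sketch (assumptions: run against the PersistentKVStoreApplication from this diff, with fmt available in the package):
func sketchPrepareSubstitution(app *PersistentKVStoreApplication) {
	res := app.PrepareProposal(types.RequestPrepareProposal{
		Txs:        [][]byte{[]byte("prepare:k=v"), []byte("a=b")},
		MaxTxBytes: 65536, // hypothetical limit
	})
	// "prepare:k=v" becomes "replace:k=v"; "a=b" passes through untouched.
	// ProcessProposal above would reject any proposal still carrying a
	// "prepare"-prefixed tx.
	fmt.Printf("%s\n", res.Txs[0]) // replace:k=v
}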
|
||||
|
||||
573
abci/example/orderbook/app.go
Normal file
@@ -0,0 +1,573 @@
|
||||
package orderbook
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
fmt "fmt"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
var _ types.Application = (*StateMachine)(nil)
|
||||
|
||||
const Version = 1
|
||||
|
||||
const (
|
||||
// In Tendermint a zero code is OK and all non-zero codes are errors
|
||||
StatusOK = iota
|
||||
StatusErrDecoding
|
||||
StatusErrUnknownMessage
|
||||
StatusErrValidateBasic
|
||||
StatusErrNoAccount
|
||||
StatusErrAccountExists
|
||||
StatusErrNoPair
|
||||
StatusErrPairExists
|
||||
StatusErrInvalidOrder
|
||||
StatusErrUnacceptableMessage
|
||||
StatusErrNoCommodity
|
||||
)
|
||||
|
||||
var (
|
||||
stateKey = []byte("state")
|
||||
accountKey = []byte("account")
|
||||
pairKey = []byte("pair")
|
||||
)
|
||||
|
||||
// StateMachine is the main struct that encompasses the logic of the orderbook
|
||||
type StateMachine struct {
|
||||
// inherit all the abci methods so we don't have to implement everything
|
||||
types.BaseApplication
|
||||
|
||||
// persisted state which is a key value store containing:
|
||||
// accountID -> account
|
||||
// pairID -> pair
|
||||
db dbm.DB
|
||||
|
||||
// in-memory state
|
||||
lastHeight int64 // the last height that was persisted
|
||||
lastHash []byte // the last hash that was persisted
|
||||
// list of accounts (this is used for the app hash)
|
||||
accounts []*Account
|
||||
pairs map[string]*Pair // lookup pairs
|
||||
commodities map[string]struct{} // lookup commodities
|
||||
publicKeys map[string]struct{} // lookup existence of an account
|
||||
// the set of accounts that have been modified by the most recent block
|
||||
// and will need to result in an update to the db
|
||||
touchedAccounts map[uint64]struct{}
|
||||
// new pairs added in this block which will need to be added to the
|
||||
// db on "Commit"
|
||||
newPairs []*Pair
|
||||
|
||||
// app-side mempool (also ephemeral)
|
||||
// this takes ask and bid transactions from `CheckTx`
|
||||
// and matches them as a "MatchedOrder" which is
|
||||
// then proposed in a block
|
||||
//
|
||||
// it's important to note that there is no garbage collection
|
||||
// here. Bids and asks, potentially even invalid, will
|
||||
// continue to stay here until matched
|
||||
markets map[string]*Market // i.e. ATOM/USDC
|
||||
}
|
||||
|
||||
// New creates a StateMachine from a given database. If the database is
|
||||
// empty, a fresh instance is created; otherwise the accounts, pairs and
// state are loaded into memory.
|
||||
func New(db dbm.DB) (*StateMachine, error) {
|
||||
// iterate over all the account keys
|
||||
iter, err := db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer iter.Close()
|
||||
|
||||
var (
|
||||
accounts = make([]*Account, 0)
|
||||
publicKeys = make(map[string]struct{})
|
||||
commodities = make(map[string]struct{})
|
||||
pairs = make(map[string]*Pair)
|
||||
markets = make(map[string]*Market)
|
||||
lastHeight uint64
|
||||
lastHash []byte
|
||||
)
|
||||
|
||||
for ; iter.Valid(); iter.Next() {
|
||||
if bytes.HasPrefix(iter.Key(), pairKey) {
|
||||
var pair Pair
|
||||
if err := proto.Unmarshal(iter.Value(), &pair); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pairs[pair.String()] = &pair
|
||||
commodities[pair.BuyersDenomination] = struct{}{}
|
||||
markets[pair.String()] = NewMarket(&pair)
|
||||
}
|
||||
|
||||
if bytes.HasPrefix(iter.Key(), accountKey) {
|
||||
var acc Account
|
||||
if err := proto.Unmarshal(iter.Value(), &acc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
accounts = append(accounts, &acc)
|
||||
publicKeys[string(acc.PublicKey)] = struct{}{}
|
||||
}
|
||||
|
||||
if bytes.HasPrefix(iter.Key(), stateKey) {
|
||||
state := iter.Value()
|
||||
lastHeight = binary.BigEndian.Uint64(state[:8])
lastHash = state[8:]
|
||||
}
|
||||
}
|
||||
|
||||
return &StateMachine{
|
||||
accounts: accounts,
|
||||
pairs: pairs,
|
||||
commodities: commodities,
|
||||
publicKeys: publicKeys,
|
||||
markets: markets,
|
||||
lastHeight: int64(lastHeight),
|
||||
lastHash: lastHash,
|
||||
db: db,
|
||||
touchedAccounts: make(map[uint64]struct{}),
|
||||
newPairs: make([]*Pair, 0),
|
||||
}, nil
|
||||
}
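A brief usage sketch (not part of the file) showing how New is expected to be wired up with InitDB, which is defined further down:
func sketchNewStateMachine() (*StateMachine, error) {
	db := dbm.NewMemDB() // in-memory DB from tm-db, already imported above
	pair := &Pair{BuyersDenomination: "NZD", SellersDenomination: "AUD"}
	if err := InitDB(db, []*Pair{pair}, nil); err != nil {
		return nil, err
	}
	// New reads the pairs, accounts and last persisted state back into memory.
	return New(db)
}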
|
||||
|
||||
// Info is used by Tendermint to understand the state of the application.
|
||||
// This is useful for replay and syncing modes.
|
||||
func (sm *StateMachine) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
return types.ResponseInfo{
|
||||
AppVersion: Version,
|
||||
LastBlockHeight: sm.lastHeight,
|
||||
LastBlockAppHash: sm.lastHash,
|
||||
}
|
||||
}
|
||||
|
||||
// CheckTx indicates which transactions should be accepted in the mempool. It is
|
||||
// not a perfect validity check because we're unsure of the state that the transaction
|
||||
// will be executed against. We should treat this as a gatekeeper to the mempool.
|
||||
// Apart from adding transactions to the app-side mempool, this check is stateless.
|
||||
func (sm *StateMachine) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
var msg = new(Msg)
|
||||
|
||||
err := proto.Unmarshal(req.Tx, msg)
|
||||
if err != nil {
|
||||
return types.ResponseCheckTx{Code: StatusErrDecoding, Log: err.Error()} // decoding error
|
||||
}
|
||||
|
||||
if err := msg.ValidateBasic(); err != nil {
|
||||
return types.ResponseCheckTx{Code: StatusErrValidateBasic, Log: err.Error()}
|
||||
}
|
||||
|
||||
// add either bids or asks to the market which will match them in PrepareProposal
|
||||
switch m := msg.Sum.(type) {
|
||||
case *Msg_MsgAsk:
|
||||
market, ok := sm.markets[m.MsgAsk.Pair.String()]
|
||||
if !ok {
|
||||
return types.ResponseCheckTx{Code: StatusErrNoPair}
|
||||
}
|
||||
market.AddAsk(m.MsgAsk.AskOrder)
|
||||
case *Msg_MsgBid:
|
||||
market, ok := sm.markets[m.MsgBid.Pair.String()]
|
||||
if !ok {
|
||||
return types.ResponseCheckTx{Code: StatusErrNoPair}
|
||||
}
|
||||
market.AddBid(m.MsgBid.BidOrder)
|
||||
}
|
||||
|
||||
return types.ResponseCheckTx{Code: StatusOK}
|
||||
}
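As an illustration of the app-side mempool behaviour (a sketch only; the Signature value is a placeholder and the sketch assumes ValidateBasic checks well-formedness rather than verifying the signature here):
func sketchCheckAsk(sm *StateMachine, pair *Pair) {
	msg := &Msg{Sum: &Msg_MsgAsk{MsgAsk: &MsgAsk{
		Pair: pair,
		AskOrder: &OrderAsk{
			Quantity:  5,
			AskPrice:  5,
			OwnerId:   1,
			Signature: make([]byte, ed25519.SignatureSize), // placeholder bytes
		},
	}}}
	bz, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	res := sm.CheckTx(types.RequestCheckTx{Tx: bz})
	// With StatusOK the ask now sits in the market's app-side mempool,
	// waiting to be matched in PrepareProposal.
	fmt.Printf("check code: %d\n", res.Code)
}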
|
||||
|
||||
// ValidateTx validates the transactions against state.
|
||||
func (sm *StateMachine) ValidateTx(msg *Msg) uint32 {
|
||||
if err := msg.ValidateBasic(); err != nil {
|
||||
return StatusErrValidateBasic
|
||||
}
|
||||
|
||||
switch m := msg.Sum.(type) {
|
||||
case *Msg_MsgRegisterPair:
|
||||
pair := m.MsgRegisterPair.Pair
|
||||
if _, ok := sm.pairs[pair.String()]; ok {
|
||||
return StatusErrPairExists
|
||||
}
|
||||
|
||||
reversePair := &Pair{BuyersDenomination: pair.SellersDenomination, SellersDenomination: pair.BuyersDenomination}
|
||||
if _, ok := sm.pairs[reversePair.String()]; ok {
|
||||
return StatusErrPairExists
|
||||
}
|
||||
|
||||
case *Msg_MsgAsk, *Msg_MsgBid: // MsgAsk and MsgBid are not allowed individually - they need to be matched as a TradeSet
|
||||
return StatusErrUnacceptableMessage // TODO: add logic to allow individual ask and bid messages
|
||||
|
||||
case *Msg_MsgCreateAccount:
|
||||
// check for duplicate accounts in state machine
|
||||
if _, ok := sm.publicKeys[string(m.MsgCreateAccount.PublicKey)]; ok {
|
||||
return StatusErrAccountExists
|
||||
}
|
||||
|
||||
// check that each of the commodities is present in at least one trading pair
|
||||
for _, commodity := range m.MsgCreateAccount.Commodities {
|
||||
if _, exists := sm.commodities[commodity.Denom]; !exists {
|
||||
return StatusErrNoCommodity
|
||||
}
|
||||
}
|
||||
|
||||
case *Msg_MsgTradeSet:
|
||||
// check the pair exists
|
||||
if _, ok := sm.pairs[m.MsgTradeSet.TradeSet.Pair.String()]; !ok {
|
||||
return StatusErrNoPair
|
||||
}
|
||||
|
||||
for _, order := range m.MsgTradeSet.TradeSet.MatchedOrders {
|
||||
// validate matched order i.e. users have funds and signatures are valid
|
||||
if !sm.isMatchedOrderValid(order, m.MsgTradeSet.TradeSet.Pair) {
|
||||
return StatusErrInvalidOrder
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return StatusErrUnknownMessage
|
||||
}
|
||||
|
||||
return StatusOK
|
||||
}
|
||||
|
||||
// PrepareProposal is called whenever the validator is the proposer for that round. First, it adds the non-order
// transactions provided by Tendermint. The orderbook then loops through each market and tries to match as many
// transactions as possible. For each new transaction it checks that the max-bytes limit has not been exceeded.
|
||||
func (sm *StateMachine) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
// start with an empty list of transactions
|
||||
txs := make([][]byte, 0)
|
||||
|
||||
// go through the transactions passed up via Tendermint first
|
||||
for _, tx := range req.Txs {
|
||||
var msg = new(Msg)
|
||||
err := proto.Unmarshal(tx, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// skip over the bids and asks that are proposed. We already have them
|
||||
if _, ok := msg.Sum.(*Msg_MsgBid); ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := msg.Sum.(*Msg_MsgAsk); ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// make sure we're proposing valid transactions
|
||||
if status := sm.ValidateTx(msg); status != StatusOK {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(txs)+len(tx) > int(req.MaxTxBytes) {
|
||||
return types.ResponsePrepareProposal{Txs: txs}
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
|
||||
// fetch and match all the bids and asks for each market and add these
|
||||
for _, market := range sm.markets {
|
||||
tradeSet := market.Match()
|
||||
// each matched trade set is marshaled into bytes and included as a single transaction
|
||||
if tradeSet == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Println("we have a tradeset")
|
||||
|
||||
tradeSet = sm.validateTradeSetAgainstState(tradeSet)
|
||||
if tradeSet == nil || len(tradeSet.MatchedOrders) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Println("we have a valid tradeset")
|
||||
|
||||
// wrap this as a message type
|
||||
msgTradeSet := &MsgTradeSet{TradeSet: tradeSet}
|
||||
bz, err := proto.Marshal(msgTradeSet)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// check to see that we don't over populate the block
|
||||
if len(txs)+len(bz) > int(req.MaxTxBytes) {
|
||||
return types.ResponsePrepareProposal{Txs: txs}
|
||||
}
|
||||
txs = append(txs, bz)
|
||||
}
|
||||
|
||||
return types.ResponsePrepareProposal{Txs: req.Txs}
|
||||
}
|
||||
|
||||
// ProcessProposal either rejects or accepts the transactions in a proposal.
//
// It uses the same validity function as PrepareProposal. This ensures the coherence property
// is adhered to, i.e. all honest validators must accept a proposal by an honest proposer.
|
||||
func (sm *StateMachine) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
for _, tx := range req.Txs {
|
||||
var msg = new(Msg)
|
||||
err := proto.Unmarshal(tx, msg)
|
||||
if err != nil {
|
||||
return rejectProposal()
|
||||
}
|
||||
|
||||
if status := sm.ValidateTx(msg); status != StatusOK {
|
||||
fmt.Printf("tx failed validation, status: %d\n", status)
|
||||
return rejectProposal()
|
||||
}
|
||||
}
|
||||
|
||||
return acceptProposal()
|
||||
}
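A quick sketch of the rejection path (illustrative only, assuming the StateMachine above):
func sketchRejectBadProposal(sm *StateMachine) {
	res := sm.ProcessProposal(types.RequestProcessProposal{
		Txs: [][]byte{[]byte("not a valid Msg")}, // fails to decode / validate
	})
	// One bad tx is enough to reject the whole proposal.
	fmt.Println(res.Status == types.ResponseProcessProposal_REJECT) // true
}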
|
||||
|
||||
func (sm *StateMachine) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||
// reset the new pairs
|
||||
sm.newPairs = make([]*Pair, 0)
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
// DeliverTx is called for each tx in a block once it has been finalized. This is where the
|
||||
// execution code lives. Most importantly it's where we update the user accounts following
|
||||
// a successful order.
|
||||
func (sm *StateMachine) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
var msg = new(Msg)
|
||||
|
||||
err := proto.Unmarshal(req.Tx, msg)
|
||||
if err != nil {
|
||||
return types.ResponseDeliverTx{Code: StatusErrDecoding, Log: err.Error()} // decoding error
|
||||
}
|
||||
|
||||
if status := sm.ValidateTx(msg); status != StatusOK {
|
||||
return types.ResponseDeliverTx{Code: status}
|
||||
}
|
||||
|
||||
switch m := msg.Sum.(type) {
|
||||
case *Msg_MsgRegisterPair:
|
||||
sm.markets[m.MsgRegisterPair.Pair.String()] = NewMarket(m.MsgRegisterPair.Pair)
|
||||
sm.pairs[m.MsgRegisterPair.Pair.String()] = m.MsgRegisterPair.Pair
|
||||
sm.commodities[m.MsgRegisterPair.Pair.BuyersDenomination] = struct{}{}
|
||||
sm.commodities[m.MsgRegisterPair.Pair.SellersDenomination] = struct{}{}
|
||||
sm.newPairs = append(sm.newPairs, m.MsgRegisterPair.Pair)
|
||||
|
||||
case *Msg_MsgCreateAccount:
|
||||
nextAccountID := uint64(len(sm.accounts))
|
||||
sm.accounts = append(sm.accounts, &Account{
|
||||
Index: nextAccountID,
|
||||
PublicKey: m.MsgCreateAccount.PublicKey,
|
||||
Commodities: m.MsgCreateAccount.Commodities,
|
||||
})
|
||||
sm.touchedAccounts[nextAccountID] = struct{}{}
|
||||
sm.publicKeys[string(m.MsgCreateAccount.PublicKey)] = struct{}{}
|
||||
|
||||
case *Msg_MsgTradeSet:
|
||||
pair := m.MsgTradeSet.TradeSet.Pair
|
||||
for _, order := range m.MsgTradeSet.TradeSet.MatchedOrders {
|
||||
buyer := sm.accounts[order.OrderBid.OwnerId]
|
||||
seller := sm.accounts[order.OrderAsk.OwnerId]
|
||||
|
||||
// the buyer gets quantity of the asset that the seller was selling
|
||||
buyer.AddCommodity(NewCommodity(pair.SellersDenomination, order.OrderAsk.Quantity))
|
||||
// the buyer gives up quantity * ask price of the buyers denomination
|
||||
buyer.SubtractCommodity(NewCommodity(pair.BuyersDenomination, order.OrderAsk.Quantity*order.OrderAsk.AskPrice))
|
||||
|
||||
// the seller gets quantity * ask price of the asset that the buyer was paying with
|
||||
seller.AddCommodity(NewCommodity(pair.BuyersDenomination, order.OrderAsk.Quantity*order.OrderAsk.AskPrice))
|
||||
// the seller gives up quantity of the commodity they were selling
|
||||
seller.SubtractCommodity(NewCommodity(pair.SellersDenomination, order.OrderAsk.Quantity))
|
||||
|
||||
// mark that these account have been touched
|
||||
sm.touchedAccounts[order.OrderBid.OwnerId] = struct{}{}
|
||||
sm.touchedAccounts[order.OrderAsk.OwnerId] = struct{}{}
|
||||
}
|
||||
|
||||
default:
|
||||
return types.ResponseDeliverTx{Code: StatusErrUnknownMessage}
|
||||
}
|
||||
|
||||
return types.ResponseDeliverTx{Code: 0}
|
||||
}
|
||||
|
||||
// EndBlock is used to update consensus params and the validator set. For the orderbook,
// we keep both the same throughout.
|
||||
func (sm *StateMachine) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
|
||||
return types.ResponseEndBlock{}
|
||||
}
|
||||
|
||||
// Commit is called to tell the app it is safe to persist state to disk.
|
||||
// We now take the in-memory representation and update the parts that have
|
||||
// changed on to disk.
|
||||
func (sm *StateMachine) Commit() types.ResponseCommit {
|
||||
batch := sm.db.NewBatch()
|
||||
|
||||
// write to accounts that were modified by the last block
|
||||
for accountID := range sm.touchedAccounts {
|
||||
value, err := proto.Marshal(sm.accounts[accountID])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
key := binary.BigEndian.AppendUint64(accountKey, accountID)
|
||||
if err := batch.Set(key, value); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// write the new pairs that were added by the last block
|
||||
pairID := len(sm.pairs) - len(sm.newPairs)
|
||||
for id, pair := range sm.newPairs {
|
||||
value, err := proto.Marshal(pair)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
key := binary.BigEndian.AppendUint64(pairKey, uint64(pairID+id))
|
||||
if err := batch.Set(key, value); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
hash := sm.hash()
|
||||
err := sm.updateState(batch, sm.lastHeight+1, hash)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = batch.WriteSync()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return types.ResponseCommit{Data: hash}
|
||||
}
|
||||
|
||||
// hash is just the sha256 of the byte representation of all accounts.
|
||||
// remember that this needs to be deterministic for all state machines
|
||||
func (sm *StateMachine) hash() []byte {
|
||||
digest := bytes.NewBuffer(nil)
|
||||
for _, account := range sm.accounts {
|
||||
bz, err := proto.Marshal(account)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
digest.Write(bz)
|
||||
}
|
||||
return tmhash.Sum(digest.Bytes())
|
||||
}
|
||||
|
||||
func (sm *StateMachine) updateState(batch dbm.Batch, height int64, hash []byte) error {
|
||||
sm.lastHash = hash
|
||||
sm.lastHeight = height
|
||||
heightBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(heightBytes, uint64(height))
|
||||
return batch.Set(stateKey, append(heightBytes, hash...))
|
||||
}
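For reference, a sketch of the state record round-trip implied by updateState and the loader in New (8 bytes of big-endian height followed by the app hash):
func sketchStateRecord(height uint64, hash []byte) (uint64, []byte) {
	heightBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(heightBytes, height)
	record := append(heightBytes, hash...)
	// New splits the record the same way on startup.
	return binary.BigEndian.Uint64(record[:8]), record[8:]
}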
|
||||
|
||||
func (sm *StateMachine) validateTradeSetAgainstState(tradeSet *TradeSet) *TradeSet {
|
||||
output := &TradeSet{Pair: tradeSet.Pair}
|
||||
|
||||
for _, matchedOrder := range tradeSet.MatchedOrders {
|
||||
if !sm.isMatchedOrderValid(matchedOrder, tradeSet.Pair) {
|
||||
continue
|
||||
}
|
||||
|
||||
// yayy! this matched order is still valid and can be executed
|
||||
output.MatchedOrders = append(output.MatchedOrders, matchedOrder)
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// isMatchedOrderValid is a check against current state to ensure that the order
|
||||
// is valid and can execute.
|
||||
//
|
||||
// This method is also called when preparing a proposal since `CheckTx` doesn't have
|
||||
// strict validity guarantees and there could be invalid transactions within the mempool
|
||||
//
|
||||
// Note: if one of the two orders is invalid we discard both. In the future we could
|
||||
// improve this by adding back the part of the order that might still be valid.
|
||||
func (sm *StateMachine) isMatchedOrderValid(order *MatchedOrder, pair *Pair) bool {
|
||||
if int(order.OrderBid.OwnerId) >= len(sm.accounts) {
|
||||
return false
|
||||
}
|
||||
bidOwner := sm.accounts[order.OrderBid.OwnerId]
|
||||
if bidOwner == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if int(order.OrderAsk.OwnerId) >= len(sm.accounts) {
|
||||
return false
|
||||
}
|
||||
askOwner := sm.accounts[order.OrderAsk.OwnerId]
|
||||
if askOwner == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
askCommodities := askOwner.FindCommidity(pair.SellersDenomination)
|
||||
if askCommodities == nil {
|
||||
return false
|
||||
}
|
||||
buyCommodities := bidOwner.FindCommidity(pair.BuyersDenomination)
|
||||
if buyCommodities == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Seller has enough of the commodity
|
||||
if askCommodities.Quantity-order.OrderAsk.Quantity < 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Buyer has enough of the buying commodity
|
||||
if buyCommodities.Quantity-(order.OrderAsk.AskPrice*order.OrderAsk.Quantity) < 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if !order.OrderAsk.ValidateSignature(ed25519.PubKey(askOwner.PublicKey), pair) {
|
||||
return false
|
||||
}
|
||||
if !order.OrderBid.ValidateSignature(ed25519.PubKey(bidOwner.PublicKey), pair) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// InitDB takes an empty DB instance and populates it with the
|
||||
// provided pairs and accounts. Note that the order here is important
|
||||
func InitDB(db dbm.DB, pairs []*Pair, accounts []*Account) error {
|
||||
batch := db.NewBatch()
|
||||
|
||||
for id, account := range accounts {
|
||||
value, err := proto.Marshal(account)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := binary.BigEndian.AppendUint64(accountKey, uint64(id))
|
||||
if err := batch.Set(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for id, pair := range pairs {
|
||||
value, err := proto.Marshal(pair)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := binary.BigEndian.AppendUint64(pairKey, uint64(id))
|
||||
fmt.Println(key)
|
||||
if err := batch.Set(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return batch.WriteSync()
|
||||
}
|
||||
|
||||
func rejectProposal() types.ResponseProcessProposal {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
|
||||
func acceptProposal() types.ResponseProcessProposal {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
327
abci/example/orderbook/app_test.go
Normal file
@@ -0,0 +1,327 @@
|
||||
package orderbook_test
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
"github.com/stretchr/testify/require"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/orderbook"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
params "github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// TODO: we should also check that CheckTx adds bids and asks to the app-side mempool
|
||||
func TestCheckTx(t *testing.T) {
|
||||
db := dbm.NewMemDB()
|
||||
require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
|
||||
app, err := orderbook.New(db)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
msg *orderbook.Msg
|
||||
responseCode uint32
|
||||
expOrderSize int
|
||||
}{
|
||||
{
|
||||
name: "test empty tx",
|
||||
msg: &orderbook.Msg{},
|
||||
responseCode: orderbook.StatusErrValidateBasic,
|
||||
expOrderSize: 0,
|
||||
},
|
||||
{
|
||||
name: "test msg ask",
|
||||
msg: &orderbook.Msg{
|
||||
Sum: &orderbook.Msg_MsgAsk{
|
||||
MsgAsk: &orderbook.MsgAsk{
|
||||
Pair: testPair,
|
||||
AskOrder: &orderbook.OrderAsk{
|
||||
Quantity: 10,
|
||||
AskPrice: 1,
|
||||
OwnerId: 1,
|
||||
Signature: crypto.CRandBytes(ed25519.SignatureSize),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
responseCode: orderbook.StatusOK,
|
||||
expOrderSize: 1,
|
||||
},
|
||||
{
|
||||
name: "test msg ask wrong signature",
|
||||
msg: &orderbook.Msg{
|
||||
Sum: &orderbook.Msg_MsgAsk{
|
||||
MsgAsk: &orderbook.MsgAsk{
|
||||
Pair: testPair,
|
||||
AskOrder: &orderbook.OrderAsk{
|
||||
Quantity: 10,
|
||||
AskPrice: 1,
|
||||
OwnerId: 1,
|
||||
Signature: crypto.CRandBytes(62),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
responseCode: orderbook.StatusErrValidateBasic,
|
||||
expOrderSize: 1,
|
||||
},
|
||||
{
|
||||
name: "test msg bid",
|
||||
msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: &orderbook.MsgBid{
|
||||
Pair: testPair,
|
||||
BidOrder: &orderbook.OrderBid{
|
||||
MaxQuantity: 15,
|
||||
MaxPrice: 5,
|
||||
OwnerId: 1,
|
||||
Signature: crypto.CRandBytes(ed25519.SignatureSize),
|
||||
},
|
||||
}}},
|
||||
responseCode: orderbook.StatusOK,
|
||||
expOrderSize: 2,
|
||||
},
|
||||
{
|
||||
name: "test msg bid blank",
|
||||
msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: &orderbook.MsgBid{
|
||||
Pair: testPair,
|
||||
BidOrder: &orderbook.OrderBid{
|
||||
MaxQuantity: 0,
|
||||
MaxPrice: 0,
|
||||
OwnerId: 0,
|
||||
Signature: crypto.CRandBytes(ed25519.SignatureSize),
|
||||
},
|
||||
}}},
|
||||
responseCode: orderbook.StatusErrValidateBasic,
|
||||
expOrderSize: 2,
|
||||
},
|
||||
{
|
||||
name: "test msg register duplicate pair",
|
||||
msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
|
||||
Pair: &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "ATOM"},
|
||||
}}},
|
||||
responseCode: orderbook.StatusErrValidateBasic,
|
||||
expOrderSize: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
bz, err := proto.Marshal(tc.msg)
|
||||
require.NoError(t, err)
|
||||
resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
|
||||
require.Equal(t, tc.responseCode, resp.Code, resp.Log)
|
||||
bids, asks := app.Orders(testPair)
|
||||
require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// func ValidateTx(t *testing.T) {
|
||||
// db := dbm.NewMemDB()
|
||||
// require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
|
||||
// app, err := orderbook.New(db)
|
||||
// require.NoError(t, err)
|
||||
|
||||
// for _, tc := range testCases {
|
||||
// t.Run(tc.name, func(t *testing.T) {
|
||||
// bz, err := proto.Marshal(tc.msg)
|
||||
// require.NoError(t, err)
|
||||
// resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
|
||||
// require.Equal(t, tc.responseCode, resp.Code, resp.Log)
|
||||
// bids, asks := app.Orders(testPair)
|
||||
// require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
|
||||
// TODO: we should check that transactions in
|
||||
// a market are being validated and added to the proposal
|
||||
// // and that other transactions get in
|
||||
// func TestPrepareProposal(t *testing.T) {
|
||||
// db := dbm.NewMemDB()
|
||||
// require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
|
||||
// app, err := orderbook.New(db)
|
||||
// require.NoError(t, err)
|
||||
|
||||
// for _, tc := range testCases {
|
||||
// t.Run(tc.name, func(t *testing.T) {
|
||||
// bz, err := proto.Marshal(tc.msg)
|
||||
// require.NoError(t, err)
|
||||
// resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
|
||||
// require.Equal(t, tc.responseCode, resp.Code, resp.Log)
|
||||
// bids, asks := app.Orders(testPair)
|
||||
// require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
|
||||
// {
|
||||
// name: "test msg register pair",
|
||||
// msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
|
||||
// Pair: &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "AUD"},
|
||||
// }}},
|
||||
// responseCode: orderbook.StatusOK,
|
||||
// expOrderSize: 2,
|
||||
// pairSize: 2,
|
||||
// },
|
||||
|
||||
// TODO: we should test that transactions are
|
||||
// always valid i.e. ValidateTx. We could potentially
|
||||
// combine this with PrepareProposal
|
||||
// func TestProcessProposal(t *testing.T) {
|
||||
// app := orderbook.New(dbm.NewMemDB())
|
||||
// }
|
||||
|
||||
// TODO: we should test that a matched order
|
||||
// correctly updates the accounts. We should
|
||||
// also test that committing a block persists
|
||||
// it to the database and that we can now
|
||||
// query the new state
|
||||
// func TestFinalizeBlock(t *testing.T) {
|
||||
// app := orderbook.New(dbm.NewMemDB())
|
||||
// }
|
||||
|
||||
// TODO: test that we can start from new
|
||||
// and from existing state
|
||||
// func TestNewStateMachine(t *testing.T) {}
|
||||
|
||||
func TestEndToEnd(t *testing.T) {
|
||||
db := dbm.NewMemDB()
|
||||
app, err := orderbook.New(db)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
var (
|
||||
maxBytes = params.DefaultConsensusParams().Block.MaxBytes
|
||||
commodityNZD = &orderbook.Commodity{Denom: "NZD", Quantity: 100}
|
||||
commodityAUD = &orderbook.Commodity{Denom: "AUD", Quantity: 100}
|
||||
registerPairMsg = newRegisterPair("NZD", "AUD")
|
||||
pair = registerPairMsg.GetMsgRegisterPair().Pair
|
||||
pkAlice = ed25519.GenPrivKey()
|
||||
pkBob = ed25519.GenPrivKey()
|
||||
pubKeyAlice = pkAlice.PubKey().Bytes()
|
||||
pubKeyBob = pkBob.PubKey().Bytes()
|
||||
registerAlice = newRegisterAccount(pubKeyAlice, []*orderbook.Commodity{commodityAUD})
|
||||
registerBob = newRegisterAccount(pubKeyBob, []*orderbook.Commodity{commodityNZD})
|
||||
// bob is asking for 25 AUD for 5 NZD
|
||||
ask = &orderbook.Msg{Sum: &orderbook.Msg_MsgAsk{MsgAsk: orderbook.NewMsgAsk(pair, 5, 5, 1)}}
|
||||
// alice is bidding for 5 NZD for 25 AUD
|
||||
bid = &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: orderbook.NewMsgBid(pair, 5, 5, 0)}}
|
||||
)
|
||||
|
||||
require.NoError(t, ask.GetMsgAsk().Sign(pkBob))
|
||||
require.NoError(t, bid.GetMsgBid().Sign(pkAlice))
|
||||
|
||||
testCases := []struct {
|
||||
txs [][]byte
|
||||
accepted bool
|
||||
// assertions to be made about the state of the application
|
||||
// after each block
|
||||
assertions func(t *testing.T, app *orderbook.StateMachine)
|
||||
}{
|
||||
{
|
||||
// block 1 sets up the trading pair
|
||||
txs: asTxs(registerPairMsg),
|
||||
accepted: true,
|
||||
assertions: func(t *testing.T, app *orderbook.StateMachine) {
|
||||
pairs := app.Pairs()
|
||||
require.Len(t, pairs, 1)
|
||||
require.Equal(t, pair, &pairs[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
// block 2 registers two accounts: alice and bob
|
||||
txs: asTxs(registerAlice, registerBob),
|
||||
accepted: true,
|
||||
assertions: func(t *testing.T, app *orderbook.StateMachine) {
|
||||
alice := app.Account(0)
|
||||
require.False(t, alice.IsEmpty(), alice)
|
||||
require.Equal(t, pubKeyAlice, alice.PublicKey)
|
||||
require.Len(t, alice.Commodities, 1)
|
||||
require.Equal(t, alice.Commodities[0], commodityAUD)
|
||||
bob := app.Account(1)
|
||||
require.False(t, bob.IsEmpty(), bob)
|
||||
require.Equal(t, pubKeyBob, bob.PublicKey)
|
||||
require.Len(t, bob.Commodities, 1)
|
||||
require.Equal(t, bob.Commodities[0], commodityNZD)
|
||||
require.True(t, app.Account(2).IsEmpty())
|
||||
},
|
||||
},
|
||||
{
|
||||
// block 3 performs a trade between alice and bob
|
||||
txs: asTxs(ask, bid),
|
||||
accepted: true,
|
||||
assertions: func(t *testing.T, app *orderbook.StateMachine) {
|
||||
alice := app.Account(0)
|
||||
require.Equal(t, alice.Commodities[0].Quantity, 75) // 75 AUD
|
||||
require.Equal(t, alice.Commodities[1].Quantity, 5) // 5 NZD
|
||||
bob := app.Account(1)
|
||||
require.Equal(t, bob.Commodities[0].Quantity, 95) // 95 NZD
|
||||
require.Equal(t, bob.Commodities[0].Quantity, 5) // 5 AUD
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for idx, tc := range testCases {
|
||||
for _, tx := range tc.txs {
|
||||
resp := app.CheckTx(types.RequestCheckTx{Tx: tx})
|
||||
require.EqualValues(t, orderbook.StatusOK, resp.Code)
|
||||
}
|
||||
txs := app.PrepareProposal(types.RequestPrepareProposal{MaxTxBytes: maxBytes, Txs: tc.txs}).Txs
|
||||
require.Equal(t, txs, tc.txs)
|
||||
if idx == 2 {
|
||||
fmt.Print(tc.txs)
|
||||
fmt.Println()
|
||||
fmt.Print(txs)
|
||||
}
|
||||
|
||||
result := app.ProcessProposal(types.RequestProcessProposal{Txs: txs})
|
||||
if tc.accepted {
|
||||
require.Equal(t, types.ResponseProcessProposal_ACCEPT, result.Status)
|
||||
} else {
|
||||
require.Equal(t, types.ResponseProcessProposal_REJECT, result.Status)
|
||||
continue
|
||||
}
|
||||
|
||||
app.BeginBlock(types.RequestBeginBlock{})
|
||||
for _, tx := range txs {
|
||||
app.DeliverTx(types.RequestDeliverTx{Tx: tx})
|
||||
}
|
||||
app.EndBlock(types.RequestEndBlock{})
|
||||
app.Commit()
|
||||
|
||||
if tc.assertions != nil {
|
||||
tc.assertions(t, app)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func asTxs(msgs ...*orderbook.Msg) [][]byte {
|
||||
output := make([][]byte, len(msgs))
|
||||
for i, msg := range msgs {
|
||||
bz, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
output[i] = bz
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func newRegisterPair(d1, d2 string) *orderbook.Msg {
|
||||
return &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
|
||||
Pair: &orderbook.Pair{BuyersDenomination: d1, SellersDenomination: d2},
|
||||
}}}
|
||||
}
|
||||
|
||||
func newRegisterAccount(pubkey []byte, commodities []*orderbook.Commodity) *orderbook.Msg {
|
||||
return &orderbook.Msg{Sum: &orderbook.Msg_MsgCreateAccount{MsgCreateAccount: &orderbook.MsgCreateAccount{
|
||||
PublicKey: pubkey,
|
||||
Commodities: commodities,
|
||||
}}}
|
||||
|
||||
}
|
||||
abci/example/orderbook/buf.gen.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
version: v1
|
||||
plugins:
|
||||
- name: gogofaster
|
||||
out: .
|
||||
opt:
|
||||
- Mgoogle/protobuf/timestamp.proto=github.com/cosmos/gogoproto/types
|
||||
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
|
||||
- plugins=grpc
|
||||
- paths=source_relative
|
||||
abci/example/orderbook/cmd/orderbook.go (new file, 243 lines)
@@ -0,0 +1,243 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/orderbook"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/node"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
NewCLI().Run()
|
||||
}
|
||||
|
||||
type CLI struct {
|
||||
root *cobra.Command
|
||||
config *cfg.Config
|
||||
}
|
||||
|
||||
func NewCLI() *CLI {
|
||||
cli := &CLI{}
|
||||
cli.root = &cobra.Command{
|
||||
Use: "orderbook",
|
||||
Short: "orderbook abci++ example",
|
||||
}
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "init",
|
||||
Short: "initialize the file system for an orderbook node",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
root, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
viper.AddConfigPath(filepath.Join(root, "config"))
|
||||
viper.SetConfigName("config")
|
||||
|
||||
config := cfg.DefaultConfig()
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
// Config file not found; use default
|
||||
// This often happens when initializing a config for the first time
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := viper.Unmarshal(config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
config.SetRoot(root)
|
||||
cli.config = config
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
privValKeyFile := cli.config.PrivValidatorKeyFile()
|
||||
privValStateFile := cli.config.PrivValidatorStateFile()
|
||||
var pv *privval.FilePV
|
||||
if tmos.FileExists(privValKeyFile) {
|
||||
pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
|
||||
fmt.Print("found private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv = privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
fmt.Print("Generated private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
|
||||
nodeKeyFile := cli.config.NodeKeyFile()
|
||||
if tmos.FileExists(nodeKeyFile) {
|
||||
fmt.Print("Found node key", "path", nodeKeyFile)
|
||||
} else {
|
||||
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Print("Generated node key", "path", nodeKeyFile)
|
||||
}
|
||||
|
||||
// genesis file
|
||||
genFile := cli.config.GenesisFile()
|
||||
if tmos.FileExists(genFile) {
|
||||
fmt.Print("Found genesis file", "path", genFile)
|
||||
} else {
|
||||
genDoc := types.GenesisDoc{
|
||||
ChainID: fmt.Sprintf("orderbook-chain-%v", tmrand.Int()),
|
||||
GenesisTime: tmtime.Now(),
|
||||
ConsensusParams: types.DefaultConsensusParams(),
|
||||
}
|
||||
pubKey, err := pv.GetPubKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
genDoc.Validators = []types.GenesisValidator{{
|
||||
Address: pubKey.Address(),
|
||||
PubKey: pubKey,
|
||||
Power: 10,
|
||||
}}
|
||||
|
||||
if err := genDoc.SaveAs(genFile); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Print("Generated genesis file", "path", genFile)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "run",
|
||||
Short: "runs an orderbook node",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
dbProvider := node.DefaultDBProvider
|
||||
appDB, err := dbProvider(&node.DBContext{ID: "orderbook", Config: cli.config})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
app, err := orderbook.New(appDB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodeKey, err := p2p.LoadOrGenNodeKey(cli.config.NodeKeyFile())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load or gen node key %s: %w", cli.config.NodeKeyFile(), err)
|
||||
}
|
||||
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
n, err := node.NewNode(
|
||||
cli.config,
|
||||
privval.LoadOrGenFilePV(cli.config.PrivValidatorKeyFile(), cli.config.PrivValidatorStateFile()),
|
||||
nodeKey,
|
||||
proxy.NewLocalClientCreator(app),
|
||||
node.DefaultGenesisDocProviderFunc(cli.config),
|
||||
dbProvider,
|
||||
node.DefaultMetricsProvider(cli.config.Instrumentation),
|
||||
logger,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := n.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmos.TrapSignal(logger, func() {
|
||||
if err := n.Stop(); err != nil {
|
||||
logger.Error("unable to stop the node", "error", err)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "create-account [commodities...]",
|
||||
Short: "creates a new account message and submits it to the chain",
|
||||
Example: "create-account 500BTC 10000USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "create-pair buyers-denomination sellers-denomination",
|
||||
Short: "creates a new pair message and submits it to the chain",
|
||||
Example: "create-pair BTC USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "bid buying-commodity price",
|
||||
Short: "creates a bid message and submits it to the chain",
|
||||
Example: "bid 10BTC 15000BTC/USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "ask selling-commodity price",
|
||||
Short: "creates an ask message and submits it to the chain",
|
||||
Example: "ask 5BTC 12000BTC/USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
querySubcommand := &cobra.Command{
|
||||
Use: "query",
|
||||
Short: "query the bal",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
querySubcommand.AddCommand(&cobra.Command{
|
||||
Use: "account pubkey|id",
|
||||
Short: "query the balance of an account",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
querySubcommand.AddCommand(&cobra.Command{
|
||||
Use: "pairs",
|
||||
Short: "list all the trading pairs",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
querySubcommand.AddCommand(&cobra.Command{
|
||||
Use: "orders pair",
|
||||
Short: "list all current orders for a given pair",
|
||||
Example: "orders BTC/USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(querySubcommand)
|
||||
|
||||
return cli
|
||||
}
|
||||
|
||||
// Run runs the CLI.
|
||||
func (cli *CLI) Run() {
|
||||
if err := cli.root.Execute(); err != nil {
|
||||
fmt.Print(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
abci/example/orderbook/doc.go (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
//go:generate go install github.com/bufbuild/buf/cmd/buf
|
||||
//go:generate buf generate
|
||||
|
||||
// The orderbook presents a more advanced example of a Tendermint application than the simple kvstore
|
||||
//
|
||||
// An orderbook is a tool used in financial markets for enabling trading of various commodities. Without
|
||||
// delving into too much detail, an orderbook is made of two types of transactions: Bids and Asks. An Ask
|
||||
// is an offer by a seller for n amount of a commodity at an AskPrice and a bid is an offer from a buyer
|
||||
// for m amount of a commodity at a BidPrice. When the bid price meets or exceeds the ask price, and the buyer's
// quantity is greater than or equal to the seller's quantity, the order is matched. In practice we neglect the
// underlying denomination (i.e. USD), so effectively both participants are simultaneously a buyer and a seller.
|
||||
//
|
||||
// This example falls far short of being a decentralized orderbook, but it demonstrates how one can build an
// app-side mempool, how PrepareProposal and ProcessProposal can be used to craft complex transactions,
// how signatures can be used to validate transactions against state, how applications can manage concurrency,
// and the lifecycle of transactions from RPC -> CheckTx -> Mempool -> PrepareProposal -> ProcessProposal
// -> DeliverTx -> Commit -> Querying.
|
||||
|
||||
package orderbook
|
||||
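To make the matching rule described in the doc comment concrete, here is a small self-contained sketch. It deliberately avoids the generated types; the field names merely mirror OrderBid and OrderAsk from wire.proto, and the values are illustrative.

package main

import "fmt"

// Standalone sketch of the matching rule described in the orderbook doc comment:
// a bid can fill an ask when it is willing to pay at least the ask price and to
// take at least the asked quantity. This is not the application's matching engine.
type bidSketch struct {
	MaxPrice    float64
	MaxQuantity float64
}

type askSketch struct {
	AskPrice float64
	Quantity float64
}

func matches(b bidSketch, a askSketch) bool {
	return b.MaxPrice >= a.AskPrice && b.MaxQuantity >= a.Quantity
}

func main() {
	fmt.Println(matches(bidSketch{MaxPrice: 5, MaxQuantity: 5}, askSketch{AskPrice: 5, Quantity: 5}))  // true
	fmt.Println(matches(bidSketch{MaxPrice: 4, MaxQuantity: 10}, askSketch{AskPrice: 5, Quantity: 5})) // false: price too low
	fmt.Println(matches(bidSketch{MaxPrice: 6, MaxQuantity: 3}, askSketch{AskPrice: 5, Quantity: 5}))  // false: quantity too small
}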
abci/example/orderbook/market.go (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
package orderbook
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
type Market struct {
|
||||
// immutable
|
||||
pair *Pair // i.e. EUR/USD (a market is bidirectional)
|
||||
|
||||
mtx sync.RWMutex
|
||||
askOrders *AskOrders // i.e. selling EUR for USD
|
||||
lowestAsk float64
|
||||
bidOrders *BidOrders // i.e. buying EUR for USD (selling USD for EUR)
|
||||
highestBid float64
|
||||
}
|
||||
|
||||
func NewMarket(p *Pair) *Market {
|
||||
askOrders := make(AskOrders, 0)
|
||||
bidOrders := make(BidOrders, 0)
|
||||
return &Market{pair: p, askOrders: &askOrders, bidOrders: &bidOrders}
|
||||
}
|
||||
|
||||
func (m *Market) AddBid(b *OrderBid) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
heap.Push(m.bidOrders, b)
|
||||
if b.MaxPrice > m.highestBid {
|
||||
m.highestBid = b.MaxPrice
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Market) AddAsk(a *OrderAsk) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
heap.Push(m.askOrders, a)
|
||||
if a.AskPrice < m.lowestAsk || m.lowestAsk == 0 {
|
||||
m.lowestAsk = a.AskPrice
|
||||
}
|
||||
}
|
||||
|
||||
// Match takes the set of bids and asks and matches them together.
|
||||
// A bid matches an ask when the bid's MaxPrice is greater than or equal to the AskPrice
// and its MaxQuantity is greater than or equal to the ask's Quantity.
|
||||
func (m *Market) Match() *TradeSet {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
// if one side doesn't have any orders then there is nothing to match
|
||||
// and we return early
|
||||
if m.askOrders.Len() == 0 || m.bidOrders.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.highestBid < m.lowestAsk {
|
||||
// no orders match, we return early.
|
||||
return nil
|
||||
}
|
||||
|
||||
t := &TradeSet{Pair: m.pair}
|
||||
bids := make([]*OrderBid, 0)
|
||||
asks := make([]*OrderAsk, 0)
|
||||
|
||||
// get all the bids that are at or above the lowest ask, in order from highest bid to lowest bid
|
||||
for m.bidOrders.Len() > 0 {
|
||||
bid := heap.Pop(m.bidOrders).(*OrderBid)
|
||||
if bid.MaxPrice < m.lowestAsk {
|
||||
// we've reached the limit, push the bid back and break the loop
|
||||
heap.Push(m.bidOrders, bid)
|
||||
break
|
||||
} else {
|
||||
bids = append(bids, bid)
|
||||
}
|
||||
}
|
||||
|
||||
// get all the asks that are at or below the highest bid in the bids set, ordered from lowest to highest ask
|
||||
for m.askOrders.Len() > 0 {
|
||||
ask := heap.Pop(m.askOrders).(*OrderAsk)
|
||||
if ask.AskPrice > bids[0].MaxPrice {
|
||||
// the ask price is greater than the highest bid; push the ask back and break the loop
|
||||
heap.Push(m.askOrders, ask)
|
||||
break
|
||||
} else {
|
||||
asks = append(asks, ask)
|
||||
}
|
||||
}
|
||||
|
||||
// this is to keep track of the index of the bids that have been matched
|
||||
reserved := make(map[int]struct{})
|
||||
|
||||
// start from the highest ask and, for each ask, loop through the slice of
// collected bids until one is matched
|
||||
OUTER_LOOP:
|
||||
for i := len(asks) - 1; i >= 0; i-- {
|
||||
ask := asks[i]
|
||||
|
||||
// walk the collected bids from the back of the slice (the lowest of the collected bids) until one can fill the ask
|
||||
for j := len(bids) - 1; j >= 0; j-- {
|
||||
if _, ok := reserved[j]; ok {
|
||||
// skip over the bids that have already been reserved
|
||||
continue
|
||||
}
|
||||
|
||||
bid := bids[j]
|
||||
if bid.MaxPrice >= ask.AskPrice {
|
||||
if bid.MaxQuantity >= ask.Quantity {
|
||||
// yay! we have a match
|
||||
t.AddFilledOrder(ask, bid)
|
||||
|
||||
// reserve the bid so we don't rematch it with another ask
|
||||
reserved[j] = struct{}{}
|
||||
continue OUTER_LOOP
|
||||
}
|
||||
} else {
|
||||
// once we've dropped below the ask price there are no more possible bids and so we break
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// as we go from highest to lowest, asks that aren't matched become the new lowest ask price
|
||||
m.lowestAsk = ask.AskPrice
|
||||
|
||||
// no match found, add the ask order back into the heap
|
||||
heap.Push(m.askOrders, ask)
|
||||
}
|
||||
|
||||
// if all available asks were matched we never had the opportunity to update
// the lowest ask, so reset it to 0
|
||||
if m.askOrders.Len() == 0 {
|
||||
m.lowestAsk = 0
|
||||
}
|
||||
|
||||
// add back the unmatched bids to the heap so they can be matched again in a later round.
|
||||
// We also need to recalculate the new highest bid. First we tackle an edge case whereby all
|
||||
// selected bids were matched. In this case we grab the next highest and set that as the new
|
||||
// highest bid
|
||||
m.highestBid = 0
|
||||
if len(reserved) == len(bids) && m.bidOrders.Len() > 0 {
|
||||
newHighestBid := heap.Pop(m.bidOrders).(*OrderBid)
|
||||
m.highestBid = newHighestBid.MaxPrice
|
||||
heap.Push(m.bidOrders, newHighestBid)
|
||||
}
|
||||
for j := 0; j < len(bids); j++ {
|
||||
if _, ok := reserved[j]; !ok {
|
||||
if bids[j].MaxPrice > m.highestBid {
|
||||
m.highestBid = bids[j].MaxPrice
|
||||
}
|
||||
heap.Push(m.bidOrders, bids[j])
|
||||
}
|
||||
}
|
||||
|
||||
if len(t.MatchedOrders) == 0 {
|
||||
return nil
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (m *Market) LowestAsk() float64 {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
return m.lowestAsk
|
||||
}
|
||||
|
||||
func (m *Market) HighestBid() float64 {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
return m.highestBid
|
||||
}
|
||||
|
||||
func (m *Market) GetBids() []OrderBid {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
orders := make([]OrderBid, m.bidOrders.Len())
|
||||
for idx, order := range *m.bidOrders {
|
||||
orders[idx] = *order
|
||||
}
|
||||
return orders
|
||||
}
|
||||
|
||||
func (m *Market) GetAsks() []OrderAsk {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
orders := make([]OrderAsk, m.askOrders.Len())
|
||||
for idx, order := range *m.askOrders {
|
||||
orders[idx] = *order
|
||||
}
|
||||
return orders
|
||||
}
|
||||
|
||||
// Heap ordered by lowest price
|
||||
type AskOrders []*OrderAsk
|
||||
|
||||
var _ heap.Interface = (*AskOrders)(nil)
|
||||
|
||||
func (a AskOrders) Len() int { return len(a) }
|
||||
|
||||
func (a AskOrders) Less(i, j int) bool {
|
||||
return a[i].AskPrice < a[j].AskPrice
|
||||
}
|
||||
|
||||
func (a AskOrders) Swap(i, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
|
||||
func (a *AskOrders) Push(x any) {
|
||||
item := x.(*OrderAsk)
|
||||
*a = append(*a, item)
|
||||
}
|
||||
|
||||
func (a *AskOrders) Pop() any {
|
||||
old := *a
|
||||
n := len(old)
|
||||
item := old[n-1]
|
||||
old[n-1] = nil
|
||||
*a = old[0 : n-1]
|
||||
return item
|
||||
}
|
||||
|
||||
// Heap ordered by highest price
|
||||
type BidOrders []*OrderBid
|
||||
|
||||
var _ heap.Interface = (*BidOrders)(nil)
|
||||
|
||||
func (b BidOrders) Len() int { return len(b) }
|
||||
|
||||
func (b BidOrders) Less(i, j int) bool {
|
||||
return b[i].MaxPrice > b[j].MaxPrice
|
||||
}
|
||||
|
||||
func (b BidOrders) Swap(i, j int) {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
|
||||
func (b *BidOrders) Push(x any) {
|
||||
item := x.(*OrderBid)
|
||||
*b = append(*b, item)
|
||||
}
|
||||
|
||||
func (b *BidOrders) Pop() any {
|
||||
old := *b
|
||||
n := len(old)
|
||||
item := old[n-1]
|
||||
old[n-1] = nil
|
||||
*b = old[0 : n-1]
|
||||
return item
|
||||
}
|
||||
|
||||
func (t *TradeSet) AddFilledOrder(ask *OrderAsk, bid *OrderBid) {
|
||||
t.MatchedOrders = append(t.MatchedOrders, &MatchedOrder{
|
||||
OrderAsk: ask,
|
||||
OrderBid: bid,
|
||||
})
|
||||
}
|
||||
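For reference, a minimal usage sketch of the Market API defined above, assuming the orderbook package from this branch; the pair and the numbers are illustrative only, not part of the example app.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/orderbook"
)

// Sketch: exercise NewMarket, AddBid, AddAsk and Match as defined in market.go.
// Assumes the orderbook types generated from wire.proto; values are illustrative.
func main() {
	pair := &orderbook.Pair{BuyersDenomination: "NZD", SellersDenomination: "AUD"}
	market := orderbook.NewMarket(pair)

	// Seller offers 5 units at a price of 5; buyer will pay up to 5 for up to 5 units.
	market.AddAsk(&orderbook.OrderAsk{AskPrice: 5, Quantity: 5, OwnerId: 1})
	market.AddBid(&orderbook.OrderBid{MaxPrice: 5, MaxQuantity: 5, OwnerId: 0})

	if ts := market.Match(); ts != nil {
		fmt.Printf("matched %d order(s) on %s/%s\n",
			len(ts.MatchedOrders), pair.BuyersDenomination, pair.SellersDenomination)
	}
	fmt.Println("lowest ask:", market.LowestAsk(), "highest bid:", market.HighestBid())
}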
abci/example/orderbook/market_test.go (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
package orderbook_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tendermint/abci/example/orderbook"
|
||||
)
|
||||
|
||||
var testPair = &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "USD"}
|
||||
|
||||
func testBid(price, quantity float64) *orderbook.OrderBid {
|
||||
return &orderbook.OrderBid{
|
||||
MaxPrice: price,
|
||||
MaxQuantity: quantity,
|
||||
}
|
||||
}
|
||||
|
||||
func testAsk(price, quantity float64) *orderbook.OrderAsk {
|
||||
return &orderbook.OrderAsk{
|
||||
AskPrice: price,
|
||||
Quantity: quantity,
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrackLowestAndHighestPrices(t *testing.T) {
|
||||
market := orderbook.NewMarket(testPair)
|
||||
require.Zero(t, market.LowestAsk())
|
||||
require.Zero(t, market.HighestBid())
|
||||
|
||||
market.AddBid(testBid(100, 10))
|
||||
require.EqualValues(t, 100, market.HighestBid())
|
||||
|
||||
market.AddAsk(testAsk(50, 10))
|
||||
require.EqualValues(t, 50, market.LowestAsk())
|
||||
|
||||
market.AddAsk(testAsk(30, 10))
|
||||
require.EqualValues(t, 30, market.LowestAsk())
|
||||
|
||||
market.AddAsk(testAsk(40, 10))
|
||||
require.EqualValues(t, 30, market.LowestAsk())
|
||||
}
|
||||
|
||||
func TestSimpleOrderMatching(t *testing.T) {
|
||||
testcases := []struct {
|
||||
bid *orderbook.OrderBid
|
||||
ask *orderbook.OrderAsk
|
||||
match bool
|
||||
}{
|
||||
{
|
||||
bid: testBid(50, 10),
|
||||
ask: testAsk(50, 10),
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
bid: testBid(60, 10),
|
||||
ask: testAsk(50, 10),
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
bid: testBid(50, 10),
|
||||
ask: testAsk(60, 10),
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
bid: testBid(50, 5),
|
||||
ask: testAsk(40, 10),
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
bid: testBid(50, 15),
|
||||
ask: testAsk(40, 10),
|
||||
match: true,
|
||||
},
|
||||
}
|
||||
|
||||
for idx, tc := range testcases {
|
||||
market := orderbook.NewMarket(testPair)
|
||||
market.AddAsk(tc.ask)
|
||||
market.AddBid(tc.bid)
|
||||
resp := market.Match()
|
||||
if tc.match {
|
||||
require.Len(t, resp.MatchedOrders, 1, idx)
|
||||
} else {
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiOrderMatching(t *testing.T) {
|
||||
testcases := []struct {
|
||||
bids []*orderbook.OrderBid
|
||||
asks []*orderbook.OrderAsk
|
||||
expected []*orderbook.MatchedOrder
|
||||
expectedLowestAsk float64
|
||||
expectedHighestBid float64
|
||||
}{
|
||||
{
|
||||
bids: []*orderbook.OrderBid{
|
||||
testBid(50, 20),
|
||||
testBid(40, 10),
|
||||
testBid(30, 15),
|
||||
},
|
||||
asks: []*orderbook.OrderAsk{
|
||||
testAsk(30, 15),
|
||||
testAsk(30, 5),
|
||||
},
|
||||
expected: []*orderbook.MatchedOrder{
|
||||
{
|
||||
OrderAsk: testAsk(30, 5),
|
||||
OrderBid: testBid(30, 15),
|
||||
},
|
||||
{
|
||||
OrderAsk: testAsk(30, 15),
|
||||
OrderBid: testBid(50, 20),
|
||||
},
|
||||
},
|
||||
expectedLowestAsk: 0,
|
||||
expectedHighestBid: 40,
|
||||
},
|
||||
{
|
||||
bids: []*orderbook.OrderBid{
|
||||
testBid(60, 20),
|
||||
testBid(80, 5),
|
||||
},
|
||||
asks: []*orderbook.OrderAsk{
|
||||
testAsk(60, 15),
|
||||
testAsk(70, 10),
|
||||
testAsk(50, 20),
|
||||
},
|
||||
expected: []*orderbook.MatchedOrder{
|
||||
{
|
||||
OrderAsk: testAsk(60, 15),
|
||||
OrderBid: testBid(60, 20),
|
||||
},
|
||||
},
|
||||
expectedLowestAsk: 50,
|
||||
expectedHighestBid: 80,
|
||||
},
|
||||
{
|
||||
bids: []*orderbook.OrderBid{
|
||||
testBid(60, 20),
|
||||
testBid(80, 5),
|
||||
},
|
||||
asks: []*orderbook.OrderAsk{},
|
||||
expected: []*orderbook.MatchedOrder{},
|
||||
expectedLowestAsk: 0,
|
||||
expectedHighestBid: 80,
|
||||
},
|
||||
{
|
||||
bids: []*orderbook.OrderBid{},
|
||||
asks: []*orderbook.OrderAsk{
|
||||
testAsk(70, 10),
|
||||
testAsk(50, 20),
|
||||
},
|
||||
expected: []*orderbook.MatchedOrder{},
|
||||
expectedLowestAsk: 50,
|
||||
expectedHighestBid: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for idx, tc := range testcases {
|
||||
market := orderbook.NewMarket(testPair)
|
||||
for _, ask := range tc.asks {
|
||||
market.AddAsk(ask)
|
||||
}
|
||||
for _, bid := range tc.bids {
|
||||
market.AddBid(bid)
|
||||
}
|
||||
resp := market.Match()
|
||||
if len(tc.expected) == 0 {
|
||||
require.Nil(t, resp, idx)
|
||||
} else {
|
||||
require.Equal(t, tc.expected, resp.MatchedOrders, idx)
|
||||
}
|
||||
require.EqualValues(t, tc.expectedLowestAsk, market.LowestAsk(), idx)
|
||||
require.EqualValues(t, tc.expectedHighestBid, market.HighestBid(), idx)
|
||||
}
|
||||
}
|
||||
abci/example/orderbook/msgs.pb.go (new file, 1795 lines)
File diff suppressed because it is too large
abci/example/orderbook/msgs.proto (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package orderbook;
|
||||
option go_package = "github.com/tendermint/tendermint/abci/example/orderbook";
|
||||
|
||||
import "wire.proto";
|
||||
|
||||
message MsgBid {
|
||||
Pair pair = 1;
|
||||
OrderBid bid_order = 2;
|
||||
}
|
||||
|
||||
message MsgAsk {
|
||||
Pair pair = 1;
|
||||
OrderAsk ask_order = 2;
|
||||
}
|
||||
|
||||
message MsgCreateAccount {
|
||||
bytes public_key = 1;
|
||||
repeated Commodity commodities = 2;
|
||||
}
|
||||
|
||||
message MsgRegisterPair {
|
||||
Pair pair = 1;
|
||||
}
|
||||
|
||||
message MsgTradeSet {
|
||||
TradeSet trade_set = 1;
|
||||
}
|
||||
|
||||
message Msg {
|
||||
//a Msg has to be one of the below
|
||||
oneof sum {
|
||||
MsgBid msg_bid = 1;
|
||||
MsgAsk msg_ask = 2;
|
||||
MsgRegisterPair msg_register_pair = 3;
|
||||
MsgCreateAccount msg_create_account = 4;
|
||||
MsgTradeSet msg_trade_set = 5;
|
||||
}
|
||||
}
|
||||
abci/example/orderbook/orderbook (new executable file, binary)
Binary file not shown.
abci/example/orderbook/query.go (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
package orderbook
|
||||
|
||||
// Query the state of an account (returns a concrete copy)
|
||||
func (sm *StateMachine) Account(id uint64) Account {
|
||||
if int(id) >= len(sm.accounts) {
|
||||
return Account{}
|
||||
}
|
||||
return *sm.accounts[id]
|
||||
}
|
||||
|
||||
// Query all the pairs that the orderbook has (returns a concrete copy)
|
||||
func (sm *StateMachine) Pairs() []Pair {
|
||||
pairs := make([]Pair, len(sm.pairs))
|
||||
idx := 0
|
||||
for _, pair := range sm.pairs {
|
||||
pairs[idx] = *pair
|
||||
idx++
|
||||
}
|
||||
return pairs
|
||||
}
|
||||
|
||||
// Query the current orders for a pair (returns concrete copies)
|
||||
func (sm *StateMachine) Orders(pair *Pair) ([]OrderBid, []OrderAsk) {
|
||||
market, ok := sm.markets[pair.String()]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return market.GetBids(), market.GetAsks()
|
||||
}
|
||||
|
||||
func (sm *StateMachine) Height() int64 { return sm.lastHeight }
|
||||
abci/example/orderbook/types.go (new file, 299 lines)
@@ -0,0 +1,299 @@
|
||||
package orderbook
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
)
|
||||
|
||||
func NewMsgBid(pair *Pair, maxPrice, maxQuantity float64, ownerId uint64) *MsgBid {
|
||||
return &MsgBid{
|
||||
Pair: pair,
|
||||
BidOrder: &OrderBid{
|
||||
MaxPrice: maxPrice,
|
||||
MaxQuantity: maxQuantity,
|
||||
OwnerId: ownerId,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (msg *MsgBid) Sign(pk crypto.PrivKey) error {
|
||||
sig, err := pk.Sign(msg.BidOrder.DeterministicSignatureBytes(msg.Pair))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg.BidOrder.Signature = sig
|
||||
return nil
|
||||
}
|
||||
|
||||
func (msg *MsgBid) ValidateBasic() error {
|
||||
if err := msg.BidOrder.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := msg.Pair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(msg.BidOrder.Signature) != ed25519.SignatureSize {
|
||||
return errors.New("invalid signature size")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewMsgAsk(pair *Pair, askPrice, quantity float64, ownerId uint64) *MsgAsk {
|
||||
return &MsgAsk{
|
||||
Pair: pair,
|
||||
AskOrder: &OrderAsk{
|
||||
AskPrice: askPrice,
|
||||
Quantity: quantity,
|
||||
OwnerId: ownerId,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (msg *MsgAsk) Sign(pk crypto.PrivKey) error {
|
||||
sig, err := pk.Sign(msg.AskOrder.DeterministicSignatureBytes(msg.Pair))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg.AskOrder.Signature = sig
|
||||
return nil
|
||||
}
|
||||
|
||||
func (msg *MsgAsk) ValidateBasic() error {
|
||||
if err := msg.AskOrder.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := msg.Pair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewMsgCreateAccount(commodities ...*Commodity) (*MsgCreateAccount, crypto.PrivKey) {
|
||||
pk := ed25519.GenPrivKey()
|
||||
return &MsgCreateAccount{
|
||||
PublicKey: pk.PubKey().Bytes(),
|
||||
Commodities: commodities,
|
||||
}, pk
|
||||
}
|
||||
|
||||
func (msg *MsgCreateAccount) ValidateBasic() error {
|
||||
if len(msg.PublicKey) != ed25519.PubKeySize {
|
||||
return errors.New("invalid pub key size")
|
||||
}
|
||||
|
||||
uniqueMap := make(map[string]struct{}, len(msg.Commodities))
|
||||
for _, c := range msg.Commodities {
|
||||
if err := c.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := uniqueMap[c.Denom]; ok {
|
||||
return fmt.Errorf("commodity %s declared twice", c.Denom)
|
||||
}
|
||||
uniqueMap[c.Denom] = struct{}{}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewMsgRegisterPair(pair *Pair) *MsgRegisterPair {
|
||||
return &MsgRegisterPair{Pair: pair}
|
||||
}
|
||||
|
||||
func (msg *MsgRegisterPair) ValidateBasic() error {
|
||||
return msg.Pair.ValidateBasic()
|
||||
}
|
||||
|
||||
func NewCommodity(denom string, quantity float64) *Commodity {
|
||||
return &Commodity{
|
||||
Denom: denom,
|
||||
Quantity: quantity,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Commodity) ValidateBasic() error {
|
||||
if c.Quantity <= 0 {
|
||||
return errors.New("quantity must be greater than zero")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pair) ValidateBasic() error {
|
||||
if p.BuyersDenomination == "" || p.SellersDenomination == "" {
|
||||
return errors.New("inbound and outbound commodities must be present")
|
||||
}
|
||||
|
||||
if p.BuyersDenomination == p.SellersDenomination {
|
||||
return errors.New("commodities must not be the same")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderBid) ValidateBasic() error {
|
||||
if o.MaxQuantity == 0 {
|
||||
return errors.New("max quantity must be non zero")
|
||||
}
|
||||
|
||||
if o.MaxPrice <= 0 {
|
||||
return errors.New("min price must be greater than 0")
|
||||
}
|
||||
|
||||
if len(o.Signature) != ed25519.SignatureSize {
|
||||
return errors.New("invalid signature size")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderBid) ValidateSignature(pk crypto.PubKey, pair *Pair) bool {
|
||||
return pk.VerifySignature(o.DeterministicSignatureBytes(pair), o.Signature)
|
||||
}
|
||||
|
||||
func (o *OrderBid) DeterministicSignatureBytes(pair *Pair) []byte {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.WriteString(pair.SellersDenomination)
|
||||
buf.WriteString(pair.BuyersDenomination)
|
||||
bz := buf.Bytes()
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.MaxQuantity))
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.MaxPrice))
|
||||
return bz
|
||||
}
|
||||
|
||||
func (m *MatchedOrder) ValidateBasic() error {
|
||||
if err := m.OrderAsk.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.OrderBid.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TradeSet) ValidateBasic() error {
|
||||
for _, matchedOrder := range t.MatchedOrders {
|
||||
if err := matchedOrder.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
// checking if there is an account
|
||||
if matchedOrder.OrderAsk.OwnerId == 0 {
|
||||
return errors.New("must have an owner id more than zero")
|
||||
}
|
||||
}
|
||||
// validate the pairs are valid
|
||||
if err := t.Pair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderAsk) ValidateBasic() error {
|
||||
if o.Quantity == 0 {
|
||||
return errors.New("quantity outbound must be non zero")
|
||||
}
|
||||
|
||||
if o.AskPrice <= 0 {
|
||||
return errors.New("min price must be greater than 0")
|
||||
}
|
||||
|
||||
if len(o.Signature) != ed25519.SignatureSize {
|
||||
return errors.New("invalid signature size")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderAsk) ValidateSignature(pk crypto.PubKey, pair *Pair) bool {
|
||||
return pk.VerifySignature(o.DeterministicSignatureBytes(pair), o.Signature)
|
||||
}
|
||||
|
||||
func (o *OrderAsk) DeterministicSignatureBytes(pair *Pair) []byte {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.WriteString(pair.BuyersDenomination)
|
||||
buf.WriteString(pair.SellersDenomination)
|
||||
bz := buf.Bytes()
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.Quantity))
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.AskPrice))
|
||||
return bz
|
||||
}
|
||||
|
||||
func (a Account) IsEmpty() bool {
|
||||
return len(a.PublicKey) == 0
|
||||
}
|
||||
|
||||
func (a *Account) FindCommidity(denom string) *Commodity {
|
||||
for _, c := range a.Commodities {
|
||||
if c.Denom == denom {
|
||||
return c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (a *Account) AddCommodity(c *Commodity) {
|
||||
curr := a.FindCommidity(c.Denom)
|
||||
if curr == nil {
|
||||
a.Commodities = append(a.Commodities, c)
|
||||
} else {
|
||||
curr.Quantity += c.Quantity
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) SubtractCommodity(c *Commodity) {
|
||||
curr := a.FindCommidity(c.Denom)
|
||||
if curr == nil {
|
||||
panic("trying to remove a commodity the account does not have")
|
||||
}
|
||||
curr.Quantity -= c.Quantity
|
||||
}
|
||||
|
||||
func (msg *Msg) ValidateBasic() error {
|
||||
switch m := msg.Sum.(type) {
|
||||
case *Msg_MsgRegisterPair:
|
||||
if err := m.MsgRegisterPair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgCreateAccount:
|
||||
if err := m.MsgCreateAccount.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgBid:
|
||||
if err := m.MsgBid.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgAsk:
|
||||
if err := m.MsgAsk.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgTradeSet:
|
||||
if err := m.MsgTradeSet.TradeSet.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return errors.New("unknown tx")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
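The Sign and ValidateSignature helpers above pair as follows. A minimal sketch, assuming the generated orderbook types and a freshly generated ed25519 key; the pair, prices and owner id are illustrative.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/orderbook"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

// Sketch: sign an ask order and verify it against the signer's public key,
// using the helpers defined in types.go.
func main() {
	pair := &orderbook.Pair{BuyersDenomination: "NZD", SellersDenomination: "AUD"}
	priv := ed25519.GenPrivKey()

	msg := orderbook.NewMsgAsk(pair, 5, 5, 1) // ask price 5, quantity 5, owner id 1
	if err := msg.Sign(priv); err != nil {
		panic(err)
	}

	// The signature covers the pair plus the order fields via DeterministicSignatureBytes.
	fmt.Println("signature valid:", msg.AskOrder.ValidateSignature(priv.PubKey(), pair))
}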
abci/example/orderbook/wire.pb.go (new file, 1867 lines)
File diff suppressed because it is too large
abci/example/orderbook/wire.proto (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package orderbook;
|
||||
option go_package = "github.com/tendermint/tendermint/abci/example/orderbook";
|
||||
|
||||
|
||||
message OrderAsk {
|
||||
double quantity = 1;
|
||||
double ask_price = 2;
|
||||
uint64 owner_id = 3;
|
||||
bytes signature = 4;
|
||||
}
|
||||
|
||||
message OrderBid {
|
||||
double max_quantity = 1;
|
||||
double max_price = 2;
|
||||
uint64 owner_id = 3;
|
||||
bytes signature = 4;
|
||||
}
|
||||
|
||||
message Pair {
|
||||
// the denomination that the buyer receives i.e. EUR
|
||||
string buyers_denomination = 1;
|
||||
// the denomination that the seller receives i.e. USD
|
||||
string sellers_denomination = 2;
|
||||
}
|
||||
|
||||
message Commodity {
|
||||
string denom = 1;
|
||||
double quantity = 2;
|
||||
}
|
||||
|
||||
// Account is the atomic piece of information that is persisted to disk.
|
||||
message Account {
|
||||
uint64 index = 1;
|
||||
bytes public_key = 2;
|
||||
// the set of commodities that the account has
|
||||
repeated Commodity commodities = 3;
|
||||
}
|
||||
|
||||
// TradeSet is the transaction that eventually is committed in a block
|
||||
// It is derived from a group of MsgBids and MsgAsks.
|
||||
message TradeSet {
|
||||
Pair pair = 1; // i.e. EUR/USD
|
||||
// the set of matched orders for that pair
|
||||
repeated MatchedOrder matched_orders = 2;
|
||||
}
|
||||
|
||||
message MatchedOrder {
|
||||
OrderAsk order_ask = 1;
|
||||
OrderBid order_bid = 2;
|
||||
}
|
||||
@@ -2,9 +2,8 @@
|
||||
Package server is used to start a new ABCI server.
|
||||
|
||||
It contains two server implementation:
|
||||
* gRPC server
|
||||
* socket server
|
||||
|
||||
- gRPC server
|
||||
- socket server
|
||||
*/
|
||||
package server
|
||||
|
||||
|
||||
@@ -200,9 +200,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
||||
case *types.Request_Info:
|
||||
res := s.app.Info(*r.Info)
|
||||
responses <- types.ToResponseInfo(res)
|
||||
case *types.Request_SetOption:
|
||||
res := s.app.SetOption(*r.SetOption)
|
||||
responses <- types.ToResponseSetOption(res)
|
||||
case *types.Request_DeliverTx:
|
||||
res := s.app.DeliverTx(*r.DeliverTx)
|
||||
responses <- types.ToResponseDeliverTx(res)
|
||||
@@ -230,6 +227,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
||||
case *types.Request_OfferSnapshot:
|
||||
res := s.app.OfferSnapshot(*r.OfferSnapshot)
|
||||
responses <- types.ToResponseOfferSnapshot(res)
|
||||
case *types.Request_PrepareProposal:
|
||||
res := s.app.PrepareProposal(*r.PrepareProposal)
|
||||
responses <- types.ToResponsePrepareProposal(res)
|
||||
case *types.Request_ProcessProposal:
|
||||
res := s.app.ProcessProposal(*r.ProcessProposal)
|
||||
responses <- types.ToResponseProcessProposal(res)
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
|
||||
responses <- types.ToResponseLoadSnapshotChunk(res)
|
||||
|
||||
@@ -19,9 +19,12 @@ func TestClientServerNoAddrPrefix(t *testing.T) {
|
||||
assert.NoError(t, err, "expected no error on NewServer")
|
||||
err = server.Start()
|
||||
assert.NoError(t, err, "expected no error on server.Start")
|
||||
defer func() { _ = server.Stop() }()
|
||||
|
||||
client, err := abciclient.NewClient(addr, transport, true)
|
||||
assert.NoError(t, err, "expected no error on NewClient")
|
||||
err = client.Start()
|
||||
assert.NoError(t, err, "expected no error on client.Start")
|
||||
|
||||
_ = client.Stop()
|
||||
}
|
||||
|
||||
@@ -29,17 +29,6 @@ func InitChain(client abcicli.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetOption(client abcicli.Client, key, value string) error {
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
|
||||
if err != nil {
|
||||
fmt.Println("Failed test: SetOption")
|
||||
fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
|
||||
return err
|
||||
}
|
||||
fmt.Println("Passed test: SetOption")
|
||||
return nil
|
||||
}
|
||||
|
||||
func Commit(client abcicli.Client, hashExp []byte) error {
|
||||
res, err := client.CommitSync()
|
||||
data := res.Data
|
||||
@@ -76,6 +65,32 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
|
||||
return nil
|
||||
}
|
||||
|
||||
func PrepareProposal(client abcicli.Client, txBytes [][]byte, txExpected [][]byte, dataExp []byte) error {
|
||||
res, _ := client.PrepareProposalSync(types.RequestPrepareProposal{Txs: txBytes})
|
||||
for i, tx := range res.Txs {
|
||||
if !bytes.Equal(tx, txExpected[i]) {
|
||||
fmt.Println("Failed test: PrepareProposal")
|
||||
fmt.Printf("PrepareProposal transaction was unexpected. Got %x expected %x.",
|
||||
tx, txExpected[i])
|
||||
return errors.New("PrepareProposal error")
|
||||
}
|
||||
}
|
||||
fmt.Println("Passed test: PrepareProposal")
|
||||
return nil
|
||||
}
|
||||
|
||||
func ProcessProposal(client abcicli.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error {
|
||||
res, _ := client.ProcessProposalSync(types.RequestProcessProposal{Txs: txBytes})
|
||||
if res.Status != statusExp {
|
||||
fmt.Println("Failed test: ProcessProposal")
|
||||
fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.",
|
||||
res.Status, statusExp)
|
||||
return errors.New("ProcessProposal error")
|
||||
}
|
||||
fmt.Println("Passed test: ProcessProposal")
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
func startClient(abciType string) abcicli.Client {
|
||||
// Start client
|
||||
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
client.SetLogger(logger.With("module", "abcicli"))
|
||||
if err := client.Start(); err != nil {
|
||||
panicf("connecting to abci_app: %v", err.Error())
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
func setOption(client abcicli.Client, key, value string) {
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
|
||||
if err != nil {
|
||||
panicf("setting %v=%v: \nerr: %v", key, value, err)
|
||||
}
|
||||
}
|
||||
|
||||
func commit(client abcicli.Client, hashExp []byte) {
|
||||
res, err := client.CommitSync()
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(res.Data, hashExp) {
|
||||
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
|
||||
}
|
||||
}
|
||||
|
||||
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
|
||||
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if res.Code != codeExp {
|
||||
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
|
||||
}
|
||||
if !bytes.Equal(res.Data, dataExp) {
|
||||
panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
|
||||
}
|
||||
}
|
||||
|
||||
/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
|
||||
res, err := client.CheckTxSync(txBytes)
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if res.IsErr() {
|
||||
panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log)
|
||||
}
|
||||
if res.Code != codeExp {
|
||||
panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v",
|
||||
res.Code, codeExp, res.Log)
|
||||
}
|
||||
if !bytes.Equal(res.Data, dataExp) {
|
||||
panicf("CheckTx response data was unexpected. Got %X expected %X",
|
||||
res.Data, dataExp)
|
||||
}
|
||||
}*/
|
||||
|
||||
func panicf(format string, a ...interface{}) {
|
||||
panic(fmt.Sprintf(format, a...))
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
var abciType string
|
||||
|
||||
func init() {
|
||||
abciType = os.Getenv("ABCI")
|
||||
if abciType == "" {
|
||||
abciType = "socket"
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
testCounter()
|
||||
}
|
||||
|
||||
const (
|
||||
maxABCIConnectTries = 10
|
||||
)
|
||||
|
||||
func ensureABCIIsUp(typ string, n int) error {
|
||||
var err error
|
||||
cmdString := "abci-cli echo hello"
|
||||
if typ == "grpc" {
|
||||
cmdString = "abci-cli --abci grpc echo hello"
|
||||
}
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
cmd := exec.Command("bash", "-c", cmdString)
|
||||
_, err = cmd.CombinedOutput()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
<-time.After(500 * time.Millisecond)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func testCounter() {
|
||||
abciApp := os.Getenv("ABCI_APP")
|
||||
if abciApp == "" {
|
||||
panic("No ABCI_APP specified")
|
||||
}
|
||||
|
||||
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
|
||||
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
|
||||
cmd := exec.Command("bash", "-c", subCommand)
|
||||
cmd.Stdout = os.Stdout
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatalf("starting %q err: %v", abciApp, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
log.Printf("error on process kill: %v", err)
|
||||
}
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Printf("error while waiting for cmd to exit: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
|
||||
log.Fatalf("echo failed: %v", err) //nolint:gocritic
|
||||
}
|
||||
|
||||
client := startClient(abciType)
|
||||
defer func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
log.Printf("error trying client stop: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
setOption(client, "serial", "on")
|
||||
commit(client, nil)
|
||||
deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
|
||||
commit(client, nil)
|
||||
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
|
||||
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
#! /bin/bash
|
||||
set -e
|
||||
|
||||
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
|
||||
|
||||
# Get the directory of where this script is.
|
||||
export PATH="$GOBIN:$PATH"
|
||||
SOURCE="${BASH_SOURCE[0]}"
|
||||
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
|
||||
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
|
||||
|
||||
# Change into that dir because we expect that.
|
||||
cd "$DIR"
|
||||
|
||||
echo "RUN COUNTER OVER SOCKET"
|
||||
# test golang counter
|
||||
ABCI_APP="counter" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
|
||||
echo "RUN COUNTER OVER GRPC"
|
||||
# test golang counter via grpc
|
||||
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
# test nodejs counter
|
||||
# TODO: fix node app
|
||||
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
|
||||
@@ -1,5 +1,7 @@
|
||||
echo hello
|
||||
info
|
||||
prepare_proposal "abc"
|
||||
process_proposal "abc"
|
||||
commit
|
||||
deliver_tx "abc"
|
||||
info
|
||||
@@ -8,3 +10,9 @@ query "abc"
|
||||
deliver_tx "def=xyz"
|
||||
commit
|
||||
query "def"
|
||||
prepare_proposal "preparedef"
|
||||
process_proposal "replacedef"
|
||||
process_proposal "preparedef"
|
||||
prepare_proposal
|
||||
process_proposal
|
||||
commit
|
||||
|
||||
@@ -8,6 +8,14 @@
|
||||
-> data: {"size":0}
|
||||
-> data.hex: 0x7B2273697A65223A307D
|
||||
|
||||
> prepare_proposal "abc"
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: abc
|
||||
|
||||
> process_proposal "abc"
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0000000000000000
|
||||
@@ -49,3 +57,25 @@
|
||||
-> value: xyz
|
||||
-> value.hex: 78797A
|
||||
|
||||
> prepare_proposal "preparedef"
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: replacedef
|
||||
|
||||
> process_proposal "replacedef"
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> process_proposal "preparedef"
|
||||
-> code: OK
|
||||
-> status: REJECT
|
||||
|
||||
> prepare_proposal
|
||||
|
||||
> process_proposal
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0400000000000000
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
set_option serial on
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
> set_option serial on
|
||||
-> code: OK
|
||||
-> log: OK (SetOption doesn't return anything.)
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
@@ -12,18 +8,16 @@
|
||||
-> code: OK
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: 2
|
||||
-> log: Invalid nonce. Expected >= 1, got 0
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x01
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x04
|
||||
-> code: 2
|
||||
-> log: Invalid nonce. Expected 2, got 4
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"hashes":0,"txs":2}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
|
||||
-> data: {"size":3}
|
||||
-> data.hex: 0x7B2273697A65223A337D
|
||||
|
||||
|
||||
@@ -30,6 +30,8 @@ function testExample() {
|
||||
cat "${INPUT}.out.new"
|
||||
echo "Expected:"
|
||||
cat "${INPUT}.out"
|
||||
echo "Diff:"
|
||||
diff "${INPUT}.out" "${INPUT}.out.new"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -37,7 +39,7 @@ function testExample() {
|
||||
}
|
||||
|
||||
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
|
||||
testExample 2 tests/test_cli/ex2.abci abci-cli counter
|
||||
testExample 2 tests/test_cli/ex2.abci abci-cli kvstore
|
||||
|
||||
echo ""
|
||||
echo "PASS"
|
||||
|
||||
@@ -4,21 +4,24 @@ import (
|
||||
context "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Application
|
||||
|
||||
// Application is an interface that enables any finite, deterministic state machine
|
||||
// to be driven by a blockchain-based replication engine via the ABCI.
|
||||
// All methods take a RequestXxx argument and return a ResponseXxx argument,
|
||||
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
|
||||
type Application interface {
|
||||
// Info/Query Connection
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
SetOption(RequestSetOption) ResponseSetOption // Set application option
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
|
||||
// Mempool Connection
|
||||
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
||||
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
|
||||
ProcessProposal(RequestProcessProposal) ResponseProcessProposal
|
||||
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
|
||||
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
|
||||
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
|
||||
@@ -47,10 +50,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
|
||||
return ResponseInfo{}
|
||||
}
|
||||
|
||||
func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
|
||||
return ResponseSetOption{}
|
||||
}
|
||||
|
||||
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
|
||||
return ResponseDeliverTx{Code: CodeTypeOK}
|
||||
}
|
||||
@@ -95,6 +94,24 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
|
||||
return ResponseApplySnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
|
||||
txs := make([][]byte, 0, len(req.Txs))
|
||||
var totalBytes int64
|
||||
for _, tx := range req.Txs {
|
||||
totalBytes += int64(len(tx))
|
||||
if totalBytes > req.MaxTxBytes {
|
||||
break
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
return ResponsePrepareProposal{Txs: txs}
|
||||
}
|
||||
|
||||
func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
|
||||
return ResponseProcessProposal{
|
||||
Status: ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
// GRPCApplication is a GRPC wrapper for Application
|
||||
@@ -119,11 +136,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
|
||||
res := app.app.SetOption(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
|
||||
res := app.app.DeliverTx(*req)
|
||||
return &res, nil
|
||||
@@ -182,3 +194,15 @@ func (app *GRPCApplication) ApplySnapshotChunk(
|
||||
res := app.app.ApplySnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) PrepareProposal(
|
||||
ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
|
||||
res := app.app.PrepareProposal(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ProcessProposal(
|
||||
ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
|
||||
res := app.app.ProcessProposal(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
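The default PrepareProposal implementation shown above simply keeps transactions in order until the cumulative size would exceed MaxTxBytes. A small sketch of that behaviour, assuming the abci types package from this branch (the tx contents and byte budget are illustrative):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	var app types.BaseApplication

	// Three 4-byte txs with a 10-byte budget: only the first two fit.
	res := app.PrepareProposal(types.RequestPrepareProposal{
		MaxTxBytes: 10,
		Txs:        [][]byte{[]byte("tx-1"), []byte("tx-2"), []byte("tx-3")},
	})
	fmt.Println("proposed txs:", len(res.Txs)) // 2: tx-3 would push the total past 10 bytes
}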
@@ -1,11 +1,10 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
"github.com/tendermint/tendermint/libs/protoio"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -14,57 +13,19 @@ const (
|
||||
|
||||
// WriteMessage writes a varint length-delimited protobuf message.
|
||||
func WriteMessage(msg proto.Message, w io.Writer) error {
|
||||
bz, err := proto.Marshal(msg)
|
||||
protoWriter := protoio.NewDelimitedWriter(w)
|
||||
_, err := protoWriter.WriteMsg(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return encodeByteSlice(w, bz)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadMessage reads a varint length-delimited protobuf message.
|
||||
func ReadMessage(r io.Reader, msg proto.Message) error {
|
||||
return readProtoMsg(r, msg, maxMsgSize)
|
||||
}
|
||||
|
||||
func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error {
|
||||
// binary.ReadVarint takes an io.ByteReader, eg. a bufio.Reader
|
||||
reader, ok := r.(*bufio.Reader)
|
||||
if !ok {
|
||||
reader = bufio.NewReader(r)
|
||||
}
|
||||
length64, err := binary.ReadVarint(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
length := int(length64)
|
||||
if length < 0 || length > maxSize {
|
||||
return io.ErrShortBuffer
|
||||
}
|
||||
buf := make([]byte, length)
|
||||
if _, err := io.ReadFull(reader, buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return proto.Unmarshal(buf, msg)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------
|
||||
// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keep
|
||||
// go-wire as a dep
|
||||
|
||||
func encodeByteSlice(w io.Writer, bz []byte) (err error) {
|
||||
err = encodeVarint(w, int64(len(bz)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = w.Write(bz)
|
||||
return
|
||||
}
|
||||
|
||||
func encodeVarint(w io.Writer, i int64) (err error) {
|
||||
var buf [10]byte
|
||||
n := binary.PutVarint(buf[:], i)
|
||||
_, err = w.Write(buf[0:n])
|
||||
return
|
||||
_, err := protoio.NewDelimitedReader(r, maxMsgSize).ReadMsg(msg)
|
||||
return err
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
@@ -87,12 +48,6 @@ func ToRequestInfo(req RequestInfo) *Request {
 	}
 }

-func ToRequestSetOption(req RequestSetOption) *Request {
-	return &Request{
-		Value: &Request_SetOption{&req},
-	}
-}
-
 func ToRequestDeliverTx(req RequestDeliverTx) *Request {
 	return &Request{
 		Value: &Request_DeliverTx{&req},
@@ -159,6 +114,18 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
 	}
 }

+func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
+	return &Request{
+		Value: &Request_PrepareProposal{&req},
+	}
+}
+
+func ToRequestProcessProposal(req RequestProcessProposal) *Request {
+	return &Request{
+		Value: &Request_ProcessProposal{&req},
+	}
+}
+
 //----------------------------------------

 func ToResponseException(errStr string) *Response {
@@ -185,12 +152,6 @@ func ToResponseInfo(res ResponseInfo) *Response {
 	}
 }

-func ToResponseSetOption(res ResponseSetOption) *Response {
-	return &Response{
-		Value: &Response_SetOption{&res},
-	}
-}
-
 func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
 	return &Response{
 		Value: &Response_DeliverTx{&res},
@@ -256,3 +217,15 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
 		Value: &Response_ApplySnapshotChunk{&res},
 	}
 }
+
+func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
+	return &Response{
+		Value: &Response_PrepareProposal{&res},
+	}
+}
+
+func ToResponseProcessProposal(res ResponseProcessProposal) *Response {
+	return &Response{
+		Value: &Response_ProcessProposal{&res},
+	}
+}
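A hedged sketch (not part of this diff) of how the new To* wrappers might be consumed: a type switch over Request.Value that routes proposal requests to an Application and wraps the results with the matching ToResponse helpers. dispatchProposal is an illustrative name, not code from the repository.

package example

import (
	"github.com/tendermint/tendermint/abci/types"
)

// dispatchProposal routes the proposal messages; the type switch mirrors the
// Request_PrepareProposal / Request_ProcessProposal oneof wrappers added above.
func dispatchProposal(app types.Application, req *types.Request) *types.Response {
	switch r := req.Value.(type) {
	case *types.Request_PrepareProposal:
		res := app.PrepareProposal(*r.PrepareProposal)
		return types.ToResponsePrepareProposal(res)
	case *types.Request_ProcessProposal:
		res := app.ProcessProposal(*r.ProcessProposal)
		return types.ToResponseProcessProposal(res)
	default:
		return types.ToResponseException("unexpected request type")
	}
}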
@@ -6,7 +6,7 @@ import (
 	"strings"
 	"testing"

-	"github.com/gogo/protobuf/proto"
+	"github.com/cosmos/gogoproto/proto"
 	"github.com/stretchr/testify/assert"

 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -25,7 +25,7 @@ func TestMarshalJSON(t *testing.T) {
 		{
 			Type: "testEvent",
 			Attributes: []EventAttribute{
-				{Key: []byte("pho"), Value: []byte("bo")},
+				{Key: "pho", Value: "bo"},
 			},
 		},
 	},
@@ -92,7 +92,7 @@ func TestWriteReadMessage2(t *testing.T) {
 		{
 			Type: "testEvent",
 			Attributes: []EventAttribute{
-				{Key: []byte("abc"), Value: []byte("def")},
+				{Key: "abc", Value: "def"},
 			},
 		},
 	},
224
abci/types/mocks/application.go
Normal file
@@ -0,0 +1,224 @@
// Code generated by mockery. DO NOT EDIT.

package mocks

import (
	mock "github.com/stretchr/testify/mock"
	types "github.com/tendermint/tendermint/abci/types"
)

// Application is an autogenerated mock type for the Application type
type Application struct {
	mock.Mock
}

// ApplySnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	ret := _m.Called(_a0)

	var r0 types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
	}

	return r0
}

// BeginBlock provides a mock function with given fields: _a0
func (_m *Application) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock {
	ret := _m.Called(_a0)

	var r0 types.ResponseBeginBlock
	if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) types.ResponseBeginBlock); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseBeginBlock)
	}

	return r0
}

// CheckTx provides a mock function with given fields: _a0
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
	ret := _m.Called(_a0)

	var r0 types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseCheckTx)
	}

	return r0
}

// Commit provides a mock function with given fields:
func (_m *Application) Commit() types.ResponseCommit {
	ret := _m.Called()

	var r0 types.ResponseCommit
	if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(types.ResponseCommit)
	}

	return r0
}

// DeliverTx provides a mock function with given fields: _a0
func (_m *Application) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx {
	ret := _m.Called(_a0)

	var r0 types.ResponseDeliverTx
	if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) types.ResponseDeliverTx); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseDeliverTx)
	}

	return r0
}

// EndBlock provides a mock function with given fields: _a0
func (_m *Application) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock {
	ret := _m.Called(_a0)

	var r0 types.ResponseEndBlock
	if rf, ok := ret.Get(0).(func(types.RequestEndBlock) types.ResponseEndBlock); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseEndBlock)
	}

	return r0
}

// Info provides a mock function with given fields: _a0
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
	ret := _m.Called(_a0)

	var r0 types.ResponseInfo
	if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseInfo)
	}

	return r0
}

// InitChain provides a mock function with given fields: _a0
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
	ret := _m.Called(_a0)

	var r0 types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseInitChain)
	}

	return r0
}

// ListSnapshots provides a mock function with given fields: _a0
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
	ret := _m.Called(_a0)

	var r0 types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseListSnapshots)
	}

	return r0
}

// LoadSnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	ret := _m.Called(_a0)

	var r0 types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
	}

	return r0
}

// OfferSnapshot provides a mock function with given fields: _a0
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	ret := _m.Called(_a0)

	var r0 types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseOfferSnapshot)
	}

	return r0
}

// PrepareProposal provides a mock function with given fields: _a0
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
	ret := _m.Called(_a0)

	var r0 types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponsePrepareProposal)
	}

	return r0
}

// ProcessProposal provides a mock function with given fields: _a0
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
	ret := _m.Called(_a0)

	var r0 types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseProcessProposal)
	}

	return r0
}

// Query provides a mock function with given fields: _a0
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
	ret := _m.Called(_a0)

	var r0 types.ResponseQuery
	if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseQuery)
	}

	return r0
}

type mockConstructorTestingTNewApplication interface {
	mock.TestingT
	Cleanup(func())
}

// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewApplication(t mockConstructorTestingTNewApplication) *Application {
	mock := &Application{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
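A short sketch (not part of this diff) of how the generated mock might be used with testify; the stubbed response is a placeholder zero value and the test name is hypothetical.

package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/abci/types/mocks"
)

func TestPrepareProposalStub(t *testing.T) {
	// NewApplication registers t on the mock and asserts expectations on cleanup.
	app := mocks.NewApplication(t)

	// Stub PrepareProposal to return an empty response for any request, once.
	app.On("PrepareProposal", mock.Anything).
		Return(types.ResponsePrepareProposal{}).
		Once()

	_ = app.PrepareProposal(types.RequestPrepareProposal{})
}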