mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-23 21:22:06 +00:00

Compare commits: finalizeBl... → wb/pqueue- (72 commits)
| SHA1 |
|---|
| a2fd106b62 |
| 537b04d59d |
| a23b31d6e6 |
| a341a626e0 |
| a1a7e5888d |
| c3ae6f5b58 |
| a393cf8bab |
| 0e2752ae42 |
| 97a8f125e0 |
| 84c15857e4 |
| e70445f942 |
| 478f5321ad |
| 08e4e2ed3d |
| 7d63e991c5 |
| 7638235d33 |
| 2abfe20114 |
| 0bf7813c4e |
| ff9038e2ce |
| 00a40835a2 |
| c4f77ab6d1 |
| 2030875056 |
| 639e145729 |
| 68ffe8bc64 |
| 21309ccb7b |
| f70396c6fd |
| fdc246e4a8 |
| 78a0a5fe73 |
| 4f885209aa |
| 6dd0cf92c8 |
| 626d9b4fbe |
| 8addf99f90 |
| 76c6c67734 |
| a46724e4f6 |
| 40fba3960d |
| 36a859ae54 |
| ab5c63eff3 |
| 8228936155 |
| a12e2bbb60 |
| 11bebfb6a0 |
| 4009102e2b |
| cabd916517 |
| 363ea56680 |
| aa4854ff8f |
| 581dd01d47 |
| 50b00dff71 |
| 051e127d38 |
| 5530726df8 |
| decac693ab |
| 7ca0f24040 |
| 69848bef26 |
| 2c14d491f6 |
| cd248576ea |
| c256edc622 |
| 9d9360774f |
| c7c11fc7d5 |
| 37bc1d74df |
| d882f31569 |
| ba3f7106b1 |
| 3ccfb26137 |
| 96863decca |
| d4cda544ae |
| 800cce80b7 |
| e850863296 |
| 1dec3e139a |
| 11b920480f |
| 4f8bcb1cce |
| 2d95e38986 |
| 6bb4b688e0 |
| a1e1e6c290 |
| 736364178a |
| a99c7188d7 |
| a56b10fbef |
.github/CODEOWNERS (vendored), 2 lines changed

```diff
@@ -7,5 +7,5 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @alexanderbez @ebuchman @cmwaters @tessr @tychoish
+* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield
```
.github/workflows/coverage.yml (vendored), 2 lines changed

```diff
@@ -121,7 +121,7 @@ jobs:
       - run: |
           cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
         if: env.GIT_DIFF
-      - uses: codecov/codecov-action@v1.5.2
+      - uses: codecov/codecov-action@v2.0.1
         with:
           file: ./coverage.txt
         if: env.GIT_DIFF
```
.github/workflows/e2e-nightly-master.yml (vendored), 4 lines changed

```diff
@@ -17,7 +17,7 @@ jobs:
       fail-fast: false
       matrix:
         p2p: ['legacy', 'new', 'hybrid']
-        group: ['00', '01', '02', '03']
+        group: ['00', '01']
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
@@ -35,7 +35,7 @@ jobs:
       - name: Generate testnets
         working-directory: test/e2e
         # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
+        run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}

       - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
         working-directory: test/e2e
```
.github/workflows/linkchecker.yml (vendored), 2 lines changed

```diff
@@ -7,6 +7,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2.3.4
-      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
+      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
         with:
           folder-path: "docs"
```
.github/workflows/stale.yml (vendored), 7 lines changed

```diff
@@ -7,12 +7,13 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v3.0.19
+      - uses: actions/stale@v4
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: "This pull request has been automatically marked as stale because it has not had
            recent activity. It will be closed if no further activity occurs. Thank you
            for your contributions."
-          days-before-stale: 10
-          days-before-close: 4
+          days-before-stale: -1
+          days-before-pr-stale: 10
+          days-before-pr-close: 4
          exempt-pr-labels: "S:wip"
```
.github/workflows/tests.yml (vendored), 32 lines changed

```diff
@@ -42,38 +42,6 @@ jobs:
           key: ${{ runner.os }}-${{ github.sha }}-tm-binary
         if: env.GIT_DIFF

-  test_abci_apps:
-    runs-on: ubuntu-latest
-    needs: build
-    timeout-minutes: 5
-    steps:
-      - uses: actions/setup-go@v2
-        with:
-          go-version: "1.16"
-      - uses: actions/checkout@v2.3.4
-      - uses: technote-space/get-diff-action@v4
-        with:
-          PATTERNS: |
-            **/**.go
-            go.mod
-            go.sum
-      - uses: actions/cache@v2.1.6
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-        if: env.GIT_DIFF
-      - uses: actions/cache@v2.1.6
-        with:
-          path: ~/go/bin
-          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
-        if: env.GIT_DIFF
-      - name: test_abci_apps
-        run: abci/tests/test_app/test.sh
-        shell: bash
-        if: env.GIT_DIFF
-
   test_abci_cli:
     runs-on: ubuntu-latest
     needs: build
```
.gitignore (vendored), 2 lines changed

```diff
@@ -15,7 +15,7 @@
 .vagrant
 .vendor-new/
 .vscode/
-abci-cli
+abci/abci-cli
 addrbook.json
 artifacts/*
 build/*
```
CHANGELOG.md

```diff
@@ -21,7 +21,9 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
 - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
 - [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
 - [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
 - [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
+- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
+- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0

 - Apps
   - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
@@ -30,6 +32,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
   - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
   - [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
     - It is not required any longer to set ldflags to set version strings
+  - [abci/counter] \#6684 Delete counter example app

 - P2P Protocol
@@ -95,6 +98,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
   - Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
   - Transactions are gossiped in FIFO order as they are in `v0`.
 - [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
 - [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106)
+- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)

 ### IMPROVEMENTS
 - [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
@@ -147,3 +152,5 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
 - [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
 - [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
+- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman)
+- [light] \#6687 Fix bug with incorrectly handled contexts in the light client (@cmwaters)
```
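The priority-mempool entries above correspond to the `Priority` field on `ResponseCheckTx`, visible later in this diff. A minimal sketch of an application opting in, assuming the ABCI Go types shown elsewhere in this comparison; the priority scheme itself is purely illustrative:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/types"
)

// PrioApp is an illustrative application: it derives each transaction's
// mempool priority from the first tx byte. Per the changelog entry above,
// applications that leave Priority at zero fall back to FIFO reaping.
type PrioApp struct {
	types.BaseApplication
}

func (app *PrioApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	var prio int64
	if len(req.Tx) > 0 {
		prio = int64(req.Tx[0]) // illustrative priority scheme
	}
	return types.ResponseCheckTx{Code: 0, Priority: prio} // Code 0 = OK
}
```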
Makefile, 11 lines changed

```diff
@@ -202,7 +202,7 @@ format:

 lint:
 	@echo "--> Running linter"
-	@golangci-lint run
+	go run github.com/golangci/golangci-lint/cmd/golangci-lint run
 .PHONY: lint

 DESTINATION = ./index.html.md
@@ -231,6 +231,15 @@ build-docker: build-linux
 	rm -rf DOCKER/tendermint
 .PHONY: build-docker

+
+###############################################################################
+###                                  Mocks                                  ###
+###############################################################################
+
+mockery:
+	go generate -run="mockery" ./...
+.PHONY: mockery
+
 ###############################################################################
 ###                        Local testnet using docker                       ###
 ###############################################################################
```
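The new `mockery` target only does something if packages in the tree carry matching `go generate` directives: the `-run="mockery"` filter executes only directives whose command line contains that string. A hedged sketch of what such a directive looks like; the package, interface name, and flags are illustrative, not taken from this diff:

```go
// Package consensus is a hypothetical example package.
package consensus

// This is the kind of directive that `make mockery`
// (i.e. go generate -run="mockery" ./...) picks up: the -run filter
// matches the literal "mockery" in the command. Flags are illustrative.
//go:generate mockery --name Executor --output ./mocks
type Executor interface {
	Execute(height int64) error
}
```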
UPGRADING.md

```diff
@@ -32,6 +32,8 @@ This guide provides instructions for upgrading to specific versions of Tendermin
 - configuration values starting with `priv-validator-` have moved to the new
   `priv-validator` section, without the `priv-validator-` prefix.

+* Fast Sync v2 has been deprecated, please use v0 to sync a node.
+
 ### CLI Changes

 * You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
@@ -74,6 +76,10 @@ will need to change to accommodate these changes. Most notably:
   longer exported and have been replaced with `node.New` and
   `node.NewDefault` which provide more functional interfaces.

+### RPC changes
+
+Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
+
 ## v0.34.0

 **Upgrading to Tendermint 0.34 requires a blockchain restart.**
```
```diff
@@ -1,4 +1,4 @@
-// Code generated by mockery 2.7.4. DO NOT EDIT.
+// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

 package mocks
```
```diff
@@ -17,7 +17,6 @@ import (

 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/code"
-	"github.com/tendermint/tendermint/abci/example/counter"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/abci/server"
 	servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -47,9 +46,6 @@
 	flagHeight int
 	flagProve  bool

-	// counter
-	flagSerial bool
-
 	// kvstore
 	flagPersist string
 )
@@ -61,9 +57,7 @@ var RootCmd = &cobra.Command{
 	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

 		switch cmd.Use {
-		case "counter", "kvstore": // for the examples apps, don't pre-run
-			return nil
-		case "version": // skip running for version command
+		case "kvstore", "version":
 			return nil
 		}

@@ -135,10 +129,6 @@ func addQueryFlags() {
 		"whether or not to return a merkle proof of the query result")
 }

-func addCounterFlags() {
-	counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
-}
-
 func addKVStoreFlags() {
 	kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
 }
@@ -157,8 +147,6 @@ func addCommands() {
 	RootCmd.AddCommand(queryCmd)

 	// examples
-	addCounterFlags()
-	RootCmd.AddCommand(counterCmd)
 	addKVStoreFlags()
 	RootCmd.AddCommand(kvstoreCmd)
 }
@@ -258,14 +246,6 @@ var queryCmd = &cobra.Command{
 	RunE: cmdQuery,
 }

-var counterCmd = &cobra.Command{
-	Use:   "counter",
-	Short: "ABCI demo example",
-	Long:  "ABCI demo example",
-	Args:  cobra.ExactArgs(0),
-	RunE:  cmdCounter,
-}
-
 var kvstoreCmd = &cobra.Command{
 	Use:   "kvstore",
 	Short: "ABCI demo example",
@@ -593,32 +573,6 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
 	return nil
 }

-func cmdCounter(cmd *cobra.Command, args []string) error {
-	app := counter.NewApplication(flagSerial)
-	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
-
-	// Start the listener
-	srv, err := server.NewServer(flagAddress, flagAbci, app)
-	if err != nil {
-		return err
-	}
-	srv.SetLogger(logger.With("module", "abci-server"))
-	if err := srv.Start(); err != nil {
-		return err
-	}
-
-	// Stop upon receiving SIGTERM or CTRL-C.
-	tmos.TrapSignal(logger, func() {
-		// Cleanup
-		if err := srv.Stop(); err != nil {
-			logger.Error("Error while stopping server", "err", err)
-		}
-	})
-
-	// Run forever.
-	select {}
-}
-
 func cmdKVStore(cmd *cobra.Command, args []string) error {
 	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
```
Deleted file (the counter example application, @@ -1,86 +0,0 @@):

```go
package counter

import (
	"encoding/binary"
	"fmt"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
)

type Application struct {
	types.BaseApplication

	hashCount int
	txCount   int
	serial    bool
}

func NewApplication(serial bool) *Application {
	return &Application{serial: serial}
}

func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}

func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseDeliverTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
		}
		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		if txValue != uint64(app.txCount) {
			return types.ResponseDeliverTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
		}
	}
	app.txCount++
	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseCheckTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
		}

		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		if txValue < uint64(app.txCount) {
			return types.ResponseCheckTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
		}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK}
}

func (app *Application) Commit() (resp types.ResponseCommit) {
	app.hashCount++
	if app.txCount == 0 {
		return types.ResponseCommit{}
	}
	hash := make([]byte, 8)
	binary.BigEndian.PutUint64(hash, uint64(app.txCount))
	return types.ResponseCommit{Data: hash}
}

func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
	switch reqQuery.Path {
	case "hash":
		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
	case "tx":
		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
	default:
		return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
	}
}
```
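The serial-nonce check in the deleted app right-aligns short transactions into an 8-byte buffer before decoding them big-endian, so a 1-byte tx and an 8-byte tx with the same value compare equal. A standalone illustration of that padding:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Mirrors the deleted app's nonce handling: a short tx is right-aligned
	// into an 8-byte buffer, then decoded as a big-endian uint64.
	tx := []byte{0x00, 0x00, 0x04}
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx)
	fmt.Println(binary.BigEndian.Uint64(tx8)) // prints 4
}
```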
Deleted file (test client helpers for the counter app, @@ -1,55 +0,0 @@):

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
)

var ctx = context.Background()

func startClient(abciType string) abcicli.Client {
	// Start client
	client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
	if err != nil {
		panic(err.Error())
	}
	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
	client.SetLogger(logger.With("module", "abcicli"))
	if err := client.Start(); err != nil {
		panicf("connecting to abci_app: %v", err.Error())
	}

	return client
}

func commit(client abcicli.Client, hashExp []byte) {
	res, err := client.CommitSync(ctx)
	if err != nil {
		panicf("client error: %v", err)
	}
	if !bytes.Equal(res.Data, hashExp) {
		panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
	}
}

func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
	if err != nil {
		panicf("client error: %v", err)
	}
	if res.Code != codeExp {
		panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
	}
	if !bytes.Equal(res.Data, dataExp) {
		panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
	}
}

func panicf(format string, a ...interface{}) {
	panic(fmt.Sprintf(format, a...))
}
```
Deleted file (the counter test driver, @@ -1,93 +0,0 @@):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"time"

	"github.com/tendermint/tendermint/abci/types"
)

var abciType string

func init() {
	abciType = os.Getenv("ABCI")
	if abciType == "" {
		abciType = "socket"
	}
}

func main() {
	testCounter()
}

const (
	maxABCIConnectTries = 10
)

func ensureABCIIsUp(typ string, n int) error {
	var err error
	cmdString := "abci-cli echo hello"
	if typ == "grpc" {
		cmdString = "abci-cli --abci grpc echo hello"
	}

	for i := 0; i < n; i++ {
		cmd := exec.Command("bash", "-c", cmdString)
		_, err = cmd.CombinedOutput()
		if err == nil {
			break
		}
		time.Sleep(500 * time.Millisecond)
	}
	return err
}

func testCounter() {
	abciApp := os.Getenv("ABCI_APP")
	if abciApp == "" {
		panic("No ABCI_APP specified")
	}

	fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
	subCommand := fmt.Sprintf("abci-cli %s", abciApp)
	cmd := exec.Command("bash", "-c", subCommand)
	cmd.Stdout = os.Stdout
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting %q err: %v", abciApp, err)
	}
	defer func() {
		if err := cmd.Process.Kill(); err != nil {
			log.Printf("error on process kill: %v", err)
		}
		if err := cmd.Wait(); err != nil {
			log.Printf("error while waiting for cmd to exit: %v", err)
		}
	}()

	if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
		log.Fatalf("echo failed: %v", err) //nolint:gocritic
	}

	client := startClient(abciType)
	defer func() {
		if err := client.Stop(); err != nil {
			log.Printf("error trying client stop: %v", err)
		}
	}()

	// commit(client, nil)
	// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
	commit(client, nil)
	deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
	// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
	deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
	// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
```
Deleted file (the counter test script, @@ -1,28 +0,0 @@):

```bash
#! /bin/bash
set -e

# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it

# Get the directory of where this script is.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Change into that dir because we expect that.
cd "$DIR"

echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"


echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"

# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
```
```diff
@@ -37,7 +37,6 @@ function testExample() {
 }

 testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
-testExample 2 tests/test_cli/ex2.abci abci-cli counter

 echo ""
 echo "PASS"
```
```diff
@@ -1827,16 +1827,17 @@ func (m *ResponseBeginBlock) GetEvents() []Event {
 }

 type ResponseCheckTx struct {
 	Code      uint32  `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
 	Data      []byte  `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
 	Log       string  `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
 	Info      string  `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
 	GasWanted int64   `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"`
 	GasUsed   int64   `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
 	Events    []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
 	Codespace string  `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
 	Sender    string  `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
 	Priority  int64   `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
+	MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
 }

 func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
@@ -1942,6 +1943,13 @@ func (m *ResponseCheckTx) GetPriority() int64 {
 	return 0
 }

+func (m *ResponseCheckTx) GetMempoolError() string {
+	if m != nil {
+		return m.MempoolError
+	}
+	return ""
+}
+
 type ResponseDeliverTx struct {
 	Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
 	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
```
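The new `MempoolError` field lets a node report mempool-level rejections separately from the application's own response code. A hedged sketch of reading it from a client, assuming `CheckTxSync` follows the same shape as the `DeliverTxSync` call in the deleted test helpers above; the tx bytes are illustrative:

```go
package main

import (
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// reportMempoolError sends one CheckTx and surfaces the new field.
// The client is assumed to be already constructed and started, e.g. via
// abcicli.NewClient as in the startClient helper deleted earlier in this diff.
func reportMempoolError(client abcicli.Client) error {
	res, err := client.CheckTxSync(context.Background(), types.RequestCheckTx{Tx: []byte{0x01}})
	if err != nil {
		return err
	}
	if res.MempoolError != "" {
		// A mempool-level rejection, reported separately from the
		// application-level response Code.
		fmt.Printf("mempool rejected tx: %s\n", res.MempoolError)
	}
	return nil
}
```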
```diff
@@ -2954,170 +2962,172 @@ func init() {
 func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }

 var fileDescriptor_252557cfdd89a31a = []byte{
-	// 2608 bytes of a gzipped FileDescriptorProto
-	// (machine-generated descriptor bytes omitted)
+	// 2627 bytes of a gzipped FileDescriptorProto
+	// (machine-generated descriptor bytes omitted)
 }

 // Reference imports to suppress errors if they are not otherwise used.
```
@@ -5229,6 +5239,13 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.MempoolError) > 0 {
i -= len(m.MempoolError)
copy(dAtA[i:], m.MempoolError)
i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError)))
i--
dAtA[i] = 0x5a
}
if m.Priority != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Priority))
i--
@@ -6842,6 +6859,10 @@ func (m *ResponseCheckTx) Size() (n int) {
if m.Priority != 0 {
n += 1 + sovTypes(uint64(m.Priority))
}
l = len(m.MempoolError)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}

@@ -11107,6 +11128,38 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error {
break
}
}
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MempoolError = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
@@ -48,9 +48,7 @@ func AddNodeFlags(cmd *cobra.Command) {
"proxy-app",
config.ProxyApp,
"proxy app address, or one of: 'kvstore',"+
" 'persistent_kvstore',"+
" 'counter',"+
" 'counter_serial' or 'noop' for local testing.")
" 'persistent_kvstore' or 'noop' for local testing.")
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")

// rpc flags
@@ -446,6 +446,7 @@ type RPCConfig struct {

// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCListenAddress string `mapstructure:"grpc-laddr"`

// Maximum number of simultaneous connections.
@@ -453,6 +454,7 @@ type RPCConfig struct {
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`

// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
@@ -782,25 +784,47 @@ type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`

// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`

// Limit the total size of all txs in the mempool.
// This only accounts for raw transactions (e.g. given 1MB transactions and
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`

// Size of the cache (used to filter transactions we saw earlier) in transactions
CacheSize int `mapstructure:"cache-size"`

// Do not remove invalid transactions from the cache (default: false)
// Set to true if it's not possible for any invalid transaction to become
// valid again in the future.
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`

// Maximum size of a single transaction
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
MaxTxBytes int `mapstructure:"max-tx-bytes"`

// Maximum size of a batch of transactions to send to a peer
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max-batch-bytes"`

// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist for in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if its
// insertion time into the mempool is beyond TTLDuration.
TTLDuration time.Duration `mapstructure:"ttl-duration"`

// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist for in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if its
// insertion time into the mempool is beyond TTLDuration.
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
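The two TTL fields above combine with OR semantics: a transaction is evicted when either bound is exceeded. A minimal sketch of that check, assuming a hypothetical wrapper that records each transaction's insertion time and height (names here are illustrative, not the actual mempool internals):

```go
package main

import (
	"fmt"
	"time"
)

// wrappedTx is a hypothetical record of a transaction's insertion
// metadata; the real mempool keeps richer state per transaction.
type wrappedTx struct {
	insertedAt time.Time // when the tx entered the mempool
	height     int64     // block height at which the tx entered the mempool
}

// expired reports whether a tx should be evicted under the semantics
// documented above: a non-zero TTLDuration or TTLNumBlocks each
// independently mark the tx for removal once exceeded.
func expired(tx wrappedTx, now time.Time, curHeight int64, ttl time.Duration, ttlBlocks int64) bool {
	if ttl > 0 && now.Sub(tx.insertedAt) > ttl {
		return true
	}
	if ttlBlocks > 0 && curHeight-tx.height > ttlBlocks {
		return true
	}
	return false
}

func main() {
	tx := wrappedTx{insertedAt: time.Now().Add(-time.Minute), height: 100}
	// true: the one-minute-old tx exceeds a 30s ttl-duration
	fmt.Println(expired(tx, time.Now(), 103, 30*time.Second, 5))
}
```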
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
@@ -811,10 +835,12 @@ func DefaultMempoolConfig() *MempoolConfig {
Broadcast: true,
// Each signature verification takes .5ms, Size reduced until we implement
// ABCI Recheck
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
TTLDuration: 0 * time.Second,
TTLNumBlocks: 0,
}
}

@@ -840,6 +866,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
if cfg.MaxTxBytes < 0 {
return errors.New("max-tx-bytes can't be negative")
}
if cfg.TTLDuration < 0 {
return errors.New("ttl-duration can't be negative")
}
if cfg.TTLNumBlocks < 0 {
return errors.New("ttl-num-blocks can't be negative")
}

return nil
}
@@ -959,7 +992,7 @@ func (cfg *FastSyncConfig) ValidateBasic() error {
case BlockchainV0:
return nil
case BlockchainV2:
return nil
return errors.New("fastsync version v2 is no longer supported. Please use v0")
default:
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
}

@@ -131,7 +131,7 @@ func TestFastSyncConfigValidateBasic(t *testing.T) {

// tamper with version
cfg.Version = "v2"
assert.NoError(t, cfg.ValidateBasic())
assert.Error(t, cfg.ValidateBasic())

cfg.Version = "invalid"
assert.Error(t, cfg.ValidateBasic())
@@ -200,6 +200,7 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}

# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"

# Maximum number of simultaneous connections.
@@ -209,6 +210,7 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}

# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
@@ -397,6 +399,22 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}

# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "{{ .Mempool.TTLDuration }}"

# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}

#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -440,7 +458,7 @@ fetchers = "{{ .StateSync.Fetchers }}"

# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
# 2) "v2" - DEPRECATED, please use v0
version = "{{ .FastSync.Version }}"

#######################################################
@@ -31,7 +31,6 @@ Available Commands:
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
counter ABCI demo example
deliver_tx Deliver a new tx to the application
kvstore ABCI demo example
echo Have the application echo a message
@@ -214,137 +213,9 @@ we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.

## Counter - Another Example

Now that we've got the hang of it, let's try another application, the
"counter" app.

Like the kvstore app, its code can be found
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
and looks like:

```go
func cmdCounter(cmd *cobra.Command, args []string) error {

	app := counter.NewCounterApplication(flagSerial)

	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Start the listener
	srv, err := server.NewServer(flagAddrC, flagAbci, app)
	if err != nil {
		return err
	}
	srv.SetLogger(logger.With("module", "abci-server"))
	if err := srv.Start(); err != nil {
		return err
	}

	// Stop upon receiving SIGTERM or CTRL-C.
	tmos.TrapSignal(logger, func() {
		// Cleanup
		srv.Stop()
	})

	// Run forever.
	select {}
}
```

The counter app doesn't use a Merkle tree, it just counts how many times
we've sent a transaction, asked for a hash, or committed the state. The
result of `commit` is just the number of transactions sent.

This application has two modes: `serial=off` and `serial=on`.

When `serial=on`, transactions must be a big-endian encoded incrementing
integer, starting at 0.

If `serial=off`, there are no restrictions on transactions.

We can toggle the value of `serial` using the `set_option` ABCI message.

When `serial=on`, some transactions are invalid. In a live blockchain,
transactions collect in memory before they are committed into blocks. To
avoid wasting resources on invalid transactions, ABCI provides the
`check_tx` message, which application developers can use to accept or
reject transactions before they are stored in memory or gossiped to
other peers.

In this instance of the counter app, `check_tx` only allows transactions
whose integer is greater than the last committed one.
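The nonce rule that `check_tx` enforces in serial mode can be sketched in a few lines of Go — this is a toy stand-in, not the actual application in `abci/example/counter`:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// toyCounter mimics the serial counter's rule: a tx must decode
// (big-endian) to a value >= the number of txs committed so far.
type toyCounter struct {
	txCount uint64
}

func (c *toyCounter) checkTx(tx []byte) error {
	// Pad to 8 bytes so short txs like 0x01 decode correctly.
	padded := make([]byte, 8)
	copy(padded[8-len(tx):], tx)
	if v := binary.BigEndian.Uint64(padded); v < c.txCount {
		return fmt.Errorf("invalid nonce: expected >= %d, got %d", c.txCount, v)
	}
	return nil
}

func main() {
	c := &toyCounter{txCount: 1}         // one tx already committed
	fmt.Println(c.checkTx([]byte{0x00})) // rejected: expected >= 1, got 0
	fmt.Println(c.checkTx([]byte{0xff})) // accepted: any value >= 1 passes
}
```

The console session below shows the real application producing exactly these accept/reject outcomes.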
Let's kill the console and the kvstore application, and start the
counter app:

```sh
abci-cli counter
```

In another window, start the `abci-cli console`:

```sh

> check_tx 0x00
-> code: OK

> check_tx 0xff
-> code: OK

> deliver_tx 0x00
-> code: OK

> check_tx 0x00
-> code: BadNonce
-> log: Invalid nonce. Expected >= 1, got 0

> deliver_tx 0x01
-> code: OK

> deliver_tx 0x04
-> code: BadNonce
-> log: Invalid nonce. Expected 2, got 4

> info
-> code: OK
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
```

This is a very simple application, but between `counter` and `kvstore`,
it's easy to see how you can build out arbitrary application states on
top of the ABCI. [Hyperledger's
Burrow](https://github.com/hyperledger/burrow) also runs atop ABCI,
bringing with it Ethereum-like accounts, the Ethereum virtual machine,
Monax's permissioning scheme, and native contract extensions.

But the ultimate flexibility comes from being able to write the
application easily in any language.

We have implemented the counter in a number of languages [see the
example directory](https://github.com/tendermint/tendermint/tree/master/abci/example).

To run the Node.js version, first download & install [the Javascript ABCI server](https://github.com/tendermint/js-abci):

```sh
git clone https://github.com/tendermint/js-abci.git
cd js-abci
npm install abci
```

Now you can start the app:

```sh
node example/counter.js
```

(you'll have to kill the other counter application process). In another
window, run the console and those previous ABCI commands. You should get
the same results as for the Go version.

## Bounties

Want to write the counter app in your favorite language?! We'd be happy
Want to write an app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.
@@ -37,8 +37,8 @@ cd $GOPATH/src/github.com/tendermint/tendermint
make install_abci
```

Now you should have the `abci-cli` installed; you'll see a couple of
commands (`counter` and `kvstore`) that are example applications written
Now you should have the `abci-cli` installed; you'll notice the `kvstore`
command, an example application written
in Go. See below for an application written in JavaScript.

Now, let's run some apps!
@@ -165,92 +165,6 @@ curl -s 'localhost:26657/abci_query?data="name"'
Try some other transactions and queries to make sure everything is
working!
## Counter - Another Example

Now that we've got the hang of it, let's try another application, the
`counter` app.

The counter app doesn't use a Merkle tree, it just counts how many times
we've sent a transaction, or committed the state.

This application has two modes: `serial=off` and `serial=on`.

When `serial=on`, transactions must be a big-endian encoded incrementing
integer, starting at 0.

If `serial=off`, there are no restrictions on transactions.

In a live blockchain, transactions collect in memory before they are
committed into blocks. To avoid wasting resources on invalid
transactions, ABCI provides the `CheckTx` message, which application
developers can use to accept or reject transactions before they are
stored in memory or gossiped to other peers.

In this instance of the counter app, with `serial=on`, `CheckTx` only
allows transactions whose integer is greater than the last committed
one.
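If you'd rather generate those transaction bytes programmatically than type hex by hand, a small helper (illustrative only, not part of the repo) can big-endian encode the next nonce and trim leading zero bytes, which matches the encoding the counter app expects:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeNonce big-endian encodes n and trims leading zero bytes,
// so 0 -> 0x00, 1 -> 0x01, 256 -> 0x0100, and so on.
func encodeNonce(n uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, n)
	i := 0
	for i < 7 && buf[i] == 0 {
		i++
	}
	return buf[i:]
}

func main() {
	for _, n := range []uint64{0, 1, 5, 256} {
		fmt.Printf("tx=0x%X\n", encodeNonce(n)) // tx=0x00, tx=0x01, tx=0x05, tx=0x0100
	}
}
```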
Let's kill the previous instance of `tendermint` and the `kvstore`
application, and start the counter app. We can enable `serial=on` with a
flag:

```sh
abci-cli counter --serial
```

In another window, reset then start Tendermint:

```sh
tendermint unsafe_reset_all
tendermint start
```

Once again, you can see the blocks streaming by. Let's send some
transactions. Since we have set `serial=on`, the first transaction must
be the number `0`:

```sh
curl localhost:26657/broadcast_tx_commit?tx=0x00
```

Note the empty (hence successful) response. The next transaction must be
the number `1`. If instead, we try to send a `5`, we get an error:

```json
> curl localhost:26657/broadcast_tx_commit?tx=0x05
{
  "jsonrpc": "2.0",
  "id": "",
  "result": {
    "check_tx": {},
    "deliver_tx": {
      "code": 2,
      "log": "Invalid nonce. Expected 1, got 5"
    },
    "hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
    "height": 34
  }
}
```

But if we send a `1`, it works again:

```json
> curl localhost:26657/broadcast_tx_commit?tx=0x01
{
  "jsonrpc": "2.0",
  "id": "",
  "result": {
    "check_tx": {},
    "deliver_tx": {},
    "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
    "height": 60
  }
}
```

For more details on the `broadcast_tx` API, see [the guide on using
Tendermint](../tendermint-core/using-tendermint.md).

## CounterJS - Example in Another Language

@@ -31,24 +31,61 @@ For example:

would be equal to the composite key of `jack.account.number`.

Let's take a look at the `[tx_index]` config section:
By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.

## Configuration

Operators can configure indexing via the `[tx_index]` section. The `indexer`
field takes a series of supported indexers. If `null` is included, indexing will
be turned off regardless of other values provided.

```toml
##### transactions indexer configuration options #####
[tx_index]
[tx-index]

# What indexer to use for transactions
# The backend database list to back the indexer.
# If the list contains "null", no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# - When "kv" is chosen, "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer service backed by PostgreSQL.
# indexer = []
```

By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.
### Supported Indexers

You can turn off indexing completely by setting `tx_index` to `null`.
#### KV

The `kv` indexer type is an embedded key-value store supported by the main
underlying Tendermint database. Using the `kv` indexer type allows you to query
for block and transaction events directly against Tendermint's RPC. However, the
query syntax is limited and so this indexer type might be deprecated or removed
entirely in the future.
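As a sketch of what querying the `kv` indexer over RPC looks like from Go, using the `rpc/client/http` package — the address, the query string, and the exact constructor signature (which varies between releases) are assumptions for illustration:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Some releases take an additional websocket-endpoint argument here.
	c, err := rpchttp.New("tcp://localhost:26657")
	if err != nil {
		panic(err)
	}

	page, perPage := 1, 30
	// tx.height is one of the keys the kv indexer always writes.
	res, err := c.TxSearch(context.Background(), "tx.height=5", false, &page, &perPage, "asc")
	if err != nil {
		panic(err)
	}
	for _, tx := range res.Txs {
		fmt.Printf("hash=%X height=%d\n", tx.Hash, tx.Height)
	}
}
```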
#### PostgreSQL

The `psql` indexer type allows an operator to enable block and transaction event
indexing by proxying it to an external PostgreSQL instance, allowing the events
to be stored in relational models. Since the events are stored in an RDBMS, operators
can leverage SQL to perform a series of rich and complex queries that are not
supported by the `kv` indexer type. Since operators can leverage SQL directly,
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
such query will fail.

Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
must explicitly create the relations prior to starting Tendermint and enabling
the `psql` indexer type.

Example:

```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
```
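Once the schema is installed and events are flowing in, queries run against PostgreSQL directly rather than through Tendermint's RPC. A sketch using `database/sql` with the `lib/pq` driver already present in `go.mod` — the connection string is a placeholder and the table name below is an assumption, so check `schema.sql` for the authoritative relations:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/tendermint?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Hypothetical query counting indexed transaction results per block;
	// verify table and column names against state/indexer/sink/psql/schema.sql.
	rows, err := db.Query(`SELECT block_id, COUNT(*) FROM tx_results GROUP BY block_id ORDER BY block_id`)
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	for rows.Next() {
		var blockID, n int64
		if err := rows.Scan(&blockID, &n); err != nil {
			panic(err)
		}
		fmt.Printf("block=%d txs=%d\n", blockID, n)
	}
}
```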
## Default Indexes

@@ -61,7 +61,7 @@ Note the context/background should be written in the present tense.
- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md)
- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)

@@ -97,8 +97,7 @@ design for tendermint was originally tracked in
[#828](https://github.com/tendermint/tendermint/issues/828).

#### Eager StateSync
Warp Sync as implemented in Parity
["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) to rapidly
Warp Sync as implemented in OpenEthereum to rapidly
download both blocks and state snapshots from peers. Data is carved into ~4MB
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
@@ -234,5 +233,3 @@ Proposed
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed

@@ -119,7 +119,7 @@ network usage.
---

Check out the formal specification
[here](https://docs.tendermint.com/master/spec/consensus/light-client.html).
[here](https://github.com/tendermint/spec/tree/master/spec/light-client).

## Status

@@ -18,7 +18,7 @@ graceful here, but that's for another day.

It's possible to fool lite clients without there being a fork on the
main chain - so called Fork-Lite. See the
[fork accountability](https://docs.tendermint.com/master/spec/consensus/fork-accountability.html)
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
document for more details. For a sequential lite client, this can happen via
equivocation or amnesia attacks. For a skipping lite client this can also happen
via lunatic validator attacks. There must be some way for applications to punish

@@ -179,7 +179,7 @@ This then ends the process and the verify function that was called at the start
the user.

For a detailed overview of how each of these three attacks can be conducted please refer to the
[fork accountability spec]((https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)).
[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).

## Full Node Verification

@@ -275,9 +275,13 @@ dial-timeout = "3s"
#######################################################
[mempool]

# Mempool version to use:
# 1) "v0" - The legacy non-prioritized mempool reactor.
# 2) "v1" (default) - The prioritized mempool reactor.
version = "v1"

recheck = true
broadcast = true
wal-dir = ""

# Maximum number of transactions in the mempool
size = 5000
@@ -304,6 +308,22 @@ max-tx-bytes = 1048576
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0

# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"

# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0

#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -421,7 +441,6 @@ max-open-connections = 3

# Instrumentation namespace
namespace = "tendermint"

```

## Empty blocks VS no empty blocks

@@ -32,7 +32,7 @@ tendermint start --log-level "info"
Here is the list of modules you may encounter in Tendermint's log and a
little overview of what they do.

- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
- `abci-client` As mentioned in [Application Architecture Guide](../app-dev/app-architecture.md), Tendermint acts as an ABCI
client with respect to the application and maintains 3 connections:
mempool, consensus and query. The code used by Tendermint Core can
be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
@@ -45,12 +45,12 @@ little overview what they do.
from a crash.
[here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
You can subscribe to them by calling the `subscribe` RPC method. Refer
to [RPC docs](./rpc.md) for additional information.
to [RPC docs](../tendermint-core/rpc.md) for additional information.
- `mempool` Mempool module handles all incoming transactions, whether
they come from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For
more details, please check out the
[README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
[README](https://github.com/tendermint/spec/tree/master/spec/p2p).
- `rpc-server` RPC server. For implementation details, please read the
[doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which

@@ -40,7 +40,7 @@ Default logging level (`log-level = "info"`) should suffice for
normal operation mode. Read [this
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
for details on how to configure the `log-level` config variable. Some of the
modules can be found [here](../nodes/logging#list-of-modules). If
modules can be found [here](logging.md#list-of-modules). If
you're trying to debug Tendermint or asked to provide logs with debug
logging level, you can do so by running Tendermint with
`--log-level="debug"`.
@@ -114,7 +114,7 @@ just the votes seen at the current height.

If, after consulting the logs and the endpoints above, you still have no idea
what's happening, consider using the `tendermint debug kill` sub-command. This
command will scrape all the available info and kill the process. See
[Debugging](../tools/debugging.md) for the exact format.
[Debugging](../tools/debugging/README.md) for the exact format.

You can inspect the resulting archive yourself or create an issue on
[Github](https://github.com/tendermint/tendermint). Before opening an issue
@@ -134,7 +134,7 @@ Tendermint also can report and serve Prometheus metrics. See
[Metrics](./metrics.md).

The `tendermint debug dump` sub-command can be used to periodically dump useful
information into an archive. See [Debugging](../tools/debugging.md) for more
information into an archive. See [Debugging](../tools/debugging/README.md) for more
information.

## What happens when my app dies
@@ -268,6 +268,8 @@ While we do not favor any operating system, more secure and stable Linux server
distributions (like CentOS) should be preferred over desktop operating systems
(like Mac OS).

Native Windows support is not provided. If you are using a Windows machine, you can try using the [bash shell](https://docs.microsoft.com/en-us/windows/wsl/install-win10).

### Miscellaneous

NOTE: if you are going to use Tendermint in a public domain, make sure
@@ -313,7 +315,7 @@ We want `skip-timeout-commit=false` when there is economics on the line
because proposers should wait to hear more votes. But if you don't
care about that and want the fastest consensus, you can skip it. It will
be kept false by default for public deployments (e.g. [Cosmos
Hub](https://cosmos.network/intro/hub)) while for enterprise
Hub](https://hub.cosmos.network/main/hub-overview/overview.html)) while for enterprise
applications, setting it to true is not a problem.

- `consensus.peer-gossip-sleep-duration`

@@ -45,3 +45,13 @@ version = "v0"

If we're lagging sufficiently, we should go back to fast syncing, but
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).

## The Fast Sync event

When the Tendermint blockchain core launches, it may switch to `fast-sync`
mode to catch up to the current best height of the network. The core emits
a fast-sync event to expose the current status and the sync height. Once it
has caught up with the network's best height, it switches to the consensus
mechanism and emits another event exposing the fast-sync `complete` status
and the state `height`.

The user can query the events by subscribing to `EventQueryFastSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
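A sketch of subscribing to that event from Go over the websocket RPC — the client constructor and the `EventQueryFastSyncStatus` constant are treated as assumptions here, since signatures and names differ between releases:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	c, err := rpchttp.New("tcp://localhost:26657")
	if err != nil {
		panic(err)
	}
	// Start launches the websocket client that subscriptions require.
	if err := c.Start(); err != nil {
		panic(err)
	}
	defer c.Stop() //nolint:errcheck

	ch, err := c.Subscribe(context.Background(), "fastsync-demo", types.EventQueryFastSyncStatus.String())
	if err != nil {
		panic(err)
	}
	for ev := range ch {
		fmt.Printf("fast-sync status event: %+v\n", ev.Data)
	}
}
```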
@@ -9,3 +9,10 @@ With state sync your node will download data related to the head or near the head
This leads to drastically shorter times for joining a network.

Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md).

## Events

When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.

The user can query the events by subscribing to `EventQueryStateSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
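The subscription pattern is the same as in the fast-sync sketch earlier in this changeset; only the query constant changes, e.g. `types.EventQueryStateSyncStatus.String()`.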
@@ -36,7 +36,7 @@ more information on query syntax and other options.

You can also use tags, provided you have included them in the DeliverTx
response, to query transaction results. See [Indexing
transactions](./indexing-transactions.md) for details.
transactions](../app-dev/indexing-transactions.md) for details.

## ValidatorSetUpdates

@@ -552,8 +552,7 @@ To make a Tendermint network that can tolerate one of the validators
failing, you need at least four validator nodes (e.g., 2/3).

Updating validators in a live network is supported but must be
explicitly programmed by the application developer. See the [application
developers guide](../app-dev/app-development.md) for more details.
explicitly programmed by the application developer.

### Local Network

go.mod
@@ -10,32 +10,31 @@ require (
github.com/btcsuite/btcd v0.22.0-beta
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/fortytw2/leaktest v1.3.0
github.com/go-kit/kit v0.10.0
github.com/go-kit/kit v0.11.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.41.1
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.2.0
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/lib/pq v1.10.2
github.com/libp2p/go-buffer-pool v0.0.2
github.com/minio/highwayhash v1.0.2
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
github.com/ory/dockertest v3.3.5+incompatible
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.0
github.com/rs/zerolog v1.23.0
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.2.0
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tm-db v0.6.4
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
google.golang.org/grpc v1.39.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect

@@ -1,17 +1,36 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.
Package blockchain implements two versions of a reactor Service that are
responsible for block propagation and gossip between peers. This mechanism is
more formally known as fast-sync.

- v0 was the very first implementation. It's battle-tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.
In order for a full node to successfully participate in consensus, it must have
the latest view of state. The fast-sync protocol is a mechanism in which peers
may exchange and gossip entire blocks with one another, in a request/response
type model, until they've successfully synced to the latest head block. Once
successfully synced, the full node can switch to an active role in consensus and
will no longer fast-sync and thus no longer run the fast-sync process.

Check out ADR-40 for the formal model and requirements.
Note, the blockchain reactor Service gossips entire blocks and relevant data such
that each receiving peer may construct the entire view of the blockchain state.

# Termination criteria
There are two versions of the blockchain reactor Service, i.e. fast-sync:

1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.
- v0: The initial implementation that is battle-tested, but whose test coverage
is lacking and is not formally verifiable.
- v2: The latest implementation that has much higher test coverage and is formally
verified. However, the current implementation of v2 is not as battle-tested and
is known to have various bugs that could make it unreliable in production
environments.

The v0 blockchain reactor Service has one p2p channel, BlockchainChannel. This
channel is responsible for handling messages that both request blocks and respond
to block requests from peers. For every block request from a peer, the reactor
will execute respondToPeer which will fetch the block from the node's state store
and respond to the peer. For every block response, the node will add the block
to its pool via AddBlock.

Internally, v0 runs a poolRoutine that constantly checks for what blocks it needs
and requests them. The poolRoutine is also responsible for taking blocks from the
pool, saving and executing each block.
*/
package blockchain

@@ -83,6 +83,10 @@ type BlockPool struct {

requestsCh chan<- BlockRequest
errorsCh chan<- peerError

startHeight int64
lastHundredBlockTimeStamp time.Time
lastSyncRate float64
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block
@@ -91,12 +95,14 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
bp := &BlockPool{
peers: make(map[types.NodeID]*bpPeer),

requesters: make(map[int64]*bpRequester),
height: start,
numPending: 0,
requesters: make(map[int64]*bpRequester),
height: start,
startHeight: start,
numPending: 0,

requestsCh: requestsCh,
errorsCh: errorsCh,
requestsCh: requestsCh,
errorsCh: errorsCh,
lastSyncRate: 0,
}
bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
return bp
@@ -106,6 +112,7 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// pool's start time.
func (pool *BlockPool) OnStart() error {
pool.lastAdvance = time.Now()
pool.lastHundredBlockTimeStamp = pool.lastAdvance
go pool.makeRequestersRoutine()
return nil
}
@@ -216,6 +223,19 @@ func (pool *BlockPool) PopRequest() {
delete(pool.requesters, pool.height)
pool.height++
pool.lastAdvance = time.Now()

// lastSyncRate is updated every 100 blocks. It uses an adaptive filter
// to smooth the block sync rate; the unit is blocks per second.
if (pool.height-pool.startHeight)%100 == 0 {
newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
if pool.lastSyncRate == 0 {
pool.lastSyncRate = newSyncRate
} else {
pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
}
pool.lastHundredBlockTimeStamp = time.Now()
}

} else {
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
}
@@ -428,6 +448,20 @@ func (pool *BlockPool) debug() string {
return str
}

func (pool *BlockPool) targetSyncBlocks() int64 {
pool.mtx.RLock()
defer pool.mtx.RUnlock()

return pool.maxPeerHeight - pool.startHeight + 1
}

func (pool *BlockPool) getLastSyncRate() float64 {
pool.mtx.RLock()
defer pool.mtx.RUnlock()

return pool.lastSyncRate
}

//-------------------------------------

type bpPeer struct {

@@ -2,6 +2,7 @@ package v0

import (
"fmt"
"runtime/debug"
"sync"
"time"

@@ -10,6 +11,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmSync "github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
@@ -72,7 +74,7 @@ func (e peerError) Error() string {
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
}

// BlockchainReactor handles long-term catchup syncing.
// Reactor handles long-term catchup syncing.
type Reactor struct {
service.BaseService

@@ -83,12 +85,19 @@ type Reactor struct {
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
fastSync bool
fastSync *tmSync.AtomicBool

blockchainCh *p2p.Channel
peerUpdates *p2p.PeerUpdates
peerUpdatesCh chan p2p.Envelope
closeCh chan struct{}
blockchainCh *p2p.Channel
// blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope
// messages that the reactor will consume in processBlockchainCh and receiving messages
// from the peer updates channel and other goroutines. We do this instead of directly
// sending on blockchainCh.Out to avoid race conditions in the case where other goroutines
// send Envelopes directly to the blockchainCh.Out channel, since processBlockchainCh
// may close the blockchainCh.Out channel at the same time that other goroutines send to
// blockchainCh.Out.
blockchainOutBridgeCh chan p2p.Envelope
peerUpdates *p2p.PeerUpdates
closeCh chan struct{}

requestsCh <-chan BlockRequest
errorsCh <-chan peerError
@@ -99,6 +108,8 @@ type Reactor struct {
poolWG sync.WaitGroup

metrics *cons.Metrics

syncStartTime time.Time
}

// NewReactor returns new reactor instance.
@@ -126,19 +137,20 @@ func NewReactor(
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

r := &Reactor{
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
fastSync: fastSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
peerUpdates: peerUpdates,
peerUpdatesCh: make(chan p2p.Envelope),
closeCh: make(chan struct{}),
metrics: metrics,
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
fastSync: tmSync.NewBool(fastSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
blockchainOutBridgeCh: make(chan p2p.Envelope),
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
metrics: metrics,
syncStartTime: time.Time{},
}

r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
@@ -153,7 +165,7 @@ func NewReactor(
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
if r.fastSync {
if r.fastSync.IsSet() {
if err := r.pool.Start(); err != nil {
return err
}
@@ -171,7 +183,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
if r.fastSync {
if r.fastSync.IsSet() {
if err := r.pool.Stop(); err != nil {
r.Logger.Error("failed to stop pool", "err", err)
}
@@ -265,7 +277,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error("recovering from processing message panic", "err", err)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()

@@ -283,7 +299,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
}

// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel and peerUpdatesCh. Any error encountered during
// envelopes on the BlockchainChannel and blockchainOutBridgeCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockchainChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
@@ -301,8 +317,8 @@ func (r *Reactor) processBlockchainCh() {
}
}

case envelop := <-r.peerUpdatesCh:
r.blockchainCh.Out <- envelop
case envelope := <-r.blockchainOutBridgeCh:
r.blockchainCh.Out <- envelope

case <-r.closeCh:
r.Logger.Debug("stopped listening on blockchain channel; closing...")
@@ -324,7 +340,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
switch peerUpdate.Status {
case p2p.PeerStatusUp:
// send a status update to the newly added peer
r.peerUpdatesCh <- p2p.Envelope{
r.blockchainOutBridgeCh <- p2p.Envelope{
To: peerUpdate.NodeID,
Message: &bcproto.StatusResponse{
Base: r.store.Base(),
@@ -358,7 +374,7 @@ func (r *Reactor) processPeerUpdates() {
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToFastSync(state sm.State) error {
r.fastSync = true
r.fastSync.Set()
r.initialState = state
r.pool.height = state.LastBlockHeight + 1

@@ -366,6 +382,8 @@ func (r *Reactor) SwitchToFastSync(state sm.State) error {
return err
}

r.syncStartTime = time.Now()

r.poolWG.Add(1)
go r.poolRoutine(true)

@@ -388,7 +406,7 @@ func (r *Reactor) requestRoutine() {
return

case request := <-r.requestsCh:
r.blockchainCh.Out <- p2p.Envelope{
r.blockchainOutBridgeCh <- p2p.Envelope{
To: request.PeerID,
Message: &bcproto.BlockRequest{Height: request.Height},
}
@@ -405,7 +423,7 @@ func (r *Reactor) requestRoutine() {
go func() {
defer r.poolWG.Done()

r.blockchainCh.Out <- p2p.Envelope{
r.blockchainOutBridgeCh <- p2p.Envelope{
Broadcast: true,
Message: &bcproto.StatusRequest{},
}
@@ -478,6 +496,8 @@ FOR_LOOP:
r.Logger.Error("failed to stop pool", "err", err)
}

r.fastSync.UnSet()

if r.consReactor != nil {
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
}
@@ -592,3 +612,27 @@ FOR_LOOP:
func (r *Reactor) GetMaxPeerBlockHeight() int64 {
return r.pool.MaxPeerHeight()
}

func (r *Reactor) GetTotalSyncedTime() time.Duration {
if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}

func (r *Reactor) GetRemainingSyncTime() time.Duration {
if !r.fastSync.IsSet() {
return time.Duration(0)
}

targetSyncs := r.pool.targetSyncBlocks()
currentSyncs := r.store.Height() - r.pool.startHeight + 1
lastSyncRate := r.pool.getLastSyncRate()
if currentSyncs < 0 || lastSyncRate < 0.001 {
return time.Duration(0)
}

remain := float64(targetSyncs-currentSyncs) / lastSyncRate

return time.Duration(int64(remain * float64(time.Second)))
}

@@ -215,6 +215,29 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0])
}

func TestReactor_SyncTime(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)

genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(101)

rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
rts.start(t)

require.Eventually(
t,
func() bool {
return rts.reactors[rts.nodes[1]].GetRemainingSyncTime() > time.Nanosecond &&
rts.reactors[rts.nodes[1]].pool.getLastSyncRate() > 0.001
},
10*time.Second,
10*time.Millisecond,
"expected node to be partially synced",
)
}

func TestReactor_NoBlockResponse(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)

@@ -13,6 +13,7 @@ import (
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@@ -34,8 +35,8 @@ type blockStore interface {
type BlockchainReactor struct {
p2p.BaseReactor

fastSync bool // if true, enable fast sync on start
stateSynced bool // set to true when SwitchToFastSync is called by state sync
fastSync *sync.AtomicBool // enable fast sync on start when it's been Set
stateSynced bool // set to true when SwitchToFastSync is called by state sync
scheduler *Routine
processor *Routine
logger log.Logger
@@ -48,6 +49,10 @@ type BlockchainReactor struct {
reporter behavior.Reporter
io iIO
store blockStore

syncStartTime time.Time
syncStartHeight int64
lastSyncRate float64 // blocks synced per second, based on the last 100 blocks
}
type blockApplier interface {
@@ -68,12 +73,15 @@ func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
processor := newPcState(pContext)

return &BlockchainReactor{
scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
processor: newRoutine("processor", processor.handle, chBufferSize),
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
fastSync: fastSync,
scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
processor: newRoutine("processor", processor.handle, chBufferSize),
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
fastSync: sync.NewBool(fastSync),
syncStartHeight: initHeight,
syncStartTime: time.Time{},
lastSyncRate: 0,
}
}

@@ -129,7 +137,7 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
if r.fastSync {
if r.fastSync.IsSet() {
err := r.startSync(nil)
if err != nil {
return fmt.Errorf("failed to start fast sync: %w", err)
@@ -175,7 +183,13 @@ func (r *BlockchainReactor) endSync() {
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
r.stateSynced = true
state = state.Copy()
return r.startSync(&state)

err := r.startSync(&state)
if err == nil {
r.syncStartTime = time.Now()
}

return err
}

// reactor generated ticker events:
@@ -283,7 +297,6 @@ func (e bcResetState) String() string {

// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
var lastRate = 0.0
var lastHundred = time.Now()

var (
@@ -414,10 +427,15 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
switch event := event.(type) {
case pcBlockProcessed:
r.setSyncHeight(event.height)
if r.syncHeight%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
if (r.syncHeight-r.syncStartHeight)%100 == 0 {
newSyncRate := 100 / time.Since(lastHundred).Seconds()
if r.lastSyncRate == 0 {
r.lastSyncRate = newSyncRate
} else {
r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
}
r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
"max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
lastHundred = time.Now()
}
r.scheduler.send(event)
@@ -429,6 +447,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
r.logger.Error("Failed to switch to consensus reactor")
}
r.endSync()
r.fastSync.UnSet()
return
case noOpEvent:
default:
@@ -596,3 +615,29 @@ func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
defer r.mtx.RUnlock()
return r.maxPeerHeight
}

func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
|
||||
if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
|
||||
return time.Duration(0)
|
||||
}
|
||||
return time.Since(r.syncStartTime)
|
||||
}
|
||||
|
||||
func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
|
||||
if !r.fastSync.IsSet() {
|
||||
return time.Duration(0)
|
||||
}
|
||||
|
||||
r.mtx.RLock()
|
||||
defer r.mtx.RUnlock()
|
||||
|
||||
targetSyncs := r.maxPeerHeight - r.syncStartHeight
|
||||
currentSyncs := r.syncHeight - r.syncStartHeight + 1
|
||||
if currentSyncs < 0 || r.lastSyncRate < 0.001 {
|
||||
return time.Duration(0)
|
||||
}
|
||||
|
||||
remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate
|
||||
|
||||
return time.Duration(int64(remain * float64(time.Second)))
|
||||
}
|
||||
|
||||
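The hunks above fold each 100-block window's instantaneous rate into an exponential moving average (0.9 old / 0.1 new) and derive the remaining time as (blocks left) / (smoothed rate). A minimal, self-contained sketch of the same arithmetic — standalone names, not the reactor's own types:

    package main

    import (
        "fmt"
        "time"
    )

    type syncEstimator struct {
        lastSyncRate float64 // blocks/s, EMA over 100-block windows
    }

    // observeWindow folds one window's throughput into the moving average.
    func (s *syncEstimator) observeWindow(blocks int64, elapsed time.Duration) {
        newRate := float64(blocks) / elapsed.Seconds()
        if s.lastSyncRate == 0 {
            s.lastSyncRate = newRate
        } else {
            s.lastSyncRate = 0.9*s.lastSyncRate + 0.1*newRate
        }
    }

    // remaining estimates time to reach target height at the smoothed rate.
    func (s *syncEstimator) remaining(target, current int64) time.Duration {
        if current < 0 || s.lastSyncRate < 0.001 {
            return 0
        }
        remain := float64(target-current) / s.lastSyncRate
        return time.Duration(int64(remain * float64(time.Second)))
    }

    func main() {
        est := &syncEstimator{}
        est.observeWindow(100, 2*time.Second) // 50 blocks/s
        est.observeWindow(100, 4*time.Second) // 25 blocks/s -> EMA = 0.9*50 + 0.1*25 = 47.5
        fmt.Println(est.lastSyncRate)         // 47.5
        fmt.Println(est.remaining(1000, 100)) // ~18.9s
    }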
@@ -35,7 +35,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
prevoteHeight := int64(2)
testName := "consensus_byzantine_test"
tickerFunc := newMockTickerFunc(true)
appFunc := newCounter
appFunc := newKVStore

genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30)
states := make([]*State, nValidators)

@@ -19,7 +19,6 @@ import (
dbm "github.com/tendermint/tm-db"

abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
@@ -449,7 +448,7 @@ func randState(config *cfg.Config, nValidators int) (*State, []*validatorStub) {

vss := make([]*validatorStub, nValidators)

cs := newState(state, privVals[0], counter.NewApplication(true))
cs := newState(state, privVals[0], kvstore.NewApplication())

for i := 0; i < nValidators; i++ {
vss[i] = newValidatorStub(privVals[i], int32(i))
@@ -862,10 +861,6 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {

func (*mockTicker) SetLogger(log.Logger) {}

func newCounter() abci.Application {
return counter.NewApplication(true)
}

func newPersistentKVStore() abci.Application {
dir, err := ioutil.TempDir("", "persistent-kvstore")
if err != nil {
@@ -874,6 +869,10 @@ func newPersistentKVStore() abci.Application {
return kvstore.NewPersistentKVStoreApplication(dir)
}

func newKVStore() abci.Application {
return kvstore.NewApplication()
}

func newPersistentKVStoreWithPath(dbDir string) abci.Application {
return kvstore.NewPersistentKVStoreApplication(dbDir)
}

@@ -18,7 +18,9 @@ func TestReactorInvalidPrecommit(t *testing.T) {
config := configSetup(t)

n := 4
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
states, cleanup := randConsensusState(t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)

for i := 0; i < 4; i++ {

internal/consensus/mocks/cons_sync_reactor.go (new file, 28 lines)
@@ -0,0 +1,28 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.

package mocks

import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"
)

// ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type
type ConsSyncReactor struct {
mock.Mock
}

// SetFastSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetFastSyncingMetrics(_a0 float64) {
_m.Called(_a0)
}

// SetStateSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) {
_m.Called(_a0)
}

// SwitchToConsensus provides a mock function with given fields: _a0, _a1
func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) {
_m.Called(_a0, _a1)
}

internal/consensus/mocks/fast_sync_reactor.go (new file, 71 lines)
@@ -0,0 +1,71 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.

package mocks

import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"

time "time"
)

// FastSyncReactor is an autogenerated mock type for the FastSyncReactor type
type FastSyncReactor struct {
mock.Mock
}

// GetMaxPeerBlockHeight provides a mock function with given fields:
func (_m *FastSyncReactor) GetMaxPeerBlockHeight() int64 {
ret := _m.Called()

var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}

return r0
}

// GetRemainingSyncTime provides a mock function with given fields:
func (_m *FastSyncReactor) GetRemainingSyncTime() time.Duration {
ret := _m.Called()

var r0 time.Duration
if rf, ok := ret.Get(0).(func() time.Duration); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Duration)
}

return r0
}

// GetTotalSyncedTime provides a mock function with given fields:
func (_m *FastSyncReactor) GetTotalSyncedTime() time.Duration {
ret := _m.Called()

var r0 time.Duration
if rf, ok := ret.Get(0).(func() time.Duration); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Duration)
}

return r0
}

// SwitchToFastSync provides a mock function with given fields: _a0
func (_m *FastSyncReactor) SwitchToFastSync(_a0 state.State) error {
ret := _m.Called(_a0)

var r0 error
if rf, ok := ret.Get(0).(func(state.State) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}

return r0
}
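The generated mock above can be driven with testify's expectation API. A hedged sketch of a test using it — the test body and return values are illustrative, not taken from this branch:

    package mocks_test

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/require"

        "github.com/tendermint/tendermint/internal/consensus/mocks"
    )

    func TestFastSyncReactorMock(t *testing.T) {
        fsr := &mocks.FastSyncReactor{}

        // Stub the metrics-oriented getters added by this change.
        fsr.On("GetMaxPeerBlockHeight").Return(int64(500))
        fsr.On("GetRemainingSyncTime").Return(10 * time.Second)

        require.EqualValues(t, 500, fsr.GetMaxPeerBlockHeight())
        require.Equal(t, 10*time.Second, fsr.GetRemainingSyncTime())
        fsr.AssertExpectations(t)
    }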
@@ -2,6 +2,7 @@ package consensus

import (
"fmt"
"runtime/debug"
"time"

cstypes "github.com/tendermint/tendermint/internal/consensus/types"
@@ -101,6 +102,22 @@ type FastSyncReactor interface {
SwitchToFastSync(sm.State) error

GetMaxPeerBlockHeight() int64

// GetTotalSyncedTime returns the duration elapsed since fast sync started.
GetTotalSyncedTime() time.Duration

// GetRemainingSyncTime returns the estimated time until the node is fully
// synced. It returns 0 if fast sync is not running or if the number of
// blocks synced is too small (less than 100).
GetRemainingSyncTime() time.Duration
}

//go:generate mockery --case underscore --name ConsSyncReactor
// ConsSyncReactor defines an interface used to test the behavior of node.startStateSync.
type ConsSyncReactor interface {
SwitchToConsensus(sm.State, bool)
SetStateSyncingMetrics(float64)
SetFastSyncingMetrics(float64)
}

// Reactor defines a reactor for the consensus service.
@@ -295,6 +312,11 @@ conS:
conR:
%+v`, err, r.state, r))
}

d := types.EventDataFastSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := r.eventBus.PublishEventFastSyncStatus(d); err != nil {
r.Logger.Error("failed to emit the fastsync complete event", "err", err)
}
}

// String returns a string representation of the Reactor.
@@ -1197,21 +1219,24 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message)
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
//
// NOTE: We process these messages even when we're fast_syncing. Messages affect
// either a peer state or the consensus state. Peer state updates can happen in
// parallel, but processing of proposals, block parts, and votes is ordered by
// the p2p channel.
//
// NOTE: We block on consensus state for proposals, block parts, and votes.
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error("recovering from processing message panic", "err", err)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()

// Just skip the entire message during syncing so that we can
// process fewer messages.
if r.WaitSync() {
return
}

// We wrap the envelope's message in a Proto wire type so we can convert back
// the domain type that individual channel message handlers can work with. We
// do this here once to avoid having to do it for each individual message type.
@@ -1412,3 +1437,11 @@ func (r *Reactor) peerStatsRoutine() {
func (r *Reactor) GetConsensusState() *State {
return r.state
}

func (r *Reactor) SetStateSyncingMetrics(v float64) {
r.Metrics.StateSyncing.Set(v)
}

func (r *Reactor) SetFastSyncingMetrics(v float64) {
r.Metrics.FastSyncing.Set(v)
}

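For context, a sketch of how a consumer might observe the fast-sync status event published above. The surrounding event-bus wiring is assumed, but the Subscribe signature matches its use in the tests further down:

    // waitForFastSyncStatus blocks until the consensus reactor publishes a
    // fast-sync status update, then returns it. Illustrative helper only.
    func waitForFastSyncStatus(ctx context.Context, bus *types.EventBus) (types.EventDataFastSyncStatus, error) {
        sub, err := bus.Subscribe(ctx, "status-watcher", types.EventQueryFastSyncStatus, 1)
        if err != nil {
            return types.EventDataFastSyncStatus{}, err
        }

        msg := <-sub.Out()
        return msg.Data().(types.EventDataFastSyncStatus), nil
    }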
@@ -25,6 +25,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -42,6 +43,7 @@ type reactorTestSuite struct {
states map[types.NodeID]*State
reactors map[types.NodeID]*Reactor
subs map[types.NodeID]types.Subscription
fastsyncSubs map[types.NodeID]types.Subscription
stateChannels map[types.NodeID]*p2p.Channel
dataChannels map[types.NodeID]*p2p.Channel
voteChannels map[types.NodeID]*p2p.Channel
@@ -58,10 +60,11 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
t.Helper()

rts := &reactorTestSuite{
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
fastsyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
}

rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
@@ -69,6 +72,8 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size)
rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size)

_, cancel := context.WithCancel(context.Background())

i := 0
for nodeID, node := range rts.network.Nodes {
state := states[i]
@@ -89,9 +94,13 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size)
require.NoError(t, err)

fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryFastSyncStatus, size)
require.NoError(t, err)

rts.states[nodeID] = state
rts.subs[nodeID] = blocksSub
rts.reactors[nodeID] = reactor
rts.fastsyncSubs[nodeID] = fsSub

// simulate handle initChain in handshake
if state.state.LastBlockHeight == 0 {
@@ -117,6 +126,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
}

leaktest.Check(t)
cancel()
})

return rts
@@ -253,11 +263,22 @@ func waitForBlockWithUpdatedValsAndValidateIt(
wg.Wait()
}

func ensureFastSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
t.Helper()
status, ok := msg.Data().(types.EventDataFastSyncStatus)

require.True(t, ok)
require.Equal(t, complete, status.Complete)
require.Equal(t, height, status.Height)
}

func TestReactorBasic(t *testing.T) {
config := configSetup(t)

n := 4
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
states, cleanup := randConsensusState(t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)

rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -273,8 +294,21 @@ func TestReactorBasic(t *testing.T) {

// wait till everyone makes the first new block
go func(s types.Subscription) {
defer wg.Done()
<-s.Out()
wg.Done()
}(sub)
}

wg.Wait()

for _, sub := range rts.fastsyncSubs {
wg.Add(1)

// wait till everyone makes the consensus switch
go func(s types.Subscription) {
defer wg.Done()
msg := <-s.Out()
ensureFastSyncStatus(t, msg, true, 0)
}(sub)
}

@@ -287,7 +321,7 @@ func TestReactorWithEvidence(t *testing.T) {
n := 4
testName := "consensus_reactor_test"
tickerFunc := newMockTickerFunc(true)
appFunc := newCounter
appFunc := newKVStore

genDoc, privVals := factory.RandGenesisDoc(config, n, false, 30)
states := make([]*State, n)
@@ -387,7 +421,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
n,
"consensus_reactor_test",
newMockTickerFunc(true),
newCounter,
newKVStore,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
},
@@ -431,7 +465,9 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
config := configSetup(t)

n := 4
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
states, cleanup := randConsensusState(t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)

rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock

@@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/crypto/tmhash"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
p2pmock "github.com/tendermint/tendermint/internal/p2p/mock"
@@ -654,7 +654,7 @@ func TestStateLockPOLRelock(t *testing.T) {
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

// before we timeout to the new round set the new proposal
cs2 := newState(cs1.state, vs2, counter.NewApplication(true))
cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
if prop == nil || propBlock == nil {
t.Fatal("Failed to create proposal block with vs2")
@@ -843,7 +843,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

// before we timeout to the new round set the new proposal
cs2 := newState(cs1.state, vs2, counter.NewApplication(true))
cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
if prop == nil || propBlock == nil {
t.Fatal("Failed to create proposal block with vs2")
@@ -887,7 +887,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

// before we timeout to the new round set the new proposal
cs3 := newState(cs1.state, vs3, counter.NewApplication(true))
cs3 := newState(cs1.state, vs3, kvstore.NewApplication())
prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1)
if prop == nil || propBlock == nil {
t.Fatal("Failed to create proposal block with vs2")

@@ -489,14 +489,12 @@ func (evpool *Pool) batchExpiredPendingEvidence(batch dbm.Batch) (int64, time.Ti
func (evpool *Pool) removeEvidenceFromList(
blockEvidenceMap map[string]struct{}) {

el := evpool.evidenceList
var nextE *clist.CElement
for e := el.Front(); e != nil; e = nextE {
nextE = e.Next()
for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
// Remove from clist
ev := e.Value.(types.Evidence)
if _, ok := blockEvidenceMap[evMapKey(ev)]; ok {
evpool.evidenceList.Remove(e)
e.DetachPrev()
}
}
}

@@ -2,6 +2,7 @@ package evidence

import (
"fmt"
"runtime/debug"
"sync"
"time"

@@ -165,6 +166,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()

@@ -296,7 +302,11 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos
r.peerWG.Done()

if e := recover(); e != nil {
r.Logger.Error("recovering from broadcasting evidence loop", "err", e)
r.Logger.Error(
"recovering from broadcasting evidence loop",
"err", e,
"stack", string(debug.Stack()),
)
}
}()

@@ -146,7 +146,7 @@ func (g *Group) OnStart() error {
func (g *Group) OnStop() {
g.ticker.Stop()
if err := g.FlushAndSync(); err != nil {
g.Logger.Error("Error flushin to disk", "err", err)
g.Logger.Error("Error flushing to disk", "err", err)
}
}

@@ -160,7 +160,7 @@ func (g *Group) Wait() {
// Close closes the head file. The group must be stopped by this moment.
func (g *Group) Close() {
if err := g.FlushAndSync(); err != nil {
g.Logger.Error("Error flushin to disk", "err", err)
g.Logger.Error("Error flushing to disk", "err", err)
}

g.mtx.Lock()

@@ -196,26 +196,21 @@ func (e *CElement) SetPrev(newPrev *CElement) {
e.mtx.Unlock()
}

// Detach is a shortcut to mark the given element removed and detach its prev/next elements.
func (e *CElement) Detach() {
func (e *CElement) SetRemoved() {
e.mtx.Lock()
defer e.mtx.Unlock()

e.removed = true

// This wakes up anyone waiting in either direction.
if e.prev == nil {
e.prevWg.Done()
close(e.prevWaitCh)
} else {
e.prev = nil
}

if e.next == nil {
e.nextWg.Done()
close(e.nextWaitCh)
} else {
e.next = nil
}
e.mtx.Unlock()
}

//--------------------------------------------------------------------------------
@@ -353,26 +348,24 @@ func (l *CList) PushBack(v interface{}) *CElement {
return e
}

// Remove removes the given element from the CList
// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks.
// NOTE: As per the contract of CList, removed elements cannot be added back.
// Because CList detaches the prev/next element when it removes the given element,
// do not use CElement.Next() in the for-loop postcondition; instead, declare a
// variable ahead of the loop and assign the Next() element to it inside the
// loop as the postcondition.
func (l *CList) Remove(e *CElement) interface{} {
l.mtx.Lock()
defer l.mtx.Unlock()

prev := e.Prev()
next := e.Next()

if l.head == nil || l.tail == nil {
l.mtx.Unlock()
panic("Remove(e) on empty CList")
}
if prev == nil && l.head != e {
l.mtx.Unlock()
panic("Remove(e) with false head")
}
if next == nil && l.tail != e {
l.mtx.Unlock()
panic("Remove(e) with false tail")
}

@@ -397,53 +390,13 @@ func (l *CList) Remove(e *CElement) interface{} {
next.SetPrev(prev)
}

e.Detach()
// Set .Done() on e, otherwise waiters will wait forever.
e.SetRemoved()

l.mtx.Unlock()
return e.Value
}

// Clear removes all the elements in the CList
func (l *CList) Clear() {
l.mtx.Lock()
defer l.mtx.Unlock()

if l.head == nil || l.tail == nil {
return
}

for e := l.head; e != nil; e = l.head {
prevE := e.Prev()
nextE := e.Next()

if prevE == nil && e != l.head {
panic("CList.Clear failed due to nil prev element")
}

if nextE == nil && e != l.tail {
panic("CList.Clear failed due to nil next element")
}

if l.len == 1 {
l.wg = waitGroup1()
l.waitCh = make(chan struct{})
}
l.len--

if prevE == nil {
l.head = nextE
} else {
prevE.SetNext(nextE)
}
if nextE == nil {
l.tail = prevE
} else {
nextE.SetPrev(prevE)
}

e.Detach()
}
}

func waitGroup1() (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{}
wg.Add(1)

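The net effect of this clist change is the restored removal contract: Remove marks the element removed and wakes any waiters, but leaves its prev/next links intact, so callers may keep iterating with e.Next() and must detach removed elements themselves. A minimal sketch of a conforming removal loop, mirroring the evidence-pool and mempool loops elsewhere in this diff (the list and predicate names are assumed):

    for e := list.Front(); e != nil; e = e.Next() {
        if shouldRemove(e.Value) {
            list.Remove(e)
            // Per the CONTRACT on Remove: detach so the removed element can
            // be garbage collected and does not pin its predecessor.
            e.DetachPrev()
        }
    }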
@@ -8,6 +8,7 @@ import (
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestPanicOnMaxLength(t *testing.T) {
@@ -252,16 +253,9 @@ func TestScanRightDeleteRandom(t *testing.T) {
// time.Sleep(time.Second * 1)

// And remove all the elements.
// we detach the prev/next element in CList.Remove, so just assign l.Front directly.
halfElements := numElements / 2
for el := l.Front(); el != nil && halfElements > 0; el = l.Front() {
for el := l.Front(); el != nil; el = el.Next() {
l.Remove(el)
halfElements--
}

// remove the remaining half of the elements in the CList
l.Clear()

if l.Len() != 0 {
t.Fatal("Failed to remove all elements from CList")
}
@@ -341,3 +335,50 @@ FOR_LOOP2:
t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen)
}
}

func TestRemoved(t *testing.T) {
l := New()
el1 := l.PushBack(1)
el2 := l.PushBack(2)
l.Remove(el1)
require.True(t, el1.Removed())
require.False(t, el2.Removed())
}

func TestNextWaitChan(t *testing.T) {
l := New()
el1 := l.PushBack(1)
t.Run("tail element should not have a closed nextWaitChan", func(t *testing.T) {
select {
case <-el1.NextWaitChan():
t.Fatal("nextWaitChan should not have been closed")
default:
}
})

el2 := l.PushBack(2)
t.Run("adding element should close tail nextWaitChan", func(t *testing.T) {
select {
case <-el1.NextWaitChan():
require.NotNil(t, el1.Next())
default:
t.Fatal("nextWaitChan should have been closed")
}

select {
case <-el2.NextWaitChan():
t.Fatal("nextWaitChan should not have been closed")
default:
}
})

t.Run("removing element should close its nextWaitChan", func(t *testing.T) {
l.Remove(el2)
select {
case <-el2.NextWaitChan():
require.Nil(t, el2.Next())
default:
t.Fatal("nextWaitChan should have been closed")
}
})
}

@@ -164,7 +164,10 @@ func (mem *CListMempool) Flush() {
_ = atomic.SwapInt64(&mem.txsBytes, 0)
mem.cache.Reset()

mem.txs.Clear()
for e := mem.txs.Front(); e != nil; e = e.Next() {
mem.txs.Remove(e)
e.DetachPrev()
}

mem.txsMap.Range(func(key, _ interface{}) bool {
mem.txsMap.Delete(key)
@@ -335,6 +338,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
// - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
mem.txs.Remove(elem)
elem.DetachPrev()
mem.txsMap.Delete(mempool.TxKey(tx))
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

@@ -15,7 +15,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
abci "github.com/tendermint/tendermint/abci/types"
@@ -217,7 +216,7 @@ func TestMempoolUpdate(t *testing.T) {
}

func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
app := counter.NewApplication(true)
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
wcfg := cfg.DefaultConfig()
wcfg.Mempool.KeepInvalidTxsInCache = true
@@ -309,7 +308,7 @@ func TestTxsAvailable(t *testing.T) {
}

func TestSerialReap(t *testing.T) {
app := counter.NewApplication(true)
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)

mp, cleanup := newMempoolWithApp(cc)
@@ -508,7 +507,7 @@ func TestMempoolTxsBytes(t *testing.T) {
}

// 6. zero after tx is rechecked and removed due to not being valid anymore
app2 := counter.NewApplication(true)
app2 := kvstore.NewApplication()
cc = proxy.NewLocalClientCreator(app2)
mp, cleanup = newMempoolWithApp(cc)
defer cleanup()
@@ -540,16 +539,16 @@ func TestMempoolTxsBytes(t *testing.T) {
// Pretend like we committed nothing so txBytes gets rechecked and removed.
err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
assert.EqualValues(t, 0, mp.SizeBytes())
assert.EqualValues(t, 8, mp.SizeBytes())

// 7. Test RemoveTxByKey function
err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{})
require.NoError(t, err)
assert.EqualValues(t, 1, mp.SizeBytes())
assert.EqualValues(t, 9, mp.SizeBytes())
mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true)
assert.EqualValues(t, 1, mp.SizeBytes())
assert.EqualValues(t, 9, mp.SizeBytes())
mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true)
assert.EqualValues(t, 0, mp.SizeBytes())
assert.EqualValues(t, 8, mp.SizeBytes())

}

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"runtime/debug"
"sync"
"time"

@@ -188,6 +189,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()

@@ -312,7 +318,11 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
r.peerWG.Done()

if e := recover(); e != nil {
r.Logger.Error("recovering from broadcasting mempool loop", "err", e)
r.Logger.Error(
"recovering from broadcasting mempool loop",
"err", e,
"stack", string(debug.Stack()),
)
}
}()

@@ -74,6 +74,14 @@ type TxMempool struct {
// thread-safe priority queue.
priorityIndex *TxPriorityQueue

// heightIndex defines a height-based transaction index in ascending order,
// i.e. older transactions are first.
heightIndex *WrappedTxList

// timestampIndex defines a timestamp-based transaction index in ascending
// order, i.e. older transactions are first.
timestampIndex *WrappedTxList

// A read/write lock is used to safeguard updates, insertions and deletions
// from the mempool. A read-lock is implicitly acquired when executing CheckTx,
// however, a caller must explicitly grab a write-lock via Lock when updating
@@ -101,6 +109,12 @@ func NewTxMempool(
txStore: NewTxStore(),
gossipIndex: clist.New(),
priorityIndex: NewTxPriorityQueue(),
heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
}),
timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp)
}),
}

if cfg.CacheSize > 0 {
@@ -279,6 +293,7 @@ func (txmp *TxMempool) CheckTx(
tx: tx,
hash: txHash,
timestamp: time.Now().UTC(),
height: txmp.height,
}
txmp.initTxCallback(wtx, res, txInfo)

@@ -300,12 +315,11 @@ func (txmp *TxMempool) Flush() {
txmp.mtx.RLock()
defer txmp.mtx.RUnlock()

txmp.heightIndex.Reset()
txmp.timestampIndex.Reset()

for _, wtx := range txmp.txStore.GetAllTxs() {
if !txmp.txStore.IsTxRemoved(wtx.hash) {
txmp.txStore.RemoveTx(wtx)
txmp.priorityIndex.RemoveTx(wtx)
txmp.gossipIndex.Remove(wtx.gossipEl)
}
txmp.removeTx(wtx, false)
}

atomic.SwapInt64(&txmp.sizeBytes, 0)
@@ -443,6 +457,8 @@ func (txmp *TxMempool) Update(
}
}

txmp.purgeExpiredTxs(blockHeight)

// If there are any uncommitted transactions left in the mempool, we either
// initiate re-CheckTx per remaining transaction or notify that remaining
// transactions are left.
@@ -488,105 +504,110 @@ func (txmp *TxMempool) Update(
// - An explicit lock is NOT required.
func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) {
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
if ok {
var err error
if txmp.postCheck != nil {
err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx)
if !ok {
return
}

var err error
if txmp.postCheck != nil {
err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx)
}

if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK {
// ignore bad transactions
txmp.logger.Info(
"rejected bad transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"peer_id", txInfo.SenderNodeID,
"code", checkTxRes.CheckTx.Code,
"post_check_err", err,
)

txmp.metrics.FailedTxs.Add(1)

if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(wtx.tx)
}
if err != nil {
checkTxRes.CheckTx.MempoolError = err.Error()
}
return
}

if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil {
sender := checkTxRes.CheckTx.Sender
priority := checkTxRes.CheckTx.Priority
sender := checkTxRes.CheckTx.Sender
priority := checkTxRes.CheckTx.Priority

if len(sender) > 0 {
if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil {
txmp.logger.Error(
"rejected incoming good transaction; tx already exists for sender",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"sender", sender,
)
txmp.metrics.RejectedTxs.Add(1)
return
}
}

if err := txmp.canAddTx(wtx); err != nil {
evictTxs := txmp.priorityIndex.GetEvictableTxs(
priority,
int64(wtx.Size()),
txmp.SizeBytes(),
txmp.config.MaxTxsBytes,
)
if len(evictTxs) == 0 {
// No room for the new incoming transaction so we just remove it from
// the cache.
txmp.cache.Remove(wtx.tx)
txmp.logger.Error(
"rejected incoming good transaction; mempool full",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err.Error(),
)
txmp.metrics.RejectedTxs.Add(1)
return
}

// evict an existing transaction(s)
//
// NOTE:
// - The transaction, toEvict, can be removed while a concurrent
// reCheckTx callback is being executed for the same transaction.
for _, toEvict := range evictTxs {
txmp.removeTx(toEvict, true)
txmp.logger.Debug(
"evicted existing good transaction; mempool full",
"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
"old_priority", toEvict.priority,
"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"new_priority", wtx.priority,
)
txmp.metrics.EvictedTxs.Add(1)
}
}

wtx.gasWanted = checkTxRes.CheckTx.GasWanted
wtx.height = txmp.height
wtx.priority = priority
wtx.sender = sender
wtx.peers = map[uint16]struct{}{
txInfo.SenderID: {},
}

txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
txmp.metrics.Size.Set(float64(txmp.Size()))

txmp.insertTx(wtx)
txmp.logger.Debug(
"inserted good transaction",
"priority", wtx.priority,
if len(sender) > 0 {
if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil {
txmp.logger.Error(
"rejected incoming good transaction; tx already exists for sender",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"height", txmp.height,
"num_txs", txmp.Size(),
"sender", sender,
)
txmp.notifyTxsAvailable()

} else {
// ignore bad transactions
txmp.logger.Info(
"rejected bad transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"peer_id", txInfo.SenderNodeID,
"code", checkTxRes.CheckTx.Code,
"post_check_err", err,
)

txmp.metrics.FailedTxs.Add(1)

if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(wtx.tx)
}
txmp.metrics.RejectedTxs.Add(1)
return
}
}

if err := txmp.canAddTx(wtx); err != nil {
evictTxs := txmp.priorityIndex.GetEvictableTxs(
priority,
int64(wtx.Size()),
txmp.SizeBytes(),
txmp.config.MaxTxsBytes,
)
if len(evictTxs) == 0 {
// No room for the new incoming transaction so we just remove it from
// the cache.
txmp.cache.Remove(wtx.tx)
txmp.logger.Error(
"rejected incoming good transaction; mempool full",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err.Error(),
)
txmp.metrics.RejectedTxs.Add(1)
return
}

// evict an existing transaction(s)
//
// NOTE:
// - The transaction, toEvict, can be removed while a concurrent
// reCheckTx callback is being executed for the same transaction.
for _, toEvict := range evictTxs {
txmp.removeTx(toEvict, true)
txmp.logger.Debug(
"evicted existing good transaction; mempool full",
"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
"old_priority", toEvict.priority,
"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"new_priority", wtx.priority,
)
txmp.metrics.EvictedTxs.Add(1)
}
}

wtx.gasWanted = checkTxRes.CheckTx.GasWanted
wtx.priority = priority
wtx.sender = sender
wtx.peers = map[uint16]struct{}{
txInfo.SenderID: {},
}

txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
txmp.metrics.Size.Set(float64(txmp.Size()))

txmp.insertTx(wtx)
txmp.logger.Debug(
"inserted good transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"height", txmp.height,
"num_txs", txmp.Size(),
)
txmp.notifyTxsAvailable()

}
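The eviction branch above is the interesting part of initTxCallback: when canAddTx fails, the priority index is asked for cheaper transactions to displace. A hedged fragment showing just that decision shape — the names here are stand-ins for the fields used above, not a drop-in implementation:

    if err := canAddTx(newWtx); err != nil {
        // Ask the priority queue which lower-priority txs could be dropped
        // to make room for newWtx's bytes under the configured ceiling.
        evictTxs := priorityIndex.GetEvictableTxs(
            newWtx.priority,
            int64(newWtx.Size()),
            currentSizeBytes,
            maxTxsBytes,
        )
        if len(evictTxs) == 0 {
            // Mempool is full and nothing cheaper can be displaced: reject newWtx.
            return
        }
        for _, toEvict := range evictTxs {
            removeTx(toEvict, true) // also drops it from the cache
        }
    }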
// defaultTxCallback performs the default CheckTx application callback. This is
@@ -721,6 +742,8 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
txmp.txStore.SetTx(wtx)
txmp.priorityIndex.PushTx(wtx)
txmp.heightIndex.Insert(wtx)
txmp.timestampIndex.Insert(wtx)

// Insert the transaction into the gossip index and mark the reference to the
// linked-list element, which will be needed at a later point when the
@@ -738,10 +761,13 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {

txmp.txStore.RemoveTx(wtx)
txmp.priorityIndex.RemoveTx(wtx)
txmp.heightIndex.Remove(wtx)
txmp.timestampIndex.Remove(wtx)

// Remove the transaction from the gossip index and cleanup the linked-list
// element so it can be garbage collected.
txmp.gossipIndex.Remove(wtx.gossipEl)
wtx.gossipEl.DetachPrev()

atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size()))

@@ -750,6 +776,56 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
}
}

// purgeExpiredTxs removes all transactions that have exceeded their respective
// height and/or time based TTLs from their respective indexes. Every expired
// transaction will be removed from the mempool entirely, except for the cache.
//
// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which
// the caller has a write-lock on the mempool and so we can safely iterate over
// the height and time based indexes.
func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
now := time.Now()
expiredTxs := make(map[[mempool.TxKeySize]byte]*WrappedTx)

if txmp.config.TTLNumBlocks > 0 {
purgeIdx := -1
for i, wtx := range txmp.heightIndex.txs {
if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be purged
break
}
}

if purgeIdx >= 0 {
txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:]
}
}

if txmp.config.TTLDuration > 0 {
purgeIdx := -1
for i, wtx := range txmp.timestampIndex.txs {
if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be purged
break
}
}

if purgeIdx >= 0 {
txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:]
}
}

for _, wtx := range expiredTxs {
txmp.removeTx(wtx, false)
}
}

func (txmp *TxMempool) notifyTxsAvailable() {
if txmp.Size() == 0 {
panic("attempt to notify txs available but mempool is empty!")
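Because both TTL indexes are kept sorted oldest-first, purgeExpiredTxs can stop at the first unexpired entry and drop everything before it with a single slice cut. A self-contained sketch of that walk, with assumed names and a tiny worked example:

    package main

    import "fmt"

    // purgeExpired drops leading entries whose height has fallen more than
    // ttlBlocks behind blockHeight; the slice is assumed sorted ascending.
    func purgeExpired(heights []int64, blockHeight, ttlBlocks int64) []int64 {
        purgeIdx := -1
        for i, h := range heights {
            if blockHeight-h > ttlBlocks {
                purgeIdx = i
            } else {
                break // sorted: no later entry can be expired
            }
        }
        if purgeIdx >= 0 {
            heights = heights[purgeIdx+1:]
        }
        return heights
    }

    func main() {
        // 80 and 85 are more than 10 blocks behind height 100; 95 and 99 survive.
        fmt.Println(purgeExpired([]int64{80, 85, 95, 99}, 100, 10)) // [95 99]
    }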
@@ -3,11 +3,13 @@ package v1
import (
"bytes"
"context"
"errors"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
@@ -70,13 +72,13 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
}
}

func setup(t testing.TB, cacheSize int) *TxMempool {
func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
t.Helper()

app := &application{kvstore.NewApplication()}
cc := proxy.NewLocalClientCreator(app)

cfg := config.ResetTestRoot(t.Name())
cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
cfg.Mempool.CacheSize = cacheSize

appConnMem, err := cc.NewABCIClient()
@@ -88,7 +90,7 @@ func setup(t testing.TB, cacheSize int) *TxMempool {
require.NoError(t, appConnMem.Stop())
})

return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0)
return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}

func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
@@ -102,14 +104,10 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx
_, err := rng.Read(prefix)
require.NoError(t, err)

// sender := make([]byte, 10)
// _, err = rng.Read(sender)
// require.NoError(t, err)

priority := int64(rng.Intn(9999-1000) + 1000)

txs[i] = testTx{
tx: []byte(fmt.Sprintf("sender-%d=%X=%d", i, prefix, priority)),
tx: []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
priority: priority,
}
require.NoError(t, txmp.CheckTx(context.Background(), txs[i].tx, nil, txInfo))
@@ -176,7 +174,7 @@ func TestTxMempool_Size(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
@@ -193,14 +191,14 @@ func TestTxMempool_Size(t *testing.T) {
txmp.Unlock()

require.Equal(t, len(rawTxs)/2, txmp.Size())
require.Equal(t, int64(2750), txmp.SizeBytes())
require.Equal(t, int64(2850), txmp.SizeBytes())
}

func TestTxMempool_Flush(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
@@ -225,7 +223,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

txMap := make(map[[mempool.TxKeySize]byte]testTx)
priorities := make([]int64, len(tTxs))
@@ -252,30 +250,30 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 50)

// reap by transaction bytes only
reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Len(t, reapedTxs, 17)
require.Equal(t, int64(5690), txmp.SizeBytes())
require.GreaterOrEqual(t, len(reapedTxs), 16)

// Reap by both transaction bytes and gas, where the size yields 31 reaped
// transactions and the gas limit reaps 26 transactions.
// transactions and the gas limit reaps 25 transactions.
reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Len(t, reapedTxs, 26)
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 25)
}

func TestTxMempool_ReapMaxTxs(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

txMap := make(map[[mempool.TxKeySize]byte]testTx)
priorities := make([]int64, len(tTxs))
@@ -302,21 +300,21 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
reapedTxs := txmp.ReapMaxTxs(-1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs))

// reap a single transaction
reapedTxs = txmp.ReapMaxTxs(1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 1)

// reap half of the transactions
reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs)/2)
}

@@ -324,11 +322,17 @@ func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
txmp := setup(t, 0)

rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxsBytes+1)
tx := make([]byte, txmp.config.MaxTxBytes+1)
_, err := rng.Read(tx)
require.NoError(t, err)

require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))

tx = make([]byte, txmp.config.MaxTxBytes-1)
_, err = rng.Read(tx)
require.NoError(t, err)

require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))
}

func TestTxMempool_CheckTxSamePeer(t *testing.T) {
@@ -431,3 +435,93 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
require.Zero(t, txmp.Size())
require.Zero(t, txmp.SizeBytes())
}

func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
txmp := setup(t, 500)
txmp.height = 100
txmp.config.TTLNumBlocks = 10

tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, 100, txmp.heightIndex.Size())

// reap 5 txs at the next height -- no txs should expire
reapedTxs := txmp.ReapMaxTxs(5)
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}

txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
txmp.Unlock()

require.Equal(t, 95, txmp.Size())
require.Equal(t, 95, txmp.heightIndex.Size())

// check more txs at height 101
_ = checkTxs(t, txmp, 50, 1)
require.Equal(t, 145, txmp.Size())
require.Equal(t, 145, txmp.heightIndex.Size())

// Reap 5 txs at a height that would expire all the transactions from before
// the previous Update (height 100).
//
// NOTE: When we reap txs below, we do not know if we're picking txs from the
// initial CheckTx calls or from the second round of CheckTx calls. Thus, we
// cannot guarantee that all 95 txs are remaining that should be expired and
// removed. However, we do know that at most 95 txs can be expired and
// removed.
reapedTxs = txmp.ReapMaxTxs(5)
responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}

txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
txmp.Unlock()

require.GreaterOrEqual(t, txmp.Size(), 45)
require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
}

func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
cases := []struct {
name string
err error
}{
{
name: "error",
err: errors.New("test error"),
},
{
name: "no error",
err: nil,
},
}
for _, tc := range cases {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
return testCase.err
}
txmp := setup(t, 0, WithPostCheck(postCheckFn))
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes-1)
_, err := rng.Read(tx)
require.NoError(t, err)

callback := func(res *abci.Response) {
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
require.True(t, ok)
expectedErrString := ""
if testCase.err != nil {
expectedErrString = testCase.err.Error()
}
require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
}
require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, mempool.TxInfo{SenderID: 0}))
})
}
}

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"runtime/debug"
"sync"
"time"

@@ -187,6 +188,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()

@@ -311,7 +317,11 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
r.peerWG.Done()

if e := recover(); e != nil {
r.Logger.Error("recovering from broadcasting mempool loop", "err", e)
r.Logger.Error(
"recovering from broadcasting mempool loop",
"err", e,
"stack", string(debug.Stack()),
)
}
}()

@@ -1,6 +1,7 @@
package v1

import (
"sort"
"time"

"github.com/tendermint/tendermint/internal/libs/clist"
@@ -198,3 +199,84 @@ func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID ui
wtx.peers[peerID] = struct{}{}
return wtx, false
}

// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
// used to build generic transaction indexes in the mempool. It accepts a
// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
// references and is used during Insert to determine sorted order. If less
// returns true, a <= b.
type WrappedTxList struct {
mtx tmsync.RWMutex
txs []*WrappedTx
less func(*WrappedTx, *WrappedTx) bool
}

func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
return &WrappedTxList{
txs: make([]*WrappedTx, 0),
less: less,
}
}

// Size returns the number of WrappedTx objects in the list.
func (wtl *WrappedTxList) Size() int {
wtl.mtx.RLock()
defer wtl.mtx.RUnlock()

return len(wtl.txs)
}

// Reset resets the list of transactions to an empty list.
func (wtl *WrappedTxList) Reset() {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()

wtl.txs = make([]*WrappedTx, 0)
}

// Insert inserts a WrappedTx reference into the sorted list based on the list's
// comparator function.
func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()

i := sort.Search(len(wtl.txs), func(i int) bool {
return wtl.less(wtl.txs[i], wtx)
})

if i == len(wtl.txs) {
// insert at the end
wtl.txs = append(wtl.txs, wtx)
return
}

// Make space for the inserted element by shifting values at the insertion
// index up one index.
//
// NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
wtl.txs[i] = wtx
}

// Remove attempts to remove a WrappedTx from the sorted list.
func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()

i := sort.Search(len(wtl.txs), func(i int) bool {
return wtl.less(wtl.txs[i], wtx)
})

// Since the list is sorted, we evaluate all elements starting at i. Note, if
// the element does not exist, we may potentially evaluate the entire remainder
// of the list. However, a caller should not be expected to call Remove with a
// non-existing element.
for i < len(wtl.txs) {
if wtl.txs[i] == wtx {
wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
return
}

i++
}
}

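A hedged usage sketch of the WrappedTxList defined above, building the same height-ordered index the mempool constructs in NewTxMempool; the WrappedTx values are minimal stand-ins:

    heightIndex := NewWrappedTxList(func(a, b *WrappedTx) bool {
        return a.height >= b.height
    })

    wtx := &WrappedTx{height: 7}
    heightIndex.Insert(&WrappedTx{height: 3})
    heightIndex.Insert(wtx)
    heightIndex.Insert(&WrappedTx{height: 5}) // binary-searched into place between 3 and 7

    heightIndex.Remove(wtx)
    // heightIndex.Size() == 2; the list stays sorted ascending by height.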
@@ -2,6 +2,8 @@ package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -132,3 +134,97 @@ func TestTxStore_Size(t *testing.T) {

	require.Equal(t, numTxs, txStore.Size())
}

func TestWrappedTxList_Reset(t *testing.T) {
	list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
		return wtx1.height >= wtx2.height
	})

	require.Zero(t, list.Size())

	for i := 0; i < 100; i++ {
		list.Insert(&WrappedTx{height: int64(i)})
	}

	require.Equal(t, 100, list.Size())

	list.Reset()
	require.Zero(t, list.Size())
}

func TestWrappedTxList_Insert(t *testing.T) {
	list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
		return wtx1.height >= wtx2.height
	})

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	var expected []int
	for i := 0; i < 100; i++ {
		height := rng.Int63n(10000)
		expected = append(expected, int(height))
		list.Insert(&WrappedTx{height: height})

		if i%10 == 0 {
			list.Insert(&WrappedTx{height: height})
			expected = append(expected, int(height))
		}
	}

	got := make([]int, list.Size())
	for i, wtx := range list.txs {
		got[i] = int(wtx.height)
	}

	sort.Ints(expected)
	require.Equal(t, expected, got)
}

func TestWrappedTxList_Remove(t *testing.T) {
	list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
		return wtx1.height >= wtx2.height
	})

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	var txs []*WrappedTx
	for i := 0; i < 100; i++ {
		height := rng.Int63n(10000)
		tx := &WrappedTx{height: height}

		txs = append(txs, tx)
		list.Insert(tx)

		if i%10 == 0 {
			tx = &WrappedTx{height: height}
			list.Insert(tx)
			txs = append(txs, tx)
		}
	}

	// remove a tx that does not exist
	list.Remove(&WrappedTx{height: 20000})

	// remove a tx that exists (by height) but is not the same reference
	list.Remove(&WrappedTx{height: txs[0].height})

	// remove a few existing txs
	for i := 0; i < 25; i++ {
		j := rng.Intn(len(txs))
		list.Remove(txs[j])
		txs = append(txs[:j], txs[j+1:]...)
	}

	expected := make([]int, len(txs))
	for i, tx := range txs {
		expected[i] = int(tx.height)
	}

	got := make([]int, list.Size())
	for i, wtx := range list.txs {
		got[i] = int(wtx.height)
	}

	sort.Ints(expected)
	require.Equal(t, expected, got)
}
@@ -421,7 +421,6 @@ func (c *MConnection) CanSend(chID byte) bool {
// sendRoutine polls for packets to send from channels.
func (c *MConnection) sendRoutine() {
	defer c._recover()

	protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)

FOR_LOOP:
@@ -1,4 +1,4 @@
// Code generated by mockery 2.7.4. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery 2.7.4. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery 2.7.4. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks

34
internal/p2p/pex/doc.go
Normal file
@@ -0,0 +1,34 @@
/*
Package PEX (Peer exchange) handles all the logic necessary for nodes to share
information about their peers to other nodes. Specifically, this is the exchange
of addresses that a peer can use to discover more peers within the network.

The PEX reactor is a continuous service which periodically requests addresses
and serves addresses to other peers. There are two versions of this service
aligning with the two p2p frameworks that Tendermint currently supports.

V1 is coupled with the Switch (which handles peer connections and routing of
messages) and, alongside exchanging peer information in the form of port/IP
pairs, also has the responsibility of dialing peers and ensuring that a
node has a sufficient amount of peers connected.

V2 is embedded with the new p2p stack and uses the peer manager to advertise
peers as well as add new peers to the peer store. The V2 reactor passes a
different set of proto messages which include a list of
[urls](https://golang.org/pkg/net/url/#URL). These can be used to save a set of
endpoints that each peer uses. The V2 reactor is backwards compatible with
V1 and can also handle V1 messages.

The V2 reactor is able to tweak the intensity of its search by decreasing or
increasing the interval between each request. It tracks connected peers via a
linked list, sending a request to the node at the front of the list and adding
it to the back of the list once a response is received. Using this method, a
node is able to spread out the load of requesting peers across all the peers it
is currently connected with.

With each inbound set of addresses, the reactor monitors the ratio of new
addresses to already-seen addresses and uses this information to dynamically
build a picture of the size of the network in order to ascertain how often the
node needs to search for new peers.
*/
package pex
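The front-to-back polling the doc describes can be sketched as follows (a simplified slice-backed queue; peerRing and its methods are illustrative names, not the reactor's actual API):

	// A minimal sketch of the front-to-back polling described above.
	type peerRing struct {
		peers []string
	}

	// next pops the peer at the front; the caller re-appends it via done()
	// once a response arrives, so load spreads evenly across all peers.
	func (r *peerRing) next() (string, bool) {
		if len(r.peers) == 0 {
			return "", false
		}
		p := r.peers[0]
		r.peers = r.peers[1:]
		return p, true
	}

	func (r *peerRing) done(p string) { r.peers = append(r.peers, p) }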
@@ -3,6 +3,7 @@ package pex
import (
	"context"
	"fmt"
	"runtime/debug"
	"sync"
	"time"
@@ -367,6 +368,11 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -257,7 +257,11 @@ func (s *pqScheduler) process() {
			s.metrics.PeerSendBytesTotal.With(
				"chID", chIDStr,
				"peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
			s.dequeueCh <- pqEnv.envelope
			select {
			case s.dequeueCh <- pqEnv.envelope:
			case <-s.closer.Done():
				return
			}
		}

	case <-s.closer.Done():
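The fix above swaps a bare send on dequeueCh for a select against the closer, so a full dequeue channel can no longer block shutdown. The pattern in isolation (a sketch with hypothetical names):

	// sendOrQuit is a sketch of the non-wedging send: if nobody is
	// draining out, the goroutine still exits as soon as done is closed.
	func sendOrQuit(out chan<- int, done <-chan struct{}, v int) bool {
		select {
		case out <- v:
			return true
		case <-done:
			return false
		}
	}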
39
internal/p2p/pqueue_test.go
Normal file
@@ -0,0 +1,39 @@
package p2p

import (
	"testing"
	"time"

	"github.com/tendermint/tendermint/libs/log"
)

func TestCloseWhileDequeueFull(t *testing.T) {
	enqueueLength := 5
	chDescs := []ChannelDescriptor{
		{ID: 0x01, Priority: 1, MaxSendBytes: 4},
	}
	pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120)

	for i := 0; i < enqueueLength; i++ {
		pqueue.enqueue() <- Envelope{
			channelID: 0x01,
			Message:   &testMessage{Value: "foo"}, // 5 bytes
		}
	}

	go pqueue.process()

	// sleep to allow context switch for process() to run
	time.Sleep(10 * time.Millisecond)
	doneCh := make(chan struct{})
	go func() {
		pqueue.close()
		close(doneCh)
	}()

	select {
	case <-doneCh:
	case <-time.After(2 * time.Second):
		t.Fatal("pqueue failed to close")
	}
}
@@ -476,8 +476,10 @@ func (r *Router) routeChannel(
			}

			if !contains {
				r.logger.Error("tried to send message across a channel that the peer doesn't have available",
					"peer", envelope.To, "channel", chID)
				// reactor tried to send a message across a channel that the
				// peer doesn't have available. This is a known issue due to
				// how peer subscriptions work:
				// https://github.com/tendermint/tendermint/issues/6598
				continue
			}

@@ -1021,6 +1023,15 @@ func (r *Router) NodeInfo() types.NodeInfo {

// OnStart implements service.Service.
func (r *Router) OnStart() error {
	netAddr, _ := r.nodeInfo.NetAddress()
	r.Logger.Info(
		"starting router",
		"node_id", r.nodeInfo.NodeID,
		"channels", r.nodeInfo.Channels,
		"listen_addr", r.nodeInfo.ListenAddr,
		"net_addr", netAddr,
	)

	go r.dialPeers()
	go r.evictPeers()

@@ -151,9 +151,6 @@ func TestMConnTransport_Listen(t *testing.T) {
				[]*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}},
				p2p.MConnTransportOptions{},
			)
			t.Cleanup(func() {
				_ = transport.Close()
			})

			// Transport should not listen on any endpoints yet.
			require.Empty(t, transport.Endpoints())
@@ -166,19 +163,6 @@ func TestMConnTransport_Listen(t *testing.T) {
			}
			require.NoError(t, err)

			// Start a goroutine to just accept any connections.
			go func() {
				for {
					conn, err := transport.Accept()
					if err != nil {
						return
					}
					defer func() {
						_ = conn.Close()
					}()
				}
			}()

			// Check the endpoint.
			endpoints := transport.Endpoints()
			require.Len(t, endpoints, 1)
@@ -195,14 +179,40 @@ func TestMConnTransport_Listen(t *testing.T) {
			require.NotZero(t, endpoint.Port)
			require.Empty(t, endpoint.Path)

			// Dialing the endpoint should work.
			conn, err := transport.Dial(ctx, endpoint)
			dialedChan := make(chan struct{})

			var peerConn p2p.Connection
			go func() {
				// Dialing the endpoint should work.
				var err error
				peerConn, err = transport.Dial(ctx, endpoint)
				require.NoError(t, err)
				close(dialedChan)
			}()

			conn, err := transport.Accept()
			require.NoError(t, err)
			require.NoError(t, conn.Close())
			_ = conn.Close()
			<-dialedChan

			time.Sleep(time.Minute)
			// closing the connection should not error
			require.NoError(t, peerConn.Close())

			// trying to read from the connection should error
			_, _, err = peerConn.ReceiveMessage()
			require.Error(t, err)

			// Trying to listen again should error.
			err = transport.Listen(tc.endpoint)
			require.Error(t, err)

			// close the transport
			_ = transport.Close()

			// Dialing the closed endpoint should error
			_, err = transport.Dial(ctx, endpoint)
			require.Error(t, err)
		})
	}
}

@@ -403,6 +403,9 @@ func TestConnection_SendReceive(t *testing.T) {
	_, _, err = ab.ReceiveMessage()
	require.Error(t, err)
	require.Equal(t, io.EOF, err)
	_, err = ab.TrySendMessage(chID, []byte("closed try"))
	require.Error(t, err)
	require.Equal(t, io.EOF, err)
	_, err = ab.SendMessage(chID, []byte("closed"))
	require.Error(t, err)
	require.Equal(t, io.EOF, err)
@@ -410,6 +413,9 @@ func TestConnection_SendReceive(t *testing.T) {
	_, _, err = ba.ReceiveMessage()
	require.Error(t, err)
	require.Equal(t, io.EOF, err)
	_, err = ba.TrySendMessage(chID, []byte("closed try"))
	require.Error(t, err)
	require.Equal(t, io.EOF, err)
	_, err = ba.SendMessage(chID, []byte("closed"))
	require.Error(t, err)
	require.Equal(t, io.EOF, err)

@@ -23,9 +23,10 @@ type blockQueue struct {
	verifyHeight int64

	// termination conditions
	stopHeight int64
	stopTime   time.Time
	terminal   *types.LightBlock
	initialHeight int64
	stopHeight    int64
	stopTime      time.Time
	terminal      *types.LightBlock

	// track failed heights so we know what blocks to try to fetch again
	failed *maxIntHeap
@@ -45,21 +46,22 @@ type blockQueue struct {
}

func newBlockQueue(
	startHeight, stopHeight int64,
	startHeight, stopHeight, initialHeight int64,
	stopTime time.Time,
	maxRetries int,
) *blockQueue {
	return &blockQueue{
		stopHeight:   stopHeight,
		stopTime:     stopTime,
		fetchHeight:  startHeight,
		verifyHeight: startHeight,
		pending:      make(map[int64]lightBlockResponse),
		failed:       &maxIntHeap{},
		retries:      0,
		maxRetries:   maxRetries,
		waiters:      make([]chan int64, 0),
		doneCh:       make(chan struct{}),
		stopHeight:    stopHeight,
		initialHeight: initialHeight,
		stopTime:      stopTime,
		fetchHeight:   startHeight,
		verifyHeight:  startHeight,
		pending:       make(map[int64]lightBlockResponse),
		failed:        &maxIntHeap{},
		retries:       0,
		maxRetries:    maxRetries,
		waiters:       make([]chan int64, 0),
		doneCh:        make(chan struct{}),
	}
}

@@ -93,9 +95,10 @@ func (q *blockQueue) add(l lightBlockResponse) {
		q.pending[l.block.Height] = l
	}

	// Lastly, if the incoming block is past the stop time and stop height then
	// we mark it as the terminal block
	if l.block.Height <= q.stopHeight && l.block.Time.Before(q.stopTime) {
	// Lastly, if the incoming block is past the stop time and stop height or
	// is equal to the initial height then we mark it as the terminal block.
	if l.block.Height <= q.stopHeight && l.block.Time.Before(q.stopTime) ||
		l.block.Height == q.initialHeight {
		q.terminal = l.block
	}
}
@@ -115,7 +118,7 @@ func (q *blockQueue) nextHeight() <-chan int64 {
		return ch
	}

	if q.terminal == nil {
	if q.terminal == nil && q.fetchHeight >= q.initialHeight {
		// return and decrement the fetch height
		ch <- q.fetchHeight
		q.fetchHeight--

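Written out as a standalone predicate, the new terminal condition is (a sketch; no such helper exists in the change itself):

	// isTerminal reports whether backfill can stop at this block: either we
	// have gone past both the stop height and stop time, or we have walked
	// all the way back to the chain's initial height.
	func isTerminal(height, stopHeight, initialHeight int64, blockTime, stopTime time.Time) bool {
		return (height <= stopHeight && blockTime.Before(stopTime)) || height == initialHeight
	}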
@@ -25,7 +25,7 @@ func TestBlockQueueBasic(t *testing.T) {
	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)

	queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
	queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
	wg := &sync.WaitGroup{}

	// asynchronously fetch blocks and add them to the queue
@@ -72,7 +72,7 @@ func TestBlockQueueWithFailures(t *testing.T) {
	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)

	queue := newBlockQueue(startHeight, stopHeight, stopTime, 200)
	queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 200)
	wg := &sync.WaitGroup{}

	failureRate := 4
@@ -121,7 +121,7 @@ func TestBlockQueueWithFailures(t *testing.T) {
func TestBlockQueueBlocks(t *testing.T) {
	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)
	queue := newBlockQueue(startHeight, stopHeight, stopTime, 2)
	queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2)
	expectedHeight := startHeight
	retryHeight := stopHeight + 2

@@ -168,7 +168,7 @@ loop:
func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) {
	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)
	queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
	queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
	defer queue.close()

loop:
@@ -194,7 +194,7 @@ func TestBlockQueueStopTime(t *testing.T) {
	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)

	queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
	queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
	wg := &sync.WaitGroup{}

	baseTime := stopTime.Add(-50 * time.Second)
@@ -233,6 +233,46 @@ func TestBlockQueueStopTime(t *testing.T) {
	}
}

func TestBlockQueueInitialHeight(t *testing.T) {
	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)
	const initialHeight int64 = 120

	queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, 1)
	wg := &sync.WaitGroup{}

	// asynchronously fetch blocks and add them to the queue
	for i := 0; i <= numWorkers; i++ {
		wg.Add(1)
		go func() {
			for {
				select {
				case height := <-queue.nextHeight():
					require.GreaterOrEqual(t, height, initialHeight)
					queue.add(mockLBResp(t, peerID, height, endTime))
				case <-queue.done():
					wg.Done()
					return
				}
			}
		}()
	}

loop:
	for {
		select {
		case <-queue.done():
			wg.Wait()
			require.NoError(t, queue.error())
			break loop

		case resp := <-queue.verifyNext():
			require.GreaterOrEqual(t, resp.block.Height, initialHeight)
			queue.success(resp.block.Height)
		}
	}
}

func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse {
	return lightBlockResponse{
		block: mockLB(t, height, time, factory.MakeBlockID()),
@@ -45,23 +45,26 @@ func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispat
	}
}

// LightBlock uses the request channel to fetch a light block from the next peer
// in a list, tracks the call and waits for the reactor to pass along the response
func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, types.NodeID, error) {
	d.mtx.Lock()
	outgoingCalls := len(d.calls)
	d.mtx.Unlock()

	// check to see that the dispatcher is connected to at least one peer
	if d.availablePeers.Len() == 0 && outgoingCalls == 0 {
	if d.availablePeers.Len() == 0 && len(d.calls) == 0 {
		d.mtx.Unlock()
		return nil, "", errNoConnectedPeers
	}
	d.mtx.Unlock()

	// fetch the next peer id in the list and request a light block from that
	// peer
	peer := d.availablePeers.Pop()
	peer := d.availablePeers.Pop(ctx)
	lb, err := d.lightBlock(ctx, height, peer)
	return lb, peer, err
}

// Providers turns the dispatcher into a set of providers (per peer) which can
// be used by a light client
func (d *dispatcher) Providers(chainID string, timeout time.Duration) []provider.Provider {
	d.mtx.Lock()
	defer d.mtx.Unlock()
@@ -272,7 +275,7 @@ func (l *peerlist) Len() int {
	return len(l.peers)
}

func (l *peerlist) Pop() types.NodeID {
func (l *peerlist) Pop(ctx context.Context) types.NodeID {
	l.mtx.Lock()
	if len(l.peers) == 0 {
		// if we don't have any peers in the list we block until a peer is
@@ -281,8 +284,13 @@ func (l *peerlist) Pop() types.NodeID {
		l.waiting = append(l.waiting, wait)
		// unlock whilst waiting so that the list can be appended to
		l.mtx.Unlock()
		peer := <-wait
		return peer
		select {
		case peer := <-wait:
			return peer

		case <-ctx.Done():
			return ""
		}
	}

	peer := l.peers[0]
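Pop now takes a context so a caller blocked on an empty peer list can be released by cancellation. The underlying pattern, reduced to a sketch with hypothetical names:

	// waitFirst blocks until a value arrives on wait or the context is
	// canceled, in which case it returns the zero value.
	func waitFirst(ctx context.Context, wait <-chan string) string {
		select {
		case v := <-wait:
			return v
		case <-ctx.Done():
			return ""
		}
	}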
@@ -8,6 +8,7 @@ import (
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@@ -17,6 +18,7 @@ import (
)

func TestDispatcherBasic(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	ch := make(chan p2p.Envelope, 100)
	closeCh := make(chan struct{})
@@ -49,7 +51,83 @@ func TestDispatcherBasic(t *testing.T) {
	wg.Wait()
}

func TestDispatcherReturnsNoBlock(t *testing.T) {
	t.Cleanup(leaktest.Check(t))
	ch := make(chan p2p.Envelope, 100)
	d := newDispatcher(ch, 1*time.Second)
	peerFromSet := createPeerSet(1)[0]
	d.addPeer(peerFromSet)
	doneCh := make(chan struct{})

	go func() {
		<-ch
		require.NoError(t, d.respond(nil, peerFromSet))
		close(doneCh)
	}()

	lb, peerResult, err := d.LightBlock(context.Background(), 1)
	<-doneCh

	require.Nil(t, lb)
	require.Nil(t, err)
	require.Equal(t, peerFromSet, peerResult)
}

func TestDispatcherErrorsWhenNoPeers(t *testing.T) {
	t.Cleanup(leaktest.Check(t))
	ch := make(chan p2p.Envelope, 100)
	d := newDispatcher(ch, 1*time.Second)

	lb, peerResult, err := d.LightBlock(context.Background(), 1)

	require.Nil(t, lb)
	require.Empty(t, peerResult)
	require.Equal(t, errNoConnectedPeers, err)
}

func TestDispatcherReturnsBlockOncePeerAvailable(t *testing.T) {
	t.Cleanup(leaktest.Check(t))
	dispatcherRequestCh := make(chan p2p.Envelope, 100)
	d := newDispatcher(dispatcherRequestCh, 1*time.Second)
	peerFromSet := createPeerSet(1)[0]
	d.addPeer(peerFromSet)
	ctx := context.Background()
	wrapped, cancelFunc := context.WithCancel(ctx)

	doneCh := make(chan struct{})
	go func() {
		lb, peerResult, err := d.LightBlock(wrapped, 1)
		require.Nil(t, lb)
		require.Equal(t, peerFromSet, peerResult)
		require.Nil(t, err)

		// calls to dispatcher.LightBlock write into the dispatcher's requestCh.
		// we read from the requestCh here to unblock the requestCh for future
		// calls.
		<-dispatcherRequestCh
		close(doneCh)
	}()
	cancelFunc()
	<-doneCh

	go func() {
		<-dispatcherRequestCh
		lb := &types.LightBlock{}
		asProto, err := lb.ToProto()
		require.Nil(t, err)
		err = d.respond(asProto, peerFromSet)
		require.Nil(t, err)
	}()

	lb, peerResult, err := d.LightBlock(context.Background(), 1)

	require.NotNil(t, lb)
	require.Equal(t, peerFromSet, peerResult)
	require.Nil(t, err)
}

func TestDispatcherProviders(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	ch := make(chan p2p.Envelope, 100)
	chainID := "state-sync-test"
@@ -78,6 +156,7 @@ func TestDispatcherProviders(t *testing.T) {
}

func TestPeerListBasic(t *testing.T) {
	t.Cleanup(leaktest.Check(t))
	peerList := newPeerList()
	assert.Zero(t, peerList.Len())
	numPeers := 10
@@ -95,7 +174,7 @@ func TestPeerListBasic(t *testing.T) {

	half := numPeers / 2
	for i := 0; i < half; i++ {
		assert.Equal(t, peerSet[i], peerList.Pop())
		assert.Equal(t, peerSet[i], peerList.Pop(ctx))
	}
	assert.Equal(t, half, peerList.Len())

@@ -104,11 +183,56 @@ func TestPeerListBasic(t *testing.T) {

	peerList.Remove(peerSet[half])
	half++
	assert.Equal(t, peerSet[half], peerList.Pop())
	assert.Equal(t, peerSet[half], peerList.Pop(ctx))

}

func TestPeerListBlocksWhenEmpty(t *testing.T) {
	t.Cleanup(leaktest.Check(t))
	peerList := newPeerList()
	require.Zero(t, peerList.Len())
	doneCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		peerList.Pop(ctx)
		close(doneCh)
	}()
	select {
	case <-doneCh:
		t.Error("empty peer list should not have returned result")
	case <-time.After(100 * time.Millisecond):
	}
}

func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) {
	t.Cleanup(leaktest.Check(t))
	peerList := newPeerList()
	require.Zero(t, peerList.Len())
	doneCh := make(chan struct{})
	ctx := context.Background()
	wrapped, cancel := context.WithCancel(ctx)
	go func() {
		peerList.Pop(wrapped)
		close(doneCh)
	}()
	select {
	case <-doneCh:
		t.Error("empty peer list should not have returned result")
	case <-time.After(100 * time.Millisecond):
	}

	cancel()

	select {
	case <-doneCh:
	case <-time.After(100 * time.Millisecond):
		t.Error("peer list should have returned after context canceled")
	}
}

func TestPeerListConcurrent(t *testing.T) {
	t.Cleanup(leaktest.Check(t))
	peerList := newPeerList()
	numPeers := 10

@@ -117,7 +241,7 @@ func TestPeerListConcurrent(t *testing.T) {
	// peer list hasn't been populated, so each of these goroutines should block
	for i := 0; i < numPeers/2; i++ {
		go func() {
			_ = peerList.Pop()
			_ = peerList.Pop(ctx)
			wg.Done()
		}()
	}
@@ -132,7 +256,7 @@ func TestPeerListConcurrent(t *testing.T) {
	// we request the second half of the peer set
	for i := 0; i < numPeers/2; i++ {
		go func() {
			_ = peerList.Pop()
			_ = peerList.Pop(ctx)
			wg.Done()
		}()
	}

50
internal/statesync/mock_sync_reactor.go
Normal file
@@ -0,0 +1,50 @@
package statesync

import (
	"context"
	"time"

	mock "github.com/stretchr/testify/mock"
	state "github.com/tendermint/tendermint/state"
)

// MockSyncReactor is an autogenerated mock type for the SyncReactor type.
// Because Sync() takes a StateProvider from this package, the mock lives in
// package statesync rather than in the mocks package.
type MockSyncReactor struct {
	mock.Mock
}

// Backfill provides a mock function with given fields: _a0
func (_m *MockSyncReactor) Backfill(_a0 state.State) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(state.State) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Sync provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockSyncReactor) Sync(_a0 context.Context, _a1 StateProvider, _a2 time.Duration) (state.State, error) {
	ret := _m.Called(_a0, _a1, _a2)

	var r0 state.State
	if rf, ok := ret.Get(0).(func(context.Context, StateProvider, time.Duration) state.State); ok {
		r0 = rf(_a0, _a1, _a2)
	} else {
		r0 = ret.Get(0).(state.State)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, StateProvider, time.Duration) error); ok {
		r1 = rf(_a0, _a1, _a2)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
@@ -6,6 +6,7 @@ import (
	"errors"
	"fmt"
	"reflect"
	"runtime/debug"
	"sort"
	"time"

@@ -94,13 +95,19 @@ const (

	// lightBlockResponseTimeout is how long the dispatcher waits for a peer to
	// return a light block
	lightBlockResponseTimeout = 10 * time.Second
	lightBlockResponseTimeout = 30 * time.Second

	// maxLightBlockRequestRetries is the number of retries acceptable before
	// the backfill process aborts
	maxLightBlockRequestRetries = 20
)

// SyncReactor defines the interface used to test the abilities of node.startStateSync.
type SyncReactor interface {
	Sync(context.Context, StateProvider, time.Duration) (sm.State, error)
	Backfill(sm.State) error
}

// Reactor handles state sync, both restoring snapshots for the local node and
// serving snapshots for other nodes.
type Reactor struct {
@@ -221,6 +228,11 @@ func (r *Reactor) Sync(
		return sm.State{}, errors.New("a state sync is already in progress")
	}

	if stateProvider == nil {
		r.mtx.Unlock()
		return sm.State{}, errors.New("the stateProvider should not be nil when doing the state sync")
	}

	r.syncer = newSyncer(
		r.cfg,
		r.Logger,
@@ -280,7 +292,9 @@ func (r *Reactor) Backfill(state sm.State) error {
	return r.backfill(
		context.Background(),
		state.ChainID,
		state.LastBlockHeight, stopHeight,
		state.LastBlockHeight,
		stopHeight,
		state.InitialHeight,
		state.LastBlockID,
		stopTime,
	)
@@ -289,7 +303,7 @@ func (r *Reactor) Backfill(state sm.State) error {
func (r *Reactor) backfill(
	ctx context.Context,
	chainID string,
	startHeight, stopHeight int64,
	startHeight, stopHeight, initialHeight int64,
	trustedBlockID types.BlockID,
	stopTime time.Time,
) error {
@@ -302,7 +316,7 @@ func (r *Reactor) backfill(
		lastChangeHeight int64 = startHeight
	)

	queue := newBlockQueue(startHeight, stopHeight, stopTime, maxLightBlockRequestRetries)
	queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, maxLightBlockRequestRetries)

	// fetch light blocks across four workers. The aim with deploying concurrent
	// workers is to equate the network messaging time with the verification
@@ -310,12 +324,17 @@ func (r *Reactor) backfill(
	// waiting on blocks. If it takes 4s to retrieve a block and 1s to verify
	// it, then steady state involves four workers.
	for i := 0; i < int(r.cfg.Fetchers); i++ {
		ctxWithCancel, cancel := context.WithCancel(ctx)
		defer cancel()
		go func() {
			for {
				select {
				case height := <-queue.nextHeight():
					r.Logger.Debug("fetching next block", "height", height)
					lb, peer, err := r.dispatcher.LightBlock(ctx, height)
					lb, peer, err := r.dispatcher.LightBlock(ctxWithCancel, height)
					if errors.Is(err, context.Canceled) {
						return
					}
					if err != nil {
						queue.retry(height)
						if errors.Is(err, errNoConnectedPeers) {
@@ -332,8 +351,8 @@ func (r *Reactor) backfill(
					if lb == nil {
						r.Logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height)
						queue.retry(height)
						// as we are fetching blocks backwards, if this node doesn't have the block it likely doesn't
						// have any prior ones, thus we remove it from the peer list
						// As we are fetching blocks backwards, if this node doesn't have the block it likely doesn't
						// have any prior ones, thus we remove it from the peer list.
						r.dispatcher.removePeer(peer)
						continue
					}
@@ -452,10 +471,11 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error {
		}

		for _, snapshot := range snapshots {
			logger.Debug(
			logger.Info(
				"advertising snapshot",
				"height", snapshot.Height,
				"format", snapshot.Format,
				"peer", envelope.From,
			)
			r.snapshotCh.Out <- p2p.Envelope{
				To: envelope.From,
@@ -622,7 +642,6 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error {
	case *ssproto.LightBlockResponse:
		if err := r.dispatcher.respond(msg.LightBlock, envelope.From); err != nil {
			r.Logger.Error("error processing light block response", "err", err)
			return err
		}

	default:
@@ -639,6 +658,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -3,12 +3,11 @@ package statesync
import (
	"context"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	// "github.com/fortytw2/leaktest"
	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"
@@ -421,11 +420,11 @@ func TestReactor_Dispatcher(t *testing.T) {

func TestReactor_Backfill(t *testing.T) {
	// test backfill algorithm with varying failure rates [0, 10]
	failureRates := []int{0, 3, 9}
	failureRates := []int{0, 2, 9}
	for _, failureRate := range failureRates {
		failureRate := failureRate
		t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) {
			// t.Cleanup(leaktest.Check(t))
			t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute))
			rts := setup(t, nil, nil, nil, 21)

			var (
@@ -464,10 +463,11 @@ func TestReactor_Backfill(t *testing.T) {
				factory.DefaultTestChainID,
				startHeight,
				stopHeight,
				1,
				factory.MakeBlockIDWithHash(chain[startHeight].Header.Hash()),
				stopTime,
			)
			if failureRate > 5 {
			if failureRate > 3 {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
@@ -506,6 +506,7 @@ func handleLightBlockRequests(t *testing.T,
	close chan struct{},
	failureRate int) {
	requests := 0
	errorCount := 0
	for {
		select {
		case envelope := <-receiving:
@@ -520,7 +521,7 @@ func handleLightBlockRequests(t *testing.T,
					},
				}
			} else {
				switch rand.Intn(3) {
				switch errorCount % 3 {
				case 0: // send a different block
					differntLB, err := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID()).ToProto()
					require.NoError(t, err)
@@ -539,6 +540,7 @@ func handleLightBlockRequests(t *testing.T,
					}
				case 2: // don't do anything
				}
				errorCount++
			}
		}
	case <-close:

@@ -80,12 +80,12 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool {
// snapshot height is verified using the light client, and the expected app hash
// is set for the snapshot.
func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	defer cancel()

	appHash, err := p.stateProvider.AppHash(ctx, snapshot.Height)
	if err != nil {
		return false, err
		return false, fmt.Errorf("failed to get app hash: %w", err)
	}
	snapshot.trustedAppHash = appHash
	key := snapshot.Key()

@@ -94,7 +94,7 @@ func NewLightClientStateProviderFromDispatcher(
	trustOptions light.TrustOptions,
	logger log.Logger,
) (StateProvider, error) {
	providers := dispatcher.Providers(chainID, 10*time.Second)
	providers := dispatcher.Providers(chainID, 30*time.Second)
	if len(providers) < 2 {
		return nil, fmt.Errorf("at least 2 peers are required, got %d", len(providers))
	}

@@ -38,12 +38,16 @@ var (
	errRejectFormat = errors.New("snapshot format was rejected")
	// errRejectSender is returned by Sync() when the snapshot sender is rejected.
	errRejectSender = errors.New("snapshot sender was rejected")
	// errVerifyFailed is returned by Sync() when app hash or last height verification fails.
	// errVerifyFailed is returned by Sync() when app hash or last height
	// verification fails.
	errVerifyFailed = errors.New("verification failed")
	// errTimeout is returned by Sync() when we've waited too long to receive a chunk.
	errTimeout = errors.New("timed out waiting for chunk")
	// errNoSnapshots is returned by SyncAny() if no snapshots are found and discovery is disabled.
	errNoSnapshots = errors.New("no suitable snapshots found")
	// errStateCommitTimeout is returned by Sync() when the timeout for retrieving
	// tendermint state or the commit is exceeded
	errStateCommitTimeout = errors.New("timed out trying to retrieve state and commit")
)

// syncer runs a state sync against an ABCI app. Use either SyncAny() to automatically attempt to
@@ -226,6 +230,10 @@ func (s *syncer) SyncAny(
			s.logger.Info("Snapshot sender rejected", "peer", peer)
		}

	case errors.Is(err, errStateCommitTimeout):
		s.logger.Info("Timed out retrieving state and commit, rejecting and retrying...", "height", snapshot.Height)
		s.snapshots.Reject(snapshot)

	default:
		return sm.State{}, nil, fmt.Errorf("snapshot restoration failed: %w", err)
	}
@@ -269,16 +277,26 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu
		go s.fetchChunks(fetchCtx, snapshot, chunks)
	}

	pctx, pcancel := context.WithTimeout(ctx, 10*time.Second)
	pctx, pcancel := context.WithTimeout(ctx, 30*time.Second)
	defer pcancel()

	// Optimistically build new state, so we don't discover any light client failures at the end.
	state, err := s.stateProvider.State(pctx, snapshot.Height)
	if err != nil {
		// check if the provider context exceeded its deadline
		if err == context.DeadlineExceeded && ctx.Err() == nil {
			return sm.State{}, nil, errStateCommitTimeout
		}

		return sm.State{}, nil, fmt.Errorf("failed to build new state: %w", err)
	}
	commit, err := s.stateProvider.Commit(pctx, snapshot.Height)
	if err != nil {
		// check if the provider context exceeded its deadline
		if err == context.DeadlineExceeded && ctx.Err() == nil {
			return sm.State{}, nil, errStateCommitTimeout
		}

		return sm.State{}, nil, fmt.Errorf("failed to fetch commit: %w", err)
	}

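The `err == context.DeadlineExceeded && ctx.Err() == nil` checks above distinguish the provider call's own timeout from cancellation of the parent context. A sketch of that pattern (names are illustrative; errInnerTimeout stands in for errStateCommitTimeout, and errors.Is is used in place of the direct comparison):

	// callWithTimeout maps only the inner deadline to errInnerTimeout; if
	// the parent ctx is also done, the original error propagates instead.
	func callWithTimeout(ctx context.Context, call func(context.Context) error) error {
		inner, cancel := context.WithTimeout(ctx, 30*time.Second)
		defer cancel()
		if err := call(inner); err != nil {
			if errors.Is(err, context.DeadlineExceeded) && ctx.Err() == nil {
				return errInnerTimeout // hypothetical sentinel
			}
			return err
		}
		return nil
	}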
33
libs/sync/atomic_bool.go
Normal file
@@ -0,0 +1,33 @@
package sync

import "sync/atomic"

// AtomicBool is an atomic Boolean.
// Its methods are all atomic, thus safe to be called by multiple goroutines simultaneously.
// Note: When embedding into a struct one should always use *AtomicBool to avoid copying.
// It is a simple implementation based on https://github.com/tevino/abool.
type AtomicBool int32

// NewBool creates an AtomicBool with the given default value.
func NewBool(ok bool) *AtomicBool {
	ab := new(AtomicBool)
	if ok {
		ab.Set()
	}
	return ab
}

// Set sets the Boolean to true.
func (ab *AtomicBool) Set() {
	atomic.StoreInt32((*int32)(ab), 1)
}

// UnSet sets the Boolean to false.
func (ab *AtomicBool) UnSet() {
	atomic.StoreInt32((*int32)(ab), 0)
}

// IsSet returns whether the Boolean is true.
func (ab *AtomicBool) IsSet() bool {
	return atomic.LoadInt32((*int32)(ab))&1 == 1
}
27
libs/sync/atomic_bool_test.go
Normal file
@@ -0,0 +1,27 @@
package sync

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestDefaultValue(t *testing.T) {
	t.Parallel()
	v := NewBool(false)
	assert.False(t, v.IsSet())

	v = NewBool(true)
	assert.True(t, v.IsSet())
}

func TestSetUnSet(t *testing.T) {
	t.Parallel()
	v := NewBool(false)

	v.Set()
	assert.True(t, v.IsSet())

	v.UnSet()
	assert.False(t, v.IsSet())
}
157
light/client.go
@@ -478,7 +478,7 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now
	}

	if !bytes.Equal(l.Hash(), newHeader.Hash()) {
		return fmt.Errorf("light block header %X does not match newHeader %X", l.Hash(), newHeader.Hash())
		return fmt.Errorf("header from primary %X does not match newHeader %X", l.Hash(), newHeader.Hash())
	}

	return c.verifyLightBlock(ctx, l, now)
@@ -586,7 +586,7 @@ func (c *Client) verifySequential(
			}

			// If some intermediate header is invalid, remove the primary and try again.
			c.logger.Error("primary sent invalid header -> removing", "err", err, "primary", c.primary)
			c.logger.Info("primary sent invalid header -> removing", "err", err, "primary", c.primary)

			replacementBlock, removeErr := c.findNewPrimary(ctx, newLightBlock.Height, true)
			if removeErr != nil {
@@ -630,6 +630,10 @@ func (c *Client) verifySequential(
// requested from source is kept such that when a verification is made, and the
// light client tries again to verify the new light block in the middle, the light
// client does not need to ask for all the same light blocks again.
//
// If this function errors, it should always wrap the error in an `ErrVerificationFailed`
// struct so that the calling function can determine where it failed and handle
// it accordingly.
func (c *Client) verifySkipping(
	ctx context.Context,
	source provider.Provider,
@@ -690,20 +694,10 @@ func (c *Client) verifySkipping(
			// schedule what the next height we need to fetch is
			pivotHeight := c.schedule(verifiedBlock.Height, blockCache[depth].Height)
			interimBlock, providerErr := source.LightBlock(ctx, pivotHeight)
			switch providerErr {
			case nil:
				blockCache = append(blockCache, interimBlock)

			// if the error is benign, the client does not need to replace the primary
			case provider.ErrLightBlockNotFound, provider.ErrNoResponse, provider.ErrHeightTooHigh:
				return nil, err

			// all other errors such as ErrBadLightBlock or ErrUnreliableProvider are seen as malevolent and the
			// provider is removed
			default:
			if providerErr != nil {
				return nil, ErrVerificationFailed{From: verifiedBlock.Height, To: pivotHeight, Reason: providerErr}
			}

			blockCache = append(blockCache, interimBlock)
		}
		depth++

@@ -729,33 +723,8 @@ func (c *Client) verifySkippingAgainstPrimary(
	now time.Time) error {

	trace, err := c.verifySkipping(ctx, c.primary, trustedBlock, newLightBlock, now)

	switch errors.Unwrap(err).(type) {
	case ErrInvalidHeader:
		// If the target header is invalid, return immediately.
		invalidHeaderHeight := err.(ErrVerificationFailed).To
		if invalidHeaderHeight == newLightBlock.Height {
			c.logger.Debug("target header is invalid", "err", err)
			return err
		}

		// If some intermediate header is invalid, remove the primary and try again.
		c.logger.Error("primary sent invalid header -> replacing", "err", err, "primary", c.primary)
		replacementBlock, removeErr := c.findNewPrimary(ctx, newLightBlock.Height, true)
		if removeErr != nil {
			c.logger.Error("failed to replace primary. Returning original error", "err", removeErr)
			return err
		}

		if !bytes.Equal(replacementBlock.Hash(), newLightBlock.Hash()) {
			c.logger.Debug("replaced primary but new primary has a different block to the initial one. Returning original error")
			return err
		}

		// attempt to verify the header again
		return c.verifySkippingAgainstPrimary(ctx, trustedBlock, replacementBlock, now)
	case nil:
		// Compare header with the witnesses to ensure it's not a fork.
	if err == nil {
		// Success! Now compare the header with the witnesses to ensure it's not a fork.
		// The more witnesses we have, the better the chance of noticing a fork.
		//
		// CORRECTNESS ASSUMPTION: there's at least 1 correct full node
@@ -763,11 +732,62 @@ func (c *Client) verifySkippingAgainstPrimary(
		if cmpErr := c.detectDivergence(ctx, trace, now); cmpErr != nil {
			return cmpErr
		}
	default:
	}

	var e = &ErrVerificationFailed{}
	// all errors from verifySkipping should be `ErrVerificationFailed`;
	// if the error is anything else, we just return it directly
	if !errors.As(err, e) {
		return err
	}

	return nil
	replace := true
	switch e.Reason.(type) {
	// Verification returned an invalid header
	case ErrInvalidHeader:
		// If it was the target header, return immediately.
		if e.To == newLightBlock.Height {
			c.logger.Debug("target header is invalid", "err", err)
			return err
		}

		// If some intermediate header is invalid, remove the primary and try
		// again.

	// An intermediate header expired. We can no longer validate it as there is
	// no longer the ability to punish invalid blocks as evidence of misbehavior
	case ErrOldHeaderExpired:
		return err

	// This happens if there was a problem in finding the next block or a
	// context was canceled.
	default:
		if errors.Is(e.Reason, context.Canceled) || errors.Is(e.Reason, context.DeadlineExceeded) {
			return e.Reason
		}

		if !c.providerShouldBeRemoved(e.Reason) {
			replace = false
		}
	}

	// if we've reached here we're attempting to retry verification with a
	// different provider
	c.logger.Info("primary returned error", "err", e, "primary", c.primary, "replace", replace)

	replacementBlock, removeErr := c.findNewPrimary(ctx, newLightBlock.Height, replace)
	if removeErr != nil {
		c.logger.Error("failed to replace primary. Returning original error", "err", removeErr)
		return e.Reason
	}

	if !bytes.Equal(replacementBlock.Hash(), newLightBlock.Hash()) {
		c.logger.Debug("replaced primary but new primary has a different block to the initial one. Returning original error")
		return e.Reason
	}

	// attempt to verify the header again from the trusted block
	return c.verifySkippingAgainstPrimary(ctx, trustedBlock, replacementBlock, now)
}

// LastTrustedHeight returns the last trusted height. -1 and nil are returned if
@@ -811,6 +831,15 @@ func (c *Client) Witnesses() []provider.Provider {
	return c.witnesses
}

// AddProvider adds a provider to the light client's set.
//
// NOTE: The light client does not check for uniqueness
func (c *Client) AddProvider(p provider.Provider) {
	c.providerMutex.Lock()
	defer c.providerMutex.Unlock()
	c.witnesses = append(c.witnesses, p)
}

// Cleanup removes all the data (headers and validator sets) stored. Note: the
// client must be stopped at this point.
func (c *Client) Cleanup() error {
@@ -865,7 +894,7 @@ func (c *Client) backwards(
			"newHash", interimHeader.Hash())
		if err := VerifyBackwards(interimHeader, verifiedHeader); err != nil {
			// verification has failed
			c.logger.Error("backwards verification failed, replacing primary...", "err", err, "primary", c.primary)
			c.logger.Info("backwards verification failed, replacing primary...", "err", err, "primary", c.primary)

			// the client tries to see if it can get a witness to continue with the request
			newPrimarysBlock, replaceErr := c.findNewPrimary(ctx, newHeader.Height, true)
@@ -909,16 +938,20 @@ func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*type
		// Everything went smoothly. We reset the lightBlockRequests and return the light block
		return l, nil

	// catch canceled contexts or deadlines
	case context.Canceled, context.DeadlineExceeded:
		return nil, err

	case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh:
		// we find a new witness to replace the primary
		c.logger.Debug("error from light block request from primary, replacing...",
		c.logger.Info("error from light block request from primary, replacing...",
			"error", err, "height", height, "primary", c.primary)
		return c.findNewPrimary(ctx, height, false)

	default:
		// The light client has most likely received either provider.ErrUnreliableProvider or provider.ErrBadLightBlock
		// These errors mean that the light client should drop the primary and try with another provider instead
		c.logger.Error("error from light block request from primary, removing...",
		c.logger.Info("error from light block request from primary, removing...",
			"error", err, "height", height, "primary", c.primary)
		return c.findNewPrimary(ctx, height, true)
	}
@@ -1011,10 +1044,14 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)
			// return the light block that new primary responded with
			return response.lb, nil

		// catch canceled contexts or deadlines
		case context.Canceled, context.DeadlineExceeded:
			return nil, response.err

		// process benign errors by logging them only
		case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh:
			lastError = response.err
			c.logger.Debug("error on light block request from witness",
			c.logger.Info("error on light block request from witness",
				"error", response.err, "primary", c.witnesses[response.witnessIndex])
			continue

@@ -1058,17 +1095,23 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S
		case nil:
			continue
		case errConflictingHeaders:
			c.logger.Error(fmt.Sprintf(`witness #%d has a different header. Please check primary is correct
and remove witness. Otherwise, use a different primary`, e.WitnessIndex), "witness", c.witnesses[e.WitnessIndex])
			c.logger.Error(`witness has a different header. Please check primary is
correct and remove witness. Otherwise, use a different primary`,
				"Witness", c.witnesses[e.WitnessIndex], "ExpHeader", h.Hash(), "GotHeader", e.Block.Hash())
			return err
		case errBadWitness:
			// If witness sent us an invalid header, then remove it
			c.logger.Info("witness sent an invalid light block or didn't respond, removing...",
				"witness", c.witnesses[e.WitnessIndex],
			c.logger.Info("witness returned an error, removing...",
				"err", err)
			witnessesToRemove = append(witnessesToRemove, e.WitnessIndex)
		default: // the witness either didn't respond or didn't have the block. We ignore it.
			c.logger.Debug("unable to compare first header with witness",
		default:
			// check for canceled contexts or deadlines
			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
				return err
			}

			// the witness either didn't respond or didn't have the block. We ignore it.
			c.logger.Debug("unable to compare first header with witness, ignoring",
				"err", err)
		}

@@ -1077,3 +1120,11 @@ and remove witness. Otherwise, use a different primary`, e.WitnessIndex), "witne
	// remove all witnesses that misbehaved
	return c.removeWitnesses(witnessesToRemove)
}

// providerShouldBeRemoved analyzes the nature of the error and whether the provider
// should be removed from the light client's set
func (c *Client) providerShouldBeRemoved(err error) bool {
	return errors.As(err, &provider.ErrUnreliableProvider{}) ||
		errors.As(err, &provider.ErrBadLightBlock{}) ||
		errors.Is(err, provider.ErrConnectionClosed)
}

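providerShouldBeRemoved relies on errors.As for the provider's typed errors and errors.Is for its sentinel error. A self-contained sketch of that split (the error types here are stand-ins, not the provider package's):

	// A sketch of classifying errors by type (errors.As) versus by
	// sentinel value (errors.Is), mirroring providerShouldBeRemoved.
	type badResponseError struct{ reason string }

	func (e badResponseError) Error() string { return "bad response: " + e.reason }

	var errClosed = errors.New("connection closed")

	func shouldRemove(err error) bool {
		var bad badResponseError
		return errors.As(err, &bad) || errors.Is(err, errClosed)
	}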
@@ -2,6 +2,7 @@ package light_test

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"
@@ -588,6 +589,36 @@ func TestClient_Concurrency(t *testing.T) {
	wg.Wait()
}

func TestClient_AddProviders(t *testing.T) {
	c, err := light.NewClient(
		ctx,
		chainID,
		trustOptions,
		fullNode,
		[]provider.Provider{fullNode},
		dbs.New(dbm.NewMemDB()),
		light.Logger(log.TestingLogger()),
	)
	require.NoError(t, err)

	closeCh := make(chan struct{})
	go func() {
		// run verification concurrently to make sure it doesn't deadlock
		_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour))
		require.NoError(t, err)
		close(closeCh)
	}()

	// NOTE: the light client doesn't check uniqueness of providers
	c.AddProvider(fullNode)
	require.Len(t, c.Witnesses(), 2)
	select {
	case <-closeCh:
	case <-time.After(5 * time.Second):
		t.Fatal("concurrent light block verification failed to finish in 5s")
	}
}

func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
	c, err := light.NewClient(
		ctx,
@@ -929,3 +960,61 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
	}

}

func TestClientHandlesContexts(t *testing.T) {
	p := mockp.New(genMockNode(chainID, 100, 10, 1, bTime))
	genBlock, err := p.LightBlock(ctx, 1)
	require.NoError(t, err)

	// instantiate the light client with a timeout
	ctxTimeOut, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
	defer cancel()
	_, err = light.NewClient(
		ctxTimeOut,
		chainID,
		light.TrustOptions{
			Period: 24 * time.Hour,
			Height: 1,
			Hash:   genBlock.Hash(),
		},
		p,
		[]provider.Provider{p, p},
		dbs.New(dbm.NewMemDB()),
	)
	require.Error(t, ctxTimeOut.Err())
	require.Error(t, err)
	require.True(t, errors.Is(err, context.DeadlineExceeded))

	// instantiate the client for real
	c, err := light.NewClient(
		ctx,
		chainID,
		light.TrustOptions{
			Period: 24 * time.Hour,
			Height: 1,
			Hash:   genBlock.Hash(),
		},
		p,
		[]provider.Provider{p, p},
		dbs.New(dbm.NewMemDB()),
	)
	require.NoError(t, err)

	// verify a block with a timeout
	ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
	defer cancel()
	_, err = c.VerifyLightBlockAtHeight(ctxTimeOutBlock, 100, bTime.Add(100*time.Minute))
	require.Error(t, ctxTimeOutBlock.Err())
	require.Error(t, err)
	require.True(t, errors.Is(err, context.DeadlineExceeded))

	// verify a block with a cancel
	ctxCancel, cancel := context.WithCancel(ctx)
	defer cancel()
	time.AfterFunc(10*time.Millisecond, cancel)
	_, err = c.VerifyLightBlockAtHeight(ctxCancel, 100, bTime.Add(100*time.Minute))
	require.Error(t, ctxCancel.Err())
	require.Error(t, err)
	require.True(t, errors.Is(err, context.Canceled))

}

@@ -78,7 +78,10 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig
|
||||
"witness", c.witnesses[e.WitnessIndex], "err", err)
|
||||
witnessesToRemove = append(witnessesToRemove, e.WitnessIndex)
|
||||
default:
|
||||
c.logger.Debug("error in light block request to witness", "err", err)
|
||||
if errors.Is(e, context.Canceled) || errors.Is(e, context.DeadlineExceeded) {
|
||||
return e
|
||||
}
|
||||
c.logger.Info("error in light block request to witness", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,7 +118,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro

// the witness hasn't been helpful in comparing headers, we mark the response and continue
// comparing with the rest of the witnesses
case provider.ErrNoResponse, provider.ErrLightBlockNotFound:
case provider.ErrNoResponse, provider.ErrLightBlockNotFound, context.DeadlineExceeded, context.Canceled:
errc <- err
return

@@ -128,7 +131,11 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro
var isTargetHeight bool
isTargetHeight, lightBlock, err = c.getTargetBlockOrLatest(ctx, h.Height, witness)
if err != nil {
errc <- err
if c.providerShouldBeRemoved(err) {
errc <- errBadWitness{Reason: err, WitnessIndex: witnessIndex}
} else {
errc <- err
}
return
}

@@ -152,7 +159,11 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro
time.Sleep(2*c.maxClockDrift + c.maxBlockLag)
isTargetHeight, lightBlock, err = c.getTargetBlockOrLatest(ctx, h.Height, witness)
if err != nil {
errc <- errBadWitness{Reason: err, WitnessIndex: witnessIndex}
if c.providerShouldBeRemoved(err) {
errc <- errBadWitness{Reason: err, WitnessIndex: witnessIndex}
} else {
errc <- err
}
return
}
if isTargetHeight {

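Both hunks above gate the errBadWitness wrapping on c.providerShouldBeRemoved, whose body this diff does not show. The idea is that provider-quality errors cost the witness its slot while transient errors such as context cancellation flow through unwrapped. A rough standalone sketch of that dispatch rule (an assumption about its shape, not the repository's actual implementation; the error types echo provider.ErrUnreliableProvider and provider.ErrConnectionClosed from elsewhere in this diff but are redefined locally):

package main

import (
    "context"
    "errors"
    "fmt"
)

var errConnectionClosed = errors.New("client connection is closed")

type errUnreliableProvider struct{ reason string }

func (e errUnreliableProvider) Error() string { return "unreliable provider: " + e.reason }

// providerShouldBeRemoved reports whether the witness that produced err
// should lose its slot; context errors are deliberately not in this set.
func providerShouldBeRemoved(err error) bool {
    return errors.As(err, &errUnreliableProvider{}) ||
        errors.Is(err, errConnectionClosed)
}

func main() {
    fmt.Println(providerShouldBeRemoved(errUnreliableProvider{"bad header"})) // true
    fmt.Println(providerShouldBeRemoved(context.DeadlineExceeded))            // false
}
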
@@ -93,7 +93,6 @@ func ExampleClient_Update() {
} else {
fmt.Println("update failed")
}
// Output: successful update
}

// Manually getting light blocks and verifying them.
@@ -169,5 +168,4 @@ func ExampleClient_VerifyLightBlockAtHeight() {
}

fmt.Println("got header", h.Height)
// Output: got header 3
}

138 light/light_test.go Normal file
@@ -0,0 +1,138 @@
package light_test

import (
"context"
"io/ioutil"
"os"
"testing"
"time"

"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
httpp "github.com/tendermint/tendermint/light/provider/http"
dbs "github.com/tendermint/tendermint/light/store/db"
rpctest "github.com/tendermint/tendermint/rpc/test"
)

// NOTE: these are ports of the tests from example_test.go but
// rewritten as more conventional tests.

// Automatically getting new headers and verifying them.
func TestClientIntegration_Update(t *testing.T) {
t.Parallel()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig(t.Name())

// Start a test application
app := kvstore.NewApplication()
_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
require.NoError(t, err)
defer func() { require.NoError(t, closer(ctx)) }()

// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)

dbDir, err := ioutil.TempDir("", "light-client-test-update-example")
require.NoError(t, err)
defer os.RemoveAll(dbDir)

chainID := conf.ChainID()

primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
require.NoError(t, err)

block, err := primary.LightBlock(ctx, 2)
require.NoError(t, err)

db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
require.NoError(t, err)

c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 504 * time.Hour, // 21 days
Height: 2,
Hash: block.Hash(),
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)

defer func() { require.NoError(t, c.Cleanup()) }()

time.Sleep(2 * time.Second)

h, err := c.Update(ctx, time.Now())
require.NoError(t, err)
require.NotNil(t, h)

require.True(t, h.Height > 2)
}

// Manually getting light blocks and verifying them.
func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig(t.Name())

// Start a test application
app := kvstore.NewApplication()

_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
require.NoError(t, err)
defer func() { require.NoError(t, closer(ctx)) }()

// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)

dbDir, err := ioutil.TempDir("", "light-client-test-verify-example")
require.NoError(t, err)
defer os.RemoveAll(dbDir)

chainID := conf.ChainID()

primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
require.NoError(t, err)

block, err := primary.LightBlock(ctx, 2)
require.NoError(t, err)

db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
require.NoError(t, err)

c, err := light.NewClient(ctx,
chainID,
light.TrustOptions{
Period: 504 * time.Hour, // 21 days
Height: 2,
Hash: block.Hash(),
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)

defer func() { require.NoError(t, c.Cleanup()) }()

_, err = c.VerifyLightBlockAtHeight(ctx, 3, time.Now())
require.NoError(t, err)

h, err := c.TrustedLightBlock(3)
require.NoError(t, err)

require.EqualValues(t, 3, h.Height)
}
@@ -19,7 +19,7 @@ import (

var defaultOptions = Options{
MaxRetryAttempts: 5,
Timeout: 3 * time.Second,
Timeout: 5 * time.Second,
NoBlockThreshold: 5,
NoResponseThreshold: 5,
}
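The default per-request timeout rises here from 3s to 5s. Callers that need a different budget are not stuck with the default; assuming the package's exported NewWithOptions constructor (the diff shows only the Options literal, so treat the constructor name as an assumption), an override would look like:

package main

import (
    "time"

    httpp "github.com/tendermint/tendermint/light/provider/http"
)

func main() {
    // Tighter timeout than the 5s default; the other fields mirror defaultOptions.
    p, err := httpp.NewWithOptions("example-chain", "tcp://127.0.0.1:26657", httpp.Options{
        MaxRetryAttempts:    5,
        Timeout:             2 * time.Second,
        NoBlockThreshold:    5,
        NoResponseThreshold: 5,
    })
    if err != nil {
        panic(err)
    }
    _ = p
}
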
@@ -125,7 +125,7 @@ func (p *http) LightBlock(ctx context.Context, height int64) (*types.LightBlock,

if sh.Header == nil {
return nil, provider.ErrBadLightBlock{
Reason: errors.New("header is nil unexpectedly"),
Reason: errors.New("returned header is nil unexpectedly"),
}
}

@@ -205,6 +205,11 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato
return nil, p.parseRPCError(e)

default:
// check if the error stems from the context
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}

// If we don't know the error then by default we return an unreliable provider error and
// terminate the connection with the peer.
return nil, provider.ErrUnreliableProvider{Reason: e.Error()}
@@ -236,11 +241,19 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe
return &commit.SignedHeader, nil

case *url.Error:
// check if the request timed out
if e.Timeout() {
// we wait and try again with exponential backoff
time.Sleep(backoffTimeout(attempt))
continue
}

// check if the connection was refused or dropped
if strings.Contains(e.Error(), "connection refused") {
return nil, provider.ErrConnectionClosed
}

// else, as a catch-all, we return the error as a bad light block response
return nil, provider.ErrBadLightBlock{Reason: e}

case *rpctypes.RPCError:
@@ -248,6 +261,11 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe
return nil, p.parseRPCError(e)

default:
// check if the error stems from the context
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}

// If we don't know the error then by default we return an unreliable provider error and
// terminate the connection with the peer.
return nil, provider.ErrUnreliableProvider{Reason: e.Error()}

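The retry path calls backoffTimeout(attempt), whose definition lies outside this diff. A plausible sketch of such a helper (an assumption, not the repository's code) grows the wait exponentially and adds jitter so retries from many clients don't synchronize:

package main

import (
    "fmt"
    "math"
    "math/rand"
    "time"
)

// backoffTimeout returns an exponentially growing wait with up to 1s of
// random jitter: roughly 2^attempt seconds plus noise.
func backoffTimeout(attempt uint16) time.Duration {
    return time.Duration(math.Pow(2, float64(attempt)))*time.Second +
        time.Duration(rand.Intn(1000))*time.Millisecond
}

func main() {
    for attempt := uint16(0); attempt < 4; attempt++ {
        fmt.Println(attempt, backoffTimeout(attempt))
    }
}
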
@@ -4,13 +4,12 @@ import (
"context"
"fmt"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/light/provider"
lighthttp "github.com/tendermint/tendermint/light/provider/http"
rpcclient "github.com/tendermint/tendermint/rpc/client"
@@ -33,30 +32,17 @@ func TestNewProvider(t *testing.T) {
require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}")
}

// NodeSuite initiates and runs a full node instance in the
// background, stopping it once the test is completed
func NodeSuite(t *testing.T) (service.Service, *config.Config) {
t.Helper()

func TestProvider(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

conf := rpctest.CreateConfig(t.Name())
defer cancel()
cfg := rpctest.CreateConfig(t.Name())

// start a tendermint node in the background to test against
app := kvstore.NewApplication()
app.RetainBlocks = 9

node, closer, err := rpctest.StartTendermint(ctx, conf, app)
_, closer, err := rpctest.StartTendermint(ctx, cfg, app)
require.NoError(t, err)
t.Cleanup(func() {
_ = closer(ctx)
cancel()
})
return node, conf
}

func TestProvider(t *testing.T) {
_, cfg := NodeSuite(t)
rpcAddr := cfg.RPC.ListenAddress
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
@@ -95,8 +81,9 @@ func TestProvider(t *testing.T) {
require.Nil(t, lb)
assert.Equal(t, provider.ErrHeightTooHigh, err)

_, err = p.LightBlock(context.Background(), 1)
lb, err = p.LightBlock(context.Background(), 1)
require.Error(t, err)
require.Nil(t, lb)
assert.Equal(t, provider.ErrLightBlockNotFound, err)

// if the provider is unable to provide four more blocks then we should return
@@ -105,4 +92,15 @@ func TestProvider(t *testing.T) {
_, err = p.LightBlock(context.Background(), 1)
}
assert.IsType(t, provider.ErrUnreliableProvider{}, err)

// shut down tendermint node
require.NoError(t, closer(ctx))
cancel()

time.Sleep(10 * time.Second)
lb, err = p.LightBlock(context.Background(), lower+2)
// we should see a connection refused
require.Error(t, err)
require.Nil(t, lb)
assert.Equal(t, provider.ErrConnectionClosed, err)
}

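The tail of this test walks the provider's failure escalation: repeated ErrLightBlockNotFound responses eventually surface as ErrUnreliableProvider, and a dead node yields ErrConnectionClosed. The counters in the Options above (NoBlockThreshold, NoResponseThreshold) presumably drive that escalation; a toy model of the mechanism (an assumption about its shape, not the repository's code):

package main

import (
    "errors"
    "fmt"
)

var errLightBlockNotFound = errors.New("light block not found")

type flakyProvider struct {
    noBlockCount, noBlockThreshold int
}

// lightBlock always fails in this toy model; once the consecutive-failure
// count crosses the threshold, the error escalates.
func (p *flakyProvider) lightBlock(height int64) error {
    p.noBlockCount++
    if p.noBlockCount > p.noBlockThreshold {
        return fmt.Errorf("unreliable provider: %d consecutive missing blocks", p.noBlockCount)
    }
    return errLightBlockNotFound
}

func main() {
    p := &flakyProvider{noBlockThreshold: 5}
    for i := 0; i < 7; i++ {
        fmt.Println(p.lightBlock(1))
    }
}
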
@@ -6,6 +6,7 @@ import (
"fmt"
"strings"
"sync"
"time"

"github.com/tendermint/tendermint/light/provider"
"github.com/tendermint/tendermint/types"
@@ -55,10 +56,16 @@ func (p *Mock) String() string {
return fmt.Sprintf("Mock{id: %s, headers: %s, vals: %v}", p.id, headers.String(), vals.String())
}

func (p *Mock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) {
func (p *Mock) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
p.mtx.Lock()
defer p.mtx.Unlock()

select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(10 * time.Millisecond):
}

var lb *types.LightBlock

if height > p.latestHeight {

@@ -6,6 +6,8 @@ import (
"github.com/tendermint/tendermint/types"
)

//go:generate mockery --case underscore --name Provider

// Provider provides information for the light client to sync (verification
// happens in the client).
type Provider interface {

@@ -50,8 +50,9 @@ func VerifyNonAdjacent(
return err
}

if HeaderExpired(trustedHeader, trustingPeriod, now) {
return ErrOldHeaderExpired{trustedHeader.Time.Add(trustingPeriod), now}
// check if the untrusted header is within the trust period
if HeaderExpired(untrustedHeader, trustingPeriod, now) {
return ErrOldHeaderExpired{untrustedHeader.Time.Add(trustingPeriod), now}
}

if err := verifyNewHeaderAndVals(
@@ -117,8 +118,9 @@ func VerifyAdjacent(
return errors.New("headers must be adjacent in height")
}

if HeaderExpired(trustedHeader, trustingPeriod, now) {
return ErrOldHeaderExpired{trustedHeader.Time.Add(trustingPeriod), now}
// check if the untrusted header is within the trust period
if HeaderExpired(untrustedHeader, trustingPeriod, now) {
return ErrOldHeaderExpired{untrustedHeader.Time.Add(trustingPeriod), now}
}

if err := verifyNewHeaderAndVals(
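Both hunks now test the untrusted header, not the trusted one, against the trusting period. HeaderExpired's body is not part of this diff, but its signature and call sites imply the following semantics (a sketch using plain time values rather than *types.SignedHeader):

package main

import (
    "fmt"
    "time"
)

// headerExpired reports whether a header timestamped at headerTime has left
// the trusting period: it is expired once headerTime+trustingPeriod is no
// longer after now.
func headerExpired(headerTime time.Time, trustingPeriod time.Duration, now time.Time) bool {
    return !headerTime.Add(trustingPeriod).After(now)
}

func main() {
    bTime := time.Now().Add(-2 * time.Hour)
    fmt.Println(headerExpired(bTime, 1*time.Hour, time.Now())) // true: expired
    fmt.Println(headerExpired(bTime, 3*time.Hour, time.Now())) // false: still within the period
}
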
@@ -192,7 +194,10 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time
// of the trusted header
//
// For any of these cases ErrInvalidHeader is returned.
// NOTE: This does not check whether the trusted header has expired or not.
// NOTE: This does not check whether the trusted or untrusted header has expired
// or not. These checks are not necessary because the detector never runs during
// backwards verification and thus evidence that needs to be within a certain
// time bound is never sent.
func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error {
if err := untrustedHeader.ValidateBasic(); err != nil {
return ErrInvalidHeader{err}
@@ -220,6 +225,8 @@ func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error {
return nil
}

// NOTE: This function assumes that untrustedHeader is after trustedHeader.
// Do not use for backwards verification.
func verifyNewHeaderAndVals(
untrustedHeader *types.SignedHeader,
untrustedVals *types.ValidatorSet,

@@ -64,7 +64,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) {
keys.GenSignedHeader(chainID, nextHeight, bTime.Add(-1*time.Hour), nil, vals, vals,
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)),
vals,
3 * time.Hour,
4 * time.Hour,
bTime.Add(2 * time.Hour),
nil,
"to be after old header time",
@@ -146,7 +146,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) {
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)),
keys.ToValidators(10, 1),
1 * time.Hour,
bTime.Add(1 * time.Hour),
bTime.Add(2 * time.Hour),
nil,
"old header has expired",
},

137 node/node.go
@@ -63,26 +63,25 @@ type nodeImpl struct {
isListening bool

// services
eventBus *types.EventBus // pub/sub for services
stateStore sm.Store
blockStore *store.BlockStore // store the blockchain to disk
bcReactor service.Service // for fast-syncing
mempoolReactor service.Service // for gossiping transactions
mempool mempool.Mempool
stateSync bool // whether the node should state sync on startup
stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
consensusState *cs.State // latest consensus state
consensusReactor *cs.Reactor // for participating in the consensus
pexReactor *pex.Reactor // for exchanging peer addresses
pexReactorV2 *pex.ReactorV2 // for exchanging peer addresses
evidenceReactor *evidence.Reactor
evidencePool *evidence.Pool // tracking evidence
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
eventSinks []indexer.EventSink
indexerService *indexer.Service
prometheusSrv *http.Server
eventBus *types.EventBus // pub/sub for services
stateStore sm.Store
blockStore *store.BlockStore // store the blockchain to disk
bcReactor service.Service // for fast-syncing
mempoolReactor service.Service // for gossiping transactions
mempool mempool.Mempool
stateSync bool // whether the node should state sync on startup
stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
consensusState *cs.State // latest consensus state
consensusReactor *cs.Reactor // for participating in the consensus
pexReactor *pex.Reactor // for exchanging peer addresses
pexReactorV2 *pex.ReactorV2 // for exchanging peer addresses
evidenceReactor *evidence.Reactor
evidencePool *evidence.Pool // tracking evidence
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
eventSinks []indexer.EventSink
indexerService *indexer.Service
prometheusSrv *http.Server
}

// newDefaultNode returns a Tendermint node with default settings for the
@@ -663,9 +662,15 @@ func (n *nodeImpl) OnStart() error {
return fmt.Errorf("unable to derive state: %w", err)
}

err = startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, state)
ssc := n.config.StateSync
sp, err := constructStateProvider(ssc, state, n.Logger.With("module", "light"))

if err != nil {
return fmt.Errorf("failed to set up light client state provider: %w", err)
}

if err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, sp,
ssc, n.config.FastSyncMode, state.InitialHeight, n.eventBus); err != nil {
return fmt.Errorf("failed to start state sync: %w", err)
}
}
@@ -1027,50 +1032,59 @@ func (n *nodeImpl) NodeInfo() types.NodeInfo {
}

// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
func startStateSync(ssR *statesync.Reactor, bcR cs.FastSyncReactor, conR *cs.Reactor,
stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
ssR.Logger.Info("starting state sync...")
func startStateSync(
ssR statesync.SyncReactor,
bcR cs.FastSyncReactor,
conR cs.ConsSyncReactor,
sp statesync.StateProvider,
config *cfg.StateSyncConfig,
fastSync bool,
stateInitHeight int64,
eb *types.EventBus,
) error {
stateSyncLogger := eb.Logger.With("module", "statesync")

if stateProvider == nil {
var err error
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
stateProvider, err = statesync.NewLightClientStateProvider(
ctx,
state.ChainID, state.Version, state.InitialHeight,
config.RPCServers, light.TrustOptions{
Period: config.TrustPeriod,
Height: config.TrustHeight,
Hash: config.TrustHashBytes(),
}, ssR.Logger.With("module", "light"))
if err != nil {
return fmt.Errorf("failed to set up light client state provider: %w", err)
}
stateSyncLogger.Info("starting state sync...")

// at the beginning of state sync we use the initial height as the event height,
// because state sync doesn't know the concrete state height until it has fetched a snapshot.
d := types.EventDataStateSyncStatus{Complete: false, Height: stateInitHeight}
if err := eb.PublishEventStateSyncStatus(d); err != nil {
stateSyncLogger.Error("failed to emit the statesync start event", "err", err)
}

go func() {
state, err := ssR.Sync(context.TODO(), stateProvider, config.DiscoveryTime)
state, err := ssR.Sync(context.TODO(), sp, config.DiscoveryTime)
if err != nil {
ssR.Logger.Error("state sync failed", "err", err)
stateSyncLogger.Error("state sync failed", "err", err)
return
}

err = ssR.Backfill(state)
if err != nil {
ssR.Logger.Error("backfill failed; node has insufficient history to verify all evidence;"+
if err := ssR.Backfill(state); err != nil {
stateSyncLogger.Error("backfill failed; node has insufficient history to verify all evidence;"+
" proceeding optimistically...", "err", err)
}

conR.Metrics.StateSyncing.Set(0)
conR.SetStateSyncingMetrics(0)

d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := eb.PublishEventStateSyncStatus(d); err != nil {
stateSyncLogger.Error("failed to emit the statesync complete event", "err", err)
}

if fastSync {
// FIXME Very ugly to have these metrics bleed through here.
conR.Metrics.FastSyncing.Set(1)
err = bcR.SwitchToFastSync(state)
if err != nil {
ssR.Logger.Error("failed to switch to fast sync", "err", err)
conR.SetFastSyncingMetrics(1)
if err := bcR.SwitchToFastSync(state); err != nil {
stateSyncLogger.Error("failed to switch to fast sync", "err", err)
return
}

d := types.EventDataFastSyncStatus{Complete: false, Height: state.LastBlockHeight}
if err := eb.PublishEventFastSyncStatus(d); err != nil {
stateSyncLogger.Error("failed to emit the fastsync starting event", "err", err)
}

} else {
conR.SwitchToConsensus(state, true)
}
@@ -1260,3 +1274,24 @@ func getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Ch

return channels
}

func constructStateProvider(
ssc *cfg.StateSyncConfig,
state sm.State,
logger log.Logger,
) (statesync.StateProvider, error) {
ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
defer cancel()

to := light.TrustOptions{
Period: ssc.TrustPeriod,
Height: ssc.TrustHeight,
Hash: ssc.TrustHashBytes(),
}

return statesync.NewLightClientStateProvider(
ctx,
state.ChainID, state.Version, state.InitialHeight,
ssc.RPCServers, to, logger,
)
}
Some files were not shown because too many files have changed in this diff.