Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-15 01:02:50 +00:00)

Compare commits (25 commits):

936221e0a8, 70563686a4, 69e96f171b, 9e8ed70031, 09ede0fd53, cbfe970120, defe61604f, 94a5fdfb5f,
775026ecfd, 0ab2ae2ac6, 1233cc8e8b, 01d904b33c, 20a2fe6691, 69d941ea3f, dadbf7d49b, 60a8cc52e3,
e7a53ba297, e941ad9294, f33a104560, a8721843e8, 49dd6413a7, 6b332c3cb6, c78adbc0fa, 409d94abac,
8e0f196b4b
.github/workflows/e2e-manual.yml (vendored, 2 changed lines)

@@ -28,7 +28,7 @@ jobs:
      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
-       run: ./build/generator -g 4 -d networks/nightly/
+       run: ./generator-multiversion.sh -g 4 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e
.github/workflows/govulncheck.yml (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
+name: Check for Go vulnerabilities
+# Runs https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck to proactively
+# check for vulnerabilities in code packages if there were any changes made to
+# any Go code or dependencies.
+#
+# Run `make vulncheck` from the root of the repo to run this workflow locally.
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+      - release/**
+
+jobs:
+  govulncheck:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: "1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
+        with:
+          PATTERNS: |
+            **/*.go
+            go.mod
+            go.sum
+            Makefile
+      - name: govulncheck
+        run: make vulncheck
+        if: "env.GIT_DIFF != ''"
.github/workflows/pre-release.yml (vendored, 4 changed lines)

@@ -21,7 +21,7 @@ jobs:
          go-version: '1.18'

      - name: Build
-       uses: goreleaser/goreleaser-action@v3
+       uses: goreleaser/goreleaser-action@v4
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest
@@ -31,7 +31,7 @@ jobs:
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md

      - name: Release
-       uses: goreleaser/goreleaser-action@v3
+       uses: goreleaser/goreleaser-action@v4
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest
.github/workflows/proto-lint.yml (vendored, 2 changed lines)

@@ -15,7 +15,7 @@ jobs:
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
-     - uses: bufbuild/buf-setup-action@v1.8.0
+     - uses: bufbuild/buf-setup-action@v1.10.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
.github/workflows/release.yml (vendored, 4 changed lines)

@@ -19,7 +19,7 @@ jobs:
          go-version: '1.18'

      - name: Build
-       uses: goreleaser/goreleaser-action@v3
+       uses: goreleaser/goreleaser-action@v4
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest
@@ -28,7 +28,7 @@ jobs:
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

      - name: Release
-       uses: goreleaser/goreleaser-action@v3
+       uses: goreleaser/goreleaser-action@v4
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest
.github/workflows/testapp-docker.yml (vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
+name: Docker E2E Node
+# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags
+# and pushes the image to https://hub.docker.com/r/tendermint/e2e-node
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - "v[0-9]+.[0-9]+.[0-9]+"               # Push events to matching v*, i.e. v1.0, v20.15.10
+      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+"  # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
+      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"   # e.g. v0.37.0-beta.1, v0.38.0-beta.10
+      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"      # e.g. v0.37.0-rc1, v0.38.0-rc10
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Prepare
+        id: prep
+        run: |
+          DOCKER_IMAGE=tendermint/e2e-node
+          VERSION=noop
+          if [[ $GITHUB_REF == refs/tags/* ]]; then
+            VERSION=${GITHUB_REF#refs/tags/}
+          elif [[ $GITHUB_REF == refs/heads/* ]]; then
+            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
+            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
+              VERSION=latest
+            fi
+          fi
+          TAGS="${DOCKER_IMAGE}:${VERSION}"
+          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+            TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
+          fi
+          echo "tags=${TAGS}" >> $GITHUB_OUTPUT
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@master
+        with:
+          platforms: all
+
+      - name: Set up Docker Build
+        uses: docker/setup-buildx-action@v2.2.1
+
+      - name: Login to DockerHub
+        if: ${{ github.event_name != 'pull_request' }}
+        uses: docker/login-action@v2.1.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Publish to Docker Hub
+        uses: docker/build-push-action@v3.2.0
+        with:
+          context: .
+          file: ./test/e2e/docker/Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: ${{ github.event_name != 'beep_boop' }}
+          tags: ${{ steps.prep.outputs.tags }}
@@ -16,7 +16,14 @@

### FEATURES

- `[rpc]` [\#9759] Added `match_event` query parameter to indicate to Tendermint that the query should match event attributes within events, not only within a height.

### IMPROVEMENTS

- `[state/kvindexer]` [\#9759] Added `match.event` keyword to support condition evaluation based on the event the attributes belong to. (@jmalicevic)
- [crypto] \#9250 Update to use btcec v2 and the latest btcutil. (@wcsiu)
- [consensus] \#9760 Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. (@williambanfield)
- [metrics] \#9733 Add metrics for timing the consensus steps and for the progress of block gossip. (@williambanfield)

### BUG FIXES
Makefile (4 changed lines)

@@ -203,6 +203,10 @@ lint:
 	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
 .PHONY: lint

+vulncheck:
+	@go run golang.org/x/vuln/cmd/govulncheck@latest ./...
+.PHONY: vulncheck
+
 DESTINATION = ./index.html.md

###############################################################################
README.md (29 changed lines)

@@ -1,9 +1,5 @@
 # Tendermint

-_UPDATE: TendermintCore featureset is frozen for LTS, see issue https://github.com/tendermint/tendermint/issues/9972_<br/>
-_This is the latest stable release used by cosmoshub-4, version 0.34.24_<br/>
-_The previous main branch (v0.38.xx) can now be found under "main_backup"_<br/>
-
 

 [Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
@@ -46,15 +42,20 @@ since we are making breaking changes to the protocol and the APIs. See below for
 more details about [versioning](#versioning).

 In any case, if you intend to run Tendermint in production, we're happy to help.
-You can contact us [over email](mailto:hello@newtendermint.org) or [join the
-chat](https://discord.gg/gnoland).
+You can contact us [over email](mailto:hello@interchain.io) or [join the
+chat](https://discord.gg/cosmosnetwork).

 More on how releases are conducted can be found [here](./RELEASES.md).

 ## Security

-To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
-For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
+To report a security vulnerability, see our [bug bounty
+program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're
+looking for, see [our security policy](SECURITY.md).

+We also maintain a dedicated mailing list for security updates. We will only
+ever use this mailing list to notify you of vulnerabilities and fixes in
+Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).

 ## Minimum requirements

@@ -144,10 +145,14 @@ Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).

 ## Join us!

-The development of Tendermint Core was led primarily by All in Bits, Inc. The
-Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
-full-time on Tendermint2 or [gno.land](https://gno.land), [we're
-hiring](mailto:hiring@newtendermint.org)!
+Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
+If you'd like to work full-time on Tendermint Core,
+[we're hiring](https://interchain-gmbh.breezy.hr/)!

+Funding for Tendermint Core development comes primarily from the
+[Interchain Foundation](https://interchain.io), a Swiss non-profit. The
+Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the
+for-profit entity that also maintains [tendermint.com](https://tendermint.com).

 [bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
 [smr]: https://en.wikipedia.org/wiki/State_machine_replication
@@ -68,6 +68,9 @@ type Application struct {

     state        State
     RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
+    // If true, the app will generate block events in BeginBlock. Used to test the event indexer.
+    // Should be false by default to avoid generating too much data.
+    genBlockEvents bool
 }

 func NewApplication() *Application {
@@ -75,6 +78,10 @@ func NewApplication() *Application {
     return &Application{state: state}
 }

+func (app *Application) SetGenBlockEvents() {
+    app.genBlockEvents = true
+}
+
 func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
     return types.ResponseInfo{
         Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
@@ -111,6 +118,15 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
             {Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
         },
     },
+    {
+        Type: "app",
+        Attributes: []types.EventAttribute{
+            {Key: []byte("creator"), Value: []byte("Cosmoshi"), Index: true},
+            {Key: []byte("key"), Value: value, Index: true},
+            {Key: []byte("index_key"), Value: []byte("index is working"), Index: true},
+            {Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
+        },
+    },
 }

 return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
@@ -170,3 +186,71 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo

     return resQuery
 }
+
+func (app *Application) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
+    response := types.ResponseBeginBlock{}
+
+    if !app.genBlockEvents {
+        return response
+    }
+
+    if app.state.Height%2 == 0 {
+        response = types.ResponseBeginBlock{
+            Events: []types.Event{
+                {
+                    Type: "begin_event",
+                    Attributes: []types.EventAttribute{
+                        {Key: []byte("foo"), Value: []byte("100"), Index: true},
+                        {Key: []byte("bar"), Value: []byte("200"), Index: true},
+                    },
+                },
+                {
+                    Type: "begin_event",
+                    Attributes: []types.EventAttribute{
+                        {Key: []byte("foo"), Value: []byte("200"), Index: true},
+                        {Key: []byte("bar"), Value: []byte("300"), Index: true},
+                    },
+                },
+            },
+        }
+    } else {
+        response = types.ResponseBeginBlock{
+            Events: []types.Event{
+                {
+                    Type: "begin_event",
+                    Attributes: []types.EventAttribute{
+                        {Key: []byte("foo"), Value: []byte("400"), Index: true},
+                        {Key: []byte("bar"), Value: []byte("300"), Index: true},
+                    },
+                },
+            },
+        }
+    }
+
+    return response
+}
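Since `genBlockEvents` defaults to off, a test has to opt in explicitly. Below is a minimal sketch of driving the example app, assuming the import paths `github.com/tendermint/tendermint/abci/example/kvstore` and `github.com/tendermint/tendermint/abci/types` from this repo; it is an illustration, not part of the change set.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	app := kvstore.NewApplication()
	app.SetGenBlockEvents() // opt in; the default stays off to limit indexed data

	// With the switch on, BeginBlock carries synthetic "begin_event" events
	// whose attribute values depend on the parity of the app's block height.
	resp := app.BeginBlock(types.RequestBeginBlock{})
	for _, ev := range resp.Events {
		fmt.Println(ev.Type, len(ev.Attributes))
	}
}
```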
@@ -51,6 +51,9 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
     }
 }

+func (app *PersistentKVStoreApplication) SetGenBlockEvents() {
+    app.app.genBlockEvents = true
+}
 func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
     app.logger = l
 }
@@ -142,7 +145,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
         }
     }

-    return types.ResponseBeginBlock{}
+    return app.app.BeginBlock(req)
 }

 // Update the validator set
@@ -687,9 +687,6 @@ type MempoolConfig struct {
     // Mempool version to use:
     //   1) "v0" - (default) FIFO mempool.
     //   2) "v1" - prioritized mempool.
-    // WARNING: There's a known memory leak with the prioritized mempool
-    // that the team are working on. Read more here:
-    // https://github.com/tendermint/tendermint/issues/8775
     Version string `mapstructure:"version"`
     RootDir string `mapstructure:"home"`
     Recheck bool   `mapstructure:"recheck"`
@@ -1,8 +1,12 @@
 package consensus

 import (
+    "strings"
+    "time"
+
     "github.com/go-kit/kit/metrics"
     "github.com/go-kit/kit/metrics/discard"
+    cstypes "github.com/tendermint/tendermint/consensus/types"

     prometheus "github.com/go-kit/kit/metrics/prometheus"
     stdprometheus "github.com/prometheus/client_golang/prometheus"
@@ -61,6 +65,14 @@ type Metrics struct {
     // Number of blockparts transmitted by peer.
     BlockParts metrics.Counter

+    // Histogram of step duration.
+    StepDuration metrics.Histogram
+    stepStart    time.Time
+
+    // Number of block parts received by the node, separated by whether the part
+    // was relevant to the block the node is trying to gather or not.
+    BlockGossipPartsReceived metrics.Counter
+
     // QuorumPrevoteMessageDelay is the interval in seconds between the proposal
     // timestamp and the timestamp of the earliest prevote that achieved a quorum
     // during the prevote step.
@@ -99,7 +111,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
             Name:      "rounds",
             Help:      "Number of rounds.",
         }, labels).With(labelsAndValues...),
-
         Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
             Namespace: namespace,
             Subsystem: MetricsSubsystem,
@@ -202,6 +213,20 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
             Name:      "block_parts",
             Help:      "Number of blockparts transmitted by peer.",
         }, append(labels, "peer_id")).With(labelsAndValues...),
+        BlockGossipPartsReceived: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+            Namespace: namespace,
+            Subsystem: MetricsSubsystem,
+            Name:      "block_gossip_parts_received",
+            Help: "Number of block parts received by the node, labeled by whether the " +
+                "part was relevant to the block the node was currently gathering or not.",
+        }, append(labels, "matches_current")).With(labelsAndValues...),
+        StepDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
+            Namespace: namespace,
+            Subsystem: MetricsSubsystem,
+            Name:      "step_duration_seconds",
+            Help:      "Time spent per step.",
+            Buckets:   stdprometheus.ExponentialBucketsRange(0.1, 100, 8),
+        }, append(labels, "step")).With(labelsAndValues...),
         QuorumPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
             Namespace: namespace,
             Subsystem: MetricsSubsystem,
@@ -226,7 +251,8 @@ func NopMetrics() *Metrics {

         ValidatorLastSignedHeight: discard.NewGauge(),

-        Rounds: discard.NewGauge(),
+        Rounds:       discard.NewGauge(),
+        StepDuration: discard.NewHistogram(),

         Validators:      discard.NewGauge(),
         ValidatorsPower: discard.NewGauge(),
@@ -246,7 +272,21 @@ func NopMetrics() *Metrics {
         FastSyncing:               discard.NewGauge(),
         StateSyncing:              discard.NewGauge(),
         BlockParts:                discard.NewCounter(),
+        BlockGossipPartsReceived:  discard.NewCounter(),
         QuorumPrevoteMessageDelay: discard.NewGauge(),
         FullPrevoteMessageDelay:   discard.NewGauge(),
     }
 }
+
+func (m *Metrics) MarkRound(r int32) {
+    m.Rounds.Set(float64(r))
+}
+
+func (m *Metrics) MarkStep(s cstypes.RoundStepType) {
+    if !m.stepStart.IsZero() {
+        stepTime := time.Since(m.stepStart).Seconds()
+        stepName := strings.TrimPrefix(s.String(), "RoundStep")
+        m.StepDuration.With("step", stepName).Observe(stepTime)
+    }
+    m.stepStart = time.Now()
+}
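A rough sketch of how the new hooks behave; it uses `NopMetrics` so nothing is registered anywhere, and the package paths are the ones already used in the imports above. The exact usage is an assumption for illustration only.

```go
package main

import (
	"time"

	"github.com/tendermint/tendermint/consensus"
	cstypes "github.com/tendermint/tendermint/consensus/types"
)

func main() {
	// Discards all observations; PrometheusMetrics("tendermint") would register real collectors.
	m := consensus.NopMetrics()

	// First call: stepStart is zero, so MarkStep only starts the timer.
	m.MarkStep(cstypes.RoundStepNewHeight)
	time.Sleep(20 * time.Millisecond)

	// Second call: observes ~0.02s. With Prometheus metrics this would land in the
	// step_duration_seconds histogram labeled with the step passed here; the consensus
	// state machine passes the step that is ending (see updateRoundStep below).
	m.MarkStep(cstypes.RoundStepPropose)

	// MarkRound sets the rounds gauge.
	m.MarkRound(2)
}
```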
@@ -1375,6 +1375,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
     psRound := ps.PRS.Round
     psCatchupCommitRound := ps.PRS.CatchupCommitRound
     psCatchupCommit := ps.PRS.CatchupCommit
+    lastPrecommits := ps.PRS.Precommits

     startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
     ps.PRS.Height = msg.Height
@@ -1402,7 +1403,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
     // Shift Precommits to LastCommit.
     if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
         ps.PRS.LastCommitRound = msg.LastCommitRound
-        ps.PRS.LastCommit = ps.PRS.Precommits
+        ps.PRS.LastCommit = lastPrecommits
     } else {
         ps.PRS.LastCommitRound = msg.LastCommitRound
         ps.PRS.LastCommit = nil
@@ -520,6 +520,14 @@ func (cs *State) updateHeight(height int64) {
 }

 func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) {
+    if !cs.replayMode {
+        if round != cs.Round || round == 0 && step == cstypes.RoundStepNewRound {
+            cs.metrics.MarkRound(cs.Round)
+        }
+        if cs.Step != step {
+            cs.metrics.MarkStep(cs.Step)
+        }
+    }
     cs.Round = round
     cs.Step = step
 }
@@ -1019,9 +1027,6 @@ func (cs *State) enterNewRound(height int64, round int32) {
     if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil {
         cs.Logger.Error("failed publishing new round", "err", err)
     }
-
-    cs.metrics.Rounds.Set(float64(round))
-
     // Wait for txs to be available in the mempool
     // before we enterPropose in round 0. If the last block changed the app hash,
     // we may need an empty "proof" block, and enterPropose immediately.
@@ -1853,11 +1858,13 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
     // Blocks might be reused, so round mismatch is OK
     if cs.Height != height {
         cs.Logger.Debug("received block part from wrong height", "height", height, "round", round)
+        cs.metrics.BlockGossipPartsReceived.With("matches_current", "false").Add(1)
         return false, nil
     }

     // We're not expecting a block part.
     if cs.ProposalBlockParts == nil {
+        cs.metrics.BlockGossipPartsReceived.With("matches_current", "false").Add(1)
         // NOTE: this can happen when we've gone to a higher round and
         // then receive parts from the previous round - not necessarily a bad peer.
         cs.Logger.Debug(
@@ -1872,8 +1879,14 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add

     added, err = cs.ProposalBlockParts.AddPart(part)
     if err != nil {
+        if errors.Is(err, types.ErrPartSetInvalidProof) || errors.Is(err, types.ErrPartSetUnexpectedIndex) {
+            cs.metrics.BlockGossipPartsReceived.With("matches_current", "false").Add(1)
+        }
         return added, err
     }
+
+    cs.metrics.BlockGossipPartsReceived.With("matches_current", "true").Add(1)
+
     if cs.ProposalBlockParts.ByteSize() > cs.state.ConsensusParams.Block.MaxBytes {
         return added, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)",
             cs.ProposalBlockParts.ByteSize(), cs.state.ConsensusParams.Block.MaxBytes,
@@ -8,7 +8,8 @@ import (
     "io"
     "math/big"

-    secp256k1 "github.com/btcsuite/btcd/btcec"
+    secp256k1 "github.com/btcsuite/btcd/btcec/v2"
+    "github.com/btcsuite/btcd/btcec/v2/ecdsa"
     "golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format

     "github.com/tendermint/tendermint/crypto"
@@ -42,7 +43,7 @@ func (privKey PrivKey) Bytes() []byte {
 // PubKey performs the point-scalar multiplication from the privKey on the
 // generator point to get the pubkey.
 func (privKey PrivKey) PubKey() crypto.PubKey {
-    _, pubkeyObject := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
+    _, pubkeyObject := secp256k1.PrivKeyFromBytes(privKey)

     pk := pubkeyObject.SerializeCompressed()

@@ -122,24 +123,18 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey {
     return PrivKey(privKey32)
 }

-// used to reject malleable signatures
-// see:
-//  - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
-//  - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
-var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
-
 // Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
 // The returned signature will be of the form R || S (in lower-S form).
 func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
-    priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
+    priv, _ := secp256k1.PrivKeyFromBytes(privKey)

-    sig, err := priv.Sign(crypto.Sha256(msg))
+    sig, err := ecdsa.SignCompact(priv, crypto.Sha256(msg), false)
     if err != nil {
         return nil, err
     }

-    sigBytes := serializeSig(sig)
-    return sigBytes, nil
+    // remove the first byte which is compactSigRecoveryCode
+    return sig[1:], nil
 }

 //-------------------------------------
@@ -199,7 +194,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
         return false
     }

-    pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256())
+    pub, err := secp256k1.ParsePubKey(pubKey)
     if err != nil {
         return false
     }
@@ -208,7 +203,13 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
     signature := signatureFromBytes(sigStr)
     // Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
     // see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
-    if signature.S.Cmp(secp256k1halfN) > 0 {
+    // Serialize() would negate S value if it is over half order.
+    // Hence, if the signature is different after Serialize(), it should be rejected.
+    var modifiedSignature, parseErr = ecdsa.ParseDERSignature(signature.Serialize())
+    if parseErr != nil {
+        return false
+    }
+    if !signature.IsEqual(modifiedSignature) {
         return false
     }
@@ -217,21 +218,10 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {

 // Read Signature struct from R || S. Caller needs to ensure
 // that len(sigStr) == 64.
-func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
-    return &secp256k1.Signature{
-        R: new(big.Int).SetBytes(sigStr[:32]),
-        S: new(big.Int).SetBytes(sigStr[32:64]),
-    }
-}
-
-// Serialize signature to R || S.
-// R, S are padded to 32 bytes respectively.
-func serializeSig(sig *secp256k1.Signature) []byte {
-    rBytes := sig.R.Bytes()
-    sBytes := sig.S.Bytes()
-    sigBytes := make([]byte, 64)
-    // 0 pad the byte arrays from the left if they aren't big enough.
-    copy(sigBytes[32-len(rBytes):32], rBytes)
-    copy(sigBytes[64-len(sBytes):64], sBytes)
-    return sigBytes
+func signatureFromBytes(sigStr []byte) *ecdsa.Signature {
+    var r secp256k1.ModNScalar
+    r.SetByteSlice(sigStr[:32])
+    var s secp256k1.ModNScalar
+    s.SetByteSlice(sigStr[32:64])
+    return ecdsa.NewSignature(&r, &s)
 }
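The package's external behavior is unchanged by the btcec v2 migration: signatures are still 64-byte R || S in lower-S form. A small round-trip sketch using the package at `github.com/tendermint/tendermint/crypto/secp256k1` (illustration only):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/secp256k1"
)

func main() {
	priv := secp256k1.GenPrivKey()
	msg := []byte("payload to sign")

	// Sign hashes msg with SHA256 internally and returns R || S (64 bytes).
	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sig)) // 64

	pub := priv.PubKey()
	fmt.Println(pub.VerifySignature(msg, sig))             // true
	fmt.Println(pub.VerifySignature([]byte("other"), sig)) // false
}
```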
@@ -7,7 +7,7 @@ import (

     "github.com/stretchr/testify/require"

-    secp256k1 "github.com/btcsuite/btcd/btcec"
+    secp256k1 "github.com/btcsuite/btcd/btcec/v2"
 )

 func Test_genPrivKey(t *testing.T) {
@@ -54,20 +54,30 @@ func TestSignatureVerificationAndRejectUpperS(t *testing.T) {
     priv := GenPrivKey()
     sigStr, err := priv.Sign(msg)
     require.NoError(t, err)
-    sig := signatureFromBytes(sigStr)
-    require.False(t, sig.S.Cmp(secp256k1halfN) > 0)
+    var r secp256k1.ModNScalar
+    r.SetByteSlice(sigStr[:32])
+    var s secp256k1.ModNScalar
+    s.SetByteSlice(sigStr[32:64])
+    require.False(t, s.IsOverHalfOrder())

     pub := priv.PubKey()
     require.True(t, pub.VerifySignature(msg, sigStr))

     // malleate:
-    sig.S.Sub(secp256k1.S256().CurveParams.N, sig.S)
-    require.True(t, sig.S.Cmp(secp256k1halfN) > 0)
-    malSigStr := serializeSig(sig)
+    var S256 secp256k1.ModNScalar
+    S256.SetByteSlice(secp256k1.S256().N.Bytes())
+    s.Negate().Add(&S256)
+    require.True(t, s.IsOverHalfOrder())
+
+    rBytes := r.Bytes()
+    sBytes := s.Bytes()
+    malSigStr := make([]byte, 64)
+    copy(malSigStr[32-len(rBytes):32], rBytes[:])
+    copy(malSigStr[64-len(sBytes):64], sBytes[:])

     require.False(t, pub.VerifySignature(msg, malSigStr),
         "VerifyBytes incorrect with malleated & invalid S. sig=%v, key=%v",
-        sig,
+        malSigStr,
         priv,
     )
 }
@@ -5,14 +5,14 @@ import (
     "math/big"
     "testing"

-    "github.com/btcsuite/btcutil/base58"
+    "github.com/btcsuite/btcd/btcutil/base58"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"

     "github.com/tendermint/tendermint/crypto"
     "github.com/tendermint/tendermint/crypto/secp256k1"

-    underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
+    underlyingSecp256k1 "github.com/btcsuite/btcd/btcec/v2"
 )

 type keyData struct {
@@ -75,7 +75,7 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) {

     // This function creates a private and public key in the underlying libraries format.
     // The private key is basically calling new(big.Int).SetBytes(pk), which removes leading zero bytes
-    priv, _ := underlyingSecp256k1.PrivKeyFromBytes(underlyingSecp256k1.S256(), privKeyBytes[:])
+    priv, _ := underlyingSecp256k1.PrivKeyFromBytes(privKeyBytes[:])
     // this takes the bytes returned by `(big int).Bytes()`, and if the length is less than 32 bytes,
     // pads the bytes from the left with zero bytes. Therefore these two functions composed
     // result in the identity function on privKeyBytes, hence the following equality check
@@ -9,8 +9,8 @@ module.exports = {
     editLinks: true,
     label: 'core',
     algolia: {
-      id: "BH4D9OD16A",
-      key: "59f0e2deb984aa9cdf2b3a5fd24ac501",
+      id: "QQFROLBNZC",
+      key: "f1b68b96fb31d8aa4a54412c44917a26",
       index: "tendermint"
     },
     versions: [
@@ -92,7 +92,7 @@ module.exports = {
       }
     ],
     smallprint:
-      'The development of Tendermint Core was led primarily by All in Bits, Inc. The Tendermint trademark is owned by New Tendermint, LLC.'
+      'The development of Tendermint Core is led primarily by [Interchain GmbH](https://interchain.berlin/). Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit. The Tendermint trademark is owned by Tendermint Inc, the for-profit entity that also maintains this website.',
     links: [
       {
         title: 'Documentation',
@@ -349,8 +349,8 @@ the same results as for the Go version.

 Want to write the counter app in your favorite language?! We'd be happy
 to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!

-TODO link to bounties page.
+See [funding](https://github.com/interchainio/funding) opportunities from the
+[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.

 The `abci-cli` is designed strictly for testing and debugging. In a real
 deployment, the role of sending messages is taken by Tendermint, which
@@ -34,6 +34,9 @@ would be equal to the composite key of `jack.account.number`.
By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.

Tendermint allows for different events within the same height to have
equal attributes.

## Configuration

Operators can configure indexing via the `[tx_index]` section. The `indexer`
@@ -67,6 +70,56 @@ for block and transaction events directly against Tendermint's RPC. However, the
query syntax is limited and so this indexer type might be deprecated or removed
entirely in the future.

**Implementation and data layout**

The kv indexer stores each attribute of an event individually, by creating a composite key
of the *event type*, *attribute key*, *attribute value*, *height* and *event sequence*.

For example, the following two events:

```
Type: "transfer",
Attributes: []abci.EventAttribute{
	{Key: []byte("sender"), Value: []byte("Bob"), Index: true},
	{Key: []byte("recipient"), Value: []byte("Alice"), Index: true},
	{Key: []byte("balance"), Value: []byte("100"), Index: true},
	{Key: []byte("note"), Value: []byte("nothing"), Index: true},
},
```

```
Type: "transfer",
Attributes: []abci.EventAttribute{
	{Key: []byte("sender"), Value: []byte("Tom"), Index: true},
	{Key: []byte("recipient"), Value: []byte("Alice"), Index: true},
	{Key: []byte("balance"), Value: []byte("200"), Index: true},
	{Key: []byte("note"), Value: []byte("nothing"), Index: true},
},
```

will be represented as follows in the store:

```
Key                                value
---- event1 ------
transferSenderBobEndBlock11        1
transferRecipientAliceEndBlock11   1
transferBalance100EndBlock11       1
transferNoteNothingEndBlock11      1
---- event2 ------
transferSenderTomEndBlock12        1
transferRecipientAliceEndBlock12   1
transferBalance200EndBlock12       1
transferNoteNothingEndBlock12      1
```

The key is thus formed of the event type, the attribute key and value, the event the attribute belongs to (`EndBlock` or `BeginBlock`),
the height and the event number. The event number is a local variable kept by the indexer and incremented when a new event is processed.
It is an `int64` variable and has no other semantics besides being used to associate attributes belonging to the same events within a height.
This variable is not atomically incremented, as event indexing is deterministic. **Should this ever change**, the event id generation
will be broken.
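To make the layout concrete, here is a purely illustrative Go sketch of such a composite key; the real kv indexer uses a binary key encoding, so the `compositeKey` helper and its `/` separator below are hypothetical:

```go
package main

import "fmt"

// compositeKey is a hypothetical, human-readable stand-in for the indexer's
// real binary key encoding: event type, attribute key, attribute value,
// originating event (BeginBlock/EndBlock), height, and event sequence.
func compositeKey(eventType, attrKey, attrVal, src string, height, eventSeq int64) string {
	return fmt.Sprintf("%s/%s/%s/%s/%d/%d", eventType, attrKey, attrVal, src, height, eventSeq)
}

func main() {
	// The two "transfer" events above, at height 1, as event sequences 1 and 2.
	fmt.Println(compositeKey("transfer", "sender", "Bob", "EndBlock", 1, 1))
	fmt.Println(compositeKey("transfer", "sender", "Tom", "EndBlock", 1, 2))
}
```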
#### PostgreSQL

The `psql` indexer type allows an operator to enable block and transaction event
@@ -145,6 +198,9 @@ You can query for a paginated set of transaction by their events by calling the
```bash
curl "localhost:26657/tx_search?query=\"message.sender='cosmos1...'\"&prove=true"
```
If the conditions are related to transaction events and the user wants to make sure the
conditions are true within the same events, the `match.event` keyword should be used,
as described [below](#querying_block_events).

Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#/Info/tx_search)
for more information on query syntax and other options.
@@ -168,7 +224,7 @@ a query to `/subscribe` RPC endpoint.
Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#subscribe) for more information
on query syntax and other options.

-## Querying Blocks Events
+## Querying Block Events

You can query for a paginated set of blocks by their events by calling the
`/block_search` RPC endpoint:
@@ -177,5 +233,30 @@ You can query for a paginated set of blocks by their events by calling the
```bash
curl "localhost:26657/block_search?query=\"block.height > 10 AND val_set.num_changed > 0\""
```

## `match_events` keyword

The query returns the height number(s) (or transaction hashes, when querying transactions) that contain events whose attributes match the query conditions.
However, there are two modes of querying the indexers. To demonstrate the two modes, we reuse the two events
where Bob and Tom send money to Alice and query the block indexer. We issue the following query:

```bash
curl "localhost:26657/block_search?query=\"sender=Bob AND balance = 200\""
```

The result will return height 1 even though the attributes matching the conditions in the query
occurred in different events.

If we wish to retrieve only heights where the attributes occurred within the same event,
the query syntax is as follows:

```bash
curl "localhost:26657/block_search?query=\"sender=Bob AND balance = 200\"&match_events=true"
```

By default, `match_events` is set to false.

Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#/Info/block_search)
for more information on query syntax and other options.

**Backwards compatibility**

Up until Tendermint 0.34.25, the event sequence was not stored in the kvstore, and the `match_events` keyword in the RPC query is ignored by older versions. Thus, in a network running mixed Tendermint versions, nodes running older versions will still return blocks (or transactions) whose attributes match within different events at the same height.
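The same query can be issued from Go with only the standard library; this sketch assumes a local node with the RPC listener on the default `localhost:26657` and is not part of the change set:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Build /block_search?query="sender=Bob AND balance = 200"&match_events=true,
	// letting url.Values handle the escaping of the query string.
	params := url.Values{}
	params.Set("query", `"sender=Bob AND balance = 200"`)
	params.Set("match_events", "true")

	resp, err := http.Get("http://localhost:26657/block_search?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON-RPC response with the matching blocks
}
```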
@@ -1,3 +1,4 @@
 #!/bin/bash

-cp -a ../rpc/openapi/ .vuepress/public/rpc/
+mkdir -p .vuepress/public/rpc/
+cp -a ../rpc/openapi/* .vuepress/public/rpc/
@@ -18,40 +18,43 @@ Listen address can be changed in the config file (see

The following metrics are available:

-| **Name** | **Type** | **Tags** | **Description** |
-|---|---|---|---|
-| `consensus_height` | Gauge | | Height of the chain |
-| `consensus_validators` | Gauge | | Number of validators |
-| `consensus_validators_power` | Gauge | | Total voting power of all validators |
-| `consensus_validator_power` | Gauge | | Voting power of the node if in the validator set |
-| `consensus_validator_last_signed_height` | Gauge | | Last height the node signed a block, if the node is a validator |
-| `consensus_validator_missed_blocks` | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
-| `consensus_missing_validators` | Gauge | | Number of validators who did not sign |
-| `consensus_missing_validators_power` | Gauge | | Total voting power of the missing validators |
-| `consensus_byzantine_validators` | Gauge | | Number of validators who tried to double sign |
-| `consensus_byzantine_validators_power` | Gauge | | Total voting power of the byzantine validators |
-| `consensus_block_interval_seconds` | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
-| `consensus_rounds` | Gauge | | Number of rounds |
-| `consensus_num_txs` | Gauge | | Number of transactions |
-| `consensus_total_txs` | Gauge | | Total number of transactions committed |
-| `consensus_block_parts` | Counter | `peer_id` | Number of blockparts transmitted by peer |
-| `consensus_latest_block_height` | Gauge | | /status sync\_info number |
-| `consensus_fast_syncing` | Gauge | | Either 0 (not fast syncing) or 1 (syncing) |
-| `consensus_state_syncing` | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
-| `consensus_block_size_bytes` | Gauge | | Block size in bytes |
-| `p2p_message_send_bytes_total` | Counter | `message_type` | Number of bytes sent to all peers per message type |
-| `p2p_message_receive_bytes_total` | Counter | `message_type` | Number of bytes received from all peers per message type |
-| `p2p_peers` | Gauge | | Number of peers node's connected to |
-| `p2p_peer_receive_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel received from a given peer |
-| `p2p_peer_send_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel sent to a given peer |
-| `p2p_peer_pending_send_bytes` | Gauge | `peer_id` | Number of pending bytes to be sent to a given peer |
-| `p2p_num_txs` | Gauge | `peer_id` | Number of transactions submitted by each peer\_id |
-| `p2p_pending_send_bytes` | Gauge | `peer_id` | Amount of data pending to be sent to peer |
-| `mempool_size` | Gauge | | Number of uncommitted transactions |
-| `mempool_tx_size_bytes` | Histogram | | Transaction sizes in bytes |
-| `mempool_failed_txs` | Counter | | Number of failed transactions |
-| `mempool_recheck_times` | Counter | | Number of transactions rechecked in the mempool |
-| `state_block_processing_time` | Histogram | | Time between BeginBlock and EndBlock in ms |
+| **Name** | **Type** | **Tags** | **Description** |
+|---|---|---|---|
+| consensus\_height | Gauge | | Height of the chain |
+| consensus\_validators | Gauge | | Number of validators |
+| consensus\_validators\_power | Gauge | | Total voting power of all validators |
+| consensus\_validator\_power | Gauge | | Voting power of the node if in the validator set |
+| consensus\_validator\_last\_signed\_height | Gauge | | Last height the node signed a block, if the node is a validator |
+| consensus\_validator\_missed\_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
+| consensus\_missing\_validators | Gauge | | Number of validators who did not sign |
+| consensus\_missing\_validators\_power | Gauge | | Total voting power of the missing validators |
+| consensus\_byzantine\_validators | Gauge | | Number of validators who tried to double sign |
+| consensus\_byzantine\_validators\_power | Gauge | | Total voting power of the byzantine validators |
+| consensus\_block\_interval\_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
+| consensus\_rounds | Gauge | | Number of rounds |
+| consensus\_num\_txs | Gauge | | Number of transactions |
+| consensus\_total\_txs | Gauge | | Total number of transactions committed |
+| consensus\_block\_parts | Counter | peer\_id | Number of blockparts transmitted by peer |
+| consensus\_latest\_block\_height | Gauge | | /status sync\_info number |
+| consensus\_fast\_syncing | Gauge | | Either 0 (not fast syncing) or 1 (syncing) |
+| consensus\_state\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
+| consensus\_block\_size\_bytes | Gauge | | Block size in bytes |
+| consensus\_step\_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
+| consensus\_block\_gossip\_parts\_received | Counter | matches\_current | Number of block parts received by the node |
+| p2p\_message\_send\_bytes\_total | Counter | message\_type | Number of bytes sent to all peers per message type |
+| p2p\_message\_receive\_bytes\_total | Counter | message\_type | Number of bytes received from all peers per message type |
+| p2p\_peers | Gauge | | Number of peers node's connected to |
+| p2p\_peer\_receive\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel received from a given peer |
+| p2p\_peer\_send\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel sent to a given peer |
+| p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer |
+| p2p\_num\_txs | Gauge | peer\_id | Number of transactions submitted by each peer\_id |
+| p2p\_pending\_send\_bytes | Gauge | peer\_id | Amount of data pending to be sent to peer |
+| mempool\_size | Gauge | | Number of uncommitted transactions |
+| mempool\_tx\_size\_bytes | Histogram | | Transaction sizes in bytes |
+| mempool\_failed\_txs | Counter | | Number of failed transactions |
+| mempool\_recheck\_times | Counter | | Number of transactions rechecked in the mempool |
+| state\_block\_processing\_time | Histogram | | Time between BeginBlock and EndBlock in ms |

## Useful queries
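One way to confirm that the new consensus metrics are exported is to scrape the Prometheus endpoint directly. This sketch assumes `prometheus = true` in `config.toml`, the default `prometheus_listen_addr` of `:26660`, and the default `tendermint` metrics namespace; adjust those if your node is configured differently.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://localhost:26660/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	for _, line := range strings.Split(string(body), "\n") {
		// Print only the new step-duration histogram series.
		if strings.HasPrefix(line, "tendermint_consensus_step_duration_seconds") {
			fmt.Println(line)
		}
	}
}
```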
@@ -97,7 +97,7 @@ More Information can be found at these links:

 ### Validator keys

-Protecting a validator's consensus key is the most important factor to take into account when designing your setup. The key that a validator is given upon creation of the node is called a consensus key; it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`).
+Protecting a validator's consensus key is the most important factor to take into account when designing your setup. The key that a validator is given upon creation of the node is called a consensus key; it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io/) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms); it is used extensively in production. You are not limited to this tool: there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), though there is no single recommended HSM.

 Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are widely supported across the security sector and HSMs.
go.mod (55 changed lines)

@@ -7,11 +7,6 @@ require (
	github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d
	github.com/Workiva/go-datastructures v1.0.53
	github.com/adlio/schema v1.3.3
	github.com/btcsuite/btcd v0.22.1
	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
	github.com/bufbuild/buf v1.9.0
	github.com/cosmos/gogoproto v1.4.2
	github.com/creachadair/taskgroup v0.3.2
	github.com/fortytw2/leaktest v1.3.0
	github.com/go-kit/kit v0.12.0
	github.com/go-kit/log v0.2.1
@@ -35,23 +30,34 @@ require (
	github.com/spf13/cobra v1.6.0
	github.com/spf13/viper v1.13.0
	github.com/stretchr/testify v1.8.0
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
)

require (
	github.com/google/uuid v1.3.0
	github.com/tendermint/tm-db v0.6.6
-	golang.org/x/crypto v0.1.0
-	golang.org/x/net v0.1.0
+	golang.org/x/crypto v0.4.0
+	golang.org/x/net v0.4.0
	google.golang.org/grpc v1.50.1
)

require github.com/vektra/mockery/v2 v2.14.0

require (
	github.com/gogo/protobuf v1.3.2
	github.com/informalsystems/tm-load-test v1.0.0
	gonum.org/v1/gonum v0.12.0
)

require (
	github.com/bufbuild/buf v1.9.0
	github.com/creachadair/taskgroup v0.3.2
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
)

require (
	github.com/Masterminds/semver/v3 v3.2.0
	github.com/btcsuite/btcd/btcec/v2 v2.2.1
	github.com/btcsuite/btcd/btcutil v1.1.2
	github.com/go-git/go-git/v5 v5.5.1
	github.com/vektra/mockery/v2 v2.14.0
	gonum.org/v1/gonum v0.8.2
	google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8
)
@@ -68,6 +74,8 @@ require (
	github.com/Microsoft/go-winio v0.6.0 // indirect
	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
	github.com/OpenPeeDeeP/depguard v1.1.1 // indirect
	github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 // indirect
	github.com/acomagu/bufpipe v1.0.3 // indirect
	github.com/alexkohler/prealloc v1.0.0 // indirect
	github.com/alingse/asasalint v0.0.11 // indirect
	github.com/ashanbrown/forbidigo v1.3.0 // indirect
@@ -86,6 +94,7 @@ require (
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/charithe/durationcheck v0.0.9 // indirect
	github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect
	github.com/cloudflare/circl v1.3.1 // indirect
	github.com/containerd/containerd v1.6.8 // indirect
	github.com/containerd/continuity v0.3.0 // indirect
	github.com/containerd/typeurl v1.0.2 // indirect
@@ -94,6 +103,7 @@ require (
	github.com/curioswitch/go-reassign v0.2.0 // indirect
	github.com/daixiang0/gci v0.8.1 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
	github.com/denis-tingaikin/go-header v0.4.3 // indirect
	github.com/dgraph-io/badger/v2 v2.2007.2 // indirect
	github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect
@@ -103,6 +113,7 @@ require (
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dustin/go-humanize v1.0.0 // indirect
	github.com/emirpasic/gods v1.18.1 // indirect
	github.com/esimonov/ifshort v1.0.4 // indirect
	github.com/ettle/strcase v0.1.1 // indirect
	github.com/fatih/color v1.13.0 // indirect
@@ -112,6 +123,8 @@ require (
	github.com/fzipp/gocyclo v0.6.0 // indirect
	github.com/go-chi/chi/v5 v5.0.7 // indirect
	github.com/go-critic/go-critic v0.6.5 // indirect
	github.com/go-git/gcfg v1.5.0 // indirect
	github.com/go-git/go-billy/v5 v5.3.1 // indirect
	github.com/go-logr/logr v1.2.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-toolsmith/astcast v1.0.0 // indirect
@@ -149,13 +162,16 @@ require (
	github.com/hashicorp/go-version v1.6.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/hexops/gotextdiff v1.0.3 // indirect
	github.com/imdario/mergo v0.3.13 // indirect
	github.com/inconshreveable/mousetrap v1.0.1 // indirect
	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
	github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect
	github.com/jgautheron/goconst v1.5.1 // indirect
	github.com/jingyugao/rowserrcheck v1.1.1 // indirect
	github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
	github.com/jmhodges/levigo v1.0.0 // indirect
	github.com/julz/importas v0.1.0 // indirect
	github.com/kevinburke/ssh_config v1.2.0 // indirect
	github.com/kisielk/errcheck v1.6.2 // indirect
	github.com/kisielk/gotool v1.0.0 // indirect
	github.com/kkHAIKE/contextcheck v1.1.3 // indirect
@@ -197,6 +213,7 @@ require (
	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
	github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
	github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
	github.com/pjbgf/sha1cd v0.2.3 // indirect
	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
	github.com/pkg/profile v1.6.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -217,11 +234,13 @@ require (
	github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect
	github.com/satori/go.uuid v1.2.0 // indirect
	github.com/securego/gosec/v2 v2.13.1 // indirect
	github.com/sergi/go-diff v1.2.0 // indirect
	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
	github.com/sirupsen/logrus v1.9.0 // indirect
	github.com/sivchari/containedctx v1.0.2 // indirect
	github.com/sivchari/nosnakecase v1.7.0 // indirect
	github.com/sivchari/tenv v1.7.0 // indirect
	github.com/skeema/knownhosts v1.1.0 // indirect
	github.com/sonatard/noctx v0.0.1 // indirect
	github.com/sourcegraph/go-diff v0.6.1 // indirect
	github.com/spf13/afero v1.8.2 // indirect
@@ -242,6 +261,7 @@ require (
	github.com/ultraware/funlen v0.0.3 // indirect
	github.com/ultraware/whitespace v0.0.5 // indirect
	github.com/uudashr/gocognit v1.0.6 // indirect
	github.com/xanzy/ssh-agent v0.3.3 // indirect
	github.com/yagipy/maintidx v1.0.0 // indirect
	github.com/yeya24/promlinter v0.2.0 // indirect
	gitlab.com/bosi/decorder v0.2.3 // indirect
@@ -256,14 +276,15 @@ require (
	go.uber.org/zap v1.23.0 // indirect
	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
	golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
-	golang.org/x/mod v0.6.0 // indirect
-	golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
-	golang.org/x/sys v0.1.0 // indirect
-	golang.org/x/term v0.1.0 // indirect
-	golang.org/x/text v0.4.0 // indirect
-	golang.org/x/tools v0.2.0 // indirect
+	golang.org/x/mod v0.7.0 // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/sys v0.3.0 // indirect
+	golang.org/x/term v0.3.0 // indirect
+	golang.org/x/text v0.5.0 // indirect
+	golang.org/x/tools v0.4.0 // indirect
	google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	honnef.co/go/tools v0.3.3 // indirect
go.sum (119 changed lines)
@@ -73,8 +73,11 @@ github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXY
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
@@ -83,6 +86,8 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA=
github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
@@ -90,6 +95,8 @@ github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/
github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig=
github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE=
github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I=
github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg=
@@ -105,6 +112,8 @@ github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pO
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
@@ -114,6 +123,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
|
||||
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc=
|
||||
github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
|
||||
@@ -149,13 +160,23 @@ github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
|
||||
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
|
||||
github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c=
|
||||
github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y=
|
||||
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
|
||||
github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA=
|
||||
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8=
|
||||
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
@@ -172,6 +193,7 @@ github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r4
|
||||
github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4=
|
||||
github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
|
||||
github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
|
||||
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
@@ -200,6 +222,9 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
|
||||
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
|
||||
github.com/cloudflare/circl v1.3.1 h1:4OVCZRL62ijwEwxnF6I7hLwxvIYi3VaZt8TflkqtrtA=
|
||||
github.com/cloudflare/circl v1.3.1/go.mod h1:+CauBF6R70Jqcyl8N2hC8pAXYbWkGIezuSbuGLtRhnw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
@@ -234,8 +259,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU=
|
||||
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
|
||||
github.com/cosmos/gogoproto v1.4.2 h1:UeGRcmFW41l0G0MiefWhkPEVEwvu78SZsHBvI78dAYw=
|
||||
github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
@@ -258,6 +281,10 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
|
||||
github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
|
||||
@@ -287,6 +314,8 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@@ -331,10 +360,20 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV
|
||||
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
|
||||
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
|
||||
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
|
||||
github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8=
|
||||
github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo=
|
||||
github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY=
|
||||
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
|
||||
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
|
||||
github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
|
||||
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
|
||||
github.com/go-git/go-git/v5 v5.5.1 h1:5vtv2TB5PM/gPM+EvsHJ16hJh4uAkdGcKilcwY7FYwo=
|
||||
github.com/go-git/go-git/v5 v5.5.1/go.mod h1:uz5PQ3d0gz7mSgzZhSJToM6ALPaKCdSnl58/Xb5hzr8=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -598,6 +637,7 @@ github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOc
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
|
||||
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
|
||||
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@@ -605,10 +645,13 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/informalsystems/tm-load-test v1.0.0 h1:e1IeUw8701HWCMuOM1vLM/XcpH2Lrb88GNWdFAPDmmA=
|
||||
github.com/informalsystems/tm-load-test v1.0.0/go.mod h1:WVaSKaQdfZK3v0C74EMzn7//+3aeCZF8wkIKBz2/M74=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
|
||||
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a h1:d4+I1YEKVmWZrgkt6jpXBnLgV2ZjO0YxEtLDdfIZfH4=
|
||||
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
|
||||
github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
|
||||
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
|
||||
github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM=
|
||||
@@ -640,6 +683,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
|
||||
github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
|
||||
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
|
||||
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c=
|
||||
@@ -702,6 +747,7 @@ github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vx
|
||||
github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc=
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
|
||||
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
|
||||
github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
|
||||
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
@@ -791,7 +837,6 @@ github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM=
|
||||
github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg=
|
||||
@@ -871,6 +916,8 @@ github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7
|
||||
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pjbgf/sha1cd v0.2.3 h1:uKQP/7QOzNtKYH7UTohZLcjF5/55EnTw0jO/Ru4jZwI=
|
||||
github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -984,6 +1031,9 @@ github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvW
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/securego/gosec/v2 v2.13.1 h1:7mU32qn2dyC81MH9L2kefnQyRMUarfDER3iQyMHcjYM=
|
||||
github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
@@ -1003,6 +1053,8 @@ github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt
|
||||
github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
|
||||
github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE=
|
||||
github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
|
||||
github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
|
||||
github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
|
||||
@@ -1116,6 +1168,8 @@ github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/y
|
||||
github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
@@ -1208,8 +1262,12 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1257,8 +1315,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1314,9 +1372,12 @@ golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1343,8 +1404,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY=
|
||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1432,6 +1493,7 @@ golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1443,14 +1505,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1460,8 +1527,9 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1563,16 +1631,16 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
|
||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||
gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o=
|
||||
gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
@@ -1707,8 +1775,9 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
@@ -1720,6 +1789,7 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -1733,6 +1803,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
@@ -29,8 +29,8 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
|
||||
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")),
|
||||
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")),
|
||||
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()),
|
||||
"tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by"),
|
||||
"block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by"),
|
||||
"tx_search": rpcserver.NewRPCFunc(makeTxSearchFuncMatchEvents(c), "query,prove,page,per_page,order_by,match_events"),
|
||||
"block_search": rpcserver.NewRPCFunc(makeBlockSearchFuncMatchEvents(c), "query,page,per_page,order_by,match_events"),
|
||||
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", rpcserver.Cacheable("height")),
|
||||
"dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""),
|
||||
"consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""),
|
||||
@@ -141,42 +141,52 @@ func makeTxFunc(c *lrpc.Client) rpcTxFunc {
|
||||
}
|
||||
}
|
||||
|
||||
type rpcTxSearchFunc func(
|
||||
type rpcTxSearchFuncMatchEvents func(
|
||||
ctx *rpctypes.Context,
|
||||
query string,
|
||||
prove bool,
|
||||
page, perPage *int,
|
||||
orderBy string,
|
||||
matchEvents bool,
|
||||
) (*ctypes.ResultTxSearch, error)
|
||||
|
||||
func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc {
|
||||
func makeTxSearchFuncMatchEvents(c *lrpc.Client) rpcTxSearchFuncMatchEvents {
|
||||
return func(
|
||||
ctx *rpctypes.Context,
|
||||
query string,
|
||||
prove bool,
|
||||
page, perPage *int,
|
||||
orderBy string,
|
||||
matchEvents bool,
|
||||
) (*ctypes.ResultTxSearch, error) {
|
||||
if matchEvents {
|
||||
query = "match.events = 1 AND " + query
|
||||
}
|
||||
return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy)
|
||||
}
|
||||
}
|
||||
|
||||
type rpcBlockSearchFunc func(
|
||||
type rpcBlockSearchFuncMatchEvents func(
|
||||
ctx *rpctypes.Context,
|
||||
query string,
|
||||
prove bool,
|
||||
page, perPage *int,
|
||||
orderBy string,
|
||||
matchEvents bool,
|
||||
) (*ctypes.ResultBlockSearch, error)
|
||||
|
||||
func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc {
|
||||
func makeBlockSearchFuncMatchEvents(c *lrpc.Client) rpcBlockSearchFuncMatchEvents {
|
||||
return func(
|
||||
ctx *rpctypes.Context,
|
||||
query string,
|
||||
prove bool,
|
||||
page, perPage *int,
|
||||
orderBy string,
|
||||
matchEvents bool,
|
||||
) (*ctypes.ResultBlockSearch, error) {
|
||||
if matchEvents {
|
||||
query = "match.events = 1 AND " + query
|
||||
}
|
||||
return c.BlockSearch(ctx.Context(), query, page, perPage, orderBy)
|
||||
}
|
||||
}
|
||||
|
||||
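Both wrappers above do the same thing when match_events is set: they prepend the match.events keyword to the caller's query before handing it to the underlying search. A minimal, self-contained sketch of that prefixing step (the helper name and the example query are illustrative only, not part of this change set):

package main

import "fmt"

// prefixMatchEvents mirrors the prefixing performed by the wrappers above: when
// matchEvents is true, the query is tagged with the match.events keyword so the
// indexer only returns heights where all conditions hold within a single event.
func prefixMatchEvents(query string, matchEvents bool) string {
	if matchEvents {
		return "match.events = 1 AND " + query
	}
	return query
}

func main() {
	q := "begin_event.foo = 100 AND begin_event.bar = 300"
	fmt.Println(prefixMatchEvents(q, true))
	// Output: match.events = 1 AND begin_event.foo = 100 AND begin_event.bar = 300
}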
@@ -556,7 +556,7 @@ func (c *Client) updateLightClientIfNeededTo(ctx context.Context, height *int64)
|
||||
l, err = c.lc.VerifyLightBlockAtHeight(ctx, *height, time.Now())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update light client to %d: %w", height, err)
|
||||
return nil, fmt.Errorf("failed to update light client to %d: %w", *height, err)
|
||||
}
|
||||
return l, nil
|
||||
}
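The one-line fix above dereferences the height pointer before formatting the error: with %d, Go formats a *int64 as its address rendered as an integer, so the pre-fix message reported a meaningless number instead of the height. A standalone illustration (values are made up):

package main

import "fmt"

func main() {
	height := int64(42)
	p := &height

	// %d on the pointer itself prints its address as a decimal integer,
	// which is what the pre-fix error message contained.
	fmt.Println(fmt.Errorf("failed to update light client to %d: %w", p, fmt.Errorf("boom")))

	// Dereferencing first prints the intended height, 42.
	fmt.Println(fmt.Errorf("failed to update light client to %d: %w", *p, fmt.Errorf("boom")))
}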
@@ -87,7 +87,7 @@ func SendEnvelopeShim(p Peer, e Envelope, lg log.Logger) bool {
|
||||
// Deprecated: Will be removed in v0.37.
|
||||
func TrySendEnvelopeShim(p Peer, e Envelope, lg log.Logger) bool {
|
||||
if es, ok := p.(EnvelopeSender); ok {
|
||||
return es.SendEnvelope(e)
|
||||
return es.TrySendEnvelope(e)
|
||||
}
|
||||
msg := e.Message
|
||||
if w, ok := msg.(Wrapper); ok {
@@ -761,7 +761,7 @@ func (sw *Switch) addOutboundPeerWithConfig(
|
||||
addr *NetAddress,
|
||||
cfg *config.P2PConfig,
|
||||
) error {
|
||||
sw.Logger.Info("Dialing peer", "address", addr)
|
||||
sw.Logger.Debug("Dialing peer", "address", addr)
|
||||
|
||||
// XXX(xla): Remove the leakage of test concerns in implementation.
|
||||
if cfg.TestDialFail {
|
||||
@@ -889,7 +889,7 @@ func (sw *Switch) addPeer(p Peer) error {
|
||||
reactor.AddPeer(p)
|
||||
}
|
||||
|
||||
sw.Logger.Info("Added peer", "peer", p)
|
||||
sw.Logger.Debug("Added peer", "peer", p)
|
||||
|
||||
return nil
|
||||
}
@@ -19,6 +19,8 @@ func TestMain(m *testing.M) {
|
||||
}
|
||||
|
||||
app := kvstore.NewPersistentKVStoreApplication(dir)
|
||||
// If testing block event generation
|
||||
// app.SetGenBlockEvents() needs to be called here
|
||||
node = rpctest.StartTendermint(app)
|
||||
|
||||
code := m.Run()
|
|
||||
require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")
|
||||
}
|
||||
|
||||
// This test is a no-op unless app.SetGenBlockEvents() is called in main_test.go.
// It nevertheless passes, because no events are generated.
|
||||
func TestBlockSearch(t *testing.T) {
|
||||
c := getHTTPClient()
|
||||
|
||||
// first we broadcast a few txs
|
||||
for i := 0; i < 10; i++ {
|
||||
_, _, tx := MakeTxKV()
|
||||
|
||||
_, err := c.BroadcastTxCommit(context.Background(), tx)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, client.WaitForHeight(c, 5, nil))
|
||||
// This cannot exercise match_events because it calls the client's BlockSearch function directly;
// it is the RPC request handler that processes the match_events flag.
|
||||
result, err := c.BlockSearch(context.Background(), "begin_event.foo = 100 AND begin_event.bar = 300", nil, nil, "asc")
|
||||
require.NoError(t, err)
|
||||
blockCount := len(result.Blocks)
|
||||
require.Equal(t, blockCount, 0)
|
||||
|
||||
}
|
||||
func TestTxSearch(t *testing.T) {
|
||||
c := getHTTPClient()
|
||||
|
||||
@@ -527,8 +548,7 @@ func TestTxSearch(t *testing.T) {
|
||||
find := result.Txs[len(result.Txs)-1]
|
||||
anotherTxHash := types.Tx("a different tx").Hash()
|
||||
|
||||
for i, c := range GetClients() {
|
||||
t.Logf("client %d", i)
|
||||
for _, c := range GetClients() {
|
||||
|
||||
// now we query for the tx.
|
||||
result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc")
|
||||
@@ -607,16 +627,17 @@ func TestTxSearch(t *testing.T) {
|
||||
pages = int(math.Ceil(float64(txCount) / float64(perPage)))
|
||||
)
|
||||
|
||||
totalTx := 0
|
||||
for page := 1; page <= pages; page++ {
|
||||
page := page
|
||||
result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc")
|
||||
result, err := c.TxSearch(context.Background(), "tx.height >= 1", true, &page, &perPage, "asc")
|
||||
require.NoError(t, err)
|
||||
if page < pages {
|
||||
require.Len(t, result.Txs, perPage)
|
||||
} else {
|
||||
require.LessOrEqual(t, len(result.Txs), perPage)
|
||||
}
|
||||
require.Equal(t, txCount, result.TotalCount)
|
||||
totalTx = totalTx + len(result.Txs)
|
||||
for _, tx := range result.Txs {
|
||||
require.False(t, seen[tx.Height],
|
||||
"Found duplicate height %v in page %v", tx.Height, page)
|
||||
@@ -626,6 +647,7 @@ func TestTxSearch(t *testing.T) {
|
||||
maxHeight = tx.Height
|
||||
}
|
||||
}
|
||||
require.Equal(t, txCount, totalTx)
|
||||
require.Len(t, seen, txCount)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -160,6 +160,19 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BlockSearchMatchEvents(
|
||||
ctx *rpctypes.Context,
|
||||
query string,
|
||||
pagePtr, perPagePtr *int,
|
||||
orderBy string,
|
||||
matchEvents bool,
|
||||
) (*ctypes.ResultBlockSearch, error) {
|
||||
if matchEvents {
|
||||
query = "match.events = 1 AND " + query
|
||||
}
|
||||
return BlockSearch(ctx, query, pagePtr, perPagePtr, orderBy)
|
||||
}
|
||||
|
||||
// BlockSearch searches for a paginated set of blocks matching BeginBlock and
|
||||
// EndBlock event search criteria.
|
||||
func BlockSearch(
|
||||
@@ -173,7 +186,6 @@ func BlockSearch(
|
||||
if _, ok := env.BlockIndexer.(*blockidxnull.BlockerIndexer); ok {
|
||||
return nil, errors.New("block indexing is disabled")
|
||||
}
|
||||
|
||||
q, err := tmquery.New(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -26,8 +26,8 @@ var Routes = map[string]*rpc.RPCFunc{
|
||||
"commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")),
|
||||
"check_tx": rpc.NewRPCFunc(CheckTx, "tx"),
|
||||
"tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()),
|
||||
"tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"),
|
||||
"block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"),
|
||||
"tx_search": rpc.NewRPCFunc(TxSearchMatchEvents, "query,prove,page,per_page,order_by,match_events"),
|
||||
"block_search": rpc.NewRPCFunc(BlockSearchMatchEvents, "query,page,per_page,order_by,match_events"),
|
||||
"validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")),
|
||||
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""),
|
||||
"consensus_state": rpc.NewRPCFunc(ConsensusState, ""),
|
||||
|
||||
@@ -133,3 +133,23 @@ func TxSearch(
|
||||
|
||||
return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil
|
||||
}
|
||||
|
||||
// TxSearchMatchEvents allows you to query for multiple transaction results and match the
// query attributes to a common event. It returns a
|
||||
// list of transactions (maximum ?per_page entries) and the total count.
|
||||
// More: https://docs.tendermint.com/v0.34/rpc/#/Info/tx_search
|
||||
func TxSearchMatchEvents(
|
||||
ctx *rpctypes.Context,
|
||||
query string,
|
||||
prove bool,
|
||||
pagePtr, perPagePtr *int,
|
||||
orderBy string,
|
||||
matchEvents bool,
|
||||
) (*ctypes.ResultTxSearch, error) {
|
||||
|
||||
if matchEvents {
|
||||
query = "match.events = 1 AND " + query
|
||||
}
|
||||
return TxSearch(ctx, query, prove, pagePtr, perPagePtr, orderBy)
|
||||
|
||||
}
|
||||
|
||||
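Because the new match_events argument is registered in the route map like any other parameter, callers can pass it alongside the existing tx_search arguments. A hedged sketch of querying the URI endpoint with only the standard library (the node address and the query string are placeholders):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:26657/tx_search" // placeholder RPC address

	params := url.Values{}
	params.Set("query", `"tx.height >= 1 AND transfer.sender = 'addr1'"`)
	params.Set("prove", "false")
	params.Set("page", "1")
	params.Set("per_page", "30")
	params.Set("order_by", `"asc"`)
	// New in this change set: require all query attributes to match within the same event.
	params.Set("match_events", "true")

	resp, err := http.Get(base + "?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // raw JSON-RPC response containing txs and total_count
}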
@@ -849,6 +849,41 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ErrorResponse"
|
||||
/genesis_chunked:
|
||||
get:
|
||||
summary: Get Genesis in multiple chunks
|
||||
operationId: genesis_chunked
|
||||
tags:
|
||||
- Info
|
||||
description: |
|
||||
Get the genesis document in multiple chunks to make it easier to iterate
through larger genesis structures. Each chunk is produced by converting
the genesis document to JSON, splitting the resulting payload into
16MB blocks, and Base64-encoding each block.
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
parameters:
|
||||
- in: query
|
||||
name: chunk
|
||||
description: Sequence number of the chunk to download.
|
||||
schema:
|
||||
type: integer
|
||||
default: 0
|
||||
example: 1
|
||||
responses:
|
||||
"200":
|
||||
description: Genesis chunk response.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/GenesisChunkedResponse"
|
||||
"500":
|
||||
description: Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ErrorResponse"
|
||||
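The description above fixes the chunking scheme: the genesis document is serialized to JSON, split into 16MB blocks, and each block is Base64-encoded. A client-side sketch of reassembling the document from this endpoint (the node address is a placeholder; chunk and total are decoded leniently, since the schema further below documents them as integers while nodes may encode them as strings):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

// chunkResult follows the GenesisChunkedResponse schema sketched later in this spec:
// a chunk index, the total number of chunks, and a Base64-encoded data block.
type chunkResult struct {
	Result struct {
		Chunk json.RawMessage `json:"chunk"`
		Total json.RawMessage `json:"total"`
		Data  string          `json:"data"`
	} `json:"result"`
}

// asInt tolerates both integer and quoted-string encodings of the counters.
func asInt(raw json.RawMessage) int {
	n, _ := strconv.Atoi(strings.Trim(string(raw), `"`))
	return n
}

func main() {
	base := "http://localhost:26657" // placeholder node RPC address

	var genesis []byte
	for i, total := 0, 1; i < total; i++ {
		resp, err := http.Get(fmt.Sprintf("%s/genesis_chunked?chunk=%d", base, i))
		if err != nil {
			panic(err)
		}
		var cr chunkResult
		decErr := json.NewDecoder(resp.Body).Decode(&cr)
		resp.Body.Close()
		if decErr != nil {
			panic(decErr)
		}
		total = asInt(cr.Result.Total)

		// Each chunk is a Base64-encoded slice of the JSON genesis document.
		data, err := base64.StdEncoding.DecodeString(cr.Result.Data)
		if err != nil {
			panic(err)
		}
		genesis = append(genesis, data...)
	}
	fmt.Printf("reassembled genesis document: %d bytes\n", len(genesis))
}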
/dump_consensus_state:
|
||||
get:
|
||||
summary: Get consensus state
|
||||
@@ -1029,6 +1064,14 @@ paths:
|
||||
type: string
|
||||
default: "asc"
|
||||
example: "asc"
|
||||
- in: query
|
||||
name: match_events
|
||||
description: Require the query attributes to match within a single event, in addition to the transaction height and hash
|
||||
required: false
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
example: true
|
||||
tags:
|
||||
- Info
|
||||
responses:
|
||||
@@ -1084,6 +1127,14 @@ paths:
|
||||
type: string
|
||||
default: "desc"
|
||||
example: "asc"
|
||||
- in: query
|
||||
name: match_events
|
||||
description: Require the query attributes to match within a single event, in addition to the block height
|
||||
required: false
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
example: true
|
||||
tags:
|
||||
- Info
|
||||
responses:
|
||||
@@ -1913,6 +1964,35 @@ components:
|
||||
properties: {}
|
||||
type: object
|
||||
|
||||
GenesisChunkedResponse:
|
||||
type: object
|
||||
required:
|
||||
- "jsonrpc"
|
||||
- "id"
|
||||
- "result"
|
||||
properties:
|
||||
jsonrpc:
|
||||
type: string
|
||||
example: "2.0"
|
||||
id:
|
||||
type: integer
|
||||
example: 0
|
||||
result:
|
||||
required:
|
||||
- "chunk"
|
||||
- "total"
|
||||
- "data"
|
||||
properties:
|
||||
chunk:
|
||||
type: integer
|
||||
example: 0
|
||||
total:
|
||||
type: integer
|
||||
example: 1
|
||||
data:
|
||||
type: string
|
||||
example: "Z2VuZXNpcwo="
|
||||
|
||||
DumpConsensusResponse:
|
||||
type: object
|
||||
required:
@@ -96,7 +96,7 @@ string pairs denoting metadata about what happened during the method's execution
|
||||
`Event` values can be used to index transactions and blocks according to what happened
|
||||
during their execution. Note that the set of events returned for a block from
|
||||
`BeginBlock` and `EndBlock` are merged. In case both methods return the same
|
||||
key, only the value defined in `EndBlock` is used.
|
||||
key and value combination, only the value defined in `EndBlock` is used.
|
||||
|
||||
Each event has a `type` which is meant to categorize the event for a particular
|
||||
`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -24,6 +25,10 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil)
|
||||
// such that matching search criteria returns the respective block height(s).
|
||||
type BlockerIndexer struct {
|
||||
store dbm.DB
|
||||
|
||||
// Add unique event identifier to use when querying
|
||||
// Matching will be done both on height AND eventSeq
|
||||
eventSeq int64
|
||||
}
|
||||
|
||||
func New(store dbm.DB) *BlockerIndexer {
|
||||
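The new eventSeq field gives every indexed event a unique, monotonically increasing sequence number, and setTmpHeights (further down in this change) folds that number into the keys of the candidate-height maps. A simplified, self-contained illustration of why that enables "same event" matching; the key layout below is an assumption for illustration only, not the indexer's real key encoding:

package main

import (
	"fmt"
	"strconv"
)

// candidateKey is a simplified stand-in for the map keys built by setTmpHeights:
// the height combined with the sequence number of the event the attribute came from.
func candidateKey(height, eventSeq int64) string {
	return strconv.FormatInt(height, 10) + "/" + strconv.FormatInt(eventSeq, 10)
}

func main() {
	// Two attributes indexed at the same height but emitted by different events
	// yield different keys, so intersecting the per-condition result sets only
	// keeps heights where both attributes occurred within the same event.
	fooMatches := map[string]bool{candidateKey(5, 11): true}
	barMatches := map[string]bool{candidateKey(5, 12): true}

	matched := false
	for k := range fooMatches {
		if barMatches[k] {
			matched = true
			fmt.Println("same-event match at", k)
		}
	}
	if !matched {
		fmt.Println("no same-event match: the attributes came from different events")
	}
}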
@@ -95,11 +100,46 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64,
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse query conditions: %w", err)
|
||||
}
|
||||
// conditions to skip because they're handled before "everything else"
|
||||
skipIndexes := make([]int, 0)
|
||||
|
||||
var matchEvents bool
|
||||
var matchEventIdx int
|
||||
|
||||
// If the match.events keyword is at the beginning of the query, we only
// return heights where the conditions are true within the same event,
// and we set matchEvents to true.
|
||||
conditions, matchEvents = dedupMatchEvents(conditions)
|
||||
|
||||
if matchEvents {
|
||||
matchEventIdx = 0
|
||||
} else {
|
||||
matchEventIdx = -1
|
||||
}
|
||||
|
||||
if matchEventIdx != -1 {
|
||||
skipIndexes = append(skipIndexes, matchEventIdx)
|
||||
}
|
||||
// If there is an exact height query, return the result immediately
|
||||
// (if it exists).
|
||||
height, ok := lookForHeight(conditions)
|
||||
if ok {
|
||||
var height int64
|
||||
var ok bool
|
||||
var heightIdx int
|
||||
if matchEvents {
|
||||
// If we are not matching events and block.height = 3 occurs more than once, the later value
// overwrites the first one. With match.events this would cause problems, so the height conditions are deduplicated.
|
||||
conditions, height, ok, heightIdx = dedupHeight(conditions)
|
||||
} else {
|
||||
height, ok, heightIdx = lookForHeight(conditions)
|
||||
}
|
||||
|
||||
// If we have additional constraints and want to query per-event
// attributes, we cannot simply return all blocks for a height.
// Instead we remember the height we want to find and forward it to
// match(). If we only have the height constraint and the match.events keyword
// in the query (the second part of the ||), we don't need to query
// per-event conditions and can return all blocks at that height.
|
||||
if ok && (!matchEvents || (matchEvents && len(conditions) == 2)) {
|
||||
ok, err := idx.Has(height)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -114,24 +154,39 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64,
|
||||
|
||||
var heightsInitialized bool
|
||||
filteredHeights := make(map[string][]byte)
|
||||
|
||||
// conditions to skip because they're handled before "everything else"
|
||||
skipIndexes := make([]int, 0)
|
||||
if matchEvents && heightIdx != -1 {
|
||||
skipIndexes = append(skipIndexes, heightIdx)
|
||||
}
|
||||
|
||||
// Extract ranges. If both upper and lower bounds exist, it's better to get
// them in order so as not to iterate over kvs that are not within range.
|
||||
ranges, rangeIndexes := indexer.LookForRanges(conditions)
|
||||
var heightRanges indexer.QueryRange
|
||||
if len(ranges) > 0 {
|
||||
skipIndexes = append(skipIndexes, rangeIndexes...)
|
||||
|
||||
for _, qr := range ranges {
|
||||
// If we have a query range over height and want to still look for
|
||||
// specific event values we do not want to simply return all
|
||||
// blocks in this height range. We remember the height range info
|
||||
// and pass it on to match() to take into account when processing events.
|
||||
if qr.Key == types.BlockHeightKey && matchEvents {
|
||||
heightRanges = qr
|
||||
// If the query contains ranges other than the height, the height range has to be
// taken into account when evaluating the conditions of the other ranges.
// Otherwise we can just return all the blocks within the height range (as there is no
// additional constraint on events).
|
||||
if len(ranges)+1 != 2 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
prefix, err := orderedcode.Append(nil, qr.Key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create prefix key: %w", err)
|
||||
}
|
||||
|
||||
if !heightsInitialized {
|
||||
filteredHeights, err = idx.matchRange(ctx, qr, prefix, filteredHeights, true)
|
||||
filteredHeights, err = idx.matchRange(ctx, qr, prefix, filteredHeights, true, matchEvents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -144,7 +199,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64,
|
||||
break
|
||||
}
|
||||
} else {
|
||||
filteredHeights, err = idx.matchRange(ctx, qr, prefix, filteredHeights, false)
|
||||
filteredHeights, err = idx.matchRange(ctx, qr, prefix, filteredHeights, false, matchEvents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -159,12 +214,13 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64,
|
||||
}
|
||||
|
||||
startKey, err := orderedcode.Append(nil, c.CompositeKey, fmt.Sprintf("%v", c.Operand))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !heightsInitialized {
|
||||
filteredHeights, err = idx.match(ctx, c, startKey, filteredHeights, true)
|
||||
filteredHeights, err = idx.match(ctx, c, startKey, filteredHeights, true, matchEvents, height, heightRanges)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -177,7 +233,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64,
|
||||
break
|
||||
}
|
||||
} else {
|
||||
filteredHeights, err = idx.match(ctx, c, startKey, filteredHeights, false)
|
||||
filteredHeights, err = idx.match(ctx, c, startKey, filteredHeights, false, matchEvents, height, heightRanges)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -222,6 +278,7 @@ func (idx *BlockerIndexer) matchRange(
|
||||
startKey []byte,
|
||||
filteredHeights map[string][]byte,
|
||||
firstRun bool,
|
||||
matchEvents bool,
|
||||
) (map[string][]byte, error) {
|
||||
|
||||
// A previous match was attempted but resulted in no matches, so we return
|
||||
@@ -231,8 +288,6 @@ func (idx *BlockerIndexer) matchRange(
|
||||
}
|
||||
|
||||
tmpHeights := make(map[string][]byte)
|
||||
lowerBound := qr.LowerBoundValue()
|
||||
upperBound := qr.UpperBoundValue()
|
||||
|
||||
it, err := dbm.IteratePrefix(idx.store, startKey)
|
||||
if err != nil {
|
||||
@@ -262,18 +317,8 @@ LOOP:
|
||||
if err != nil {
|
||||
continue LOOP
|
||||
}
|
||||
|
||||
include := true
|
||||
if lowerBound != nil && v < lowerBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
if upperBound != nil && v > upperBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
if include {
|
||||
tmpHeights[string(it.Value())] = it.Value()
|
||||
if checkBounds(qr, v) {
|
||||
idx.setTmpHeights(tmpHeights, it, matchEvents)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -302,8 +347,12 @@ LOOP:
|
||||
|
||||
// Remove/reduce matches in filteredHashes that were not found in this
|
||||
// match (tmpHashes).
|
||||
for k := range filteredHeights {
|
||||
if tmpHeights[k] == nil {
|
||||
for k, v := range filteredHeights {
|
||||
tmpHeight := tmpHeights[k]
|
||||
|
||||
// Check whether in this iteration we have not found an overlapping height (tmpHeight == nil),
// or whether the events in which the attributes occurred do not match (second part of the condition).
|
||||
if tmpHeight == nil || !bytes.Equal(tmpHeight, v) {
|
||||
delete(filteredHeights, k)
|
||||
|
||||
select {
|
||||
@@ -318,6 +367,33 @@ LOOP:
|
||||
return filteredHeights, nil
|
||||
}
|
||||
|
||||
func (idx *BlockerIndexer) setTmpHeights(tmpHeights map[string][]byte, it dbm.Iterator, matchEvents bool) {
|
||||
// If we only keep heights where the attributes occur within the same event, store the event
// sequence in the result map key as well.
|
||||
if matchEvents {
|
||||
eventSeq, _ := parseEventSeqFromEventKey(it.Key())
|
||||
retVal := it.Value()
|
||||
tmpHeights[string(retVal)+strconv.FormatInt(eventSeq, 10)] = it.Value()
|
||||
} else {
|
||||
tmpHeights[string(it.Value())] = it.Value()
|
||||
}
|
||||
}
|
||||
|
||||
func checkBounds(ranges indexer.QueryRange, v int64) bool {
|
||||
include := true
|
||||
lowerBound := ranges.LowerBoundValue()
|
||||
upperBound := ranges.UpperBoundValue()
|
||||
if lowerBound != nil && v < lowerBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
if upperBound != nil && v > upperBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
return include
|
||||
}
|
||||
|
||||
// match returns all matching heights that meet a given query condition and start
|
||||
// key. An already filtered result (filteredHeights) is provided such that any
|
||||
// non-intersecting matches are removed.
|
||||
@@ -330,6 +406,9 @@ func (idx *BlockerIndexer) match(
|
||||
startKeyBz []byte,
|
||||
filteredHeights map[string][]byte,
|
||||
firstRun bool,
|
||||
matchEvents bool,
|
||||
height int64,
|
||||
heightRanges indexer.QueryRange,
|
||||
) (map[string][]byte, error) {
|
||||
|
||||
// A previous match was attempted but resulted in no matches, so we return
|
||||
@@ -349,7 +428,23 @@ func (idx *BlockerIndexer) match(
|
||||
defer it.Close()
|
||||
|
||||
for ; it.Valid(); it.Next() {
|
||||
tmpHeights[string(it.Value())] = it.Value()
|
||||
if matchEvents {
|
||||
|
||||
if heightRanges.Key != "" {
|
||||
eventHeight, err := parseHeightFromEventKey(it.Key())
|
||||
if err != nil || !checkBounds(heightRanges, eventHeight) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if height != 0 {
|
||||
eventHeight, _ := parseHeightFromEventKey(it.Key())
|
||||
if eventHeight != height {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
idx.setTmpHeights(tmpHeights, it, matchEvents)
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
break
|
||||
@@ -373,7 +468,7 @@ func (idx *BlockerIndexer) match(
|
||||
defer it.Close()
|
||||
|
||||
for ; it.Valid(); it.Next() {
|
||||
tmpHeights[string(it.Value())] = it.Value()
|
||||
idx.setTmpHeights(tmpHeights, it, matchEvents)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -406,7 +501,7 @@ func (idx *BlockerIndexer) match(
|
||||
}
|
||||
|
||||
if strings.Contains(eventValue, c.Operand.(string)) {
|
||||
tmpHeights[string(it.Value())] = it.Value()
|
||||
idx.setTmpHeights(tmpHeights, it, matchEvents)
|
||||
}
|
||||
|
||||
select {
|
||||
@@ -437,8 +532,9 @@ func (idx *BlockerIndexer) match(
|
||||
|
||||
// Remove/reduce matches in filteredHeights that were not found in this
|
||||
// match (tmpHeights).
|
||||
for k := range filteredHeights {
|
||||
if tmpHeights[k] == nil {
|
||||
for k, v := range filteredHeights {
|
||||
tmpHeight := tmpHeights[k]
|
||||
if tmpHeight == nil || !bytes.Equal(tmpHeight, v) {
|
||||
delete(filteredHeights, k)
|
||||
|
||||
select {
|
||||
@@ -457,6 +553,7 @@ func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ
|
||||
heightBz := int64ToBytes(height)
|
||||
|
||||
for _, event := range events {
|
||||
idx.eventSeq = idx.eventSeq + 1
|
||||
// only index events with a non-empty type
|
||||
if len(event.Type) == 0 {
|
||||
continue
|
||||
@@ -474,7 +571,7 @@ func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ
|
||||
}
|
||||
|
||||
if attr.GetIndex() {
|
||||
key, err := eventKey(compositeKey, typ, string(attr.Value), height)
|
||||
key, err := eventKey(compositeKey, typ, string(attr.Value), height, idx.eventSeq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create block index key: %w", err)
|
||||
}
|
||||
|
||||
@@ -140,3 +140,141 @@ func TestBlockIndexer(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockIndexerMulti(t *testing.T) {
|
||||
store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events"))
|
||||
indexer := blockidxkv.New(store)
|
||||
|
||||
require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{
|
||||
Header: types.Header{Height: 1},
|
||||
ResultBeginBlock: abci.ResponseBeginBlock{
|
||||
Events: []abci.Event{},
|
||||
},
|
||||
ResultEndBlock: abci.ResponseEndBlock{
|
||||
Events: []abci.Event{
|
||||
{
|
||||
Type: "end_event",
|
||||
Attributes: []abci.EventAttribute{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("100"),
|
||||
Index: true,
|
||||
},
|
||||
{
|
||||
Key: []byte("bar"),
|
||||
Value: []byte("200"),
|
||||
Index: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "end_event",
|
||||
Attributes: []abci.EventAttribute{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("300"),
|
||||
Index: true,
|
||||
},
|
||||
{
|
||||
Key: []byte("bar"),
|
||||
Value: []byte("400"),
|
||||
Index: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
|
||||
require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{
|
||||
Header: types.Header{Height: 2},
|
||||
ResultBeginBlock: abci.ResponseBeginBlock{
|
||||
Events: []abci.Event{},
|
||||
},
|
||||
ResultEndBlock: abci.ResponseEndBlock{
|
||||
Events: []abci.Event{
|
||||
{
|
||||
Type: "end_event",
|
||||
Attributes: []abci.EventAttribute{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("100"),
|
||||
Index: true,
|
||||
},
|
||||
{
|
||||
Key: []byte("bar"),
|
||||
Value: []byte("200"),
|
||||
Index: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "end_event",
|
||||
Attributes: []abci.EventAttribute{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("300"),
|
||||
Index: true,
|
||||
},
|
||||
{
|
||||
Key: []byte("bar"),
|
||||
Value: []byte("400"),
|
||||
Index: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
|
||||
testCases := map[string]struct {
|
||||
q *query.Query
|
||||
results []int64
|
||||
}{
|
||||
"query return all events from a height - exact": {
|
||||
q: query.MustParse("match.events = 1 AND block.height = 1"),
|
||||
results: []int64{1},
|
||||
},
|
||||
"query return all events from a height - exact (deduplicate height)": {
|
||||
q: query.MustParse("match.events = 1 AND block.height = 1 AND block.height = 2"),
|
||||
results: []int64{1},
|
||||
},
|
||||
"query return all events from a height - range": {
|
||||
q: query.MustParse("match.events = 1 AND block.height < 2 AND block.height > 0 AND block.height > 0"),
|
||||
results: []int64{1},
|
||||
},
|
||||
"query matches fields from same event": {
|
||||
q: query.MustParse("match.events = 1 AND end_event.bar < 300 AND end_event.foo = 100 AND block.height > 0 AND block.height <= 2"),
|
||||
results: []int64{1, 2},
|
||||
},
|
||||
"query matches fields from multiple events": {
|
||||
q: query.MustParse("match.events = 1 AND end_event.foo = 100 AND end_event.bar = 400 AND block.height = 2"),
|
||||
results: []int64{},
|
||||
},
|
||||
"deduplication test - match.events only at beginning": {
|
||||
q: query.MustParse("end_event.foo = 100 AND end_event.bar = 400 AND block.height = 2 AND match.events = 1"),
|
||||
results: []int64{2},
|
||||
},
|
||||
"deduplication test - match.events multiple": {
|
||||
q: query.MustParse("match.events = 1 AND end_event.foo = 100 AND end_event.bar = 400 AND block.height = 2 AND match.events = 1"),
|
||||
results: []int64{},
|
||||
},
|
||||
"query matches fields from multiple events allowed": {
|
||||
q: query.MustParse("end_event.foo = 100 AND end_event.bar = 400"),
|
||||
results: []int64{1, 2},
|
||||
},
|
||||
"query matches fields from all events whose attribute is within range": {
|
||||
q: query.MustParse("match.events = 1 AND end_event.foo < 300 AND block.height = 2"),
|
||||
results: []int64{1, 2},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(name, func(t *testing.T) {
|
||||
results, err := indexer.Search(context.Background(), tc.q)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.results, results)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,13 +40,14 @@ func heightKey(height int64) ([]byte, error) {
)
}

func eventKey(compositeKey, typ, eventValue string, height int64) ([]byte, error) {
func eventKey(compositeKey, typ, eventValue string, height int64, eventSeq int64) ([]byte, error) {
return orderedcode.Append(
nil,
compositeKey,
eventValue,
height,
typ,
eventSeq,
)
}

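The only change to the block index key layout is the event sequence appended after the event type. A minimal, self-contained sketch of how such a key round-trips through `orderedcode` (the attribute name and values below are made up for illustration; only the field order mirrors the change above):

```go
package main

import (
	"fmt"

	"github.com/google/orderedcode"
)

func main() {
	// Append the same fields the block indexer now writes:
	// compositeKey, eventValue, height, type, and the new eventSeq.
	key, err := orderedcode.Append(nil, "end_event.foo", "100", int64(1), "end", int64(2))
	if err != nil {
		panic(err)
	}

	// Older keys stop after the type; newer keys carry the sequence as a
	// trailing value, so it is read from the remainder if one is present.
	var (
		compositeKey, eventValue, typ string
		height, eventSeq              int64
	)
	remaining, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ)
	if err != nil {
		panic(err)
	}
	if len(remaining) != 0 {
		if _, err := orderedcode.Parse(remaining, &eventSeq); err != nil {
			panic(err)
		}
	}
	fmt.Println(compositeKey, eventValue, height, typ, eventSeq) // end_event.foo 100 1 end 2
}
```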
@@ -74,24 +75,97 @@ func parseValueFromEventKey(key []byte) (string, error) {
|
||||
height int64
|
||||
)
|
||||
|
||||
remaining, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ)
|
||||
_, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse event key: %w", err)
|
||||
}
|
||||
|
||||
if len(remaining) != 0 {
|
||||
return "", fmt.Errorf("unexpected remainder in key: %s", remaining)
|
||||
}
|
||||
|
||||
return eventValue, nil
|
||||
}
|
||||
|
||||
func lookForHeight(conditions []query.Condition) (int64, bool) {
|
||||
for _, c := range conditions {
|
||||
if c.CompositeKey == types.BlockHeightKey && c.Op == query.OpEqual {
|
||||
return c.Operand.(int64), true
|
||||
func parseHeightFromEventKey(key []byte) (int64, error) {
|
||||
var (
|
||||
compositeKey, typ, eventValue string
|
||||
height int64
|
||||
)
|
||||
|
||||
_, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to parse event key: %w", err)
|
||||
}
|
||||
|
||||
return height, nil
|
||||
}
|
||||
|
||||
func parseEventSeqFromEventKey(key []byte) (int64, error) {
|
||||
var (
|
||||
compositeKey, typ, eventValue string
|
||||
height int64
|
||||
eventSeq int64
|
||||
)
|
||||
|
||||
remaining, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse event key: %w", err)
|
||||
}
|
||||
|
||||
// This is done to support previous versions that did not have event sequence in their key
|
||||
if len(remaining) != 0 {
|
||||
remaining, err = orderedcode.Parse(remaining, &eventSeq)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse event key: %w", err)
|
||||
}
|
||||
if len(remaining) != 0 {
|
||||
return 0, fmt.Errorf("unexpected remainder in key: %s", remaining)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, false
|
||||
return eventSeq, nil
|
||||
}
|
||||
|
||||
func lookForHeight(conditions []query.Condition) (int64, bool, int) {
|
||||
for i, c := range conditions {
|
||||
if c.CompositeKey == types.BlockHeightKey && c.Op == query.OpEqual {
|
||||
return c.Operand.(int64), true, i
|
||||
}
|
||||
}
|
||||
|
||||
return 0, false, -1
|
||||
}
|
||||
|
||||
func dedupHeight(conditions []query.Condition) (dedupConditions []query.Condition, height int64, found bool, idx int) {
|
||||
idx = -1
|
||||
for i, c := range conditions {
|
||||
if c.CompositeKey == types.BlockHeightKey && c.Op == query.OpEqual {
|
||||
if found {
|
||||
continue
|
||||
} else {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
height = c.Operand.(int64)
|
||||
found = true
|
||||
idx = i
|
||||
}
|
||||
} else {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) {
|
||||
var dedupConditions []query.Condition
|
||||
matchEvents := false
|
||||
for i, c := range conditions {
|
||||
if c.CompositeKey == types.MatchEventKey {
|
||||
// Match events should be added only via RPC as the very first query condition
|
||||
if i == 0 {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
matchEvents = true
|
||||
}
|
||||
} else {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
}
|
||||
|
||||
}
|
||||
return dedupConditions, matchEvents
|
||||
}
|
||||
|
||||
@@ -27,6 +27,8 @@ var _ txindex.TxIndexer = (*TxIndex)(nil)
|
||||
// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB).
|
||||
type TxIndex struct {
|
||||
store dbm.DB
|
||||
// Number the events in the event list
|
||||
eventSeq int64
|
||||
}
|
||||
|
||||
// NewTxIndex creates new KV indexer.
|
||||
@@ -152,6 +154,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error {
|
||||
|
||||
func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Batch) error {
|
||||
for _, event := range result.Result.Events {
|
||||
txi.eventSeq = txi.eventSeq + 1
|
||||
// only index events with a non-empty type
|
||||
if len(event.Type) == 0 {
|
||||
continue
|
||||
@@ -165,7 +168,7 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Ba
|
||||
// index if `index: true` is set
|
||||
compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key))
|
||||
if attr.GetIndex() {
|
||||
err := store.Set(keyForEvent(compositeTag, attr.Value, result), hash)
|
||||
err := store.Set(keyForEvent(compositeTag, attr.Value, result, txi.eventSeq), hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -220,19 +223,46 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul
|
||||
}
|
||||
}
|
||||
|
||||
var matchEvents bool
|
||||
var matchEventIdx int
|
||||
|
||||
// If the match.events keyword is at the beginning of the query, we will only
|
||||
// return heights where the conditions are true within the same event
|
||||
// and set the matchEvents to true
|
||||
conditions, matchEvents = dedupMatchEvents(conditions)
|
||||
|
||||
if matchEvents {
|
||||
matchEventIdx = 0
|
||||
} else {
|
||||
matchEventIdx = -1
|
||||
}
|
||||
|
||||
// conditions to skip because they're handled before "everything else"
skipIndexes := make([]int, 0)

if matchEventIdx != -1 {
skipIndexes = append(skipIndexes, matchEventIdx)
}
// extract ranges
// if both upper and lower bounds exist, it's better to get them in order not
// to iterate over kvs that are not within range.
ranges, rangeIndexes := indexer.LookForRanges(conditions)
var heightRanges indexer.QueryRange
if len(ranges) > 0 {
skipIndexes = append(skipIndexes, rangeIndexes...)

for _, qr := range ranges {

// If we have a query range over height and still want to look for
// specific event values, we do not want to simply return all
// transactions in this height range. We remember the height range info
// and pass it on to match() to take into account when processing events.
if qr.Key == types.TxHeightKey && matchEvents {
heightRanges = qr
continue
}
if !hashesInitialized {
|
||||
filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, true)
|
||||
filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, true, matchEvents)
|
||||
hashesInitialized = true
|
||||
|
||||
// Ignore any remaining conditions if the first condition resulted
|
||||
@@ -241,13 +271,24 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul
|
||||
break
|
||||
}
|
||||
} else {
|
||||
filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, false)
|
||||
filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, false, matchEvents)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if there is a height condition ("tx.height=3"), extract it
|
||||
height := lookForHeight(conditions)
|
||||
var height int64
|
||||
var heightIdx int
|
||||
if matchEvents {
|
||||
// If we are not matching events and tx.height = 3 occurs more than once, the later value will
|
||||
// overwrite the first one. For match.events it will create problems.
|
||||
conditions, height, heightIdx = dedupHeight(conditions)
|
||||
} else {
|
||||
height, heightIdx = lookForHeight(conditions)
|
||||
}
|
||||
if matchEvents && (len(conditions) != 2) {
|
||||
skipIndexes = append(skipIndexes, heightIdx)
|
||||
}
|
||||
|
||||
// for all other conditions
|
||||
for i, c := range conditions {
|
||||
@@ -256,7 +297,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul
|
||||
}
|
||||
|
||||
if !hashesInitialized {
|
||||
filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, true)
|
||||
filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, true, matchEvents, height, heightRanges)
|
||||
hashesInitialized = true
|
||||
|
||||
// Ignore any remaining conditions if the first condition resulted
|
||||
@@ -265,7 +306,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul
|
||||
break
|
||||
}
|
||||
} else {
|
||||
filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, false)
|
||||
filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, false, matchEvents, height, heightRanges)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -299,13 +340,21 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error)
|
||||
}
|
||||
|
||||
// lookForHeight returns a height if there is an "height=X" condition.
|
||||
func lookForHeight(conditions []query.Condition) (height int64) {
|
||||
for _, c := range conditions {
|
||||
func lookForHeight(conditions []query.Condition) (height int64, heightIdx int) {
|
||||
for i, c := range conditions {
|
||||
if c.CompositeKey == types.TxHeightKey && c.Op == query.OpEqual {
|
||||
return c.Operand.(int64)
|
||||
return c.Operand.(int64), i
|
||||
}
|
||||
}
|
||||
return 0
|
||||
return 0, -1
|
||||
}
|
||||
func (txi *TxIndex) setTmpHashes(tmpHeights map[string][]byte, it dbm.Iterator, matchEvents bool) {
|
||||
if matchEvents {
|
||||
eventSeq := extractEventSeqFromKey(it.Key())
|
||||
tmpHeights[string(it.Value())+eventSeq] = it.Value()
|
||||
} else {
|
||||
tmpHeights[string(it.Value())] = it.Value()
|
||||
}
|
||||
}
|
||||
|
||||
// match returns all matching txs by hash that meet a given condition and start
|
||||
@@ -319,6 +368,9 @@ func (txi *TxIndex) match(
|
||||
startKeyBz []byte,
|
||||
filteredHashes map[string][]byte,
|
||||
firstRun bool,
|
||||
matchEvents bool,
|
||||
height int64,
|
||||
heightRanges indexer.QueryRange,
|
||||
) map[string][]byte {
|
||||
// A previous match was attempted but resulted in no matches, so we return
|
||||
// no matches (assuming AND operand).
|
||||
@@ -337,8 +389,24 @@ func (txi *TxIndex) match(
|
||||
defer it.Close()
|
||||
|
||||
for ; it.Valid(); it.Next() {
|
||||
tmpHashes[string(it.Value())] = it.Value()
|
||||
|
||||
// If we have a height range in a query, we need only transactions
|
||||
// for this height
|
||||
if heightRanges.Key != "" {
|
||||
eventHeight, err := extractHeightFromKey(it.Key())
|
||||
if err != nil || !checkBounds(heightRanges, eventHeight) {
|
||||
continue
|
||||
}
|
||||
} else if height != 0 {
|
||||
// If we have a particular height in the query, return only transactions
|
||||
// matching this height.
|
||||
eventHeight, err := extractHeightFromKey(it.Key())
|
||||
if eventHeight != height || err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
txi.setTmpHashes(tmpHashes, it, matchEvents)
|
||||
// Potentially exit early.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -360,7 +428,7 @@ func (txi *TxIndex) match(
|
||||
defer it.Close()
|
||||
|
||||
for ; it.Valid(); it.Next() {
|
||||
tmpHashes[string(it.Value())] = it.Value()
|
||||
txi.setTmpHashes(tmpHashes, it, matchEvents)
|
||||
|
||||
// Potentially exit early.
|
||||
select {
|
||||
@@ -389,7 +457,7 @@ func (txi *TxIndex) match(
|
||||
}
|
||||
|
||||
if strings.Contains(extractValueFromKey(it.Key()), c.Operand.(string)) {
|
||||
tmpHashes[string(it.Value())] = it.Value()
|
||||
txi.setTmpHashes(tmpHashes, it, matchEvents)
|
||||
}
|
||||
|
||||
// Potentially exit early.
|
||||
@@ -419,8 +487,9 @@ func (txi *TxIndex) match(
|
||||
|
||||
// Remove/reduce matches in filteredHashes that were not found in this
|
||||
// match (tmpHashes).
|
||||
for k := range filteredHashes {
|
||||
if tmpHashes[k] == nil {
|
||||
for k, v := range filteredHashes {
|
||||
tmpHash := tmpHashes[k]
|
||||
if tmpHash == nil || !bytes.Equal(tmpHash, v) {
|
||||
delete(filteredHashes, k)
|
||||
|
||||
// Potentially exit early.
|
||||
@@ -446,6 +515,7 @@ func (txi *TxIndex) matchRange(
|
||||
startKey []byte,
|
||||
filteredHashes map[string][]byte,
|
||||
firstRun bool,
|
||||
matchEvents bool,
|
||||
) map[string][]byte {
|
||||
// A previous match was attempted but resulted in no matches, so we return
|
||||
// no matches (assuming AND operand).
|
||||
@@ -454,8 +524,6 @@ func (txi *TxIndex) matchRange(
|
||||
}
|
||||
|
||||
tmpHashes := make(map[string][]byte)
|
||||
lowerBound := qr.LowerBoundValue()
|
||||
upperBound := qr.UpperBoundValue()
|
||||
|
||||
it, err := dbm.IteratePrefix(txi.store, startKey)
|
||||
if err != nil {
|
||||
@@ -475,17 +543,8 @@ LOOP:
|
||||
continue LOOP
|
||||
}
|
||||
|
||||
include := true
|
||||
if lowerBound != nil && v < lowerBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
if upperBound != nil && v > upperBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
if include {
|
||||
tmpHashes[string(it.Value())] = it.Value()
|
||||
if checkBounds(qr, v) {
|
||||
txi.setTmpHashes(tmpHashes, it, matchEvents)
|
||||
}
|
||||
|
||||
// XXX: passing time in a ABCI Events is not yet implemented
|
||||
@@ -520,8 +579,9 @@ LOOP:
|
||||
|
||||
// Remove/reduce matches in filteredHashes that were not found in this
|
||||
// match (tmpHashes).
|
||||
for k := range filteredHashes {
|
||||
if tmpHashes[k] == nil {
|
||||
for k, v := range filteredHashes {
|
||||
tmpHash := tmpHashes[k]
|
||||
if tmpHash == nil || !bytes.Equal(tmpHashes[k], v) {
|
||||
delete(filteredHashes, k)
|
||||
|
||||
// Potentially exit early.
|
||||
@@ -539,29 +599,49 @@ LOOP:
|
||||
// Keys

func isTagKey(key []byte) bool {
return strings.Count(string(key), tagKeySeparator) == 3
// This should always be 4 if data is indexed together with event sequences.
// The check for 3 was added to allow data indexed before (w/o the event number)
// to be retrieved.
numTags := strings.Count(string(key), tagKeySeparator)
return numTags == 4 || numTags == 3
}

func extractHeightFromKey(key []byte) (int64, error) {
parts := strings.SplitN(string(key), tagKeySeparator, -1)
return strconv.ParseInt(parts[2], 10, 64)
}
func extractValueFromKey(key []byte) string {
parts := strings.SplitN(string(key), tagKeySeparator, 3)
parts := strings.SplitN(string(key), tagKeySeparator, -1)
return parts[1]
}

func keyForEvent(key string, value []byte, result *abci.TxResult) []byte {
return []byte(fmt.Sprintf("%s/%s/%d/%d",
func extractEventSeqFromKey(key []byte) string {
parts := strings.SplitN(string(key), tagKeySeparator, -1)

if len(parts) == 5 {
return parts[4]
}
return "0"
}
func keyForEvent(key string, value []byte, result *abci.TxResult, eventSeq int64) []byte {
return []byte(fmt.Sprintf("%s/%s/%d/%d/%d",
key,
value,
result.Height,
result.Index,
eventSeq,
))
}

func keyForHeight(result *abci.TxResult) []byte {
return []byte(fmt.Sprintf("%s/%d/%d/%d",
return []byte(fmt.Sprintf("%s/%d/%d/%d/%d",
types.TxHeightKey,
result.Height,
result.Height,
result.Index,
// Added to facilitate having the eventSeq in event keys
// Otherwise queries break expecting 5 entries
0,
))
}

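For illustration, a small, self-contained sketch of the new transaction index key shape (the attribute name, value, and positions below are invented; only the `compositeKey/value/height/index/eventSeq` layout follows the format strings above):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A key in the shape written by the patched keyForEvent: the event
	// sequence is the fifth "/"-separated segment.
	key := fmt.Sprintf("%s/%s/%d/%d/%d", "account.owner", "Ana", 1, 0, 2)
	fmt.Println(key) // account.owner/Ana/1/0/2

	// Reading it back the way extractEventSeqFromKey does: keys written by
	// older versions have only four segments, so fall back to "0".
	parts := strings.SplitN(key, "/", -1)
	eventSeq := "0"
	if len(parts) == 5 {
		eventSeq = parts[4]
	}
	fmt.Println(eventSeq) // 2
}
```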
@@ -579,3 +659,18 @@ func startKey(fields ...interface{}) []byte {
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
func checkBounds(ranges indexer.QueryRange, v int64) bool {
|
||||
include := true
|
||||
lowerBound := ranges.LowerBoundValue()
|
||||
upperBound := ranges.UpperBoundValue()
|
||||
if lowerBound != nil && v < lowerBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
if upperBound != nil && v > upperBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
|
||||
return include
|
||||
}
|
||||
|
||||
@@ -139,6 +139,78 @@ func TestTxSearch(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestTxSearchEventMatch(t *testing.T) {
|
||||
|
||||
indexer := NewTxIndex(db.NewMemDB())
|
||||
|
||||
txResult := txResultWithEvents([]abci.Event{
|
||||
{Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("1"), Index: true}, {Key: []byte("owner"), Value: []byte("Ana"), Index: true}}},
|
||||
{Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("2"), Index: true}, {Key: []byte("owner"), Value: []byte("Ivan"), Index: true}}},
|
||||
{Type: "", Attributes: []abci.EventAttribute{{Key: []byte("not_allowed"), Value: []byte("Vlad"), Index: true}}},
|
||||
})
|
||||
|
||||
err := indexer.Index(txResult)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := map[string]struct {
|
||||
q string
|
||||
resultsLength int
|
||||
}{
|
||||
"Return all events from a height": {
|
||||
q: "match.events = 1 AND tx.height = 1",
|
||||
resultsLength: 1,
|
||||
},
|
||||
"Return all events from a height (deduplicate height)": {
|
||||
q: "match.events = 1 AND tx.height = 1 AND tx.height = 1",
|
||||
resultsLength: 1,
|
||||
},
|
||||
"Match attributes with height range and event": {
|
||||
q: "match.events = 1 AND tx.height < 2 AND tx.height > 0 AND account.number = 1 AND account.owner CONTAINS 'Ana'",
|
||||
resultsLength: 1,
|
||||
},
|
||||
"Match attributes with height range and event - no match": {
|
||||
q: "match.events = 1 AND tx.height < 2 AND tx.height > 0 AND account.number = 2 AND account.owner = 'Ana'",
|
||||
resultsLength: 0,
|
||||
},
|
||||
"Deduplucation test - match events only at the beginning": {
|
||||
q: "tx.height < 2 AND tx.height > 0 AND account.number = 2 AND account.owner = 'Ana' AND match.events = 1",
|
||||
resultsLength: 1,
|
||||
},
|
||||
"Deduplucation test - match events multiple": {
|
||||
q: "match.events = 1 AND tx.height < 2 AND tx.height > 0 AND account.number = 2 AND account.owner = 'Ana' AND match.events = 1",
|
||||
resultsLength: 0,
|
||||
},
|
||||
"Match attributes with event": {
|
||||
q: "account.number = 2 AND account.owner = 'Ana' AND tx.height = 1",
|
||||
resultsLength: 1,
|
||||
},
|
||||
"Match range w/o match events": {
|
||||
q: "account.number < 2 AND account.owner = 'Ivan'",
|
||||
resultsLength: 1,
|
||||
},
|
||||
" Match range with match events": {
|
||||
q: "match.events = 1 AND account.number < 2 AND account.owner = 'Ivan'",
|
||||
resultsLength: 0,
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.q, func(t *testing.T) {
|
||||
results, err := indexer.Search(ctx, query.MustParse(tc.q))
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, results, tc.resultsLength)
|
||||
if tc.resultsLength > 0 {
|
||||
for _, txr := range results {
|
||||
assert.True(t, proto.Equal(txResult, txr))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestTxSearchWithCancelation(t *testing.T) {
|
||||
indexer := NewTxIndex(db.NewMemDB())
|
||||
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/orderedcode"
|
||||
"github.com/tendermint/tendermint/libs/pubsub/query"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// IntInSlice returns true if a is found in the list.
|
||||
func intInSlice(a int, list []int) bool {
|
||||
for _, b := range list {
|
||||
@@ -9,3 +17,60 @@ func intInSlice(a int, list []int) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) {
|
||||
var dedupConditions []query.Condition
|
||||
matchEvents := false
|
||||
for i, c := range conditions {
|
||||
if c.CompositeKey == types.MatchEventKey {
|
||||
// Match events should be added only via RPC as the very first query condition
|
||||
if i == 0 {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
matchEvents = true
|
||||
}
|
||||
} else {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
}
|
||||
|
||||
}
|
||||
return dedupConditions, matchEvents
|
||||
}
|
||||
|
||||
func ParseEventSeqFromEventKey(key []byte) (int64, error) {
|
||||
var (
|
||||
compositeKey, typ, eventValue string
|
||||
height int64
|
||||
eventSeq int64
|
||||
)
|
||||
|
||||
remaining, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ, &eventSeq)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse event key: %w", err)
|
||||
}
|
||||
|
||||
if len(remaining) != 0 {
|
||||
return 0, fmt.Errorf("unexpected remainder in key: %s", remaining)
|
||||
}
|
||||
|
||||
return eventSeq, nil
|
||||
}
|
||||
func dedupHeight(conditions []query.Condition) (dedupConditions []query.Condition, height int64, idx int) {
|
||||
found := false
|
||||
idx = -1
|
||||
height = 0
|
||||
for i, c := range conditions {
|
||||
if c.CompositeKey == types.TxHeightKey && c.Op == query.OpEqual {
|
||||
if found {
|
||||
continue
|
||||
} else {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
height = c.Operand.(int64)
|
||||
found = true
|
||||
idx = i
|
||||
}
|
||||
} else {
|
||||
dedupConditions = append(dedupConditions, c)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
all: docker generator runner
|
||||
|
||||
docker:
|
||||
docker build --tag tendermint/e2e-node -f docker/Dockerfile ../..
|
||||
docker build --tag tendermint/e2e-node --tag tendermint/e2e-node:local-version -f docker/Dockerfile ../..
|
||||
|
||||
# We need to build support for database backends into the app in
|
||||
# order to build a binary with a Tendermint node in it (for built-in
|
||||
|
||||
@@ -30,6 +30,35 @@ Multiple testnets can be run with the `run-multiple.sh` script:
|
||||
./run-multiple.sh networks/generated/gen-group3-*.toml
|
||||
```
|
||||
|
||||
Testnets running different versions of Tendermint can be generated by the
generator. For example:

```sh
# Generate testnets randomly choosing between v0.34.21 (making up 1/3rd of the
# network) and v0.34.22 (making up 2/3rds of the network).
./build/generator -m "v0.34.21:1,v0.34.22:2" -d networks/generated/

# "local" refers to the current local code. The E2E node built from the local
# code will be run on 2/3rds of the network, whereas the v0.34.23 E2E node will
# be run on the remaining 1/3rd.
./build/generator -m "v0.34.23:1,local:2" -d networks/generated/

# Using "latest" will cause the generator to auto-detect the latest
# non-pre-release version tag in the current Git repository that is closest to
# the Tendermint version in the current local code (as specified in
# ../../version/version.go).
#
# In the example below, if the local version.TMCoreSemVer value is "v0.34.24",
# for example, and the latest official release is v0.34.23, then 1/3rd of the
# network will run v0.34.23 and the remaining 2/3rds will run the E2E node built
# from the local code.
./build/generator -m "latest:1,local:2" -d networks/generated/
```

**NB**: The corresponding Docker images for the relevant versions of the E2E
node (the `tendermint/e2e-node` image) must be available on the local machine,
or via [Docker Hub](https://hub.docker.com/r/tendermint/e2e-node).

## Test Stages

The test runner has the following stages, which can also be executed explicitly by running `./build/runner -f <manifest> <stage>`:
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/plumbing/object"
|
||||
e2e "github.com/tendermint/tendermint/test/e2e/pkg"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -22,7 +27,9 @@ var (
|
||||
},
|
||||
"validators": {"genesis", "initchain"},
|
||||
}
|
||||
|
||||
nodeVersions = weightedChoice{
|
||||
"": 2,
|
||||
}
|
||||
// The following specify randomly chosen values for testnet nodes.
|
||||
nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"}
|
||||
ipv6 = uniformChoice{false, true}
|
||||
@@ -51,11 +58,44 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
type generateConfig struct {
|
||||
randSource *rand.Rand
|
||||
outputDir string
|
||||
multiVersion string
|
||||
}
|
||||
|
||||
// Generate generates random testnets using the given RNG.
|
||||
func Generate(r *rand.Rand) ([]e2e.Manifest, error) {
|
||||
func Generate(cfg *generateConfig) ([]e2e.Manifest, error) {
|
||||
if cfg.multiVersion != "" {
|
||||
var err error
|
||||
nodeVersions, err = parseWeightedVersions(cfg.multiVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := nodeVersions["local"]; ok {
|
||||
nodeVersions[""] = nodeVersions["local"]
|
||||
delete(nodeVersions, "local")
|
||||
}
|
||||
if _, ok := nodeVersions["latest"]; ok {
|
||||
latestVersion, err := gitRepoLatestReleaseVersion(cfg.outputDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeVersions[latestVersion] = nodeVersions["latest"]
|
||||
delete(nodeVersions, "latest")
|
||||
}
|
||||
}
|
||||
fmt.Println("Generating testnet with weighted versions:")
|
||||
for ver, wt := range nodeVersions {
|
||||
if ver == "" {
|
||||
fmt.Printf("- local: %d\n", wt)
|
||||
} else {
|
||||
fmt.Printf("- %s: %d\n", ver, wt)
|
||||
}
|
||||
}
|
||||
manifests := []e2e.Manifest{}
|
||||
for _, opt := range combinations(testnetCombinations) {
|
||||
manifest, err := generateTestnet(r, opt)
|
||||
manifest, err := generateTestnet(cfg.randSource, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -206,6 +246,7 @@ func generateNode(
|
||||
r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool,
|
||||
) *e2e.ManifestNode {
|
||||
node := e2e.ManifestNode{
|
||||
Version: nodeVersions.Choose(r).(string),
|
||||
Mode: string(mode),
|
||||
StartAt: startAt,
|
||||
Database: nodeDatabases.Choose(r).(string),
|
||||
@@ -264,6 +305,7 @@ func generateNode(
|
||||
func generateLightNode(r *rand.Rand, startAt int64, providers []string) *e2e.ManifestNode {
|
||||
return &e2e.ManifestNode{
|
||||
Mode: string(e2e.ModeLight),
|
||||
Version: nodeVersions.Choose(r).(string),
|
||||
StartAt: startAt,
|
||||
Database: nodeDatabases.Choose(r).(string),
|
||||
PersistInterval: ptrUint64(0),
|
||||
@@ -287,3 +329,101 @@ func (m misbehaviorOption) atHeight(height int64) map[string]string {
|
||||
misbehaviorMap[strconv.Itoa(int(height))] = m.misbehavior
|
||||
return misbehaviorMap
|
||||
}
|
||||
|
||||
// Parses strings like "v0.34.21:1,v0.34.22:2" to represent two versions
|
||||
// ("v0.34.21" and "v0.34.22") with weights of 1 and 2 respectively.
|
||||
func parseWeightedVersions(s string) (weightedChoice, error) {
|
||||
wc := make(weightedChoice)
|
||||
wvs := strings.Split(strings.TrimSpace(s), ",")
|
||||
for _, wv := range wvs {
|
||||
parts := strings.Split(strings.TrimSpace(wv), ":")
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("unexpected weight:version combination: %s", wv)
|
||||
}
|
||||
ver := strings.TrimSpace(parts[0])
|
||||
wt, err := strconv.Atoi(strings.TrimSpace(parts[1]))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unexpected weight \"%s\": %w", parts[1], err)
|
||||
}
|
||||
if wt < 1 {
|
||||
return nil, errors.New("version weights must be >= 1")
|
||||
}
|
||||
wc[ver] = uint(wt)
|
||||
}
|
||||
return wc, nil
|
||||
}
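As a rough illustration of the flag format parsed above (a standalone sketch, not the generator's own code), the value `"v0.34.23:1,local:2"` is expected to yield weights of 1 and 2 for those two entries:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Sketch of the "version:weight,version:weight" parsing described above.
func main() {
	in := "v0.34.23:1,local:2"
	weights := map[string]uint{}
	for _, wv := range strings.Split(strings.TrimSpace(in), ",") {
		parts := strings.Split(strings.TrimSpace(wv), ":")
		if len(parts) != 2 {
			panic("unexpected weight:version combination: " + wv)
		}
		wt, err := strconv.Atoi(strings.TrimSpace(parts[1]))
		if err != nil || wt < 1 {
			panic("version weights must be >= 1")
		}
		weights[strings.TrimSpace(parts[0])] = uint(wt)
	}
	fmt.Println(weights) // map[local:2 v0.34.23:1]
}
```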
|
||||
|
||||
// Extracts the latest release version from the given Git repository. Uses the
|
||||
// current version of Tendermint Core to establish the "major" version
|
||||
// currently in use.
|
||||
func gitRepoLatestReleaseVersion(gitRepoDir string) (string, error) {
|
||||
opts := &git.PlainOpenOptions{
|
||||
DetectDotGit: true,
|
||||
}
|
||||
r, err := git.PlainOpenWithOptions(gitRepoDir, opts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
tags := make([]string, 0)
|
||||
tagObjs, err := r.TagObjects()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = tagObjs.ForEach(func(tagObj *object.Tag) error {
|
||||
tags = append(tags, tagObj.Name)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return findLatestReleaseTag(version.TMCoreSemVer, tags)
|
||||
}
|
||||
|
||||
func findLatestReleaseTag(baseVer string, tags []string) (string, error) {
|
||||
baseSemVer, err := semver.NewVersion(strings.Split(baseVer, "-")[0])
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse base version \"%s\": %w", baseVer, err)
|
||||
}
|
||||
compVer := fmt.Sprintf("%d.%d", baseSemVer.Major(), baseSemVer.Minor())
|
||||
// Build our version comparison string
|
||||
// See https://github.com/Masterminds/semver#caret-range-comparisons-major for details
|
||||
compStr := "^ " + compVer
|
||||
verCon, err := semver.NewConstraint(compStr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var latestVer *semver.Version
|
||||
for _, tag := range tags {
|
||||
if !strings.HasPrefix(tag, "v") {
|
||||
continue
|
||||
}
|
||||
curVer, err := semver.NewVersion(tag)
|
||||
// Skip tags that are not valid semantic versions
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// Skip pre-releases
|
||||
if len(curVer.Prerelease()) != 0 {
|
||||
continue
|
||||
}
|
||||
// Skip versions that don't match our constraints
|
||||
if !verCon.Check(curVer) {
|
||||
continue
|
||||
}
|
||||
if latestVer == nil || curVer.GreaterThan(latestVer) {
|
||||
latestVer = curVer
|
||||
}
|
||||
}
|
||||
// No relevant latest version (will cause the generator to only use the tip
|
||||
// of the current branch)
|
||||
if latestVer == nil {
|
||||
return "", nil
|
||||
}
|
||||
// Ensure the version string has a "v" prefix, because all Tendermint E2E
|
||||
// node Docker images' versions have a "v" prefix.
|
||||
vs := latestVer.String()
|
||||
if !strings.HasPrefix(vs, "v") {
|
||||
return "v" + vs, nil
|
||||
}
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
47
test/e2e/generator/generate_test.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestVersionFinder(t *testing.T) {
|
||||
testCases := []struct {
|
||||
baseVer string
|
||||
tags []string
|
||||
expectedLatest string
|
||||
}{
|
||||
{
|
||||
baseVer: "v0.34.0",
|
||||
tags: []string{"v0.34.0", "v0.34.1", "v0.34.2", "v0.34.3-rc1", "v0.34.3", "v0.35.0", "v0.35.1", "v0.36.0-rc1"},
|
||||
expectedLatest: "v0.34.3",
|
||||
},
|
||||
{
|
||||
baseVer: "v0.38.0-dev",
|
||||
tags: []string{"v0.34.0", "v0.34.1", "v0.34.2", "v0.37.0-rc2", "dev-v0.38.0"},
|
||||
expectedLatest: "",
|
||||
},
|
||||
{
|
||||
baseVer: "v0.37.1-rc1",
|
||||
tags: []string{"v0.36.0", "v0.37.0-rc1", "v0.37.0"},
|
||||
expectedLatest: "v0.37.0",
|
||||
},
|
||||
{
|
||||
baseVer: "v1.0.0",
|
||||
tags: []string{"v0.34.0", "v0.35.0", "v1.0.0", "v1.0.1"},
|
||||
expectedLatest: "v1.0.1",
|
||||
},
|
||||
{
|
||||
baseVer: "v1.1.5",
|
||||
tags: []string{"v0.35.0", "v1.0.0", "v1.0.1", "v1.1.1", "v1.1.2", "v1.1.3", "v1.1.4"},
|
||||
expectedLatest: "v1.1.4",
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
actualLatest, err := findLatestReleaseTag(tc.baseVer, tc.tags)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedLatest, actualLatest)
|
||||
}
|
||||
}
|
||||
@@ -44,26 +44,35 @@ func NewCLI() *CLI {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cli.generate(dir, groups)
|
||||
multiVersion, err := cmd.Flags().GetString("multi-version")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cli.generate(dir, groups, multiVersion)
|
||||
},
|
||||
}
|
||||
|
||||
cli.root.PersistentFlags().StringP("dir", "d", "", "Output directory for manifests")
|
||||
_ = cli.root.MarkPersistentFlagRequired("dir")
|
||||
cli.root.PersistentFlags().StringP("multi-version", "m", "", "Comma-separated list of versions of Tendermint to test in the generated testnets, "+
|
||||
"or empty to only use this branch's version")
|
||||
cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups")
|
||||
|
||||
return cli
|
||||
}
|
||||
|
||||
// generate generates manifests in a directory.
|
||||
func (cli *CLI) generate(dir string, groups int) error {
|
||||
func (cli *CLI) generate(dir string, groups int, multiVersion string) error {
|
||||
err := os.MkdirAll(dir, 0o755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)))
|
||||
cfg := &generateConfig{
|
||||
randSource: rand.New(rand.NewSource(randomSeed)), //nolint:gosec
|
||||
multiVersion: multiVersion,
|
||||
}
|
||||
manifests, err := Generate(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
ipv6 = true
|
||||
initial_height = 1000
|
||||
initial_state = { initial01 = "a", initial02 = "b", initial03 = "c" }
|
||||
# The most common case (e.g. Cosmos SDK-based chains).
|
||||
abci_protocol = "builtin"
|
||||
|
||||
[validators]
|
||||
validator01 = 100
|
||||
@@ -42,7 +44,6 @@ misbehaviors = { 1018 = "double-prevote" }
|
||||
[node.validator02]
|
||||
seeds = ["seed02"]
|
||||
database = "boltdb"
|
||||
abci_protocol = "tcp"
|
||||
privval_protocol = "tcp"
|
||||
persist_interval = 0
|
||||
perturb = ["restart"]
|
||||
@@ -50,8 +51,6 @@ perturb = ["restart"]
|
||||
[node.validator03]
|
||||
seeds = ["seed01"]
|
||||
database = "badgerdb"
|
||||
# FIXME: should be grpc, disabled due to https://github.com/tendermint/tendermint/issues/5439
|
||||
#abci_protocol = "grpc"
|
||||
privval_protocol = "unix"
|
||||
persist_interval = 3
|
||||
retain_blocks = 3
|
||||
@@ -60,7 +59,6 @@ perturb = ["kill"]
|
||||
[node.validator04]
|
||||
persistent_peers = ["validator01"]
|
||||
database = "rocksdb"
|
||||
abci_protocol = "builtin"
|
||||
perturb = ["pause"]
|
||||
|
||||
[node.validator05]
|
||||
@@ -69,8 +67,6 @@ seeds = ["seed02"]
|
||||
database = "cleveldb"
|
||||
fast_sync = "v0"
|
||||
mempool_version = "v1"
|
||||
# FIXME: should be grpc, disabled due to https://github.com/tendermint/tendermint/issues/5439
|
||||
#abci_protocol = "grpc"
|
||||
privval_protocol = "tcp"
|
||||
perturb = ["kill", "pause", "disconnect", "restart"]
|
||||
|
||||
@@ -100,4 +96,4 @@ persistent_peers = ["validator01", "validator02", "validator03"]
|
||||
[node.light02]
|
||||
mode= "light"
|
||||
start_at= 1015
|
||||
persistent_peers = ["validator04", "full01", "validator05"]
|
||||
persistent_peers = ["validator04", "full01", "validator05"]
|
||||
|
||||
@@ -2,4 +2,3 @@
|
||||
[node.validator02]
|
||||
[node.validator03]
|
||||
[node.validator04]
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@ services:
|
||||
labels:
|
||||
e2e: true
|
||||
container_name: {{ .Name }}
|
||||
image: tendermint/e2e-node
|
||||
image: tendermint/e2e-node:{{ .Version }}
|
||||
{{- if eq .ABCIProtocol "builtin" }}
|
||||
entrypoint: /usr/bin/entrypoint-builtin
|
||||
{{- else if .Misbehaviors }}
|
||||
|
||||
@@ -56,6 +56,10 @@ type Manifest struct {
|
||||
// builtin will build a complete Tendermint node into the application and
|
||||
// launch it instead of launching a separate Tendermint process.
|
||||
ABCIProtocol string `toml:"abci_protocol"`
|
||||
|
||||
LoadTxSizeBytes int `toml:"load_tx_size_bytes"`
|
||||
LoadTxBatchSize int `toml:"load_tx_batch_size"`
|
||||
LoadTxConnections int `toml:"load_tx_connections"`
|
||||
}
|
||||
|
||||
// ManifestNode represents a node in a testnet manifest.
|
||||
@@ -65,6 +69,13 @@ type ManifestNode struct {
|
||||
// is generated), and seed nodes run in seed mode with the PEX reactor enabled.
|
||||
Mode string `toml:"mode"`
|
||||
|
||||
// Version specifies which version of Tendermint this node is. Specifying different
|
||||
// versions for different nodes allows for testing the interaction of different
|
||||
// node's compatibility. Note that in order to use a node at a particular version,
|
||||
// there must be a docker image of the test app tagged with this version present
|
||||
// on the machine where the test is being run.
|
||||
Version string `toml:"version"`
|
||||
|
||||
// Seeds is the list of node names to use as P2P seed nodes. Defaults to none.
|
||||
Seeds []string `toml:"seeds"`
|
||||
|
||||
@@ -134,6 +145,11 @@ type ManifestNode struct {
|
||||
// For more information, look at the readme in the maverick folder.
|
||||
// A list of all behaviors can be found in ../maverick/consensus/behavior.go
|
||||
Misbehaviors map[string]string `toml:"misbehaviors"`
|
||||
|
||||
// SendNoLoad determines if the e2e test should send load to this node.
|
||||
// It defaults to false so unless the configured, the node will
|
||||
// receive load.
|
||||
SendNoLoad bool `toml:"send_no_load"`
|
||||
}
|
||||
|
||||
// Save saves the testnet manifest to a file.
|
||||
|
||||
@@ -21,6 +21,10 @@ import (
|
||||
const (
|
||||
randomSeed int64 = 2308084734268
|
||||
proxyPortFirst uint32 = 5701
|
||||
|
||||
defaultBatchSize = 2
|
||||
defaultConnections = 1
|
||||
defaultTxSizeBytes = 1024
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -49,22 +53,27 @@ const (
|
||||
|
||||
// Testnet represents a single testnet.
|
||||
type Testnet struct {
|
||||
Name string
|
||||
File string
|
||||
Dir string
|
||||
IP *net.IPNet
|
||||
InitialHeight int64
|
||||
InitialState map[string]string
|
||||
Validators map[*Node]int64
|
||||
ValidatorUpdates map[int64]map[*Node]int64
|
||||
Nodes []*Node
|
||||
KeyType string
|
||||
ABCIProtocol string
|
||||
Name string
|
||||
File string
|
||||
Dir string
|
||||
IP *net.IPNet
|
||||
InitialHeight int64
|
||||
InitialState map[string]string
|
||||
Validators map[*Node]int64
|
||||
ValidatorUpdates map[int64]map[*Node]int64
|
||||
Nodes []*Node
|
||||
KeyType string
|
||||
Evidence int
|
||||
LoadTxSizeBytes int
|
||||
LoadTxBatchSize int
|
||||
LoadTxConnections int
|
||||
ABCIProtocol string
|
||||
}
|
||||
|
||||
// Node represents a Tendermint node in a testnet.
|
||||
type Node struct {
|
||||
Name string
|
||||
Version string
|
||||
Testnet *Testnet
|
||||
Mode Mode
|
||||
PrivvalKey crypto.PrivKey
|
||||
@@ -85,6 +94,9 @@ type Node struct {
|
||||
PersistentPeers []*Node
|
||||
Perturbations []Perturbation
|
||||
Misbehaviors map[int64]string
|
||||
|
||||
// SendNoLoad determines if the e2e test should send load to this node.
|
||||
SendNoLoad bool
|
||||
}
|
||||
|
||||
// LoadTestnet loads a testnet from a manifest file, using the filename to
|
||||
@@ -102,16 +114,19 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test
|
||||
}
|
||||
|
||||
testnet := &Testnet{
|
||||
Name: filepath.Base(dir),
|
||||
File: fname,
|
||||
Dir: dir,
|
||||
IP: ipNet,
|
||||
InitialHeight: 1,
|
||||
InitialState: manifest.InitialState,
|
||||
Validators: map[*Node]int64{},
|
||||
ValidatorUpdates: map[int64]map[*Node]int64{},
|
||||
Nodes: []*Node{},
|
||||
ABCIProtocol: manifest.ABCIProtocol,
|
||||
Name: filepath.Base(dir),
|
||||
File: fname,
|
||||
Dir: dir,
|
||||
IP: ipNet,
|
||||
InitialHeight: 1,
|
||||
InitialState: manifest.InitialState,
|
||||
Validators: map[*Node]int64{},
|
||||
ValidatorUpdates: map[int64]map[*Node]int64{},
|
||||
Nodes: []*Node{},
|
||||
LoadTxSizeBytes: manifest.LoadTxSizeBytes,
|
||||
LoadTxBatchSize: manifest.LoadTxBatchSize,
|
||||
LoadTxConnections: manifest.LoadTxConnections,
|
||||
ABCIProtocol: manifest.ABCIProtocol,
|
||||
}
|
||||
if len(manifest.KeyType) != 0 {
|
||||
testnet.KeyType = manifest.KeyType
|
||||
@@ -122,6 +137,15 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test
|
||||
if testnet.ABCIProtocol == "" {
|
||||
testnet.ABCIProtocol = string(ProtocolBuiltin)
|
||||
}
|
||||
if testnet.LoadTxConnections == 0 {
|
||||
testnet.LoadTxConnections = defaultConnections
|
||||
}
|
||||
if testnet.LoadTxBatchSize == 0 {
|
||||
testnet.LoadTxBatchSize = defaultBatchSize
|
||||
}
|
||||
if testnet.LoadTxSizeBytes == 0 {
|
||||
testnet.LoadTxSizeBytes = defaultTxSizeBytes
|
||||
}
|
||||
|
||||
// Set up nodes, in alphabetical order (IPs and ports get same order).
|
||||
nodeNames := []string{}
|
||||
@@ -136,8 +160,13 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("information for node '%s' missing from infrastucture data", name)
|
||||
}
|
||||
v := nodeManifest.Version
|
||||
if v == "" {
|
||||
v = "local-version"
|
||||
}
|
||||
node := &Node{
|
||||
Name: name,
|
||||
Version: v,
|
||||
Testnet: testnet,
|
||||
PrivvalKey: keyGen.Generate(manifest.KeyType),
|
||||
NodeKey: keyGen.Generate("ed25519"),
|
||||
@@ -156,6 +185,7 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test
|
||||
RetainBlocks: nodeManifest.RetainBlocks,
|
||||
Perturbations: []Perturbation{},
|
||||
Misbehaviors: make(map[int64]string),
|
||||
SendNoLoad: nodeManifest.SendNoLoad,
|
||||
}
|
||||
if node.StartAt == testnet.InitialHeight {
|
||||
node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this
|
||||
|
||||
@@ -2,47 +2,45 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
|
||||
e2e "github.com/tendermint/tendermint/test/e2e/pkg"
|
||||
"github.com/tendermint/tendermint/test/loadtime/payload"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const workerPoolSize = 16
|
||||
|
||||
// Load generates transactions against the network until the given context is
|
||||
// canceled. A multiplier of greater than one can be supplied if load needs to
|
||||
// be generated beyond a minimum amount.
|
||||
func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error {
|
||||
// Since transactions are executed across all nodes in the network, we need
|
||||
// to reduce transaction load for larger networks to avoid using too much
|
||||
// CPU. This gives high-throughput small networks and low-throughput large ones.
|
||||
// This also limits the number of TCP connections, since each worker has
|
||||
// a connection to all nodes.
|
||||
concurrency := 64 / len(testnet.Nodes)
|
||||
if concurrency == 0 {
|
||||
concurrency = 1
|
||||
}
|
||||
// canceled.
|
||||
func Load(ctx context.Context, testnet *e2e.Testnet) error {
|
||||
initialTimeout := 1 * time.Minute
|
||||
stallTimeout := 30 * time.Second
|
||||
|
||||
chTx := make(chan types.Tx)
|
||||
chSuccess := make(chan types.Tx)
|
||||
chSuccess := make(chan struct{})
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Spawn job generator and processors.
|
||||
logger.Info("load", "msg", log.NewLazySprintf("Starting transaction load (%v workers)...", concurrency))
|
||||
logger.Info("load", "msg", log.NewLazySprintf("Starting transaction load (%v workers)...", workerPoolSize))
|
||||
started := time.Now()
|
||||
u := [16]byte(uuid.New()) // generate run ID on startup
|
||||
|
||||
go loadGenerate(ctx, chTx, multiplier)
|
||||
txCh := make(chan types.Tx)
|
||||
go loadGenerate(ctx, txCh, testnet, u[:])
|
||||
|
||||
for w := 0; w < concurrency; w++ {
|
||||
go loadProcess(ctx, testnet, chTx, chSuccess)
|
||||
for _, n := range testnet.Nodes {
|
||||
if n.SendNoLoad {
|
||||
continue
|
||||
}
|
||||
|
||||
for w := 0; w < testnet.LoadTxConnections; w++ {
|
||||
go loadProcess(ctx, txCh, chSuccess, n)
|
||||
}
|
||||
}
|
||||
|
||||
// Monitor successful transactions, and abort on stalls.
|
||||
@@ -67,58 +65,85 @@ func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error {
|
||||
}
|
||||
|
||||
// loadGenerate generates jobs until the context is canceled
|
||||
func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int) {
|
||||
for i := 0; i < math.MaxInt64; i++ {
|
||||
// We keep generating the same 1000 keys over and over, with different values.
|
||||
// This gives a reasonable load without putting too much data in the app.
|
||||
id := i % 1000
|
||||
|
||||
bz := make([]byte, 1024) // 1kb hex-encoded
|
||||
_, err := rand.Read(bz)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to read random bytes: %v", err))
|
||||
}
|
||||
tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz))
|
||||
|
||||
func loadGenerate(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testnet, id []byte) {
|
||||
t := time.NewTimer(0)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case chTx <- tx:
|
||||
time.Sleep(time.Second / time.Duration(multiplier))
|
||||
|
||||
case <-t.C:
|
||||
case <-ctx.Done():
|
||||
close(chTx)
|
||||
close(txCh)
|
||||
return
|
||||
}
|
||||
t.Reset(time.Second)
|
||||
|
||||
// A context with a timeout is created here to time the createTxBatch
|
||||
// function out. If createTxBatch has not completed its work by the time
|
||||
// the next batch is set to be sent out, then the context is canceled so that
|
||||
// the current batch is halted, allowing the next batch to begin.
|
||||
tctx, cf := context.WithTimeout(ctx, time.Second)
|
||||
createTxBatch(tctx, txCh, testnet, id)
|
||||
cf()
|
||||
}
|
||||
}
|
||||
|
||||
// loadProcess processes transactions
|
||||
func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) {
|
||||
// Each worker gets its own client to each node, which allows for some
|
||||
// concurrency while still bounding it.
|
||||
clients := map[string]*rpchttp.HTTP{}
|
||||
// createTxBatch creates new transactions and sends them into the txCh. createTxBatch
|
||||
// returns when either a full batch has been sent to the txCh or the context
|
||||
// is canceled.
|
||||
func createTxBatch(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testnet, id []byte) {
|
||||
wg := &sync.WaitGroup{}
|
||||
genCh := make(chan struct{})
|
||||
for i := 0; i < workerPoolSize; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range genCh {
|
||||
tx, err := payload.NewBytes(&payload.Payload{
|
||||
Id: id,
|
||||
Size: uint64(testnet.LoadTxSizeBytes),
|
||||
Rate: uint64(testnet.LoadTxBatchSize),
|
||||
Connections: uint64(testnet.LoadTxConnections),
|
||||
})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to generate tx: %v", err))
|
||||
}
|
||||
|
||||
var err error
|
||||
for tx := range chTx {
|
||||
node := testnet.RandomNode()
|
||||
client, ok := clients[node.Name]
|
||||
if !ok {
|
||||
client, err = node.Client()
|
||||
if err != nil {
|
||||
continue
|
||||
select {
|
||||
case txCh <- tx:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// check that the node is up
|
||||
_, err = client.Health(ctx)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
clients[node.Name] = client
|
||||
}()
|
||||
}
|
||||
for i := 0; i < testnet.LoadTxBatchSize; i++ {
|
||||
select {
|
||||
case genCh <- struct{}{}:
|
||||
case <-ctx.Done():
|
||||
break
|
||||
}
|
||||
}
|
||||
close(genCh)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// loadProcess processes transactions by sending transactions received on the txCh
|
||||
// to the client.
|
||||
func loadProcess(ctx context.Context, txCh <-chan types.Tx, chSuccess chan<- struct{}, n *e2e.Node) {
|
||||
var client *rpchttp.HTTP
|
||||
var err error
|
||||
s := struct{}{}
|
||||
for tx := range txCh {
|
||||
if client == nil {
|
||||
client, err = n.Client()
|
||||
if err != nil {
|
||||
logger.Info("non-fatal error creating node client", "error", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if _, err = client.BroadcastTxSync(ctx, tx); err != nil {
|
||||
continue
|
||||
}
|
||||
chSuccess <- tx
|
||||
chSuccess <- s
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
@@ -102,7 +101,7 @@ func NewCLI() *CLI {
|
||||
ctx, loadCancel := context.WithCancel(context.Background())
|
||||
defer loadCancel()
|
||||
go func() {
|
||||
err := Load(ctx, cli.testnet, 1)
|
||||
err := Load(ctx, cli.testnet)
|
||||
if err != nil {
|
||||
logger.Error(fmt.Sprintf("Transaction load failed: %v", err.Error()))
|
||||
}
|
||||
@@ -212,20 +211,10 @@ func NewCLI() *CLI {
|
||||
})
|
||||
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "load [multiplier]",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
Use: "load",
|
||||
Short: "Generates transaction load until the command is canceled",
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
m := 1
|
||||
|
||||
if len(args) == 1 {
|
||||
m, err = strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return Load(context.Background(), cli.testnet, m)
|
||||
return Load(context.Background(), cli.testnet)
|
||||
},
|
||||
})
|
||||
|
||||
@@ -271,7 +260,7 @@ func NewCLI() *CLI {
|
||||
Max Block Interval
|
||||
over a 100 block sampling period.
|
||||
|
||||
Does not run any perbutations.
|
||||
Does not run any perturbations.
|
||||
`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := Cleanup(cli.testnet); err != nil {
|
||||
@@ -285,9 +274,9 @@ Does not run any perbutations.
|
||||
ctx, loadCancel := context.WithCancel(context.Background())
|
||||
defer loadCancel()
|
||||
go func() {
|
||||
err := Load(ctx, cli.testnet, 1)
|
||||
err := Load(ctx, cli.testnet)
|
||||
if err != nil {
|
||||
logger.Error(fmt.Sprintf("Transaction load failed: %v", err.Error()))
|
||||
logger.Error(fmt.Sprintf("Transaction load errored: %v", err.Error()))
|
||||
}
|
||||
chLoadResult <- err
|
||||
}()
|
||||
|
||||
@@ -140,6 +140,11 @@ const (
|
||||
// BlockHeightKey is a reserved key used for indexing BeginBlock and Endblock
|
||||
// events.
|
||||
BlockHeightKey = "block.height"
|
||||
|
||||
// MatchEventsKey is a reserved key used to indicate to the indexer that the
|
||||
// conditions in the query have to have occurred both on the same height
|
||||
// as well as in the same event
|
||||
MatchEventKey = "match.events"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
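The `match.events` reserved key introduced above is meant to be supplied as the very first condition of a query. A hedged sketch of constructing such a query with the repository's `query` package (the attribute names and values are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func main() {
	// match.events must be the leading condition; dedupMatchEvents only
	// honors it at index 0, and the tests above show that duplicates or
	// later placements do not enable same-event matching.
	q := query.MustParse("match.events = 1 AND tx.height = 1 AND account.owner = 'Ana'")
	fmt.Println(q.String())
}
```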