Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-13 00:02:52 +00:00)

Compare commits: 51 commits, wb/release...wb/simul-q
| Author | SHA1 | Date |
|---|---|---|
|  | 738457a63f |  |
|  | 6d8079559b |  |
|  | 1d96faa35a |  |
|  | 921530c352 |  |
|  | 5274f80de4 |  |
|  | 47cb30fc1d |  |
|  | 5c26db733b |  |
|  | 60881f1d06 |  |
|  | 3bec1668c6 |  |
|  | 37f9d59969 |  |
|  | 013b46a6c3 |  |
|  | 373b262f35 |  |
|  | 463cff456b |  |
|  | 27ff2f46b8 |  |
|  | 52b6dc19ba |  |
|  | c4d24eed7d |  |
|  | 409e057d73 |  |
|  | 6b5053046a |  |
|  | 5f5e74798b |  |
|  | fb209136f8 |  |
|  | 436a38f876 |  |
|  | 52b2efb827 |  |
|  | 2e11760fbe |  |
|  | 8860e027a8 |  |
|  | cfd13825e2 |  |
|  | 6f168df7e4 |  |
|  | 28d3239958 |  |
|  | acf97128f3 |  |
|  | 2382b5c364 |  |
|  | e3e162ff10 |  |
|  | 4d820ff4f5 |  |
|  | 82c1372f9e |  |
|  | 0ac03468d8 |  |
|  | 9e5b13725d |  |
|  | a4f29bfd44 |  |
|  | 7cf09399bb |  |
|  | 8854ce4e68 |  |
|  | 56e329aa9e |  |
|  | 1062ae73d6 |  |
|  | 134bfefbe5 |  |
|  | f0b0f34f3f |  |
|  | 51b3f111dc |  |
|  | 979a6a1b13 |  |
|  | bf1cb89bb7 |  |
|  | 7971f4a2fc |  |
|  | a2908c29d5 |  |
|  | a4cf8939b8 |  |
|  | 21bbbe3e2a |  |
|  | 82907c84fa |  |
|  | 06175129ed |  |
|  | 7172862786 |  |
.github/CODEOWNERS (vendored): 2 lines changed

@@ -10,4 +10,4 @@
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir

# Spec related changes can be approved by the protocol design team
/spec @josef-widder @milosevic @cason
/spec @josef-widder @milosevic @cason @sergio-mena @jmalicevic
.github/workflows/e2e-manual.yml (vendored): 4 lines changed

@@ -11,7 +11,7 @@ jobs:
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
group: ['00', '01', '02', '03', '04']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:

@@ -29,7 +29,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/
run: ./build/generator -g 5 -d networks/nightly/

- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e
.github/workflows/e2e-nightly-36x.yml (vendored): 4 lines changed

@@ -15,7 +15,7 @@ jobs:
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
group: ['00', '01', '02', '03', '04']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:

@@ -35,7 +35,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly
run: ./build/generator -g 5 -d networks/nightly

- name: Run testnets in group ${{ matrix.group }}
working-directory: test/e2e
.github/workflows/e2e-nightly-master.yml (vendored): 4 lines changed

@@ -16,7 +16,7 @@ jobs:
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
group: ['00', '01', '02', '03', "04"]
runs-on: ubuntu-latest
timeout-minutes: 60
steps:

@@ -34,7 +34,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/
run: ./build/generator -g 5 -d networks/nightly/

- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e
.github/workflows/janitor.yml (vendored): 2 lines changed

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
- uses: styfle/cancel-workflow-action@0.10.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}
.github/workflows/proto-lint.yml (vendored): 2 lines changed

@@ -15,7 +15,7 @@ jobs:
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.5.0
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'
@@ -31,12 +31,18 @@ Special thanks to external contributors on this release:
- [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish)
- [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic)
- [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena)
- [abci] \#8656 Added cli command for `PrepareProposal`. (@jmalicevic)
- [sink/psql] \#8637 tx_results emitted from psql sink are now json encoded, previously they were protobuf encoded

- P2P Protocol

- [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish)
- [p2p] \#7265 Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish)
- [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui)
- [p2p] \#8737 Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
- [p2p] \#8737 Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
- [p2p] \#8737 Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
- [p2p] \#8737 Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)

- Go API
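The `[sink/psql]` entry above changes only how transaction results are encoded at rest, not where they are stored. Below is a minimal consumer-side sketch (not part of this changeset) of what the switch to JSON means in practice; the table and column names (`tx_results`, `tx_result`) and the DSN are assumptions here, so check the sink's schema for the authoritative layout.

```go
package main

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"log"

	_ "github.com/lib/pq" // postgres driver, already present in go.sum below
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/tendermint?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var raw []byte
	if err := db.QueryRow(`SELECT tx_result FROM tx_results LIMIT 1`).Scan(&raw); err != nil {
		log.Fatal(err)
	}

	// Previously this column held a protobuf-encoded result; after #8637 it is JSON,
	// so a plain encoding/json decode is enough for ad-hoc inspection.
	var result map[string]interface{}
	if err := json.Unmarshal(raw, &result); err != nil {
		log.Fatal(err)
	}
	fmt.Println(result) // field names depend on the JSON encoding used by the sink
}
```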
RELEASES.md: 168 lines changed
@@ -1,9 +1,8 @@
# Releases

Tendermint uses modified [semantic versioning](https://semver.org/) with each
release following a `vX.Y.Z` format. Tendermint is currently on major version
0 and uses the minor version to signal breaking changes. The `master` branch is
used for active development and thus it is not advisable to build against it.
Tendermint uses [semantic versioning](https://semver.org/) with each release following
a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
advisable not to build against it.

The latest changes are always initially merged into `master`.
Releases are specified using tags and are built from long-lived "backport" branches

@@ -30,8 +29,8 @@ merging the pull request.

### Creating a backport branch

If this is the first release candidate for a minor version release, e.g.
v0.25.0, you get to have the honor of creating the backport branch!
If this is the first release candidate for a major release, you get to have the
honor of creating the backport branch!

Note that, after creating the backport branch, you'll also need to update the
tags on `master` so that `go mod` is able to order the branches correctly. You

@@ -78,8 +77,7 @@ the 0.35.x line.

After doing these steps, go back to `master` and do the following:

1. Tag `master` as the dev branch for the _next_ minor version release and push
it up to GitHub.
1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub.
For example:
```sh
git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."

@@ -101,7 +99,7 @@ After doing these steps, go back to `master` and do the following:

## Release candidates

Before creating an official release, especially a minor release, we may want to create a
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.

@@ -111,7 +109,7 @@ Tags for RCs should follow the "standard" release naming conventions, with `-rcX`
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

If this is the first RC for a minor release, you'll have to make a new backport branch (see above).
If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:

1. Start from the backport branch (e.g. `v0.35.x`).
@@ -142,13 +140,11 @@ Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.

## Minor release
## Major release

This minor release process assumes that this release was preceded by release candidates.
This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.

Before performing these steps, be sure the [Minor Release Checklist](#minor-release-checklist) has been completed.

1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:

@@ -180,16 +176,16 @@ Before performing these steps, be sure the [Minor Release Checklist](#minor-rele
- Commit these changes to `master` and backport them into the backport
branch for this release.

## Patch release
## Minor release (point releases)

Patch releases are done differently from minor releases: They are built off of
Minor releases are done differently from major releases: They are built off of
long-lived backport branches, rather than from master. As non-breaking changes
land on `master`, they should also be backported into these backport branches.

Patch releases don't have release candidates by default, although any tricky
Minor releases don't have release candidates by default, although any tricky
changes may merit a release candidate.

To create a patch release:
To create a minor release:

1. Checkout the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
@@ -201,143 +197,11 @@ To create a patch release:
- Bump the TMDefaultVersion in `version.go`
- Bump the ABCI version number, if necessary.
(Note that ABCI follows semver, and that ABCI versions are the only versions
which can change during patch releases, and only field additions are valid patch changes.)
which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
- `git push origin v0.35.1`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
- Remove all `R:patch` labels from the pull requests that were included in the release.
- Remove all `R:minor` labels from the pull requests that were included in the release.
- Do not merge the backport branch into master.

## Minor Release Checklist

The following set of steps are performed on all releases that increment the
_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is
well tested, stable, and suitable for adoption by the various diverse projects
that rely on Tendermint.

### Feature Freeze

Ahead of any minor version release of Tendermint, the software enters 'Feature
Freeze' for at least two weeks. A feature freeze means that _no_ new features
are added to the code being prepared for release. No code changes should be made
to the code being released that do not directly improve pressing issues of code
quality. The following must not be merged during a feature freeze:

* Refactors that are not related to specific bug fixes.
* Dependency upgrades.
* New test code that does not test a discovered regression.
* New features of any kind.
* Documentation or spec improvements that are not related to the newly developed
code.

This period directly follows the creation of the [backport
branch](#creating-a-backport-branch). The Tendermint team instead directs all
attention to ensuring that the existing code is stable and reliable. Broken
tests are fixed, flakey-tests are remedied, end-to-end test failures are
thoroughly diagnosed and all efforts of the team are aimed at improving the
quality of the code. During this period, the upgrade harness tests are run
repeatedly and a variety of in-house testnets are run to ensure Tendermint
functions at the scale it will be used by application developers and node
operators.

### Nightly End-To-End Tests

The Tendermint team maintains [a set of end-to-end
tests](https://github.com/tendermint/tendermint/blob/master/test/e2e/README.md#L1)
that run each night on the latest commit of the project and on the code in the
tip of each supported backport branch. These tests start a network of containerized
Tendermint processes and run automated checks that the network functions as
expected in both stable and unstable conditions. During the feature freeze,
these tests are run nightly and must pass consistently for a release of
Tendermint to be considered stable.

### Upgrade Harness

> TODO(williambanfield): Change to past tense and clarify this section once
> upgrade harness is complete.

The Tendermint team is creating an upgrade test harness to exercise the
workflow of stopping an instance of Tendermint running one version of the
software and starting up the same application running the next version. To
support upgrade testing, we will add the ability to terminate the Tendermint
process at specific pre-defined points in its execution so that we can verify
upgrades work in a representative sample of stop conditions.

### Large Scale Testnets

The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise
basic consensus interactions. Real world deployments of Tendermint often have over
a hundred nodes just in the validator set, with many others acting as full
nodes and sentry nodes. To gain more assurance before a release, we will also run
larger-scale test networks to shake out emergent behaviors at scale.

Large-scale test networks are run on a set of virtual machines (VMs). Each VM
is equipped with 4 Gigabytes of RAM and 2 CPU cores. The network runs a very
simple key-value store application. The application adds artificial delays to
different ABCI calls to simulate a slow application. Each testnet is briefly
run with no load being generated to collect a baseline performance. Once
baseline is captured, a consistent load is applied across the network. This
load takes the form of 10% of the running nodes all receiving a consistent
stream of two hundred transactions per minute each.
During each test net, the following metrics are monitored and collected on each
node:

* Consensus rounds per height
* Maximum connected peers, Minimum connected peers, Rate of change of peer connections
* Memory resident set size
* CPU utilization
* Blocks produced per minute
* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit)
* Latency to receive block proposals

For these tests we intentionally target low-powered host machines (with low core
counts and limited memory) to ensure we observe similar kinds of resource contention
and limitation that real-world deployments of Tendermint experience in production.

#### 200 Node Testnet

To test the stability and performance of Tendermint in a real world scenario,
a 200 node test network is run. The network comprises 5 seed nodes, 100
validators and 95 non-validating full nodes. All nodes begin by dialing
a subset of the seed nodes to discover peers. The network is run for several
days, with metrics being collected continuously. In cases of changes to performance
critical systems, testnets of larger sizes should be considered.

#### Rotating Node Testnet

Real-world deployments of Tendermint frequently see new nodes arrive and old
nodes exit the network. The rotating node testnet ensures that Tendermint is
able to handle this reliably. In this test, a network with 10 validators and
3 seed nodes is started. A rolling set of 25 full nodes are started and each
connects to the network by dialing one of the seed nodes. Once the node is able
to blocksync to the head of the chain and begins producing blocks using
Tendermint consensus it is stopped. Once stopped, a new node is started and
takes its place. This network is run for several days.

#### Network Partition Testnet

Tendermint is expected to recover from network partitions. A partition where no
subset of the nodes is left with the super-majority of the stake is expected to
stop making blocks. Upon alleviation of the partition, the network is expected
to once again become fully connected and capable of producing blocks. The
network partition testnet ensures that Tendermint is able to handle this
reliably at scale. In this test, a network with 100 validators and 95 full
nodes is started. All validators have equal stake. Once the network is
producing blocks, a set of firewall rules is deployed to create a partitioned
network with 50% of the stake on one side and 50% on the other. Once the
network stops producing blocks, the firewall rules are removed and the nodes
are monitored to ensure they reconnect and that the network again begins
producing blocks.

#### Absent Stake Testnet

Tendermint networks often run with _some_ portion of the voting power offline.
The absent stake testnet ensures that large networks are able to handle this
reliably. A set of 150 validator nodes and three seed nodes is started. The set
of 150 validators is configured to only possess a cumulative stake of 67% of
the total stake. The remaining 33% of the stake is configured to belong to
a validator that is never actually run in the test network. The network is run
for multiple days, ensuring that it is able to produce blocks without issue.
@@ -2,6 +2,7 @@ package main

import (
"bufio"
"bytes"
"encoding/hex"
"errors"
"fmt"

@@ -130,6 +131,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) {
cmd.AddCommand(commitCmd)
cmd.AddCommand(versionCmd)
cmd.AddCommand(testCmd)
cmd.AddCommand(prepareProposalCmd)
cmd.AddCommand(getQueryCmd())

// examples

@@ -170,7 +172,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"},
ValidArgs: []string{"echo", "info", "query", "check_tx", "prepare_proposal", "finalize_block", "commit"},
RunE: cmdConsole,
}

@@ -224,6 +226,14 @@ var versionCmd = &cobra.Command{
},
}

var prepareProposalCmd = &cobra.Command{
Use: "prepare_proposal",
Short: "prepare proposal",
Long: "prepare proposal",
Args: cobra.MinimumNArgs(1),
RunE: cmdPrepareProposal,
}

func getQueryCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "query",

@@ -335,6 +345,13 @@ func cmdTest(cmd *cobra.Command, args []string) error {
}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5})
},
func() error { return servertest.Commit(ctx, client) },
func() error {
return servertest.PrepareProposal(ctx, client, [][]byte{
{0x01},
}, []types.TxRecord_TxAction{
types.TxRecord_UNMODIFIED,
}, nil)
},
})
}

@@ -435,6 +452,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
return cmdInfo(cmd, actualArgs)
case "query":
return cmdQuery(cmd, actualArgs)
case "prepare_proposal":
return cmdPrepareProposal(cmd, actualArgs)
default:
return cmdUnimplemented(cmd, pArgs)
}
@@ -605,6 +624,64 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return nil
}

func inTxArray(txByteArray [][]byte, tx []byte) bool {
for _, txTmp := range txByteArray {
if bytes.Equal(txTmp, tx) {
return true
}

}
return false
}
func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Info: "Must provide at least one transaction",
Log: "Must provide at least one transaction",
})
return nil
}
txsBytesArray := make([][]byte, len(args))

for i, arg := range args {
txBytes, err := stringOrHexToBytes(arg)
if err != nil {
return err
}
txsBytesArray[i] = txBytes
}

res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{
Txs: txsBytesArray,
// kvstore has to have this parameter in order not to reject a tx as the default value is 0
MaxTxBytes: 65536,
})
if err != nil {
return err
}
resps := make([]response, 0, len(res.TxResults)+1)
for _, tx := range res.TxRecords {
existingTx := inTxArray(txsBytesArray, tx.Tx)
if tx.Action == types.TxRecord_UNKNOWN ||
(existingTx && tx.Action == types.TxRecord_ADDED) ||
(!existingTx && (tx.Action == types.TxRecord_UNMODIFIED || tx.Action == types.TxRecord_REMOVED)) {
resps = append(resps, response{
Code: codeBad,
Log: "Failed. Tx: " + string(tx.GetTx()) + " action: " + tx.Action.String(),
})
} else {
resps = append(resps, response{
Code: code.CodeTypeOK,
Log: "Succeeded. Tx: " + string(tx.Tx) + " action: " + tx.Action.String(),
})
}
}

printResponse(cmd, args, resps...)
return nil
}

func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
// Create the application - in memory or persisted to disk

@@ -649,7 +726,6 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) {
fmt.Printf("-> code: OK\n")
} else {
fmt.Printf("-> code: %d\n", rsp.Code)

}

if len(rsp.Data) != 0 {
@@ -70,6 +70,19 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by
return nil
}

func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []types.TxRecord_TxAction, dataExp []byte) error {
res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes})
for i, tx := range res.TxRecords {
if tx.Action != codeExp[i] {
fmt.Println("Failed test: PrepareProposal")
fmt.Printf("PrepareProposal response code was unexpected. Got %v expected %v.",
tx.Action, codeExp)
return errors.New("PrepareProposal error")
}
}
fmt.Println("Passed test: PrepareProposal")
return nil
}
func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes})
code, data := res.Code, res.Data
@@ -1,5 +1,6 @@
echo hello
info
prepare_proposal "abc"
finalize_block "abc"
commit
info

@@ -7,3 +8,4 @@ query "abc"
finalize_block "def=xyz" "ghi=123"
commit
query "def"
prepare_proposal "preparedef"
@@ -8,6 +8,10 @@
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D

> prepare_proposal "abc"
-> code: OK
-> log: Succeeded. Tx: abc action: UNMODIFIED

> finalize_block "abc"
-> code: OK
-> code: OK

@@ -48,3 +52,9 @@
-> value: xyz
-> value.hex: 78797A

> prepare_proposal "preparedef"
-> code: OK
-> log: Succeeded. Tx: def action: ADDED
-> code: OK
-> log: Succeeded. Tx: preparedef action: REMOVED
@@ -30,6 +30,8 @@ function testExample() {
cat "${INPUT}.out.new"
echo "Expected:"
cat "${INPUT}.out"
echo "Diff:"
diff "${INPUT}.out" "${INPUT}.out.new"
exit 1
fi
@@ -33,10 +33,14 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
Args: cobra.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
pid, err := strconv.ParseInt(args[0], 10, 64)
// Using Atoi so that the size of an integer can be automatically inferred.
pid, err := strconv.Atoi(args[0])
if err != nil {
return err
}
if pid <= 0 {
return fmt.Errorf("PID value must be > 0; given value %q, got %d", args[0], pid)
}

outFile := args[1]
if outFile == "" {

@@ -95,7 +99,7 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
}

logger.Info("killing Tendermint process")
if err := killProc(int(pid), tmpDir); err != nil {
if err := killProc(pid, tmpDir); err != nil {
return err
}

@@ -113,6 +117,9 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
// if the output file cannot be created or the tail command cannot be started.
// An error is not returned if any subsequent syscall fails.
func killProc(pid int, dir string) error {
if pid <= 0 {
return fmt.Errorf("PID must be > 0, got %d", pid)
}
// pipe STDERR output from tailing the Tendermint process to a file
//
// NOTE: This will only work on UNIX systems.
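The comment in the first hunk above is the committer's stated rationale; the practical effect shows up in the second hunk, where the `int(pid)` conversion at the `killProc` call site disappears. A small standalone sketch (not taken from the repository) of the difference between the two standard-library calls:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	arg := "34255" // e.g. the PID argument from `tendermint debug kill <pid> <out.zip>`

	// Old approach: ParseInt always yields an int64, so a caller that needs an
	// int (like killProc(pid int, dir string)) must convert explicitly.
	pid64, err := strconv.ParseInt(arg, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(int(pid64))

	// New approach: Atoi yields a platform-sized int directly, so the value can
	// be passed straight through without a conversion.
	pid, err := strconv.Atoi(arg)
	if err != nil {
		panic(err)
	}
	fmt.Println(pid)
}
```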
@@ -627,6 +627,10 @@ type P2PConfig struct { //nolint: maligned
// outbound).
MaxConnections uint16 `mapstructure:"max-connections"`

// MaxOutgoingConnections defines the maximum number of connected peers (inbound and
// outbound).
MaxOutgoingConnections uint16 `mapstructure:"max-outgoing-connections"`

// MaxIncomingConnectionAttempts rate limits the number of incoming connection
// attempts per IP address.
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`

@@ -667,6 +671,7 @@ func DefaultP2PConfig() *P2PConfig {
ExternalAddress: "",
UPNP: false,
MaxConnections: 64,
MaxOutgoingConnections: 12,
MaxIncomingConnectionAttempts: 100,
FlushThrottleTimeout: 100 * time.Millisecond,
// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.

@@ -699,6 +704,9 @@ func (cfg *P2PConfig) ValidateBasic() error {
if cfg.RecvRate < 0 {
return errors.New("recv-rate can't be negative")
}
if cfg.MaxOutgoingConnections > cfg.MaxConnections {
return errors.New("max-outgoing-connections cannot be larger than max-connections")
}
return nil
}
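A short sketch (not from the diff) of how the new `MaxOutgoingConnections` field interacts with the added `ValidateBasic` check, using the exported `config` package shown above:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultP2PConfig() // MaxConnections: 64, MaxOutgoingConnections: 12

	// Reserving more outgoing slots than the total connection budget is rejected
	// by the new ValidateBasic check.
	cfg.MaxOutgoingConnections = 128
	if err := cfg.ValidateBasic(); err != nil {
		fmt.Println(err) // max-outgoing-connections cannot be larger than max-connections
	}
}
```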
@@ -309,6 +309,10 @@ upnp = {{ .P2P.UPNP }}
# Maximum number of connections (inbound and outbound).
max-connections = {{ .P2P.MaxConnections }}

# Maximum number of connections reserved for outgoing
# connections. Must be less than max-connections
max-outgoing-connections = {{ .P2P.MaxOutgoingConnections }}

# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
@@ -254,7 +254,7 @@ afford to lose all blockchain data!
To reset a blockchain, stop the node and run:

```sh
tendermint unsafe_reset_all
tendermint unsafe-reset-all
```

This command will remove the data directory and reset private validator and
go.mod: 40 lines changed

@@ -4,7 +4,7 @@ go 1.17

require (
github.com/BurntSushi/toml v1.1.0
github.com/adlio/schema v1.3.0
github.com/adlio/schema v1.3.3
github.com/btcsuite/btcd v0.22.1
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/fortytw2/leaktest v1.3.0

@@ -25,12 +25,12 @@ require (
github.com/rs/cors v1.8.2
github.com/rs/zerolog v1.27.0
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.4.0
github.com/spf13/cobra v1.5.0
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.7.2
github.com/stretchr/testify v1.8.0
github.com/tendermint/tm-db v0.6.6
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
google.golang.org/grpc v1.47.0
pgregory.net/rapid v0.4.7

@@ -42,7 +42,7 @@ require (
github.com/creachadair/taskgroup v0.3.2
github.com/golangci/golangci-lint v1.46.0
github.com/google/go-cmp v0.5.8
github.com/vektra/mockery/v2 v2.12.3
github.com/vektra/mockery/v2 v2.14.0
gotest.tools v2.2.0+incompatible
)

@@ -55,7 +55,7 @@ require (
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Microsoft/go-winio v0.5.1 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/OpenPeeDeeP/depguard v1.1.0 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect

@@ -73,8 +73,8 @@ require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/charithe/durationcheck v0.0.9 // indirect
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect
github.com/containerd/continuity v0.2.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/daixiang0/gci v0.3.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denis-tingaikin/go-header v0.4.3 // indirect
@@ -123,6 +123,7 @@ require (
github.com/gostaticanalysis/comment v1.4.2 // indirect
github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-version v1.4.0 // indirect

@@ -167,9 +168,9 @@ require (
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.0.3 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
github.com/pelletier/go-toml/v2 v2.0.2 // indirect
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect

@@ -198,8 +199,8 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
github.com/stretchr/objx v0.1.1 // indirect
github.com/subosito/gotenv v1.3.0 // indirect
github.com/stretchr/objx v0.4.0 // indirect
github.com/subosito/gotenv v1.4.0 // indirect
github.com/sylvia7788/contextcheck v1.0.4 // indirect
github.com/tdakkota/asciicheck v0.1.1 // indirect
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect

@@ -219,15 +220,14 @@ require (
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
golang.org/x/tools v0.1.11 // indirect
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/ini.v1 v1.66.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
honnef.co/go/tools v0.3.1 // indirect

@@ -240,6 +240,6 @@ require (
require (
github.com/creachadair/tomledit v0.0.22
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.34.0
github.com/prometheus/common v0.35.0
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
)
go.sum: 153 lines changed
@@ -1,6 +1,5 @@
|
||||
4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0=
|
||||
4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
|
||||
bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
|
||||
bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
@@ -70,6 +69,7 @@ github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZ
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
@@ -95,8 +95,8 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q
|
||||
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
|
||||
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
|
||||
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
@@ -107,8 +107,8 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/adlio/schema v1.3.0 h1:eSVYLxYWbm/6ReZBCkLw4Fz7uqC+ZNoPvA39bOwi52A=
|
||||
github.com/adlio/schema v1.3.0/go.mod h1:51QzxkpeFs6lRY11kPye26IaFPOV+HqEj01t5aXXKfs=
|
||||
github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I=
|
||||
github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
||||
@@ -150,7 +150,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
|
||||
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
|
||||
github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
|
||||
@@ -185,6 +184,8 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
@@ -196,11 +197,11 @@ github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy
|
||||
github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
|
||||
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M=
|
||||
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
|
||||
@@ -216,16 +217,14 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
||||
github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634=
|
||||
github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
|
||||
github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
@@ -235,8 +234,9 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg=
|
||||
github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc=
|
||||
github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY=
|
||||
@@ -246,7 +246,8 @@ github.com/creachadair/tomledit v0.0.22 h1:lRtepmrwhzDq+g1gv5ftVn5itgo7CjYbm6abK
|
||||
github.com/creachadair/tomledit v0.0.22/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4=
|
||||
github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o=
|
||||
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -266,8 +267,13 @@ github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KP
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/cli v20.10.14+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
|
||||
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
|
||||
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
@@ -376,6 +382,7 @@ github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
|
||||
@@ -394,7 +401,6 @@ github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -497,6 +503,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
|
||||
github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -522,7 +530,6 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
@@ -546,14 +553,12 @@ github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Rep
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
@@ -610,6 +615,9 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
|
||||
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
@@ -702,6 +710,7 @@ github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3
|
||||
github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg=
|
||||
github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
|
||||
github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
|
||||
github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
@@ -771,13 +780,17 @@ github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
@@ -819,7 +832,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI=
|
||||
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
@@ -850,15 +862,18 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
|
||||
github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
|
||||
github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
|
||||
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
|
||||
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
|
||||
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
|
||||
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
|
||||
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
|
||||
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
|
||||
github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM=
|
||||
github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
|
||||
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
|
||||
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
|
||||
@@ -874,8 +889,9 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw=
|
||||
github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI=
|
||||
github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
|
||||
@@ -903,7 +919,6 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
@@ -917,25 +932,21 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE=
|
||||
github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
|
||||
github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE=
|
||||
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
|
||||
github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
|
||||
github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=
|
||||
@@ -966,7 +977,6 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
|
||||
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
|
||||
github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs=
|
||||
github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
@@ -980,13 +990,13 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8
|
||||
github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
|
||||
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
|
||||
github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA=
|
||||
github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI=
|
||||
github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
@@ -1022,7 +1032,6 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
|
||||
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
@@ -1031,10 +1040,10 @@ github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
|
||||
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
|
||||
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
||||
github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
|
||||
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
@@ -1043,9 +1052,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
|
||||
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
|
||||
github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk=
|
||||
github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
|
||||
github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
|
||||
@@ -1057,8 +1064,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3
|
||||
github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
@@ -1068,11 +1076,13 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI=
|
||||
github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
|
||||
github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs=
|
||||
github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo=
|
||||
github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=
|
||||
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
@@ -1103,8 +1113,6 @@ github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoi
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
|
||||
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
|
||||
@@ -1118,11 +1126,18 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
|
||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/vektra/mockery/v2 v2.12.3 h1:74h0R+p75tdr3QNwiNz3MXeCwSP/I5bYUbZY6oT4t20=
|
||||
github.com/vektra/mockery/v2 v2.12.3/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U=
|
||||
github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/yfqs=
|
||||
github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
|
||||
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
@@ -1143,7 +1158,6 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w=
|
||||
gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||
@@ -1221,11 +1235,10 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1271,8 +1284,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1286,7 +1300,6 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -1333,14 +1346,14 @@ golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y=
|
||||
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1408,7 +1421,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1431,8 +1443,8 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1452,7 +1464,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1465,12 +1476,15 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1483,17 +1497,21 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c h1:aFV+BgZ4svzjfabn8ERpuB4JI4N6/rdy1iusx77G3oU=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 h1:EH1Deb8WZJ0xc0WK//leUHXcX9aLE5SymusoTmMZye8=
|
||||
golang.org/x/term v0.0.0-20220411215600-e5f449aeb171/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1531,6 +1549,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@@ -1611,14 +1630,14 @@ golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II=
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||
@@ -1764,7 +1783,6 @@ google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP
|
||||
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
@@ -1793,7 +1811,6 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
@@ -1830,9 +1847,9 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
|
||||
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=
|
||||
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
@@ -1855,6 +1872,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@@ -1530,7 +1530,8 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32
}

sp := cs.state.ConsensusParams.Synchrony.SynchronyParamsOrDefaults()
if cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() {
//TODO: Remove this temporary fix when the complete solution is ready. See #8739
if !cs.replayMode && cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() {
logger.Debug("prevote step: Proposal is not timely; prevoting nil",
"proposed",
tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano),
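The hunk above only gates the existing timeliness check behind !cs.replayMode, so blocks replayed from the WAL are not rejected for stale timestamps. For readers unfamiliar with proposer-based timestamps, the following minimal sketch illustrates the timeliness rule that proposalIsTimely relies on; the SynchronyParams names follow the PBTS spec, but the helper itself is illustrative, not the actual implementation.

package main

import (
	"fmt"
	"time"
)

// SynchronyParams mirrors the consensus synchrony parameters used by
// proposer-based timestamps (an illustrative copy, not the real type).
type SynchronyParams struct {
	Precision    time.Duration // assumed bound on clock drift between correct nodes
	MessageDelay time.Duration // assumed bound on proposal propagation delay
}

// isTimely reports whether a proposal timestamp is acceptable at the local
// receive time, following the PBTS rule:
// proposalTime - Precision <= localTime <= proposalTime + MessageDelay + Precision.
func isTimely(proposalTime, localTime time.Time, sp SynchronyParams) bool {
	lower := proposalTime.Add(-sp.Precision)
	upper := proposalTime.Add(sp.MessageDelay).Add(sp.Precision)
	return !localTime.Before(lower) && !localTime.After(upper)
}

func main() {
	sp := SynchronyParams{Precision: 500 * time.Millisecond, MessageDelay: 2 * time.Second}
	now := time.Now()
	fmt.Println(isTimely(now.Add(-1*time.Second), now, sp))  // true: inside the window
	fmt.Println(isTimely(now.Add(-10*time.Second), now, sp)) // false: too old, prevote nil
}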
@@ -10,13 +10,13 @@ import (
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/inspect/rpc"
tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
rpccore "github.com/tendermint/tendermint/internal/rpc/core"
"github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/internal/state/indexer"
"github.com/tendermint/tendermint/internal/state/indexer/sink"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/libs/log"
tmstrings "github.com/tendermint/tendermint/libs/strings"
"github.com/tendermint/tendermint/types"

"golang.org/x/sync/errgroup"
@@ -631,7 +631,7 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0}))
} else {
err = txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})
fmt.Print(err.Error())
require.EqualError(t, err, "test error")
}
})
}
@@ -90,7 +90,7 @@ func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) {
expectedLen int
}{
{
name: "larest priority; single tx",
name: "largest priority; single tx",
priority: int64(max + 1),
txSize: 5,
totalSize: totalSize,
@@ -98,7 +98,7 @@ func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) {
expectedLen: 1,
},
{
name: "larest priority; multi tx",
name: "largest priority; multi tx",
priority: int64(max + 1),
txSize: 17,
totalSize: totalSize,
@@ -106,7 +106,7 @@ func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) {
expectedLen: 4,
},
{
name: "larest priority; out of capacity",
name: "largest priority; out of capacity",
priority: int64(max + 1),
txSize: totalSize + 1,
totalSize: totalSize,
@@ -100,7 +100,8 @@ type MConnection struct {

// used to ensure FlushStop and OnStop
// are safe to call concurrently.
stopMtx sync.Mutex
stopSignal <-chan struct{}

cancel context.CancelFunc

@@ -207,6 +208,7 @@ func (c *MConnection) OnStart(ctx context.Context) error {
c.quitSendRoutine = make(chan struct{})
c.doneSendRoutine = make(chan struct{})
c.quitRecvRoutine = make(chan struct{})
c.stopSignal = ctx.Done()
c.setRecvLastMsgAt(time.Now())
go c.sendRoutine(ctx)
go c.recvRoutine(ctx)
@@ -681,6 +683,8 @@ func (ch *channel) sendBytes(bytes []byte) bool {
return true
case <-time.After(defaultSendTimeout):
return false
case <-ch.conn.stopSignal:
return false
}
}
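The new stopSignal field is wired to ctx.Done() in OnStart, so sendBytes can return immediately when the connection is being shut down instead of waiting out the full send timeout. A self-contained sketch of the same select pattern follows; the channel names and timeout value are illustrative, not taken from the changeset.

package main

import (
	"context"
	"fmt"
	"time"
)

const sendTimeout = 100 * time.Millisecond // illustrative stand-in for defaultSendTimeout

// trySend attempts to enqueue msg, giving up on timeout or when stop is closed.
func trySend(queue chan<- []byte, stop <-chan struct{}, msg []byte) bool {
	select {
	case queue <- msg:
		return true
	case <-time.After(sendTimeout):
		return false // queue stayed full for the whole timeout
	case <-stop:
		return false // shutdown requested; do not keep blocking
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	queue := make(chan []byte) // unbuffered and never drained, so sends would block
	cancel()                   // simulate the connection being stopped

	// Returns false via the stop case well before the timeout expires.
	fmt.Println(trySend(queue, ctx.Done(), []byte("ping")))
}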
@@ -14,11 +14,23 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
labels = append(labels, labelsAndValues[i])
}
return &Metrics{
Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
PeersConnected: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers",
Help: "Number of peers.",
Name: "peers_connected",
Help: "Number of peers connected.",
}, labels).With(labelsAndValues...),
PeersStored: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers_stored",
Help: "Number of peers in the peer store database.",
}, labels).With(labelsAndValues...),
PeersInactivated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers_inactivated",
Help: "Number of inactive peers stored.",
}, labels).With(labelsAndValues...),
PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
@@ -38,6 +50,36 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "peer_pending_send_bytes",
Help: "Number of bytes pending being sent to a given peer.",
}, append(labels, "peer_id")).With(labelsAndValues...),
PeersConnectedSuccess: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers_connected_success",
Help: "Number of successful connection attempts",
}, labels).With(labelsAndValues...),
PeersConnectedFailure: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers_connected_failure",
Help: "Number of failed connection attempts",
}, labels).With(labelsAndValues...),
PeersConnectedIncoming: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers_connected_incoming",
Help: "Number of peers connected as a result of the peer dialing this node.",
}, labels).With(labelsAndValues...),
PeersConnectedOutgoing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers_connected_outgoing",
Help: "Number of peers connected as a result of dialing the peer.",
}, labels).With(labelsAndValues...),
PeersEvicted: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "peers_evicted",
Help: "Number of peers evicted by this node.",
}, labels).With(labelsAndValues...),
RouterPeerQueueRecv: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
@@ -73,10 +115,17 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {

func NopMetrics() *Metrics {
return &Metrics{
Peers: discard.NewGauge(),
PeersConnected: discard.NewGauge(),
PeersStored: discard.NewGauge(),
PeersInactivated: discard.NewGauge(),
PeerReceiveBytesTotal: discard.NewCounter(),
PeerSendBytesTotal: discard.NewCounter(),
PeerPendingSendBytes: discard.NewGauge(),
PeersConnectedSuccess: discard.NewCounter(),
PeersConnectedFailure: discard.NewCounter(),
PeersConnectedIncoming: discard.NewGauge(),
PeersConnectedOutgoing: discard.NewGauge(),
PeersEvicted: discard.NewCounter(),
RouterPeerQueueRecv: discard.NewHistogram(),
RouterPeerQueueSend: discard.NewHistogram(),
RouterChannelQueueSend: discard.NewHistogram(),
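For context, constructors in this style are used by passing a namespace plus constant label key/value pairs and then updating the returned go-kit gauges and counters. Below is a hedged usage sketch with the same go-kit/Prometheus packages this file imports; the namespace, label, and port are examples only.

package main

import (
	"net/http"

	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Same shape as the constructor above: a namespaced gauge with a
	// constant label ("chain_id" and its value are examples).
	peersConnected := prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
		Namespace: "tendermint",
		Subsystem: "p2p",
		Name:      "peers_connected",
		Help:      "Number of peers connected.",
	}, []string{"chain_id"}).With("chain_id", "example-chain")

	peersConnected.Add(1)  // a peer connected
	peersConnected.Add(-1) // and later disconnected

	// NewGaugeFrom registers the collector with the default registry,
	// which promhttp then exposes.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":26660", nil)
}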
@@ -26,8 +26,12 @@ var (

// Metrics contains metrics exposed by this package.
type Metrics struct {
// Number of peers.
Peers metrics.Gauge
// Number of peers connected.
PeersConnected metrics.Gauge
// Number of peers in the peer store database.
PeersStored metrics.Gauge
// Number of inactive peers stored.
PeersInactivated metrics.Gauge
// Number of bytes per channel received from a given peer.
PeerReceiveBytesTotal metrics.Counter `metrics_labels:"peer_id, chID, message_type"`
// Number of bytes per channel sent to a given peer.
@@ -35,6 +39,21 @@ type Metrics struct {
// Number of bytes pending being sent to a given peer.
PeerPendingSendBytes metrics.Gauge `metrics_labels:"peer_id"`

// Number of successful connection attempts
PeersConnectedSuccess metrics.Counter
// Number of failed connection attempts
PeersConnectedFailure metrics.Counter

// Number of peers connected as a result of the peer
// dialing this node.
PeersConnectedIncoming metrics.Gauge
// Number of peers connected as a result of dialing
// the peer.
PeersConnectedOutgoing metrics.Gauge

// Number of peers evicted by this node.
PeersEvicted metrics.Counter

// RouterPeerQueueRecv defines the time taken to read off of a peer's queue
// before sending on the connection.
//metrics:The time taken to read off of a peer's queue before sending on the connection.
@@ -13,6 +13,8 @@ import (

p2p "github.com/tendermint/tendermint/internal/p2p"

time "time"

types "github.com/tendermint/tendermint/types"
)

@@ -35,20 +37,20 @@ func (_m *Connection) Close() error {
return r0
}

// Handshake provides a mock function with given fields: _a0, _a1, _a2
func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
ret := _m.Called(_a0, _a1, _a2)
// Handshake provides a mock function with given fields: _a0, _a1, _a2, _a3
func (_m *Connection) Handshake(_a0 context.Context, _a1 time.Duration, _a2 types.NodeInfo, _a3 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
ret := _m.Called(_a0, _a1, _a2, _a3)

var r0 types.NodeInfo
if rf, ok := ret.Get(0).(func(context.Context, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok {
r0 = rf(_a0, _a1, _a2)
if rf, ok := ret.Get(0).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok {
r0 = rf(_a0, _a1, _a2, _a3)
} else {
r0 = ret.Get(0).(types.NodeInfo)
}

var r1 crypto.PubKey
if rf, ok := ret.Get(1).(func(context.Context, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
r1 = rf(_a0, _a1, _a2)
if rf, ok := ret.Get(1).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
r1 = rf(_a0, _a1, _a2, _a3)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(crypto.PubKey)
@@ -56,8 +58,8 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 cry
}

var r2 error
if rf, ok := ret.Get(2).(func(context.Context, types.NodeInfo, crypto.PrivKey) error); ok {
r2 = rf(_a0, _a1, _a2)
if rf, ok := ret.Get(2).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) error); ok {
r2 = rf(_a0, _a1, _a2, _a3)
} else {
r2 = ret.Error(2)
}
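Because Handshake now takes a time.Duration, tests that program this generated mock must include the timeout in their expectations. The sketch below shows the same mockery/testify pattern on a stand-in type; Greeter and its argument values are placeholders, not part of the changeset.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/stretchr/testify/mock"
)

// Greeter stands in for a mockery-generated mock: each method forwards its
// arguments to Called and unpacks the programmed return values.
type Greeter struct {
	mock.Mock
}

func (g *Greeter) Handshake(ctx context.Context, timeout time.Duration, name string) (string, error) {
	ret := g.Called(ctx, timeout, name)
	return ret.String(0), ret.Error(1)
}

func main() {
	g := new(Greeter)
	// The expectation now has to mention the extra timeout argument too.
	g.On("Handshake", mock.Anything, 5*time.Second, "alice").Return("hello alice", nil)

	out, err := g.Handshake(context.Background(), 5*time.Second, "alice")
	fmt.Println(out, err) // hello alice <nil>
}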
@@ -252,11 +252,13 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions)
require.NotNil(t, ep, "transport not listening an endpoint")

peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MinRetryTime: 10 * time.Millisecond,
MaxRetryTime: 100 * time.Millisecond,
RetryTimeJitter: time.Millisecond,
MaxPeers: opts.MaxPeers,
MaxConnected: opts.MaxConnected,
MinRetryTime: 10 * time.Millisecond,
DisconnectCooldownPeriod: 10 * time.Millisecond,
MaxRetryTime: 100 * time.Millisecond,
RetryTimeJitter: time.Millisecond,
MaxPeers: opts.MaxPeers,
MaxConnected: opts.MaxConnected,
Metrics: p2p.NopMetrics(),
})
require.NoError(t, err)

@@ -268,7 +270,7 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions)
func() *types.NodeInfo { return &nodeInfo },
transport,
ep,
p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
p2p.RouterOptions{},
)

require.NoError(t, err)
@@ -38,11 +38,18 @@ const (
PeerStatusBad PeerStatus = "bad" // peer observed as bad
)

// PeerScore is a numeric score assigned to a peer (higher is better).
type PeerScore uint8
type peerConnectionDirection int

const (
PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers
peerConnectionIncoming peerConnectionDirection = iota + 1
peerConnectionOutgoing
)

// PeerScore is a numeric score assigned to a peer (higher is better).
type PeerScore int16

const (
PeerScorePersistent PeerScore = math.MaxInt16 // persistent peers
MaxPeerScoreNotPersistent PeerScore = PeerScorePersistent - 1
)
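Widening PeerScore from uint8 to int16 leaves MaxInt16 reserved for persistent peers while ordinary peers top out at MaxPeerScoreNotPersistent. A small sketch of that capping idea follows; the capScore helper is illustrative and not code from the changeset.

package main

import (
	"fmt"
	"math"
)

type PeerScore int16

const (
	PeerScorePersistent       PeerScore = math.MaxInt16           // reserved for persistent peers
	MaxPeerScoreNotPersistent PeerScore = PeerScorePersistent - 1 // ceiling for everyone else
)

// capScore keeps ordinary peers strictly below the persistent-peer score.
func capScore(s int64) PeerScore {
	if s > int64(MaxPeerScoreNotPersistent) {
		return MaxPeerScoreNotPersistent
	}
	return PeerScore(s)
}

func main() {
	fmt.Println(capScore(100))       // 100
	fmt.Println(capScore(40_000))    // capped at 32766
	fmt.Println(PeerScorePersistent) // 32767
}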
@@ -101,6 +108,13 @@ type PeerManagerOptions struct {
// outbound). 0 means no limit.
MaxConnected uint16

// MaxOutgoingConnections specifies how many outgoing
// connections a node will maintain. It must be lower than MaxConnected. If it is
// 0, then all connections can be outgoing. Once this limit is
// reached, the node will not dial peers, allowing the
// remaining peer connections to be used by incoming connections.
MaxOutgoingConnections uint16

// MaxConnectedUpgrade is the maximum number of additional connections to
// use for probing any better-scored peers to upgrade to when all connection
// slots are full. 0 disables peer upgrading.
@@ -130,6 +144,10 @@ type PeerManagerOptions struct {
// retry times, to avoid thundering herds. 0 disables jitter.
RetryTimeJitter time.Duration

// DisconnectCooldownPeriod is the amount of time after we
// disconnect from a peer before we'll consider dialing a new peer
DisconnectCooldownPeriod time.Duration

// PeerScores sets fixed scores for specific peers. It is mainly used
// for testing. A score of 0 is ignored.
PeerScores map[types.NodeID]PeerScore
@@ -145,6 +163,9 @@ type PeerManagerOptions struct {
// persistentPeers provides fast PersistentPeers lookups. It is built
// by optimize().
persistentPeers map[types.NodeID]bool

// Peer Metrics
Metrics *Metrics
}

// Validate validates the options.
@@ -193,14 +214,18 @@ func (o *PeerManagerOptions) Validate() error {
}
}

if o.MaxOutgoingConnections > 0 && o.MaxConnected < o.MaxOutgoingConnections {
return errors.New("cannot set MaxOutgoingConnections to a value larger than MaxConnected")
}

return nil
}
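The new Validate rule ties the two caps together: an outgoing-connection limit only makes sense if it fits inside the overall connection limit. A standalone sketch of just that rule, using illustrative local types rather than the internal p2p package:

package main

import (
	"errors"
	"fmt"
)

// options mirrors only the two fields involved in the new check.
type options struct {
	MaxConnected           uint16
	MaxOutgoingConnections uint16
}

func (o options) validate() error {
	// Same condition as in the Validate hunk above.
	if o.MaxOutgoingConnections > 0 && o.MaxConnected < o.MaxOutgoingConnections {
		return errors.New("cannot set MaxOutgoingConnections to a value larger than MaxConnected")
	}
	return nil
}

func main() {
	fmt.Println(options{MaxConnected: 64, MaxOutgoingConnections: 12}.validate()) // <nil>
	fmt.Println(options{MaxConnected: 8, MaxOutgoingConnections: 12}.validate())  // error
}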
// isPersistentPeer checks if a peer is in PersistentPeers. It will panic
// isPersistent checks if a peer is in PersistentPeers. It will panic
// if called before optimize().
func (o *PeerManagerOptions) isPersistent(id types.NodeID) bool {
if o.persistentPeers == nil {
panic("isPersistentPeer() called before optimize()")
panic("isPersistent() called before optimize()")
}
return o.persistentPeers[id]
}
@@ -261,19 +286,20 @@ func (o *PeerManagerOptions) optimize() {
type PeerManager struct {
selfID types.NodeID
options PeerManagerOptions
metrics *Metrics
rand *rand.Rand
dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes
evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes

mtx sync.Mutex
store *peerStore
subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address)
dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail)
upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail)
connected map[types.NodeID]bool // connected peers (Dialed/Accepted → Disconnected)
ready map[types.NodeID]bool // ready peers (Ready → Disconnected)
evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext)
evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected)
subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address)
dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail)
upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail)
connected map[types.NodeID]peerConnectionDirection // connected peers (Dialed/Accepted → Disconnected)
ready map[types.NodeID]bool // ready peers (Ready → Disconnected)
evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext)
evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected)
}

// NewPeerManager creates a new peer manager.
@@ -298,16 +324,22 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec
|
||||
dialWaker: tmsync.NewWaker(),
|
||||
evictWaker: tmsync.NewWaker(),
|
||||
metrics: NopMetrics(),
|
||||
|
||||
store: store,
|
||||
dialing: map[types.NodeID]bool{},
|
||||
upgrading: map[types.NodeID]types.NodeID{},
|
||||
connected: map[types.NodeID]bool{},
|
||||
connected: map[types.NodeID]peerConnectionDirection{},
|
||||
ready: map[types.NodeID]bool{},
|
||||
evict: map[types.NodeID]bool{},
|
||||
evicting: map[types.NodeID]bool{},
|
||||
subscriptions: map[*PeerUpdates]*PeerUpdates{},
|
||||
}
|
||||
|
||||
if options.Metrics != nil {
|
||||
peerManager.metrics = options.Metrics
|
||||
}
|
||||
|
||||
if err = peerManager.configurePeers(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -339,6 +371,9 @@ func (m *PeerManager) configurePeers() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
m.metrics.PeersStored.Add(float64(m.store.Size()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -368,20 +403,45 @@ func (m *PeerManager) prunePeers() error {
ranked := m.store.Ranked()
for i := len(ranked) - 1; i >= 0; i-- {
peerID := ranked[i].ID

switch {
case m.store.Size() <= int(m.options.MaxPeers):
return nil
case m.dialing[peerID]:
case m.connected[peerID]:
case m.isConnected(peerID):
default:
if err := m.store.Delete(peerID); err != nil {
return err
}
m.metrics.PeersStored.Add(-1)
}
}
return nil
}

func (m *PeerManager) isConnected(peerID types.NodeID) bool {
_, ok := m.connected[peerID]
return ok
}

type connectionStats struct {
incoming uint16
outgoing uint16
}

func (m *PeerManager) getConnectedInfo() connectionStats {
out := connectionStats{}
for _, direction := range m.connected {
switch direction {
case peerConnectionIncoming:
out.incoming++
case peerConnectionOutgoing:
out.outgoing++
}
}
return out
}

// Add adds a peer to the manager, given as an address. If the peer already
// exists, the address is added to it if it isn't already present. This will push
// low scoring peers out of the address book if it exceeds the maximum size.
@@ -405,12 +465,17 @@ func (m *PeerManager) Add(address NodeAddress) (bool, error) {
if ok {
return false, nil
}
if peer.Inactive {
return false, nil
}

// else add the new address
peer.AddressInfo[address] = &peerAddressInfo{Address: address}
if err := m.store.Set(peer); err != nil {
return false, err
}

m.metrics.PeersStored.Add(1)
if err := m.prunePeers(); err != nil {
return true, err
}
@@ -437,18 +502,28 @@ func (m *PeerManager) HasMaxPeerCapacity() bool {
return len(m.connected) >= int(m.options.MaxConnected)
}

func (m *PeerManager) HasDialedMaxPeers() bool {
m.mtx.Lock()
defer m.mtx.Unlock()

stats := m.getConnectedInfo()

return stats.outgoing >= m.options.MaxOutgoingConnections
}

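HasDialedMaxPeers gives callers a cheap check against the new MaxOutgoingConnections cap before starting another dial. A hedged sketch of how a dialing routine might consult it (dialRoutine is an illustrative name, not from this patch):

func dialRoutine(ctx context.Context, m *PeerManager) {
	for {
		if m.HasDialedMaxPeers() {
			// leave the remaining slots for incoming connections
			return
		}
		addr, err := m.DialNext(ctx)
		if err != nil {
			return
		}
		_ = addr // dial addr via the transport layer here
	}
}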
// DialNext finds an appropriate peer address to dial, and marks it as dialing.
// If no peer is found, or all connection slots are full, it blocks until one
// becomes available. The caller must call Dialed() or DialFailed() for the
// returned peer.
func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) {
for {
address, err := m.TryDialNext()
if err != nil || (address != NodeAddress{}) {
return address, err
if address := m.TryDialNext(); (address != NodeAddress{}) {
return address, nil
}

select {
case <-m.dialWaker.Sleep():
continue
case <-ctx.Done():
return NodeAddress{}, ctx.Err()
}
|
||||
|
||||
// TryDialNext is equivalent to DialNext(), but immediately returns an empty
|
||||
// address if no peers or connection slots are available.
|
||||
func (m *PeerManager) TryDialNext() (NodeAddress, error) {
|
||||
func (m *PeerManager) TryDialNext() NodeAddress {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
// We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including
|
||||
// MaxConnectedUpgrade allows us to probe additional peers that have a
|
||||
// higher score than any other peers, and if successful evict it.
|
||||
if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >=
|
||||
int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return NodeAddress{}, nil
|
||||
if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return NodeAddress{}
|
||||
}
|
||||
|
||||
cinfo := m.getConnectedInfo()
|
||||
if m.options.MaxOutgoingConnections > 0 && cinfo.outgoing >= m.options.MaxOutgoingConnections {
|
||||
return NodeAddress{}
|
||||
}
|
||||
|
||||
for _, peer := range m.store.Ranked() {
|
||||
if m.dialing[peer.ID] || m.connected[peer.ID] {
|
||||
if m.dialing[peer.ID] || m.isConnected(peer.ID) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !peer.LastDisconnected.IsZero() && time.Since(peer.LastDisconnected) < m.options.DisconnectCooldownPeriod {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -479,6 +562,10 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
if id, ok := m.store.Resolve(addressInfo.Address); ok && (m.isConnected(id) || m.dialing[id]) {
|
||||
continue
|
||||
}
|
||||
|
||||
// We now have an eligible address to dial. If we're full but have
|
||||
// upgrade capacity (as checked above), we find a lower-scored peer
|
||||
// we can replace and mark it as upgrading so noone else claims it.
|
||||
@@ -489,25 +576,24 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) {
|
||||
if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
|
||||
upgradeFromPeer := m.findUpgradeCandidate(peer.ID, peer.Score())
|
||||
if upgradeFromPeer == "" {
|
||||
return NodeAddress{}, nil
|
||||
return NodeAddress{}
|
||||
}
|
||||
m.upgrading[upgradeFromPeer] = peer.ID
|
||||
}
|
||||
|
||||
m.dialing[peer.ID] = true
|
||||
return addressInfo.Address, nil
|
||||
return addressInfo.Address
|
||||
}
|
||||
}
|
||||
return NodeAddress{}, nil
|
||||
return NodeAddress{}
|
||||
}
|
||||
|
||||
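TryDialNext no longer returns an error; an empty NodeAddress is now the only "nothing to dial" signal. A small caller sketch under that assumption (dialPeer is a hypothetical transport call):

address := peerManager.TryDialNext()
if (address == NodeAddress{}) {
	return // nothing eligible right now; wait on DialNext or the dial waker
}
if err := dialPeer(ctx, address); err != nil {
	_ = peerManager.DialFailed(ctx, address)
} else {
	_ = peerManager.Dialed(address)
}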
// DialFailed reports a failed dial attempt. This will make the peer available
|
||||
// for dialing again when appropriate (possibly after a retry timeout).
|
||||
//
|
||||
// FIXME: This should probably delete or mark bad addresses/peers after some time.
|
||||
func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
m.metrics.PeersConnectedFailure.Add(1)
|
||||
|
||||
delete(m.dialing, address.NodeID)
|
||||
for from, to := range m.upgrading {
|
||||
@@ -527,6 +613,7 @@ func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error
|
||||
|
||||
addressInfo.LastDialFailure = time.Now().UTC()
|
||||
addressInfo.DialFailures++
|
||||
|
||||
if err := m.store.Set(peer); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -560,6 +647,8 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
m.metrics.PeersConnectedSuccess.Add(1)
|
||||
|
||||
delete(m.dialing, address.NodeID)
|
||||
|
||||
var upgradeFromPeer types.NodeID
|
||||
@@ -574,12 +663,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
if address.NodeID == m.selfID {
|
||||
return fmt.Errorf("rejecting connection to self (%v)", address.NodeID)
|
||||
}
|
||||
if m.connected[address.NodeID] {
|
||||
if m.isConnected(address.NodeID) {
|
||||
return fmt.Errorf("peer %v is already connected", address.NodeID)
|
||||
}
|
||||
if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
|
||||
if upgradeFromPeer == "" || len(m.connected) >=
|
||||
int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
if upgradeFromPeer == "" || len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return fmt.Errorf("already connected to maximum number of peers")
|
||||
}
|
||||
}
|
||||
@@ -589,6 +677,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
return fmt.Errorf("peer %q was removed while dialing", address.NodeID)
|
||||
}
|
||||
now := time.Now().UTC()
|
||||
if peer.Inactive {
|
||||
m.metrics.PeersInactivated.Add(-1)
|
||||
}
|
||||
peer.Inactive = false
|
||||
|
||||
peer.LastConnected = now
|
||||
if addressInfo, ok := peer.AddressInfo[address]; ok {
|
||||
addressInfo.DialFailures = 0
|
||||
@@ -600,8 +693,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if upgradeFromPeer != "" && m.options.MaxConnected > 0 &&
|
||||
len(m.connected) >= int(m.options.MaxConnected) {
|
||||
if upgradeFromPeer != "" && m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
|
||||
// Look for an even lower-scored peer that may have appeared since we
|
||||
// started the upgrade.
|
||||
if p, ok := m.store.Get(upgradeFromPeer); ok {
|
||||
@@ -610,9 +702,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
}
|
||||
}
|
||||
m.evict[upgradeFromPeer] = true
|
||||
m.evictWaker.Wake()
|
||||
}
|
||||
m.connected[peer.ID] = true
|
||||
m.evictWaker.Wake()
|
||||
|
||||
m.metrics.PeersConnectedOutgoing.Add(1)
|
||||
m.connected[peer.ID] = peerConnectionOutgoing
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -641,11 +735,10 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error {
|
||||
if peerID == m.selfID {
|
||||
return fmt.Errorf("rejecting connection from self (%v)", peerID)
|
||||
}
|
||||
if m.connected[peerID] {
|
||||
if m.isConnected(peerID) {
|
||||
return fmt.Errorf("peer %q is already connected", peerID)
|
||||
}
|
||||
if m.options.MaxConnected > 0 &&
|
||||
len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return fmt.Errorf("already connected to maximum number of peers")
|
||||
}
|
||||
|
||||
@@ -670,12 +763,17 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error {
|
||||
}
|
||||
}
|
||||
|
||||
if peer.Inactive {
|
||||
m.metrics.PeersInactivated.Add(-1)
|
||||
}
|
||||
peer.Inactive = false
|
||||
peer.LastConnected = time.Now().UTC()
|
||||
if err := m.store.Set(peer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.connected[peerID] = true
|
||||
m.metrics.PeersConnectedIncoming.Add(1)
|
||||
m.connected[peerID] = peerConnectionIncoming
|
||||
if upgradeFromPeer != "" {
|
||||
m.evict[upgradeFromPeer] = true
|
||||
}
|
||||
@@ -694,7 +792,7 @@ func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID, channels C
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.connected[peerID] {
|
||||
if m.isConnected(peerID) {
|
||||
m.ready[peerID] = true
|
||||
m.broadcast(ctx, PeerUpdate{
|
||||
NodeID: peerID,
|
||||
@@ -730,7 +828,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) {
|
||||
// random one.
|
||||
for peerID := range m.evict {
|
||||
delete(m.evict, peerID)
|
||||
if m.connected[peerID] && !m.evicting[peerID] {
|
||||
if m.isConnected(peerID) && !m.evicting[peerID] {
|
||||
m.evicting[peerID] = true
|
||||
return peerID, nil
|
||||
}
|
||||
@@ -747,7 +845,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) {
|
||||
ranked := m.store.Ranked()
|
||||
for i := len(ranked) - 1; i >= 0; i-- {
|
||||
peer := ranked[i]
|
||||
if m.connected[peer.ID] && !m.evicting[peer.ID] {
|
||||
if m.isConnected(peer.ID) && !m.evicting[peer.ID] {
|
||||
m.evicting[peer.ID] = true
|
||||
return peer.ID, nil
|
||||
}
|
||||
@@ -762,6 +860,13 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) {
m.mtx.Lock()
defer m.mtx.Unlock()

switch m.connected[peerID] {
case peerConnectionIncoming:
m.metrics.PeersConnectedIncoming.Add(-1)
case peerConnectionOutgoing:
m.metrics.PeersConnectedOutgoing.Add(-1)
}

ready := m.ready[peerID]

delete(m.connected, peerID)
@@ -770,6 +875,22 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) {
delete(m.evicting, peerID)
delete(m.ready, peerID)

if peer, ok := m.store.Get(peerID); ok {
peer.LastDisconnected = time.Now()
_ = m.store.Set(peer)
// launch a goroutine to ping the dialWaker when the
// disconnected peer can be dialed again.
go func() {
timer := time.NewTimer(m.options.DisconnectCooldownPeriod)
defer timer.Stop()
select {
case <-timer.C:
m.dialWaker.Wake()
case <-ctx.Done():
}
}()
}

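With this change a peer that just disconnected is skipped by TryDialNext until DisconnectCooldownPeriod elapses, and the goroutine above wakes the dialer once the cooldown is over. A configuration sketch with illustrative values only:

opts := PeerManagerOptions{
	MaxConnected:             64,
	MaxOutgoingConnections:   12,
	DisconnectCooldownPeriod: 15 * time.Second,
}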
if ready {
|
||||
m.broadcast(ctx, PeerUpdate{
|
||||
NodeID: peerID,
|
||||
@@ -792,17 +913,34 @@ func (m *PeerManager) Errored(peerID types.NodeID, err error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.connected[peerID] {
|
||||
if m.isConnected(peerID) {
|
||||
m.evict[peerID] = true
|
||||
}
|
||||
|
||||
m.evictWaker.Wake()
|
||||
}
|
||||
|
||||
// Inactivate marks a peer as inactive which means we won't attempt to
|
||||
// dial this peer again. A peer can be reactivated by successfully
|
||||
// dialing and connecting to the node.
|
||||
func (m *PeerManager) Inactivate(peerID types.NodeID) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
peer, ok := m.store.peers[peerID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
peer.Inactive = true
|
||||
m.metrics.PeersInactivated.Add(1)
|
||||
return m.store.Set(*peer)
|
||||
}
|
||||
|
||||
// Advertise returns a list of peer addresses to advertise to a peer.
|
||||
//
|
||||
// FIXME: This is fairly naïve and only returns the addresses of the
|
||||
// highest-ranked peers.
|
||||
// It sorts all peers in the peer store, and assembles a list of peers
|
||||
// that is most likely to include the highest priority of peers.
|
||||
func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
@@ -815,19 +953,92 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress
addresses = append(addresses, m.options.SelfAddress)
}

for _, peer := range m.store.Ranked() {
var numAddresses int
var totalScore int
ranked := m.store.Ranked()
seenAddresses := map[NodeAddress]struct{}{}
scores := map[types.NodeID]int{}

// get the total number of possible addresses
for _, peer := range ranked {
if peer.ID == peerID {
continue
}
score := int(peer.Score())

for nodeAddr, addressInfo := range peer.AddressInfo {
if len(addresses) >= int(limit) {
return addresses
totalScore += score
scores[peer.ID] = score
for addr := range peer.AddressInfo {
if _, ok := m.options.PrivatePeers[addr.NodeID]; !ok {
numAddresses++
}
}
}

var attempts uint16
var addedLastIteration bool

// if the number of addresses is less than the number of peers
// to advertise, adjust the limit downwards
if numAddresses < int(limit) {
limit = uint16(numAddresses)
}

// collect addresses until we have the number requested
// (limit), or we've added all known addresses, or we've tried
// at least 256 times and the last time we iterated over
// remaining addresses we added no new candidates.
for len(addresses) < int(limit) && (attempts < (limit*2) || !addedLastIteration) {
attempts++
addedLastIteration = false

for idx, peer := range ranked {
if peer.ID == peerID {
continue
}

// only add non-private NodeIDs
if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
addresses = append(addresses, addressInfo.Address)
if len(addresses) >= int(limit) {
break
}

for nodeAddr, addressInfo := range peer.AddressInfo {
if len(addresses) >= int(limit) {
break
}

// only look at each address once, by
// tracking a set of addresses seen
if _, ok := seenAddresses[addressInfo.Address]; ok {
continue
}

// only add non-private NodeIDs
if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
// add the address if the total number of ranked addresses
// will fit within the limit, or otherwise add
// addresses based on a coin flip.

// the coin flip is usually weighted by the score, but
// 10% of the time we'll randomly insert a "losing"
// peer.

// nolint:gosec // G404: Use of weak random number generator
if numAddresses <= int(limit) || rand.Intn(totalScore+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 {
addresses = append(addresses, addressInfo.Address)
addedLastIteration = true
seenAddresses[addressInfo.Address] = struct{}{}
}
} else {
seenAddresses[addressInfo.Address] = struct{}{}
// if the number of addresses
// is the same as the limit,
// we should remove private
// addresses from the limit so
// we can still return early.
if numAddresses == int(limit) {
limit--
}
}
}
}
}
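Address selection above is probabilistic: an address is normally included in proportion to its peer's share of the total score, with a small fallback chance of including a low-ranked peer anyway. The coin flip in isolation (a sketch using the same expression as the loop body):

// include reports whether to advertise an address from a peer with the
// given score, where idx is the peer's position in the ranked list.
func include(score, totalScore, idx int) bool {
	return rand.Intn(totalScore+1) <= score+1 || rand.Intn((idx+1)*10) <= idx+1
}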
@@ -901,8 +1112,14 @@ func (m *PeerManager) processPeerEvent(ctx context.Context, pu PeerUpdate) {
|
||||
|
||||
switch pu.Status {
|
||||
case PeerStatusBad:
|
||||
if m.store.peers[pu.NodeID].MutableScore == math.MinInt16 {
|
||||
return
|
||||
}
|
||||
m.store.peers[pu.NodeID].MutableScore--
|
||||
case PeerStatusGood:
|
||||
if m.store.peers[pu.NodeID].MutableScore == math.MaxInt16 {
|
||||
return
|
||||
}
|
||||
m.store.peers[pu.NodeID].MutableScore++
|
||||
}
|
||||
}
|
||||
@@ -993,9 +1210,11 @@ func (m *PeerManager) findUpgradeCandidate(id types.NodeID, score PeerScore) typ
|
||||
for i := len(ranked) - 1; i >= 0; i-- {
|
||||
candidate := ranked[i]
|
||||
switch {
|
||||
case candidate.ID == id:
|
||||
continue
|
||||
case candidate.Score() >= score:
|
||||
return "" // no further peers can be scored lower, due to sorting
|
||||
case !m.connected[candidate.ID]:
|
||||
case !m.isConnected(candidate.ID):
|
||||
case m.evict[candidate.ID]:
|
||||
case m.evicting[candidate.ID]:
|
||||
case m.upgrading[candidate.ID] != "":
|
||||
@@ -1044,6 +1263,7 @@ func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration
|
||||
type peerStore struct {
|
||||
db dbm.DB
|
||||
peers map[types.NodeID]*peerInfo
|
||||
index map[NodeAddress]types.NodeID
|
||||
ranked []*peerInfo // cache for Ranked(), nil invalidates cache
|
||||
}
|
||||
|
||||
@@ -1063,6 +1283,7 @@ func newPeerStore(db dbm.DB) (*peerStore, error) {
|
||||
// loadPeers loads all peers from the database into memory.
|
||||
func (s *peerStore) loadPeers() error {
|
||||
peers := map[types.NodeID]*peerInfo{}
|
||||
addrs := map[NodeAddress]types.NodeID{}
|
||||
|
||||
start, end := keyPeerInfoRange()
|
||||
iter, err := s.db.Iterator(start, end)
|
||||
@@ -1082,11 +1303,18 @@ func (s *peerStore) loadPeers() error {
|
||||
return fmt.Errorf("invalid peer data: %w", err)
|
||||
}
|
||||
peers[peer.ID] = peer
|
||||
for addr := range peer.AddressInfo {
|
||||
// TODO maybe check to see if we've seen this
|
||||
// addr before for a different peer, there
|
||||
// could be duplicates.
|
||||
addrs[addr] = peer.ID
|
||||
}
|
||||
}
|
||||
if iter.Error() != nil {
|
||||
return iter.Error()
|
||||
}
|
||||
s.peers = peers
|
||||
s.index = addrs
|
||||
s.ranked = nil // invalidate cache if populated
|
||||
return nil
|
||||
}
|
||||
@@ -1098,6 +1326,12 @@ func (s *peerStore) Get(id types.NodeID) (peerInfo, bool) {
|
||||
return peer.Copy(), ok
|
||||
}
|
||||
|
||||
// Resolve returns the peer ID for a given node address if known.
|
||||
func (s *peerStore) Resolve(addr NodeAddress) (types.NodeID, bool) {
|
||||
id, ok := s.index[addr]
|
||||
return id, ok
|
||||
}
|
||||
|
||||
// Set stores peer data. The input data will be copied, and can safely be reused
|
||||
// by the caller.
|
||||
func (s *peerStore) Set(peer peerInfo) error {
|
||||
@@ -1126,20 +1360,29 @@ func (s *peerStore) Set(peer peerInfo) error {
|
||||
// update the existing pointer address.
|
||||
*current = peer
|
||||
}
|
||||
for addr := range peer.AddressInfo {
|
||||
s.index[addr] = peer.ID
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes a peer, or does nothing if it does not exist.
|
||||
func (s *peerStore) Delete(id types.NodeID) error {
|
||||
if _, ok := s.peers[id]; !ok {
|
||||
peer, ok := s.peers[id]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := s.db.Delete(keyPeerInfo(id)); err != nil {
|
||||
return err
|
||||
for _, addr := range peer.AddressInfo {
|
||||
delete(s.index, addr.Address)
|
||||
}
|
||||
delete(s.peers, id)
|
||||
s.ranked = nil
|
||||
|
||||
if err := s.db.Delete(keyPeerInfo(id)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1175,9 +1418,48 @@ func (s *peerStore) Ranked() []*peerInfo {
|
||||
s.ranked = append(s.ranked, peer)
|
||||
}
|
||||
sort.Slice(s.ranked, func(i, j int) bool {
|
||||
// FIXME: If necessary, consider precomputing scores before sorting,
|
||||
// to reduce the number of Score() calls.
|
||||
return s.ranked[i].Score() > s.ranked[j].Score()
|
||||
// TODO: reevaluate more wholistic sorting, perhaps as follows:
|
||||
|
||||
// // sort inactive peers after active peers
|
||||
// if s.ranked[i].Inactive && !s.ranked[j].Inactive {
|
||||
// return false
|
||||
// } else if !s.ranked[i].Inactive && s.ranked[j].Inactive {
|
||||
// return true
|
||||
// }
|
||||
|
||||
// iLastDialed, iLastDialSuccess := s.ranked[i].LastDialed()
|
||||
// jLastDialed, jLastDialSuccess := s.ranked[j].LastDialed()
|
||||
|
||||
// // sort peers who our most recent dialing attempt was
|
||||
// // successful ahead of peers with recent dialing
|
||||
// // failures
|
||||
// switch {
|
||||
// case iLastDialSuccess && jLastDialSuccess:
|
||||
// // if both peers were (are?) successfully
|
||||
// // connected, convey their score, but give the
|
||||
// // one we dialed successfully most recently a bonus
|
||||
|
||||
// iScore := s.ranked[i].Score()
|
||||
// jScore := s.ranked[j].Score()
|
||||
// if jLastDialed.Before(iLastDialed) {
|
||||
// jScore++
|
||||
// } else {
|
||||
// iScore++
|
||||
// }
|
||||
|
||||
// return iScore > jScore
|
||||
// case iLastDialSuccess:
|
||||
// return true
|
||||
// case jLastDialSuccess:
|
||||
// return false
|
||||
// default:
|
||||
// // if both peers were not successful in their
|
||||
// // most recent dialing attempt, fall back to
|
||||
// // peer score.
|
||||
|
||||
// return s.ranked[i].Score() > s.ranked[j].Score()
|
||||
// }
|
||||
})
|
||||
return s.ranked
|
||||
}
|
||||
@@ -1189,17 +1471,18 @@ func (s *peerStore) Size() int {
|
||||
|
||||
// peerInfo contains peer information stored in a peerStore.
|
||||
type peerInfo struct {
|
||||
ID types.NodeID
|
||||
AddressInfo map[NodeAddress]*peerAddressInfo
|
||||
LastConnected time.Time
|
||||
ID types.NodeID
|
||||
AddressInfo map[NodeAddress]*peerAddressInfo
|
||||
LastConnected time.Time
|
||||
LastDisconnected time.Time
|
||||
|
||||
// These fields are ephemeral, i.e. not persisted to the database.
|
||||
Persistent bool
|
||||
Seed bool
|
||||
Height int64
|
||||
FixedScore PeerScore // mainly for tests
|
||||
|
||||
MutableScore int64 // updated by router
|
||||
Inactive bool
|
||||
}
|
||||
|
||||
// peerInfoFromProto converts a Protobuf PeerInfo message to a peerInfo,
|
||||
@@ -1208,6 +1491,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
|
||||
p := &peerInfo{
|
||||
ID: types.NodeID(msg.ID),
|
||||
AddressInfo: map[NodeAddress]*peerAddressInfo{},
|
||||
Inactive: msg.Inactive,
|
||||
}
|
||||
if msg.LastConnected != nil {
|
||||
p.LastConnected = *msg.LastConnected
|
||||
@@ -1230,6 +1514,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
|
||||
func (p *peerInfo) ToProto() *p2pproto.PeerInfo {
|
||||
msg := &p2pproto.PeerInfo{
|
||||
ID: string(p.ID),
|
||||
Inactive: p.Inactive,
|
||||
LastConnected: &p.LastConnected,
|
||||
}
|
||||
for _, addressInfo := range p.AddressInfo {
|
||||
@@ -1238,6 +1523,7 @@ func (p *peerInfo) ToProto() *p2pproto.PeerInfo {
|
||||
if msg.LastConnected.IsZero() {
|
||||
msg.LastConnected = nil
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
@@ -1254,6 +1540,46 @@ func (p *peerInfo) Copy() peerInfo {
return c
}

// LastDialed returns when the peer was last dialed, and if that dial
// attempt was successful. If the peer was never dialed the time stamp
// is zero time.
func (p *peerInfo) LastDialed() (time.Time, bool) {
var (
last time.Time
success bool
)
last = last.Add(-1) // so it's after the epoch

for _, addr := range p.AddressInfo {
if addr.LastDialFailure.Equal(addr.LastDialSuccess) {
if addr.LastDialFailure.IsZero() {
continue
}
if last.After(addr.LastDialSuccess) {
continue
}
success = true
last = addr.LastDialSuccess
}
if addr.LastDialFailure.After(last) {
success = false
last = addr.LastDialFailure
}
if addr.LastDialSuccess.After(last) || last.Equal(addr.LastDialSuccess) {
success = true
last = addr.LastDialSuccess
}
}

// if we never modified last, then we should return it to the
// zero value
if last.Add(1).IsZero() {
return time.Time{}, success
}

return last, success
}

// Score calculates a score for the peer. Higher-scored peers will be
// preferred over lower scores.
func (p *peerInfo) Score() PeerScore {
@@ -1275,8 +1601,8 @@ func (p *peerInfo) Score() PeerScore {
score -= int64(addr.DialFailures)
}

if score <= 0 {
return 0
if score < math.MinInt16 {
score = math.MinInt16
}

return PeerScore(score)

@@ -34,7 +34,7 @@ func TestPeerScoring(t *testing.T) {

t.Run("Synchronous", func(t *testing.T) {
// update the manager and make sure it's correct
require.EqualValues(t, 0, peerManager.Scores()[id])
require.Zero(t, peerManager.Scores()[id])

// add a bunch of good status updates and watch things increase.
for i := 1; i < 10; i++ {
@@ -97,3 +97,173 @@ func TestPeerScoring(t *testing.T) {
}
})
}

func makeMockPeerStore(t *testing.T, peers ...peerInfo) *peerStore {
t.Helper()
s, err := newPeerStore(dbm.NewMemDB())
if err != nil {
t.Fatal(err)
}
for idx := range peers {
if err := s.Set(peers[idx]); err != nil {
t.Fatal(err)
}
}
return s
}
|
||||
func TestPeerRanking(t *testing.T) {
|
||||
t.Run("InactiveSecond", func(t *testing.T) {
|
||||
t.Skip("inactive status is not currently factored into peer rank.")
|
||||
|
||||
store := makeMockPeerStore(t,
|
||||
peerInfo{ID: "second", Inactive: true},
|
||||
peerInfo{ID: "first", Inactive: false},
|
||||
)
|
||||
|
||||
ranked := store.Ranked()
|
||||
if len(ranked) != 2 {
|
||||
t.Fatal("missing peer in ranked output")
|
||||
}
|
||||
if ranked[0].ID != "first" {
|
||||
t.Error("inactive peer is first")
|
||||
}
|
||||
if ranked[1].ID != "second" {
|
||||
t.Error("active peer is second")
|
||||
}
|
||||
})
|
||||
t.Run("ScoreOrder", func(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
Name string
|
||||
First int64
|
||||
Second int64
|
||||
}{
|
||||
{
|
||||
Name: "Mirror",
|
||||
First: 100,
|
||||
Second: -100,
|
||||
},
|
||||
{
|
||||
Name: "VeryLow",
|
||||
First: 0,
|
||||
Second: -100,
|
||||
},
|
||||
{
|
||||
Name: "High",
|
||||
First: 300,
|
||||
Second: 256,
|
||||
},
|
||||
} {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
store := makeMockPeerStore(t,
|
||||
peerInfo{
|
||||
ID: "second",
|
||||
MutableScore: test.Second,
|
||||
},
|
||||
peerInfo{
|
||||
ID: "first",
|
||||
MutableScore: test.First,
|
||||
})
|
||||
|
||||
ranked := store.Ranked()
|
||||
if len(ranked) != 2 {
|
||||
t.Fatal("missing peer in ranked output")
|
||||
}
|
||||
if ranked[0].ID != "first" {
|
||||
t.Error("higher peer is first")
|
||||
}
|
||||
if ranked[1].ID != "second" {
|
||||
t.Error("higher peer is second")
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestLastDialed(t *testing.T) {
|
||||
t.Run("Zero", func(t *testing.T) {
|
||||
p := &peerInfo{}
|
||||
ts, ok := p.LastDialed()
|
||||
if !ts.IsZero() {
|
||||
t.Error("timestamp should be zero:", ts)
|
||||
}
|
||||
if ok {
|
||||
t.Error("peer reported success, despite none")
|
||||
}
|
||||
})
|
||||
t.Run("NeverDialed", func(t *testing.T) {
|
||||
p := &peerInfo{
|
||||
AddressInfo: map[NodeAddress]*peerAddressInfo{
|
||||
{NodeID: "kip"}: {},
|
||||
{NodeID: "merlin"}: {},
|
||||
},
|
||||
}
|
||||
ts, ok := p.LastDialed()
|
||||
if !ts.IsZero() {
|
||||
t.Error("timestamp should be zero:", ts)
|
||||
}
|
||||
if ok {
|
||||
t.Error("peer reported success, despite none")
|
||||
}
|
||||
})
|
||||
t.Run("Ordered", func(t *testing.T) {
|
||||
base := time.Now()
|
||||
for _, test := range []struct {
|
||||
Name string
|
||||
SuccessTime time.Time
|
||||
FailTime time.Time
|
||||
ExpectedSuccess bool
|
||||
}{
|
||||
{
|
||||
Name: "Zero",
|
||||
},
|
||||
{
|
||||
Name: "Success",
|
||||
SuccessTime: base.Add(time.Hour),
|
||||
FailTime: base,
|
||||
ExpectedSuccess: true,
|
||||
},
|
||||
{
|
||||
Name: "Equal",
|
||||
SuccessTime: base,
|
||||
FailTime: base,
|
||||
ExpectedSuccess: true,
|
||||
},
|
||||
{
|
||||
Name: "Failure",
|
||||
SuccessTime: base,
|
||||
FailTime: base.Add(time.Hour),
|
||||
ExpectedSuccess: false,
|
||||
},
|
||||
} {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
p := &peerInfo{
|
||||
AddressInfo: map[NodeAddress]*peerAddressInfo{
|
||||
{NodeID: "kip"}: {LastDialSuccess: test.SuccessTime},
|
||||
{NodeID: "merlin"}: {LastDialFailure: test.FailTime},
|
||||
},
|
||||
}
|
||||
ts, ok := p.LastDialed()
|
||||
if test.ExpectedSuccess && !ts.Equal(test.SuccessTime) {
|
||||
if !ts.Equal(test.FailTime) {
|
||||
t.Fatal("got unexpected timestamp:", ts)
|
||||
}
|
||||
|
||||
t.Error("last dialed time reported incorrect value:", ts)
|
||||
}
|
||||
if !test.ExpectedSuccess && !ts.Equal(test.FailTime) {
|
||||
if !ts.Equal(test.SuccessTime) {
|
||||
t.Fatal("got unexpected timestamp:", ts)
|
||||
}
|
||||
|
||||
t.Error("last dialed time reported incorrect value:", ts)
|
||||
}
|
||||
if test.ExpectedSuccess != ok {
|
||||
t.Error("test reported incorrect outcome for last dialed type")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
@@ -384,16 +384,14 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
|
||||
// Add b. We shouldn't be able to dial it, due to MaxConnected.
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
// Spawn a goroutine to fail a's dial attempt.
|
||||
@@ -427,8 +425,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.DialFailed(ctx, dial))
|
||||
failed := time.Now()
|
||||
@@ -458,8 +455,7 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) {
|
||||
err = peerManager.Accepted(a.NodeID)
|
||||
require.NoError(t, err)
|
||||
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
dctx, dcancel := context.WithTimeout(ctx, 300*time.Millisecond)
|
||||
@@ -490,8 +486,7 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
|
||||
@@ -499,16 +494,14 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
// At this point, adding c will not allow dialing it.
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
}
|
||||
|
||||
@@ -524,11 +517,11 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
a.NodeID: 0,
|
||||
b.NodeID: 1,
|
||||
c.NodeID: 2,
|
||||
d.NodeID: 3,
|
||||
e.NodeID: 0,
|
||||
a.NodeID: p2p.PeerScore(0),
|
||||
b.NodeID: p2p.PeerScore(1),
|
||||
c.NodeID: p2p.PeerScore(2),
|
||||
d.NodeID: p2p.PeerScore(3),
|
||||
e.NodeID: p2p.PeerScore(0),
|
||||
},
|
||||
PersistentPeers: []types.NodeID{c.NodeID, d.NodeID},
|
||||
MaxConnected: 2,
|
||||
@@ -540,7 +533,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
dial := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
@@ -549,8 +542,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
// Even though we are at capacity, we should be allowed to dial c for an
|
||||
@@ -558,8 +550,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, c, dial)
|
||||
|
||||
// However, since we're using all upgrade slots now, we can't add and dial
|
||||
@@ -567,24 +558,20 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(d)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
// We go through with c's upgrade.
|
||||
require.NoError(t, peerManager.Dialed(c))
|
||||
|
||||
// Still can't dial d.
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
// Now, if we disconnect a, we should be allowed to dial d because we have a
|
||||
// free upgrade slot.
|
||||
require.Error(t, peerManager.Dialed(d))
|
||||
peerManager.Disconnected(ctx, a.NodeID)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, d, dial)
|
||||
require.NoError(t, peerManager.Dialed(d))
|
||||
|
||||
// However, if we disconnect b (such that only c and d are connected), we
|
||||
@@ -594,8 +581,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(e)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
}
|
||||
|
||||
@@ -605,7 +591,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
|
||||
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1},
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 2,
|
||||
})
|
||||
@@ -615,8 +601,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
|
||||
@@ -624,8 +609,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
// Adding c and dialing it will fail, because a is the only connected
|
||||
@@ -633,8 +617,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Empty(t, dial)
|
||||
}
|
||||
|
||||
@@ -655,22 +638,19 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
|
||||
// Adding a's TCP address will not dispense a, since it's already dialing.
|
||||
added, err = peerManager.Add(aTCP)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
// Marking a as dialed will still not dispense it.
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
// Adding b and accepting a connection from it will not dispense it either.
|
||||
@@ -678,8 +658,7 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
require.NoError(t, peerManager.Accepted(bID))
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
}
|
||||
|
||||
@@ -708,16 +687,14 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) {
|
||||
// All addresses should be dispensed as long as dialing them has failed.
|
||||
dial := []p2p.NodeAddress{}
|
||||
for range addresses {
|
||||
address, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
address := peerManager.TryDialNext()
|
||||
require.NotZero(t, address)
|
||||
require.NoError(t, peerManager.DialFailed(ctx, address))
|
||||
dial = append(dial, address)
|
||||
}
|
||||
require.ElementsMatch(t, dial, addresses)
|
||||
|
||||
address, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
address := peerManager.TryDialNext()
|
||||
require.Zero(t, address)
|
||||
}
|
||||
|
||||
@@ -742,15 +719,14 @@ func TestPeerManager_DialFailed(t *testing.T) {
|
||||
// Dialing and then calling DialFailed with a different address (same
|
||||
// NodeID) should unmark as dialing and allow us to dial the other address
|
||||
// again, but not register the failed address.
|
||||
dial, err := peerManager.TryDialNext()
|
||||
dial := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.DialFailed(ctx, p2p.NodeAddress{
|
||||
Protocol: "tcp", NodeID: aID, Hostname: "localhost"}))
|
||||
require.Equal(t, []p2p.NodeAddress{a}, peerManager.Addresses(aID))
|
||||
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
|
||||
// Calling DialFailed on same address twice should be fine.
|
||||
@@ -771,7 +747,10 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
|
||||
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
b.NodeID: p2p.PeerScore(1),
|
||||
c.NodeID: p2p.PeerScore(2),
|
||||
},
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 2,
|
||||
})
|
||||
@@ -781,8 +760,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
|
||||
@@ -790,8 +768,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
// Adding c and dialing it will fail, even though it could upgrade a and we
|
||||
@@ -800,14 +777,12 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Empty(t, dial)
|
||||
|
||||
// Failing b's dial will now make c available for dialing.
|
||||
require.NoError(t, peerManager.DialFailed(ctx, b))
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, c, dial)
|
||||
}
|
||||
|
||||
@@ -822,8 +797,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
@@ -833,8 +807,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
require.NoError(t, peerManager.Accepted(b.NodeID))
|
||||
@@ -863,8 +836,7 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
|
||||
// Marking b as dialed in the meanwhile (even without TryDialNext)
|
||||
@@ -887,7 +859,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
MaxConnected: 2,
|
||||
MaxConnectedUpgrade: 1,
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1},
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: p2p.PeerScore(1), d.NodeID: 1},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -906,8 +878,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, c, dial)
|
||||
require.NoError(t, peerManager.Dialed(c))
|
||||
|
||||
@@ -937,7 +908,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 2,
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -951,8 +922,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
require.NoError(t, peerManager.Dialed(b))
|
||||
|
||||
@@ -961,8 +931,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Empty(t, dial)
|
||||
|
||||
// a should now be evicted.
|
||||
@@ -984,10 +953,10 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
|
||||
MaxConnected: 2,
|
||||
MaxConnectedUpgrade: 1,
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
a.NodeID: 3,
|
||||
b.NodeID: 2,
|
||||
c.NodeID: 10,
|
||||
d.NodeID: 1,
|
||||
a.NodeID: p2p.PeerScore(3),
|
||||
b.NodeID: p2p.PeerScore(2),
|
||||
c.NodeID: p2p.PeerScore(10),
|
||||
d.NodeID: p2p.PeerScore(1),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -1008,8 +977,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, c, dial)
|
||||
|
||||
// In the meanwhile, a disconnects and d connects. d is even lower-scored
|
||||
@@ -1040,9 +1008,9 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
|
||||
MaxConnected: 2,
|
||||
MaxConnectedUpgrade: 1,
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
a.NodeID: 1,
|
||||
b.NodeID: 2,
|
||||
c.NodeID: 3,
|
||||
a.NodeID: p2p.PeerScore(1),
|
||||
b.NodeID: p2p.PeerScore(2),
|
||||
c.NodeID: p2p.PeerScore(3),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -1062,7 +1030,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
dial := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c, dial)
|
||||
|
||||
@@ -1108,8 +1076,7 @@ func TestPeerManager_Accepted(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, c, dial)
|
||||
require.NoError(t, peerManager.Accepted(c.NodeID))
|
||||
require.Error(t, peerManager.Dialed(c))
|
||||
@@ -1118,8 +1085,7 @@ func TestPeerManager_Accepted(t *testing.T) {
|
||||
added, err = peerManager.Add(d)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, d, dial)
|
||||
require.NoError(t, peerManager.Dialed(d))
|
||||
require.Error(t, peerManager.Accepted(d.NodeID))
|
||||
@@ -1161,8 +1127,8 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) {
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
c.NodeID: 1,
|
||||
d.NodeID: 2,
|
||||
c.NodeID: p2p.PeerScore(1),
|
||||
d.NodeID: p2p.PeerScore(2),
|
||||
},
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 1,
|
||||
@@ -1209,8 +1175,8 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) {
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
b.NodeID: 1,
|
||||
c.NodeID: 1,
|
||||
b.NodeID: p2p.PeerScore(1),
|
||||
c.NodeID: p2p.PeerScore(1),
|
||||
},
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 2,
|
||||
@@ -1252,8 +1218,8 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
b.NodeID: 1,
|
||||
c.NodeID: 1,
|
||||
b.NodeID: p2p.PeerScore(1),
|
||||
c.NodeID: p2p.PeerScore(1),
|
||||
},
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 2,
|
||||
@@ -1270,8 +1236,7 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
// a has already been claimed as an upgrade of a, so accepting
|
||||
@@ -1428,7 +1393,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 1,
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1)},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1445,8 +1410,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
|
||||
added, err := peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
require.NoError(t, peerManager.Dialed(b))
|
||||
}()
|
||||
@@ -1469,7 +1433,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) {
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
MaxConnected: 1,
|
||||
MaxConnectedUpgrade: 1,
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
b.NodeID: p2p.PeerScore(1),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1578,13 +1544,11 @@ func TestPeerManager_Disconnected(t *testing.T) {
|
||||
|
||||
// Disconnecting a dialing peer does not unmark it as dialing, to avoid
|
||||
// dialing it multiple times in parallel.
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
|
||||
peerManager.Disconnected(ctx, a.NodeID)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
}
|
||||
|
||||
@@ -1657,8 +1621,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
|
||||
require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates())
|
||||
|
||||
// Outbound connection with peer error and eviction.
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.Empty(t, sub.Updates())
|
||||
|
||||
@@ -1681,8 +1644,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
|
||||
require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates())
|
||||
|
||||
// Outbound connection with dial failure.
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.Empty(t, sub.Updates())
|
||||
|
||||
@@ -1787,8 +1749,7 @@ func TestPeerManager_Close(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.DialFailed(ctx, a))
|
||||
}
|
||||
@@ -1833,6 +1794,7 @@ func TestPeerManager_Advertise(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
|
||||
require.Len(t, peerManager.Advertise(dID, 100), 6)
|
||||
// d should get all addresses.
|
||||
require.ElementsMatch(t, []p2p.NodeAddress{
|
||||
aTCP, aMem, bTCP, bMem, cTCP, cMem,
|
||||
@@ -1846,10 +1808,18 @@ func TestPeerManager_Advertise(t *testing.T) {
|
||||
// Asking for 0 addresses should return, well, 0.
|
||||
require.Empty(t, peerManager.Advertise(aID, 0))
|
||||
|
||||
// Asking for 2 addresses should get the highest-rated ones, i.e. a.
|
||||
require.ElementsMatch(t, []p2p.NodeAddress{
|
||||
aTCP, aMem,
|
||||
}, peerManager.Advertise(dID, 2))
|
||||
// Asking for 2 addresses should get two addresses
|
||||
// though we cannot assert the exact content of the list when there are two
|
||||
addrs := peerManager.Advertise(dID, 2)
|
||||
require.Len(t, addrs, 2)
|
||||
for _, addr := range addrs {
|
||||
if dID == addr.NodeID {
|
||||
t.Fatal("never advertise self")
|
||||
}
|
||||
if cID == addr.NodeID {
|
||||
t.Fatal("should not have returned the lowest ranked peer")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeerManager_Advertise_Self(t *testing.T) {
|
||||
|
||||
@@ -151,7 +151,9 @@ func (s *pqScheduler) closed() <-chan struct{} { return s.done }
|
||||
// non-empty, we pop the top Envelope and send it on the dequeueCh.
|
||||
func (s *pqScheduler) process(ctx context.Context) {
|
||||
defer close(s.done)
|
||||
dequeueReady := make(chan struct{}, 1)
|
||||
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case e := <-s.enqueueCh:
|
||||
@@ -239,28 +241,36 @@ func (s *pqScheduler) process(ctx context.Context) {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// dequeue
|
||||
|
||||
select {
|
||||
case dequeueReady <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
case <-dequeueReady:
|
||||
for s.pq.Len() > 0 {
|
||||
pqEnv = heap.Pop(s.pq).(*pqEnvelope)
|
||||
s.size -= pqEnv.size
|
||||
|
||||
// deduct the Envelope size from all the relevant cumulative sizes
|
||||
for i := 0; i < len(s.chDescs) && pqEnv.priority <= uint(s.chDescs[i].Priority); i++ {
|
||||
s.sizes[uint(s.chDescs[i].Priority)] -= pqEnv.size
|
||||
}
|
||||
|
||||
s.metrics.PeerSendBytesTotal.With(
|
||||
"chID", chIDStr,
|
||||
"peer_id", string(pqEnv.envelope.To),
|
||||
"message_type", s.lc.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size))
|
||||
s.metrics.PeerPendingSendBytes.With(
|
||||
"peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size))
|
||||
pqEnv := heap.Pop(s.pq).(*pqEnvelope)
|
||||
select {
|
||||
case s.dequeueCh <- pqEnv.envelope:
|
||||
case <-s.closeCh:
|
||||
return
|
||||
s.size -= pqEnv.size
|
||||
|
||||
// deduct the Envelope size from all the relevant cumulative sizes
|
||||
for i := 0; i < len(s.chDescs) && pqEnv.priority <= uint(s.chDescs[i].Priority); i++ {
|
||||
s.sizes[uint(s.chDescs[i].Priority)] -= pqEnv.size
|
||||
}
|
||||
|
||||
chIDStr := strconv.Itoa(int(pqEnv.envelope.ChannelID))
|
||||
s.metrics.PeerSendBytesTotal.With(
|
||||
"chID", chIDStr,
|
||||
"peer_id", string(pqEnv.envelope.To),
|
||||
"message_type", s.lc.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size))
|
||||
s.metrics.PeerPendingSendBytes.With(
|
||||
"peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size))
|
||||
default:
|
||||
heap.Push(s.pq, pqEnv)
|
||||
select {
|
||||
case dequeueReady <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
continue LOOP
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
|
||||
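Note on the hunk above: the dequeue path now waits on a dedicated dequeueReady channel of capacity 1, so the enqueue path can signal it without blocking and repeated signals collapse into one. A minimal, self-contained sketch of that coalescing wake-up pattern (the names below are illustrative, not the scheduler's actual fields):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu      sync.Mutex
		pending []int
	)
	ready := make(chan struct{}, 1) // capacity 1: repeated signals coalesce
	done := make(chan struct{})

	// Consumer: drains everything that accumulated since the last wake-up.
	go func() {
		defer close(done)
		for range ready {
			mu.Lock()
			batch := pending
			pending = nil
			mu.Unlock()
			for _, item := range batch {
				fmt.Println("dequeued", item)
			}
		}
	}()

	// Producer: enqueue work, then signal readiness without ever blocking.
	for i := 0; i < 5; i++ {
		mu.Lock()
		pending = append(pending, i)
		mu.Unlock()
		select {
		case ready <- struct{}{}: // wake the consumer
		default: // a wake-up is already queued
		}
	}
	close(ready)
	<-done
}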
@@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
@@ -62,12 +61,6 @@ type RouterOptions struct {
|
||||
// return an error to reject the peer.
|
||||
FilterPeerByID func(context.Context, types.NodeID) error
|
||||
|
||||
// DialSleep controls the amount of time that the router
|
||||
// sleeps between dialing peers. If not set, a default value
|
||||
// is used that sleeps for a (random) amount of time up to 3
|
||||
// seconds between submitting each peer to be dialed.
|
||||
DialSleep func(context.Context)
|
||||
|
||||
// NumConcurrentDials controls how many parallel goroutines
|
||||
// are used to dial peers. This defaults to the value of
|
||||
// runtime.NumCPU.
|
||||
@@ -310,11 +303,7 @@ func (r *Router) routeChannel(
|
||||
) {
|
||||
for {
|
||||
select {
|
||||
case envelope, ok := <-outCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
case envelope := <-outCh:
|
||||
// Mark the envelope with the channel ID to allow sendPeer() to pass
|
||||
// it on to Transport.SendMessage().
|
||||
envelope.ChannelID = chID
|
||||
@@ -391,20 +380,22 @@ func (r *Router) routeChannel(
|
||||
}
|
||||
}
|
||||
|
||||
case peerError, ok := <-errCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
shouldEvict := peerError.Fatal || r.peerManager.HasMaxPeerCapacity()
|
||||
case peerError := <-errCh:
|
||||
maxPeerCapacity := r.peerManager.HasMaxPeerCapacity()
|
||||
r.logger.Error("peer error",
|
||||
"peer", peerError.NodeID,
|
||||
"err", peerError.Err,
|
||||
"evicting", shouldEvict,
|
||||
"disconnecting", peerError.Fatal || maxPeerCapacity,
|
||||
)
|
||||
if shouldEvict {
|
||||
|
||||
if peerError.Fatal || maxPeerCapacity {
|
||||
// if the error is fatal or all peer
|
||||
// slots are in use, we can error
|
||||
// (disconnect) from the peer.
|
||||
r.peerManager.Errored(peerError.NodeID, peerError.Err)
|
||||
} else {
|
||||
// this just decrements the peer
|
||||
// score.
|
||||
r.peerManager.processPeerEvent(ctx, PeerUpdate{
|
||||
NodeID: peerError.NodeID,
|
||||
Status: PeerStatusBad,
|
||||
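The error branch above reduces to a single decision: disconnect the peer only when its error is fatal or every peer slot is already in use, otherwise just mark it bad so its score drops. A small sketch of that rule in isolation (the function name and arguments here are stand-ins, not the router's API):

package main

import "fmt"

// shouldDisconnect mirrors the branch in the hunk above: evict only on a
// fatal error or when the node has no spare peer capacity.
func shouldDisconnect(fatal, atMaxCapacity bool) bool {
	return fatal || atMaxCapacity
}

func main() {
	fmt.Println(shouldDisconnect(true, false))  // fatal error: disconnect
	fmt.Println(shouldDisconnect(false, true))  // no free slots: disconnect
	fmt.Println(shouldDisconnect(false, false)) // benign error: only lower the score
}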
@@ -417,9 +408,9 @@ func (r *Router) routeChannel(
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Router) numConccurentDials() int {
|
||||
func (r *Router) numConcurrentDials() int {
|
||||
if r.options.NumConcurrentDials == nil {
|
||||
return runtime.NumCPU()
|
||||
return runtime.NumCPU() * 32
|
||||
}
|
||||
|
||||
return r.options.NumConcurrentDials()
|
||||
@@ -441,43 +432,22 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error {
|
||||
return r.options.FilterPeerByID(ctx, id)
|
||||
}
|
||||
|
||||
func (r *Router) dialSleep(ctx context.Context) {
|
||||
if r.options.DialSleep == nil {
|
||||
const (
|
||||
maxDialerInterval = 3000
|
||||
minDialerInterval = 250
|
||||
)
|
||||
|
||||
// nolint:gosec // G404: Use of weak random number generator
|
||||
dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval)
|
||||
|
||||
timer := time.NewTimer(dur * time.Millisecond)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
r.options.DialSleep(ctx)
|
||||
}
|
||||
|
||||
// acceptPeers accepts inbound connections from peers on the given transport,
|
||||
// and spawns goroutines that route messages to/from them.
|
||||
func (r *Router) acceptPeers(ctx context.Context, transport Transport) {
|
||||
for {
|
||||
conn, err := transport.Accept(ctx)
|
||||
switch err {
|
||||
case nil:
|
||||
case io.EOF:
|
||||
r.logger.Debug("stopping accept routine", "transport", transport)
|
||||
switch {
|
||||
case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded):
|
||||
r.logger.Debug("stopping accept routine", "transport", transport, "err", "context canceled")
|
||||
return
|
||||
default:
|
||||
case errors.Is(err, io.EOF):
|
||||
r.logger.Debug("stopping accept routine", "transport", transport, "err", "EOF")
|
||||
return
|
||||
case err != nil:
|
||||
// in this case we got an error from the net.Listener.
|
||||
r.logger.Error("failed to accept connection", "transport", transport, "err", err)
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
incomingIP := conn.RemoteEndpoint().IP
|
||||
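In the hunk above, dialSleep no longer consults a configurable option and instead always sleeps for a random interval between 250ms and 3s while honoring context cancellation. A standalone sketch of that jittered, cancellable sleep (the helper name is illustrative; the bounds are copied from the diff):

package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

// jitteredSleep waits a random duration between minMs and maxMs milliseconds,
// returning early if the context is canceled first.
func jitteredSleep(ctx context.Context, minMs, maxMs int64) {
	// nolint:gosec // math/rand is sufficient for jitter; not security-sensitive.
	dur := time.Duration(rand.Int63n(maxMs-minMs+1)+minMs) * time.Millisecond

	timer := time.NewTimer(dur)
	defer timer.Stop()

	select {
	case <-ctx.Done():
	case <-timer.C:
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	start := time.Now()
	jitteredSleep(ctx, 250, 3000) // same bounds as the router's dial sleep
	fmt.Println("slept", time.Since(start).Round(time.Millisecond))
}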
@@ -489,7 +459,7 @@ func (r *Router) acceptPeers(ctx context.Context, transport Transport) {
|
||||
"close_err", closeErr,
|
||||
)
|
||||
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
// Spawn a goroutine for the handshake, to avoid head-of-line blocking.
|
||||
@@ -558,7 +528,7 @@ func (r *Router) dialPeers(ctx context.Context) {
|
||||
// able to add peers at a reasonable pace, though the number
|
||||
// is somewhat arbitrary. The action is further throttled by a
|
||||
// sleep after sending to the addresses channel.
|
||||
for i := 0; i < r.numConccurentDials(); i++ {
|
||||
for i := 0; i < r.numConcurrentDials(); i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -580,19 +550,13 @@ LOOP:
|
||||
switch {
|
||||
case errors.Is(err, context.Canceled):
|
||||
break LOOP
|
||||
case err != nil:
|
||||
r.logger.Error("failed to find next peer to dial", "err", err)
|
||||
break LOOP
|
||||
case address == NodeAddress{}:
|
||||
continue LOOP
|
||||
}
|
||||
|
||||
select {
|
||||
case addresses <- address:
|
||||
// this jitters the frequency that we call
|
||||
// DialNext and prevents us from attempting to
|
||||
// create connections too quickly.
|
||||
|
||||
r.dialSleep(ctx)
|
||||
continue
|
||||
continue LOOP
|
||||
case <-ctx.Done():
|
||||
close(addresses)
|
||||
break LOOP
|
||||
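The two hunks above split dialing into a producer and a pool of workers: one loop asks the peer manager for the next address and pushes it onto a channel, and numConcurrentDials() goroutines drain that channel and dial. A minimal fan-out sketch of the same idea (the dial function and the addresses are stand-ins):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// dial is a stand-in for the real connection attempt.
func dial(ctx context.Context, addr string) {
	select {
	case <-time.After(10 * time.Millisecond):
		fmt.Println("dialed", addr)
	case <-ctx.Done():
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	addresses := make(chan string)
	var wg sync.WaitGroup

	// Workers: one goroutine per dial slot, mirroring numConcurrentDials().
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for addr := range addresses {
				dial(ctx, addr)
			}
		}()
	}

	// Producer: feed candidate addresses, then close the channel so workers exit.
	for _, addr := range []string{"node-a:26656", "node-b:26656", "node-c:26656"} {
		select {
		case addresses <- addr:
		case <-ctx.Done():
		}
	}
	close(addresses)
	wg.Wait()
}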
@@ -608,7 +572,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
|
||||
case errors.Is(err, context.Canceled):
|
||||
return
|
||||
case err != nil:
|
||||
r.logger.Error("failed to dial peer", "peer", address, "err", err)
|
||||
r.logger.Debug("failed to dial peer", "peer", address, "err", err)
|
||||
if err = r.peerManager.DialFailed(ctx, address); err != nil {
|
||||
r.logger.Error("failed to report dial failure", "peer", address, "err", err)
|
||||
}
|
||||
@@ -630,8 +594,8 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
|
||||
}
|
||||
|
||||
if err := r.runWithPeerMutex(func() error { return r.peerManager.Dialed(address) }); err != nil {
|
||||
r.logger.Error("failed to dial peer",
|
||||
"op", "outgoing/dialing", "peer", address.NodeID, "err", err)
|
||||
r.logger.Error("failed to dial peer", "op", "outgoing/dialing", "peer", address.NodeID, "err", err)
|
||||
r.peerManager.dialWaker.Wake()
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
@@ -689,12 +653,13 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection,
|
||||
// Internet can't and needs a different public address.
|
||||
conn, err := r.transport.Dial(dialCtx, endpoint)
|
||||
if err != nil {
|
||||
r.logger.Error("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err)
|
||||
r.logger.Debug("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err)
|
||||
} else {
|
||||
r.logger.Debug("dialed peer", "peer", address.NodeID, "endpoint", endpoint)
|
||||
return conn, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("all endpoints failed")
|
||||
}
|
||||
|
||||
@@ -706,14 +671,8 @@ func (r *Router) handshakePeer(
|
||||
expectID types.NodeID,
|
||||
) (types.NodeInfo, error) {
|
||||
|
||||
if r.options.HandshakeTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, r.options.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
nodeInfo := r.nodeInfoProducer()
|
||||
peerInfo, peerKey, err := conn.Handshake(ctx, *nodeInfo, r.privKey)
|
||||
peerInfo, peerKey, err := conn.Handshake(ctx, r.options.HandshakeTimeout, *nodeInfo, r.privKey)
|
||||
if err != nil {
|
||||
return peerInfo, err
|
||||
}
|
||||
@@ -721,14 +680,6 @@ func (r *Router) handshakePeer(
|
||||
return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err)
|
||||
}
|
||||
|
||||
if peerInfo.Network != nodeInfo.Network {
|
||||
if err := r.peerManager.store.Delete(peerInfo.NodeID); err != nil {
|
||||
return peerInfo, fmt.Errorf("problem removing peer from store from incorrect network [%s]: %w", peerInfo.Network, err)
|
||||
}
|
||||
|
||||
return peerInfo, fmt.Errorf("connected to peer from wrong network, %q, removed from peer store", peerInfo.Network)
|
||||
}
|
||||
|
||||
if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID {
|
||||
return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)",
|
||||
peerInfo.NodeID, types.NodeIDFromPubKey(peerKey))
|
||||
@@ -739,6 +690,10 @@ func (r *Router) handshakePeer(
|
||||
}
|
||||
|
||||
if err := nodeInfo.CompatibleWith(peerInfo); err != nil {
|
||||
if err := r.peerManager.Inactivate(peerInfo.NodeID); err != nil {
|
||||
return peerInfo, fmt.Errorf("problem inactivating peer %q: %w", peerInfo.ID(), err)
|
||||
}
|
||||
|
||||
return peerInfo, ErrRejected{
|
||||
err: err,
|
||||
id: peerInfo.ID(),
|
||||
@@ -758,7 +713,7 @@ func (r *Router) runWithPeerMutex(fn func() error) error {
|
||||
// channels. It will close the given connection and send queue when done, or if
|
||||
// they are closed elsewhere it will cause this method to shut down and return.
|
||||
func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connection, channels ChannelIDSet) {
|
||||
r.metrics.Peers.Add(1)
|
||||
r.metrics.PeersConnected.Add(1)
|
||||
r.peerManager.Ready(ctx, peerID, channels)
|
||||
|
||||
sendQueue := r.getOrMakeQueue(peerID, channels)
|
||||
@@ -771,7 +726,7 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec
|
||||
sendQueue.close()
|
||||
|
||||
r.peerManager.Disconnected(ctx, peerID)
|
||||
r.metrics.Peers.Add(-1)
|
||||
r.metrics.PeersConnected.Add(-1)
|
||||
}()
|
||||
|
||||
r.logger.Info("peer connected", "peer", peerID, "endpoint", conn)
|
||||
@@ -930,6 +885,8 @@ func (r *Router) evictPeers(ctx context.Context) {
|
||||
queue, ok := r.peerQueues[peerID]
|
||||
r.peerMtx.RUnlock()
|
||||
|
||||
r.metrics.PeersEvicted.Add(1)
|
||||
|
||||
if ok {
|
||||
queue.close()
|
||||
}
|
||||
|
||||
@@ -385,7 +385,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
|
||||
connCtx, connCancel := context.WithCancel(context.Background())
|
||||
mockConnection := &mocks.Connection{}
|
||||
mockConnection.On("String").Maybe().Return("mock")
|
||||
mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
|
||||
mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
|
||||
Return(tc.peerInfo, tc.peerKey, nil)
|
||||
mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe()
|
||||
mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
|
||||
@@ -442,78 +442,48 @@ func TestRouter_AcceptPeers(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRouter_AcceptPeers_Error(t *testing.T) {
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
func TestRouter_AcceptPeers_Errors(t *testing.T) {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
for _, err := range []error{io.EOF, context.Canceled, context.DeadlineExceeded} {
|
||||
t.Run(err.Error(), func(t *testing.T) {
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
|
||||
// Set up a mock transport that returns an error, which should prevent
|
||||
// the router from calling Accept again.
|
||||
mockTransport := &mocks.Transport{}
|
||||
mockTransport.On("String").Maybe().Return("mock")
|
||||
mockTransport.On("Accept", mock.Anything).Once().Return(nil, errors.New("boom"))
|
||||
mockTransport.On("Close").Return(nil)
|
||||
mockTransport.On("Listen", mock.Anything).Return(nil)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Set up and start the router.
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
|
||||
require.NoError(t, err)
|
||||
// Set up a mock transport that returns io.EOF once, which should prevent
|
||||
// the router from calling Accept again.
|
||||
mockTransport := &mocks.Transport{}
|
||||
mockTransport.On("String").Maybe().Return("mock")
|
||||
mockTransport.On("Accept", mock.Anything).Once().Return(nil, err)
|
||||
mockTransport.On("Close").Return(nil)
|
||||
mockTransport.On("Listen", mock.Anything).Return(nil)
|
||||
|
||||
router, err := p2p.NewRouter(
|
||||
log.NewNopLogger(),
|
||||
p2p.NopMetrics(),
|
||||
selfKey,
|
||||
peerManager,
|
||||
func() *types.NodeInfo { return &selfInfo },
|
||||
mockTransport,
|
||||
nil,
|
||||
p2p.RouterOptions{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
// Set up and start the router.
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, router.Start(ctx))
|
||||
time.Sleep(time.Second)
|
||||
router.Stop()
|
||||
router, err := p2p.NewRouter(
|
||||
log.NewNopLogger(),
|
||||
p2p.NopMetrics(),
|
||||
selfKey,
|
||||
peerManager,
|
||||
func() *types.NodeInfo { return &selfInfo },
|
||||
mockTransport,
|
||||
nil,
|
||||
p2p.RouterOptions{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockTransport.AssertExpectations(t)
|
||||
}
|
||||
require.NoError(t, router.Start(ctx))
|
||||
time.Sleep(time.Second)
|
||||
router.Stop()
|
||||
|
||||
func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) {
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
mockTransport.AssertExpectations(t)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
})
|
||||
|
||||
// Set up a mock transport that returns io.EOF once, which should prevent
|
||||
// the router from calling Accept again.
|
||||
mockTransport := &mocks.Transport{}
|
||||
mockTransport.On("String").Maybe().Return("mock")
|
||||
mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF)
|
||||
mockTransport.On("Close").Return(nil)
|
||||
mockTransport.On("Listen", mock.Anything).Return(nil)
|
||||
|
||||
// Set up and start the router.
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
router, err := p2p.NewRouter(
|
||||
log.NewNopLogger(),
|
||||
p2p.NopMetrics(),
|
||||
selfKey,
|
||||
peerManager,
|
||||
func() *types.NodeInfo { return &selfInfo },
|
||||
mockTransport,
|
||||
nil,
|
||||
p2p.RouterOptions{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, router.Start(ctx))
|
||||
time.Sleep(time.Second)
|
||||
router.Stop()
|
||||
|
||||
mockTransport.AssertExpectations(t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
|
||||
@@ -530,7 +500,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
|
||||
|
||||
mockConnection := &mocks.Connection{}
|
||||
mockConnection.On("String").Maybe().Return("mock")
|
||||
mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
|
||||
mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
|
||||
WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF)
|
||||
mockConnection.On("Close").Return(nil)
|
||||
mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
|
||||
@@ -618,7 +588,7 @@ func TestRouter_DialPeers(t *testing.T) {
|
||||
mockConnection := &mocks.Connection{}
|
||||
mockConnection.On("String").Maybe().Return("mock")
|
||||
if tc.dialErr == nil {
|
||||
mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
|
||||
mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
|
||||
Return(tc.peerInfo, tc.peerKey, nil)
|
||||
mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe()
|
||||
}
|
||||
@@ -704,7 +674,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
|
||||
|
||||
mockConnection := &mocks.Connection{}
|
||||
mockConnection.On("String").Maybe().Return("mock")
|
||||
mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
|
||||
mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
|
||||
WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF)
|
||||
mockConnection.On("Close").Return(nil)
|
||||
|
||||
@@ -745,7 +715,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
|
||||
mockTransport,
|
||||
nil,
|
||||
p2p.RouterOptions{
|
||||
DialSleep: func(_ context.Context) {},
|
||||
NumConcurrentDials: func() int {
|
||||
ncpu := runtime.NumCPU()
|
||||
if ncpu <= 3 {
|
||||
@@ -787,7 +756,7 @@ func TestRouter_EvictPeers(t *testing.T) {
|
||||
|
||||
mockConnection := &mocks.Connection{}
|
||||
mockConnection.On("String").Maybe().Return("mock")
|
||||
mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
|
||||
mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
|
||||
Return(peerInfo, peerKey.PubKey(), nil)
|
||||
mockConnection.On("ReceiveMessage", mock.Anything).WaitUntil(closeCh).Return(chID, nil, io.EOF)
|
||||
mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
|
||||
@@ -856,7 +825,7 @@ func TestRouter_ChannelCompatability(t *testing.T) {
|
||||
|
||||
mockConnection := &mocks.Connection{}
|
||||
mockConnection.On("String").Maybe().Return("mock")
|
||||
mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
|
||||
mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
|
||||
Return(incompatiblePeer, peerKey.PubKey(), nil)
|
||||
mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
|
||||
mockConnection.On("Close").Return(nil)
|
||||
@@ -907,7 +876,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
|
||||
|
||||
mockConnection := &mocks.Connection{}
|
||||
mockConnection.On("String").Maybe().Return("mock")
|
||||
mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
|
||||
mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
|
||||
Return(peer, peerKey.PubKey(), nil)
|
||||
mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
|
||||
mockConnection.On("Close").Return(nil)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -81,7 +82,7 @@ type Connection interface {
|
||||
// FIXME: The handshake should really be the Router's responsibility, but
|
||||
// that requires the connection interface to be byte-oriented rather than
|
||||
// message-oriented (see comment above).
|
||||
Handshake(context.Context, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error)
|
||||
Handshake(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error)
|
||||
|
||||
// ReceiveMessage returns the next message received on the connection,
|
||||
// blocking until one is available. Returns io.EOF if closed.
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/netutil"
|
||||
|
||||
@@ -274,6 +275,7 @@ func newMConnConnection(
|
||||
// Handshake implements Connection.
|
||||
func (c *mConnConnection) Handshake(
|
||||
ctx context.Context,
|
||||
timeout time.Duration,
|
||||
nodeInfo types.NodeInfo,
|
||||
privKey crypto.PrivKey,
|
||||
) (types.NodeInfo, crypto.PubKey, error) {
|
||||
@@ -283,6 +285,12 @@ func (c *mConnConnection) Handshake(
|
||||
peerKey crypto.PubKey
|
||||
errCh = make(chan error, 1)
|
||||
)
|
||||
handshakeCtx := ctx
|
||||
if timeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
handshakeCtx, cancel = context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
}
|
||||
// To handle context cancellation, we need to do the handshake in a
|
||||
// goroutine and abort the blocking network calls by closing the connection
|
||||
// when the context is canceled.
|
||||
@@ -295,25 +303,29 @@ func (c *mConnConnection) Handshake(
|
||||
}
|
||||
}()
|
||||
var err error
|
||||
mconn, peerInfo, peerKey, err = c.handshake(ctx, nodeInfo, privKey)
|
||||
mconn, peerInfo, peerKey, err = c.handshake(handshakeCtx, nodeInfo, privKey)
|
||||
|
||||
select {
|
||||
case errCh <- err:
|
||||
case <-ctx.Done():
|
||||
case <-handshakeCtx.Done():
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-handshakeCtx.Done():
|
||||
_ = c.Close()
|
||||
return types.NodeInfo{}, nil, ctx.Err()
|
||||
return types.NodeInfo{}, nil, handshakeCtx.Err()
|
||||
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return types.NodeInfo{}, nil, err
|
||||
}
|
||||
c.mconn = mconn
|
||||
// Start must not use the handshakeCtx. The handshakeCtx may have a
|
||||
// timeout set that is intended to terminate only the handshake procedure.
|
||||
// The context passed to Start controls the entire lifecycle of the
|
||||
// mconn.
|
||||
if err = c.mconn.Start(ctx); err != nil {
|
||||
return types.NodeInfo{}, nil, err
|
||||
}
|
||||
|
||||
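The Handshake change above accepts the timeout as an argument and derives a handshakeCtx that bounds only the handshake; the caller's ctx keeps governing the connection afterwards, which is why Start uses ctx rather than handshakeCtx. A sketch of that scoped-timeout pattern with stand-in names:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// handshake stands in for the blocking handshake work.
func handshake(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// connect bounds only the handshake with the timeout; any work after it uses
// the caller's context so the deadline does not tear down the connection.
func connect(ctx context.Context, timeout time.Duration) error {
	handshakeCtx := ctx
	if timeout > 0 {
		var cancel context.CancelFunc
		handshakeCtx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	if err := handshake(handshakeCtx); err != nil {
		return fmt.Errorf("handshake: %w", err)
	}
	// The post-handshake lifecycle would be started with ctx, not handshakeCtx.
	return nil
}

func main() {
	err := connect(context.Background(), 10*time.Millisecond) // shorter than the handshake
	fmt.Println("deadline exceeded:", errors.Is(err, context.DeadlineExceeded))
}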
@@ -7,6 +7,7 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -273,9 +274,16 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint {
|
||||
// Handshake implements Connection.
|
||||
func (c *MemoryConnection) Handshake(
|
||||
ctx context.Context,
|
||||
timeout time.Duration,
|
||||
nodeInfo types.NodeInfo,
|
||||
privKey crypto.PrivKey,
|
||||
) (types.NodeInfo, crypto.PubKey, error) {
|
||||
if timeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
select {
|
||||
case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}:
|
||||
c.logger.Debug("sent handshake", "nodeInfo", nodeInfo)
|
||||
|
||||
@@ -296,7 +296,7 @@ func TestConnection_Handshake(t *testing.T) {
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
// Must use assert due to goroutine.
|
||||
peerInfo, peerKey, err := ba.Handshake(ctx, bInfo, bKey)
|
||||
peerInfo, peerKey, err := ba.Handshake(ctx, 0, bInfo, bKey)
|
||||
if err == nil {
|
||||
assert.Equal(t, aInfo, peerInfo)
|
||||
assert.Equal(t, aKey.PubKey(), peerKey)
|
||||
@@ -307,7 +307,7 @@ func TestConnection_Handshake(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey)
|
||||
peerInfo, peerKey, err := ab.Handshake(ctx, 0, aInfo, aKey)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bInfo, peerInfo)
|
||||
require.Equal(t, bKey.PubKey(), peerKey)
|
||||
@@ -328,7 +328,7 @@ func TestConnection_HandshakeCancel(t *testing.T) {
|
||||
ab, ba := dialAccept(ctx, t, a, b)
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
|
||||
cancel()
|
||||
_, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey())
|
||||
_, _, err := ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey())
|
||||
require.Error(t, err)
|
||||
require.Equal(t, context.Canceled, err)
|
||||
_ = ab.Close()
|
||||
@@ -338,7 +338,7 @@ func TestConnection_HandshakeCancel(t *testing.T) {
|
||||
ab, ba = dialAccept(ctx, t, a, b)
|
||||
timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond)
|
||||
defer cancel()
|
||||
_, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey())
|
||||
_, _, err = ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey())
|
||||
require.Error(t, err)
|
||||
require.Equal(t, context.DeadlineExceeded, err)
|
||||
_ = ab.Close()
|
||||
@@ -642,13 +642,13 @@ func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport)
|
||||
go func() {
|
||||
privKey := ed25519.GenPrivKey()
|
||||
nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
|
||||
_, _, err := ba.Handshake(ctx, nodeInfo, privKey)
|
||||
_, _, err := ba.Handshake(ctx, 0, nodeInfo, privKey)
|
||||
errCh <- err
|
||||
}()
|
||||
|
||||
privKey := ed25519.GenPrivKey()
|
||||
nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
|
||||
_, _, err := ab.Handshake(ctx, nodeInfo, privKey)
|
||||
_, _, err := ab.Handshake(ctx, 0, nodeInfo, privKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
timer := time.NewTimer(2 * time.Second)
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/consensus"
|
||||
"github.com/tendermint/tendermint/internal/eventbus"
|
||||
"github.com/tendermint/tendermint/internal/eventlog"
|
||||
"github.com/tendermint/tendermint/internal/libs/strings"
|
||||
"github.com/tendermint/tendermint/internal/mempool"
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
|
||||
@@ -26,7 +27,6 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/state/indexer"
|
||||
"github.com/tendermint/tendermint/internal/statesync"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/strings"
|
||||
"github.com/tendermint/tendermint/rpc/coretypes"
|
||||
rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
||||
@@ -9,8 +9,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/gogo/protobuf/jsonpb"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/internal/pubsub/query"
|
||||
"github.com/tendermint/tendermint/internal/state/indexer"
|
||||
@@ -177,12 +176,16 @@ INSERT INTO `+tableBlocks+` (height, chain_id, created_at)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
jsonpbMarshaller = jsonpb.Marshaler{}
|
||||
)
|
||||
|
||||
func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error {
|
||||
ts := time.Now().UTC()
|
||||
|
||||
for _, txr := range txrs {
|
||||
// Encode the result message in protobuf wire format for indexing.
|
||||
resultData, err := proto.Marshal(txr)
|
||||
resultData, err := jsonpbMarshaller.MarshalToString(txr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling tx_result: %w", err)
|
||||
}
|
||||
|
||||
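The sink change above stores tx results as protobuf JSON via gogo's jsonpb instead of protobuf wire bytes, and the test further down decodes them with a matching jsonpb.Unmarshaler. A rough round-trip sketch of that encoding choice; it uses a gogo well-known Timestamp message purely to stay self-contained rather than tendermint's abci types:

package main

import (
	"bytes"
	"fmt"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/gogo/protobuf/types"
)

func main() {
	// Any gogo proto.Message works here.
	original := &types.Timestamp{Seconds: 1650000000, Nanos: 42}

	// Encode to protobuf JSON, the format the psql sink now stores.
	m := jsonpb.Marshaler{}
	s, err := m.MarshalToString(original)
	if err != nil {
		panic(err)
	}
	fmt.Println("stored as:", s)

	// Decode the stored text back into a message, as the test does.
	u := jsonpb.Unmarshaler{}
	decoded := new(types.Timestamp)
	if err := u.Unmarshal(bytes.NewBufferString(s), decoded); err != nil {
		panic(err)
	}
	fmt.Println("round-tripped seconds:", decoded.Seconds)
}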
@@ -1,6 +1,7 @@
|
||||
package psql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"flag"
|
||||
@@ -12,7 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/adlio/schema"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/gogo/protobuf/jsonpb"
|
||||
"github.com/ory/dockertest"
|
||||
"github.com/ory/dockertest/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -151,6 +152,8 @@ func TestType(t *testing.T) {
|
||||
assert.Equal(t, indexer.PSQL, psqlSink.Type())
|
||||
}
|
||||
|
||||
var jsonpbUnmarshaller = jsonpb.Unmarshaler{}
|
||||
|
||||
func TestIndexing(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -278,13 +281,14 @@ func loadTxResult(hash []byte) (*abci.TxResult, error) {
|
||||
hashString := fmt.Sprintf("%X", hash)
|
||||
var resultData []byte
|
||||
if err := testDB().QueryRow(`
|
||||
SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
|
||||
`, hashString).Scan(&resultData); err != nil {
|
||||
SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
|
||||
`, hashString).Scan(&resultData); err != nil {
|
||||
return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err)
|
||||
}
|
||||
|
||||
reader := bytes.NewBuffer(resultData)
|
||||
txr := new(abci.TxResult)
|
||||
if err := proto.Unmarshal(resultData, txr); err != nil {
|
||||
if err := jsonpbUnmarshaller.Unmarshal(reader, txr); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling txr: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -61,20 +61,20 @@ func NewDefaultLogger(format, level string) (Logger, error) {
|
||||
}
|
||||
|
||||
func (l defaultLogger) Info(msg string, keyVals ...interface{}) {
|
||||
l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg)
|
||||
l.Logger.Info().Fields(keyVals).Msg(msg)
|
||||
}
|
||||
|
||||
func (l defaultLogger) Error(msg string, keyVals ...interface{}) {
|
||||
l.Logger.Error().Fields(getLogFields(keyVals...)).Msg(msg)
|
||||
l.Logger.Error().Fields(keyVals).Msg(msg)
|
||||
}
|
||||
|
||||
func (l defaultLogger) Debug(msg string, keyVals ...interface{}) {
|
||||
l.Logger.Debug().Fields(getLogFields(keyVals...)).Msg(msg)
|
||||
l.Logger.Debug().Fields(keyVals).Msg(msg)
|
||||
}
|
||||
|
||||
func (l defaultLogger) With(keyVals ...interface{}) Logger {
|
||||
return &defaultLogger{
|
||||
Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger(),
|
||||
Logger: l.Logger.With().Fields(keyVals).Logger(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,16 +99,3 @@ func OverrideWithNewLogger(logger Logger, format, level string) error {
|
||||
ol.Logger = nl.Logger
|
||||
return nil
|
||||
}
|
||||
|
||||
func getLogFields(keyVals ...interface{}) map[string]interface{} {
|
||||
if len(keyVals)%2 != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
fields := make(map[string]interface{}, len(keyVals))
|
||||
for i := 0; i < len(keyVals); i += 2 {
|
||||
fields[fmt.Sprint(keyVals[i])] = keyVals[i+1]
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
@@ -1034,7 +1034,12 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)
|
||||
|
||||
// process all the responses as they come in
|
||||
for i := 0; i < cap(witnessResponsesC); i++ {
|
||||
response := <-witnessResponsesC
|
||||
var response witnessResponse
|
||||
select {
|
||||
case response = <-witnessResponsesC:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
switch response.err {
|
||||
// success! We have found a new primary
|
||||
case nil:
|
||||
@@ -1063,10 +1068,6 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)
|
||||
// return the light block that new primary responded with
|
||||
return response.lb, nil
|
||||
|
||||
// catch canceled contexts or deadlines
|
||||
case context.Canceled, context.DeadlineExceeded:
|
||||
return nil, response.err
|
||||
|
||||
// process benign errors by logging them only
|
||||
case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh:
|
||||
lastError = response.err
|
||||
|
||||
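The light-client hunks above replace a bare channel receive with a select that also watches ctx.Done(), so findNewPrimary can no longer hang when the caller gives up; the explicit context.Canceled/DeadlineExceeded case below it becomes redundant and is dropped. The cancellable-receive pattern in isolation:

package main

import (
	"context"
	"fmt"
	"time"
)

// receive waits for a value but gives up as soon as the context is done.
func receive(ctx context.Context, ch <-chan int) (int, error) {
	select {
	case v := <-ch:
		return v, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func main() {
	ch := make(chan int) // nobody ever sends
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	_, err := receive(ctx, ch)
	fmt.Println("gave up with:", err)
}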
@@ -203,7 +203,7 @@ func makeNode(
|
||||
}
|
||||
}
|
||||
|
||||
peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
|
||||
peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID, nodeMetrics.p2p)
|
||||
closers = append(closers, peerCloser)
|
||||
if err != nil {
|
||||
return nil, combineCloseError(
|
||||
@@ -715,7 +715,9 @@ func loadStateFromDBOrGenesisDocProvider(stateStore sm.Store, genDoc *types.Gene
|
||||
|
||||
func getRouterConfig(conf *config.Config, appClient abciclient.Client) p2p.RouterOptions {
|
||||
opts := p2p.RouterOptions{
|
||||
QueueType: conf.P2P.QueueType,
|
||||
QueueType: conf.P2P.QueueType,
|
||||
HandshakeTimeout: conf.P2P.HandshakeTimeout,
|
||||
DialTimeout: conf.P2P.DialTimeout,
|
||||
}
|
||||
|
||||
if conf.FilterPeers && appClient != nil {
|
||||
|
||||
@@ -67,7 +67,7 @@ func makeSeedNode(
|
||||
// Setup Transport and Switch.
|
||||
p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID)
|
||||
|
||||
peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
|
||||
peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID, p2pMetrics)
|
||||
if err != nil {
|
||||
return nil, combineCloseError(
|
||||
fmt.Errorf("failed to create peer manager: %w", err),
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/consensus"
|
||||
"github.com/tendermint/tendermint/internal/eventbus"
|
||||
"github.com/tendermint/tendermint/internal/evidence"
|
||||
tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
|
||||
"github.com/tendermint/tendermint/internal/mempool"
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
"github.com/tendermint/tendermint/internal/p2p/conn"
|
||||
@@ -28,7 +29,6 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmstrings "github.com/tendermint/tendermint/libs/strings"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmgrpc "github.com/tendermint/tendermint/privval/grpc"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -202,6 +202,7 @@ func createPeerManager(
|
||||
cfg *config.Config,
|
||||
dbProvider config.DBProvider,
|
||||
nodeID types.NodeID,
|
||||
metrics *p2p.Metrics,
|
||||
) (*p2p.PeerManager, closer, error) {
|
||||
|
||||
selfAddr, err := p2p.ParseNodeAddress(nodeID.AddressString(cfg.P2P.ExternalAddress))
|
||||
@@ -223,18 +224,29 @@ func createPeerManager(
|
||||
maxConns = 64
|
||||
}
|
||||
|
||||
var maxOutgoingConns uint16
|
||||
switch {
|
||||
case cfg.P2P.MaxOutgoingConnections > 0:
|
||||
maxOutgoingConns = cfg.P2P.MaxOutgoingConnections
|
||||
default:
|
||||
maxOutgoingConns = maxConns / 2
|
||||
}
|
||||
|
||||
maxUpgradeConns := uint16(4)
|
||||
|
||||
options := p2p.PeerManagerOptions{
|
||||
SelfAddress: selfAddr,
|
||||
MaxConnected: maxConns,
|
||||
MaxConnectedUpgrade: maxUpgradeConns,
|
||||
MaxPeers: maxUpgradeConns + 2*maxConns,
|
||||
MinRetryTime: 250 * time.Millisecond,
|
||||
MaxRetryTime: 30 * time.Minute,
|
||||
MaxRetryTimePersistent: 5 * time.Minute,
|
||||
RetryTimeJitter: 5 * time.Second,
|
||||
PrivatePeers: privatePeerIDs,
|
||||
SelfAddress: selfAddr,
|
||||
MaxConnected: maxConns,
|
||||
MaxOutgoingConnections: maxOutgoingConns,
|
||||
MaxConnectedUpgrade: maxUpgradeConns,
|
||||
DisconnectCooldownPeriod: 2 * time.Second,
|
||||
MaxPeers: maxUpgradeConns + 4*maxConns,
|
||||
MinRetryTime: 250 * time.Millisecond,
|
||||
MaxRetryTime: 30 * time.Minute,
|
||||
MaxRetryTimePersistent: 5 * time.Minute,
|
||||
RetryTimeJitter: 5 * time.Second,
|
||||
PrivatePeers: privatePeerIDs,
|
||||
Metrics: metrics,
|
||||
}
|
||||
|
||||
peers := []p2p.NodeAddress{}
|
||||
|
||||
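createPeerManager above now derives MaxOutgoingConnections: an explicitly configured value wins, otherwise it defaults to half of MaxConnected, and MaxPeers grows to MaxConnectedUpgrade + 4*MaxConnected. A tiny sketch of the defaulting logic (the helper name is illustrative):

package main

import "fmt"

// outgoingLimit mirrors the switch in the hunk above: an explicit
// configuration wins, otherwise half of the total connection budget.
func outgoingLimit(configured, maxConns uint16) uint16 {
	if configured > 0 {
		return configured
	}
	return maxConns / 2
}

func main() {
	fmt.Println(outgoingLimit(0, 64))  // -> 32 (default)
	fmt.Println(outgoingLimit(10, 64)) // -> 10 (explicit)
}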
@@ -243,6 +243,7 @@ type PeerInfo struct {
|
||||
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
AddressInfo []*PeerAddressInfo `protobuf:"bytes,2,rep,name=address_info,json=addressInfo,proto3" json:"address_info,omitempty"`
|
||||
LastConnected *time.Time `protobuf:"bytes,3,opt,name=last_connected,json=lastConnected,proto3,stdtime" json:"last_connected,omitempty"`
|
||||
Inactive bool `protobuf:"varint,4,opt,name=inactive,proto3" json:"inactive,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PeerInfo) Reset() { *m = PeerInfo{} }
|
||||
@@ -299,6 +300,13 @@ func (m *PeerInfo) GetLastConnected() *time.Time {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PeerInfo) GetInactive() bool {
|
||||
if m != nil {
|
||||
return m.Inactive
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type PeerAddressInfo struct {
|
||||
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
|
||||
LastDialSuccess *time.Time `protobuf:"bytes,2,opt,name=last_dial_success,json=lastDialSuccess,proto3,stdtime" json:"last_dial_success,omitempty"`
|
||||
@@ -378,46 +386,46 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) }
|
||||
|
||||
var fileDescriptor_c8a29e659aeca578 = []byte{
|
||||
// 610 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x4e, 0x1b, 0x3d,
|
||||
0x14, 0xcd, 0x24, 0x21, 0x09, 0x37, 0x84, 0xf0, 0x59, 0xe8, 0xd3, 0x10, 0xa9, 0x19, 0x14, 0x36,
|
||||
0xac, 0x26, 0x52, 0xaa, 0x2e, 0xba, 0x64, 0x40, 0xad, 0x22, 0x55, 0x25, 0x9a, 0xa2, 0x2e, 0xda,
|
||||
0xc5, 0x68, 0x32, 0x76, 0x82, 0xc5, 0xc4, 0xb6, 0x3c, 0x4e, 0x4b, 0xdf, 0x82, 0x37, 0xe9, 0x63,
|
||||
0x94, 0x25, 0xcb, 0xae, 0xd2, 0x6a, 0xd8, 0xf6, 0x21, 0x2a, 0xdb, 0x33, 0x40, 0xa2, 0x2e, 0xd8,
|
||||
0xf9, 0xdc, 0xe3, 0x73, 0xee, 0x8f, 0xad, 0x0b, 0x3d, 0x45, 0x18, 0x26, 0x72, 0x41, 0x99, 0x1a,
|
||||
0x8a, 0x91, 0x18, 0xaa, 0x6f, 0x82, 0x64, 0xbe, 0x90, 0x5c, 0x71, 0xb4, 0xfb, 0xc8, 0xf9, 0x62,
|
||||
0x24, 0x7a, 0xfb, 0x73, 0x3e, 0xe7, 0x86, 0x1a, 0xea, 0x93, 0xbd, 0xd5, 0xf3, 0xe6, 0x9c, 0xcf,
|
||||
0x53, 0x32, 0x34, 0x68, 0xba, 0x9c, 0x0d, 0x15, 0x5d, 0x90, 0x4c, 0xc5, 0x0b, 0x61, 0x2f, 0x0c,
|
||||
0x2e, 0xa0, 0x3b, 0xd1, 0x87, 0x84, 0xa7, 0x1f, 0x89, 0xcc, 0x28, 0x67, 0xe8, 0x00, 0x6a, 0x62,
|
||||
0x24, 0x5c, 0xe7, 0xd0, 0x39, 0xae, 0x07, 0xcd, 0x7c, 0xe5, 0xd5, 0x26, 0xa3, 0x49, 0xa8, 0x63,
|
||||
0x68, 0x1f, 0xb6, 0xa6, 0x29, 0x4f, 0xae, 0xdc, 0xaa, 0x26, 0x43, 0x0b, 0xd0, 0x1e, 0xd4, 0x62,
|
||||
0x21, 0xdc, 0x9a, 0x89, 0xe9, 0xe3, 0xe0, 0x47, 0x15, 0x5a, 0xef, 0x39, 0x26, 0x63, 0x36, 0xe3,
|
||||
0x68, 0x02, 0x7b, 0xa2, 0x48, 0x11, 0x7d, 0xb1, 0x39, 0x8c, 0x79, 0x7b, 0xe4, 0xf9, 0xeb, 0x4d,
|
||||
0xf8, 0x1b, 0xa5, 0x04, 0xf5, 0xdb, 0x95, 0x57, 0x09, 0xbb, 0x62, 0xa3, 0xc2, 0x23, 0x68, 0x32,
|
||||
0x8e, 0x49, 0x44, 0xb1, 0x29, 0x64, 0x3b, 0x80, 0x7c, 0xe5, 0x35, 0x4c, 0xc2, 0xb3, 0xb0, 0xa1,
|
||||
0xa9, 0x31, 0x46, 0x1e, 0xb4, 0x53, 0x9a, 0x29, 0xc2, 0xa2, 0x18, 0x63, 0x69, 0xaa, 0xdb, 0x0e,
|
||||
0xc1, 0x86, 0x4e, 0x30, 0x96, 0xc8, 0x85, 0x26, 0x23, 0xea, 0x2b, 0x97, 0x57, 0x6e, 0xdd, 0x90,
|
||||
0x25, 0xd4, 0x4c, 0x59, 0xe8, 0x96, 0x65, 0x0a, 0x88, 0x7a, 0xd0, 0x4a, 0x2e, 0x63, 0xc6, 0x48,
|
||||
0x9a, 0xb9, 0x8d, 0x43, 0xe7, 0x78, 0x27, 0x7c, 0xc0, 0x5a, 0xb5, 0xe0, 0x8c, 0x5e, 0x11, 0xe9,
|
||||
0x36, 0xad, 0xaa, 0x80, 0xe8, 0x35, 0x6c, 0x71, 0x75, 0x49, 0xa4, 0xdb, 0x32, 0x6d, 0xbf, 0xd8,
|
||||
0x6c, 0xbb, 0x1c, 0xd5, 0xb9, 0xbe, 0x54, 0x34, 0x6d, 0x15, 0x83, 0xcf, 0xd0, 0x59, 0x63, 0xd1,
|
||||
0x01, 0xb4, 0xd4, 0x75, 0x44, 0x19, 0x26, 0xd7, 0x66, 0x8a, 0xdb, 0x61, 0x53, 0x5d, 0x8f, 0x35,
|
||||
0x44, 0x43, 0x68, 0x4b, 0x91, 0x98, 0x76, 0x49, 0x96, 0x15, 0xa3, 0xd9, 0xcd, 0x57, 0x1e, 0x84,
|
||||
0x93, 0xd3, 0x13, 0x1b, 0x0d, 0x41, 0x8a, 0xa4, 0x38, 0x0f, 0xbe, 0x3b, 0xd0, 0x9a, 0x10, 0x22,
|
||||
0xcd, 0x33, 0xfd, 0x0f, 0x55, 0x8a, 0xad, 0x65, 0xd0, 0xc8, 0x57, 0x5e, 0x75, 0x7c, 0x16, 0x56,
|
||||
0x29, 0x46, 0x01, 0xec, 0x14, 0x8e, 0x11, 0x65, 0x33, 0xee, 0x56, 0x0f, 0x6b, 0xff, 0x7c, 0x3a,
|
||||
0x42, 0x64, 0xe1, 0xab, 0xed, 0xc2, 0x76, 0xfc, 0x08, 0xd0, 0x5b, 0xd8, 0x4d, 0xe3, 0x4c, 0x45,
|
||||
0x09, 0x67, 0x8c, 0x24, 0x8a, 0x60, 0xf3, 0x1c, 0xed, 0x51, 0xcf, 0xb7, 0xff, 0xd3, 0x2f, 0xff,
|
||||
0xa7, 0x7f, 0x51, 0xfe, 0xcf, 0xa0, 0x7e, 0xf3, 0xcb, 0x73, 0xc2, 0x8e, 0xd6, 0x9d, 0x96, 0xb2,
|
||||
0xc1, 0x1f, 0x07, 0xba, 0x1b, 0x99, 0xf4, 0xdc, 0xcb, 0x96, 0x8b, 0x81, 0x14, 0x10, 0xbd, 0x83,
|
||||
0xff, 0x4c, 0x5a, 0x4c, 0xe3, 0x34, 0xca, 0x96, 0x49, 0x52, 0x8e, 0xe5, 0x39, 0x99, 0xbb, 0x5a,
|
||||
0x7a, 0x46, 0xe3, 0xf4, 0x83, 0x15, 0xae, 0xbb, 0xcd, 0x62, 0x9a, 0x2e, 0x25, 0x79, 0x76, 0x1f,
|
||||
0x0f, 0x6e, 0x6f, 0xac, 0x10, 0x1d, 0x41, 0xe7, 0xa9, 0x51, 0x66, 0xfe, 0x60, 0x27, 0xdc, 0xc1,
|
||||
0x8f, 0x77, 0xb2, 0xe0, 0xfc, 0x36, 0xef, 0x3b, 0x77, 0x79, 0xdf, 0xf9, 0x9d, 0xf7, 0x9d, 0x9b,
|
||||
0xfb, 0x7e, 0xe5, 0xee, 0xbe, 0x5f, 0xf9, 0x79, 0xdf, 0xaf, 0x7c, 0x7a, 0x35, 0xa7, 0xea, 0x72,
|
||||
0x39, 0xf5, 0x13, 0xbe, 0x18, 0x3e, 0xd9, 0x12, 0x4f, 0x17, 0x86, 0xd9, 0x05, 0xeb, 0x1b, 0x64,
|
||||
0xda, 0x30, 0xd1, 0x97, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x0b, 0xe9, 0x56, 0xd3, 0x5a, 0x04,
|
||||
0x00, 0x00,
|
||||
// 621 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xdb, 0x30,
|
||||
0x14, 0x6e, 0xda, 0xd2, 0x96, 0x57, 0x4a, 0x99, 0x85, 0xa6, 0x50, 0x69, 0x0d, 0x2a, 0x17, 0x4e,
|
||||
0x89, 0xd4, 0x69, 0x87, 0x1d, 0x09, 0x68, 0x53, 0xa5, 0x69, 0x54, 0x1e, 0xda, 0x61, 0x3b, 0x44,
|
||||
0x69, 0xec, 0x16, 0x8b, 0xd4, 0xb6, 0x12, 0x97, 0xb1, 0x7f, 0xc1, 0xbf, 0x1a, 0xd2, 0x2e, 0x1c,
|
||||
0x77, 0xea, 0xa6, 0x70, 0xdd, 0x8f, 0x98, 0xec, 0x24, 0xd0, 0x56, 0x3b, 0x70, 0xf3, 0xf7, 0x9e,
|
||||
0xbf, 0xcf, 0xdf, 0x7b, 0xcf, 0x7a, 0xd0, 0x53, 0x94, 0x13, 0x9a, 0xcc, 0x19, 0x57, 0x9e, 0x1c,
|
||||
0x4a, 0x4f, 0x7d, 0x97, 0x34, 0x75, 0x65, 0x22, 0x94, 0x40, 0xbb, 0x4f, 0x39, 0x57, 0x0e, 0x65,
|
||||
0x6f, 0x7f, 0x26, 0x66, 0xc2, 0xa4, 0x3c, 0x7d, 0xca, 0x6f, 0xf5, 0x9c, 0x99, 0x10, 0xb3, 0x98,
|
||||
0x7a, 0x06, 0x4d, 0x16, 0x53, 0x4f, 0xb1, 0x39, 0x4d, 0x55, 0x38, 0x97, 0xf9, 0x85, 0xc1, 0x05,
|
||||
0x74, 0xc7, 0xfa, 0x10, 0x89, 0xf8, 0x33, 0x4d, 0x52, 0x26, 0x38, 0x3a, 0x80, 0x9a, 0x1c, 0x4a,
|
||||
0xdb, 0x3a, 0xb4, 0x8e, 0xeb, 0x7e, 0x33, 0x5b, 0x3a, 0xb5, 0xf1, 0x70, 0x8c, 0x75, 0x0c, 0xed,
|
||||
0xc3, 0xd6, 0x24, 0x16, 0xd1, 0x95, 0x5d, 0xd5, 0x49, 0x9c, 0x03, 0xb4, 0x07, 0xb5, 0x50, 0x4a,
|
||||
0xbb, 0x66, 0x62, 0xfa, 0x38, 0xf8, 0x51, 0x85, 0xd6, 0x47, 0x41, 0xe8, 0x88, 0x4f, 0x05, 0x1a,
|
||||
0xc3, 0x9e, 0x2c, 0x9e, 0x08, 0xae, 0xf3, 0x37, 0x8c, 0x78, 0x7b, 0xe8, 0xb8, 0xeb, 0x45, 0xb8,
|
||||
0x1b, 0x56, 0xfc, 0xfa, 0xdd, 0xd2, 0xa9, 0xe0, 0xae, 0xdc, 0x70, 0x78, 0x04, 0x4d, 0x2e, 0x08,
|
||||
0x0d, 0x18, 0x31, 0x46, 0xb6, 0x7d, 0xc8, 0x96, 0x4e, 0xc3, 0x3c, 0x78, 0x86, 0x1b, 0x3a, 0x35,
|
||||
0x22, 0xc8, 0x81, 0x76, 0xcc, 0x52, 0x45, 0x79, 0x10, 0x12, 0x92, 0x18, 0x77, 0xdb, 0x18, 0xf2,
|
||||
0xd0, 0x09, 0x21, 0x09, 0xb2, 0xa1, 0xc9, 0xa9, 0xfa, 0x26, 0x92, 0x2b, 0xbb, 0x6e, 0x92, 0x25,
|
||||
0xd4, 0x99, 0xd2, 0xe8, 0x56, 0x9e, 0x29, 0x20, 0xea, 0x41, 0x2b, 0xba, 0x0c, 0x39, 0xa7, 0x71,
|
||||
0x6a, 0x37, 0x0e, 0xad, 0xe3, 0x1d, 0xfc, 0x88, 0x35, 0x6b, 0x2e, 0x38, 0xbb, 0xa2, 0x89, 0xdd,
|
||||
0xcc, 0x59, 0x05, 0x44, 0x6f, 0x61, 0x4b, 0xa8, 0x4b, 0x9a, 0xd8, 0x2d, 0x53, 0xf6, 0xab, 0xcd,
|
||||
0xb2, 0xcb, 0x56, 0x9d, 0xeb, 0x4b, 0x45, 0xd1, 0x39, 0x63, 0xf0, 0x15, 0x3a, 0x6b, 0x59, 0x74,
|
||||
0x00, 0x2d, 0x75, 0x13, 0x30, 0x4e, 0xe8, 0x8d, 0xe9, 0xe2, 0x36, 0x6e, 0xaa, 0x9b, 0x91, 0x86,
|
||||
0xc8, 0x83, 0x76, 0x22, 0x23, 0x53, 0x2e, 0x4d, 0xd3, 0xa2, 0x35, 0xbb, 0xd9, 0xd2, 0x01, 0x3c,
|
||||
0x3e, 0x3d, 0xc9, 0xa3, 0x18, 0x12, 0x19, 0x15, 0xe7, 0xc1, 0x4f, 0x0b, 0x5a, 0x63, 0x4a, 0x13,
|
||||
0x33, 0xa6, 0x97, 0x50, 0x65, 0x24, 0x97, 0xf4, 0x1b, 0xd9, 0xd2, 0xa9, 0x8e, 0xce, 0x70, 0x95,
|
||||
0x11, 0xe4, 0xc3, 0x4e, 0xa1, 0x18, 0x30, 0x3e, 0x15, 0x76, 0xf5, 0xb0, 0xf6, 0xdf, 0xd1, 0x51,
|
||||
0x9a, 0x14, 0xba, 0x5a, 0x0e, 0xb7, 0xc3, 0x27, 0x80, 0xde, 0xc3, 0x6e, 0x1c, 0xa6, 0x2a, 0x88,
|
||||
0x04, 0xe7, 0x34, 0x52, 0x94, 0x98, 0x71, 0xb4, 0x87, 0x3d, 0x37, 0xff, 0x9f, 0x6e, 0xf9, 0x3f,
|
||||
0xdd, 0x8b, 0xf2, 0x7f, 0xfa, 0xf5, 0xdb, 0xdf, 0x8e, 0x85, 0x3b, 0x9a, 0x77, 0x5a, 0xd2, 0x74,
|
||||
0xff, 0x19, 0x0f, 0x23, 0xc5, 0xae, 0xa9, 0x19, 0x5a, 0x0b, 0x3f, 0xe2, 0xc1, 0x5f, 0x0b, 0xba,
|
||||
0x1b, 0x2e, 0xf4, 0x4c, 0xca, 0x76, 0x14, 0xcd, 0x2a, 0x20, 0xfa, 0x00, 0x2f, 0x8c, 0x25, 0xc2,
|
||||
0xc2, 0x38, 0x48, 0x17, 0x51, 0x54, 0xb6, 0xec, 0x39, 0xae, 0xba, 0x9a, 0x7a, 0xc6, 0xc2, 0xf8,
|
||||
0x53, 0x4e, 0x5c, 0x57, 0x9b, 0x86, 0x2c, 0x5e, 0x24, 0xf4, 0xd9, 0x35, 0x3e, 0xaa, 0xbd, 0xcb,
|
||||
0x89, 0xe8, 0x08, 0x3a, 0xab, 0x42, 0xa9, 0x29, 0xb5, 0x83, 0x77, 0xc8, 0xd3, 0x9d, 0xd4, 0x3f,
|
||||
0xbf, 0xcb, 0xfa, 0xd6, 0x7d, 0xd6, 0xb7, 0xfe, 0x64, 0x7d, 0xeb, 0xf6, 0xa1, 0x5f, 0xb9, 0x7f,
|
||||
0xe8, 0x57, 0x7e, 0x3d, 0xf4, 0x2b, 0x5f, 0xde, 0xcc, 0x98, 0xba, 0x5c, 0x4c, 0xdc, 0x48, 0xcc,
|
||||
0xbd, 0x95, 0x0d, 0xb2, 0xba, 0x4c, 0xcc, 0x9e, 0x58, 0xdf, 0x2e, 0x93, 0x86, 0x89, 0xbe, 0xfe,
|
||||
0x17, 0x00, 0x00, 0xff, 0xff, 0x42, 0xcb, 0x37, 0x26, 0x76, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) {
|
||||
@@ -600,6 +608,16 @@ func (m *PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Inactive {
|
||||
i--
|
||||
if m.Inactive {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x20
|
||||
}
|
||||
if m.LastConnected != nil {
|
||||
n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastConnected, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected):])
|
||||
if err3 != nil {
|
||||
@@ -792,6 +810,9 @@ func (m *PeerInfo) Size() (n int) {
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected)
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
if m.Inactive {
|
||||
n += 2
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@@ -1487,6 +1508,26 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Inactive", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.Inactive = bool(v != 0)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
|
||||
@@ -32,6 +32,7 @@ message PeerInfo {
|
||||
string id = 1 [(gogoproto.customname) = "ID"];
|
||||
repeated PeerAddressInfo address_info = 2;
|
||||
google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true];
|
||||
bool inactive = 4;
|
||||
}
|
||||
|
||||
message PeerAddressInfo {
|
||||
|
||||
@@ -342,6 +342,9 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA
|
||||
// total number of transaction bytes to exceed `req.MaxTxBytes`, we will not
|
||||
// append our special vote extension transaction.
|
||||
func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
var sum int64
|
||||
var extCount int
|
||||
for _, vote := range req.LocalLastCommit.Votes {
|
||||
@@ -423,6 +426,9 @@ func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrep
|
||||
// ProcessProposal implements part of the Application interface.
|
||||
// It accepts any proposal that does not contain a malformed transaction.
|
||||
func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
for _, tx := range req.Txs {
|
||||
k, v, err := parseTx(tx)
|
||||
if err != nil {
|
||||
@@ -454,6 +460,9 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc
|
||||
// key/value store ("extensionSum") with the sum of all of the numbers collected
|
||||
// from the vote extensions.
|
||||
func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
// We ignore any requests for vote extensions that don't match our expected
|
||||
// next height.
|
||||
if req.Height != int64(app.state.Height)+1 {
|
||||
@@ -485,6 +494,9 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot
|
||||
// without doing anything about them. In this case, it just makes sure that the
|
||||
// vote extension is a well-formed integer value.
|
||||
func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
// We allow vote extensions to be optional
|
||||
if len(req.VoteExtension) == 0 {
|
||||
return &abci.ResponseVerifyVoteExtension{
|
||||
|
||||
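The e2e application hunks above add app.mu.Lock() with a deferred Unlock to PrepareProposal, ProcessProposal, ExtendVote, and VerifyVoteExtension, so concurrent ABCI calls cannot race on the shared state. A generic sketch of that guard (the application type here is a stand-in, not the e2e app):

package main

import (
	"fmt"
	"sync"
)

// app is a stand-in for an ABCI application with shared mutable state.
type app struct {
	mu     sync.Mutex
	height int64
}

// handle models any ABCI method body: hold the lock for the whole call so
// concurrent requests see a consistent view of the state.
func (a *app) handle() int64 {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.height++
	return a.height
}

func main() {
	a := &app{}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			a.handle()
		}()
	}
	wg.Wait()

	a.mu.Lock()
	fmt.Println("final height:", a.height)
	a.mu.Unlock()
}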
@@ -1,4 +1,3 @@
|
||||
//nolint: gosec
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -77,6 +76,8 @@ func (cli *CLI) generate() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// nolint: gosec
|
||||
// G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), cli.opts)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
34
test/e2e/pkg/exec/exec.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package exec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
osexec "os/exec"
|
||||
)
|
||||
|
||||
// Command executes a shell command.
|
||||
func Command(ctx context.Context, args ...string) error {
|
||||
// nolint: gosec
|
||||
// G204: Subprocess launched with a potential tainted input or cmd arguments
|
||||
cmd := osexec.CommandContext(ctx, args[0], args[1:]...)
|
||||
out, err := cmd.CombinedOutput()
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case *osexec.ExitError:
|
||||
return fmt.Errorf("failed to run %q:\n%v", args, string(out))
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// CommandVerbose executes a shell command while displaying its output.
|
||||
func CommandVerbose(ctx context.Context, args ...string) error {
|
||||
// nolint: gosec
|
||||
// G204: Subprocess launched with a potential tainted input or cmd arguments
|
||||
cmd := osexec.CommandContext(ctx, args[0], args[1:]...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
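The new exec package wraps os/exec with two helpers: Command captures combined output and surfaces it only when the command fails, while CommandVerbose streams output directly. A hypothetical caller might use them like this (the commands and timeout are illustrative):

package main

import (
	"context"
	"log"
	"time"

	"github.com/tendermint/tendermint/test/e2e/pkg/exec"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Quiet form: output is only reported as part of the error on failure.
	if err := exec.Command(ctx, "docker", "version"); err != nil {
		log.Fatal(err)
	}

	// Verbose form: stdout/stderr are streamed to the caller's terminal.
	if err := exec.CommandVerbose(ctx, "docker-compose", "--version"); err != nil {
		log.Fatal(err)
	}
}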
69
test/e2e/pkg/infra/docker/compose.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"text/template"
|
||||
|
||||
e2e "github.com/tendermint/tendermint/test/e2e/pkg"
|
||||
)
|
||||
|
||||
// makeDockerCompose generates a Docker Compose config for a testnet.
|
||||
func makeDockerCompose(testnet *e2e.Testnet) ([]byte, error) {
|
||||
// Must use version 2 Docker Compose format, to support IPv6.
|
||||
tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{
|
||||
"addUint32": func(x, y uint32) uint32 {
|
||||
return x + y
|
||||
},
|
||||
"isBuiltin": func(protocol e2e.Protocol, mode e2e.Mode) bool {
|
||||
return mode == e2e.ModeLight || protocol == e2e.ProtocolBuiltin
|
||||
},
|
||||
}).Parse(`version: '2.4'
|
||||
|
||||
networks:
|
||||
{{ .Name }}:
|
||||
labels:
|
||||
e2e: true
|
||||
driver: bridge
|
||||
{{- if .IPv6 }}
|
||||
enable_ipv6: true
|
||||
{{- end }}
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: {{ .IP }}
|
||||
|
||||
services:
|
||||
{{- range .Nodes }}
|
||||
{{ .Name }}:
|
||||
labels:
|
||||
e2e: true
|
||||
container_name: {{ .Name }}
|
||||
image: tendermint/e2e-node
|
||||
{{- if isBuiltin $.ABCIProtocol .Mode }}
|
||||
entrypoint: /usr/bin/entrypoint-builtin
|
||||
{{- else if .LogLevel }}
|
||||
command: start --log-level {{ .LogLevel }}
|
||||
{{- end }}
|
||||
init: true
|
||||
ports:
|
||||
- 26656
|
||||
- {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660
|
||||
- {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657
|
||||
- 6060
|
||||
volumes:
|
||||
- ./{{ .Name }}:/tendermint
|
||||
networks:
|
||||
{{ $.Name }}:
|
||||
ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }}
|
||||
|
||||
{{end}}`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err = tmpl.Execute(&buf, testnet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
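makeDockerCompose above renders the compose file with text/template plus a small FuncMap (addUint32 for port offsets, isBuiltin for entrypoint selection). A reduced, self-contained sketch of the same templating approach with made-up node data:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

type node struct {
	Name      string
	ProxyPort uint32
}

func main() {
	tmpl, err := template.New("compose").Funcs(template.FuncMap{
		// addUint32 mirrors the helper used above to derive the metrics port.
		"addUint32": func(x, y uint32) uint32 { return x + y },
	}).Parse(`{{- range . }}
{{ .Name }}:
  ports:
    - {{ .ProxyPort }}:26657
    - {{ addUint32 .ProxyPort 1000 }}:26660
{{- end }}
`)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, []node{{"validator01", 5701}, {"validator02", 5702}}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}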
27
test/e2e/pkg/infra/docker/exec.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/tendermint/tendermint/test/e2e/pkg/exec"
|
||||
)
|
||||
|
||||
// execCompose runs a Docker Compose command for a testnet.
|
||||
func execCompose(ctx context.Context, dir string, args ...string) error {
|
||||
return exec.Command(ctx, append(
|
||||
[]string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")},
|
||||
args...)...)
|
||||
}
|
||||
|
||||
// execComposeVerbose runs a Docker Compose command for a testnet and displays its output.
|
||||
func execComposeVerbose(ctx context.Context, dir string, args ...string) error {
|
||||
return exec.CommandVerbose(ctx, append(
|
||||
[]string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")},
|
||||
args...)...)
|
||||
}
|
||||
|
||||
// execDocker runs a Docker command.
|
||||
func execDocker(ctx context.Context, args ...string) error {
|
||||
return exec.Command(ctx, append([]string{"docker"}, args...)...)
|
||||
}
|
||||
140
test/e2e/pkg/infra/docker/infra.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package docker

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/tendermint/tendermint/libs/log"
	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/test/e2e/pkg/exec"
	"github.com/tendermint/tendermint/test/e2e/pkg/infra"
)

// testnetInfra provides an API for provisioning and manipulating
// infrastructure for a Docker-based testnet.
type testnetInfra struct {
	logger  log.Logger
	testnet *e2e.Testnet
}

var _ infra.TestnetInfra = &testnetInfra{}

// NewTestnetInfra constructs an infrastructure provider that allows for Docker-based
// testnet infrastructure.
func NewTestnetInfra(logger log.Logger, testnet *e2e.Testnet) infra.TestnetInfra {
	return &testnetInfra{
		logger:  logger,
		testnet: testnet,
	}
}

func (ti *testnetInfra) Setup(ctx context.Context) error {
	compose, err := makeDockerCompose(ti.testnet)
	if err != nil {
		return err
	}
	// nolint: gosec
	// G306: Expect WriteFile permissions to be 0600 or less
	err = os.WriteFile(filepath.Join(ti.testnet.Dir, "docker-compose.yml"), compose, 0644)
	if err != nil {
		return err
	}
	return nil
}

func (ti *testnetInfra) StartNode(ctx context.Context, node *e2e.Node) error {
	return execCompose(ctx, ti.testnet.Dir, "up", "-d", node.Name)
}

func (ti *testnetInfra) DisconnectNode(ctx context.Context, node *e2e.Node) error {
	return execDocker(ctx, "network", "disconnect", ti.testnet.Name+"_"+ti.testnet.Name, node.Name)
}

func (ti *testnetInfra) ConnectNode(ctx context.Context, node *e2e.Node) error {
	return execDocker(ctx, "network", "connect", ti.testnet.Name+"_"+ti.testnet.Name, node.Name)
}

func (ti *testnetInfra) KillNodeProcess(ctx context.Context, node *e2e.Node) error {
	return execCompose(ctx, ti.testnet.Dir, "kill", "-s", "SIGKILL", node.Name)
}

func (ti *testnetInfra) StartNodeProcess(ctx context.Context, node *e2e.Node) error {
	return execCompose(ctx, ti.testnet.Dir, "start", node.Name)
}

func (ti *testnetInfra) PauseNodeProcess(ctx context.Context, node *e2e.Node) error {
	return execCompose(ctx, ti.testnet.Dir, "pause", node.Name)
}

func (ti *testnetInfra) UnpauseNodeProcess(ctx context.Context, node *e2e.Node) error {
	return execCompose(ctx, ti.testnet.Dir, "unpause", node.Name)
}

func (ti *testnetInfra) TerminateNodeProcess(ctx context.Context, node *e2e.Node) error {
	return execCompose(ctx, ti.testnet.Dir, "kill", "-s", "SIGTERM", node.Name)
}

func (ti *testnetInfra) Stop(ctx context.Context) error {
	return execCompose(ctx, ti.testnet.Dir, "down")
}

func (ti *testnetInfra) Pause(ctx context.Context) error {
	return execCompose(ctx, ti.testnet.Dir, "pause")
}

func (ti *testnetInfra) Unpause(ctx context.Context) error {
	return execCompose(ctx, ti.testnet.Dir, "unpause")
}

func (ti *testnetInfra) ShowLogs(ctx context.Context) error {
	return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--no-color")
}

func (ti *testnetInfra) ShowNodeLogs(ctx context.Context, node *e2e.Node) error {
	return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--no-color", node.Name)
}

func (ti *testnetInfra) TailLogs(ctx context.Context) error {
	return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--follow")
}

func (ti *testnetInfra) TailNodeLogs(ctx context.Context, node *e2e.Node) error {
	return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--follow", node.Name)
}

func (ti *testnetInfra) Cleanup(ctx context.Context) error {
	ti.logger.Info("Removing Docker containers and networks")

	// GNU xargs requires the -r flag to not run when input is empty, macOS
	// does this by default. Ugly, but works.
	xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)`

	err := exec.Command(ctx, "bash", "-c", fmt.Sprintf(
		"docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR))
	if err != nil {
		return err
	}

	err = exec.Command(ctx, "bash", "-c", fmt.Sprintf(
		"docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR))
	if err != nil {
		return err
	}

	// On Linux, some local files in the volume will be owned by root since Tendermint
	// runs as root inside the container, so we need to clean them up from within a
	// container running as root too.
	absDir, err := filepath.Abs(ti.testnet.Dir)
	if err != nil {
		return err
	}
	err = execDocker(ctx, "run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir),
		"tendermint/e2e-node", "sh", "-c", "rm -rf /network/*/")
	if err != nil {
		return err
	}

	return nil
}
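A minimal usage sketch (not part of the diff) of how the runner is expected to drive this provider; it assumes a log.Logger, a loaded *e2e.Testnet, and a context are already in hand.

	ti := docker.NewTestnetInfra(logger, testnet) // satisfies infra.TestnetInfra
	if err := ti.Setup(ctx); err != nil {         // writes <testnet.Dir>/docker-compose.yml
		return err
	}
	for _, node := range testnet.Nodes {
		if err := ti.StartNode(ctx, node); err != nil { // docker-compose up -d <node>
			return err
		}
	}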
test/e2e/pkg/infra/infra.go (new file, 84 lines)
@@ -0,0 +1,84 @@
package infra

import (
	"context"

	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
)

// TestnetInfra provides an API for manipulating the infrastructure of a
// specific testnet.
type TestnetInfra interface {
	//
	// Overarching testnet infrastructure management.
	//

	// Setup generates any necessary configuration for the infrastructure
	// provider during testnet setup.
	Setup(ctx context.Context) error

	// Stop will stop all running processes throughout the testnet without
	// destroying any infrastructure.
	Stop(ctx context.Context) error

	// Pause will pause all processes in the testnet.
	Pause(ctx context.Context) error

	// Unpause will resume a paused testnet.
	Unpause(ctx context.Context) error

	// ShowLogs prints all logs for the whole testnet to stdout.
	ShowLogs(ctx context.Context) error

	// TailLogs tails the logs for all nodes in the testnet, if this is
	// supported by the infrastructure provider.
	TailLogs(ctx context.Context) error

	// Cleanup stops and destroys all running testnet infrastructure and
	// deletes any generated files.
	Cleanup(ctx context.Context) error

	//
	// Node management, including node infrastructure.
	//

	// StartNode provisions infrastructure for the given node and starts it.
	StartNode(ctx context.Context, node *e2e.Node) error

	// DisconnectNode modifies the specified node's network configuration such
	// that it becomes bidirectionally disconnected from the network (it cannot
	// see other nodes, and other nodes cannot see it).
	DisconnectNode(ctx context.Context, node *e2e.Node) error

	// ConnectNode modifies the specified node's network configuration such
	// that it can become bidirectionally connected.
	ConnectNode(ctx context.Context, node *e2e.Node) error

	// ShowNodeLogs prints all logs for the node with the given ID to stdout.
	ShowNodeLogs(ctx context.Context, node *e2e.Node) error

	// TailNodeLogs tails the logs for a single node, if this is supported by
	// the infrastructure provider.
	TailNodeLogs(ctx context.Context, node *e2e.Node) error

	//
	// Node process management.
	//

	// KillNodeProcess sends SIGKILL to a node's process.
	KillNodeProcess(ctx context.Context, node *e2e.Node) error

	// StartNodeProcess will start a stopped node's process. Assumes that the
	// node's infrastructure has previously been provisioned using
	// ProvisionNode.
	StartNodeProcess(ctx context.Context, node *e2e.Node) error

	// PauseNodeProcess sends a signal to the node's process to pause it.
	PauseNodeProcess(ctx context.Context, node *e2e.Node) error

	// UnpauseNodeProcess resumes a paused node's process.
	UnpauseNodeProcess(ctx context.Context, node *e2e.Node) error

	// TerminateNodeProcess sends SIGTERM to a node's process.
	TerminateNodeProcess(ctx context.Context, node *e2e.Node) error
}
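The point of the interface is that runner logic can stay provider-agnostic. A hedged sketch of a helper written purely against TestnetInfra (the function is hypothetical and not in this changeset; context, time, and the e2e package are assumed to be imported):

	// restartNode bounces a node's process using only the interface, so it
	// works unchanged with the Docker provider or any future provider.
	func restartNode(ctx context.Context, ti TestnetInfra, node *e2e.Node) error {
		if err := ti.TerminateNodeProcess(ctx, node); err != nil {
			return err
		}
		time.Sleep(10 * time.Second) // same settling delay the runner uses after perturbations
		return ti.StartNodeProcess(ctx, node)
	}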
@@ -1,4 +1,3 @@
//nolint: gosec
package e2e

import (
@@ -467,7 +466,7 @@ func (n Node) AddressRPC() string {

// Client returns an RPC client for a node.
func (n Node) Client() (*rpchttp.HTTP, error) {
	return rpchttp.New(fmt.Sprintf("http://127.0.0.1:%v", n.ProxyPort))
	return rpchttp.New(fmt.Sprintf("http://%s", n.AddressRPC()))
}

// Stateless returns true if the node is either a seed node or a light node
@@ -481,6 +480,8 @@ type keyGenerator struct {
}

func newKeyGenerator(seed int64) *keyGenerator {
	// nolint: gosec
	// G404: Use of weak random number generator (math/rand instead of crypto/rand)
	return &keyGenerator{
		random: rand.New(rand.NewSource(seed)),
	}
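The practical effect of the Client() change is that the RPC client now dials the node's own RPC address instead of the host-mapped proxy port. A short sketch of the call site (the node name is hypothetical):

	node := testnet.LookupNode("validator01")
	client, err := node.Client() // now http://<node's RPC address>, previously http://127.0.0.1:<ProxyPort>
	if err != nil {
		return err
	}
	status, err := client.Status(ctx) // any rpchttp call now goes straight to the node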
@@ -1,70 +1,32 @@
package main

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/tendermint/tendermint/libs/log"
	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/test/e2e/pkg/infra"
)

// Cleanup removes the Docker Compose containers and testnet directory.
func Cleanup(logger log.Logger, testnet *e2e.Testnet) error {
	err := cleanupDocker(logger)
	if err != nil {
		return err
// Cleanup destroys all infrastructure and removes all generated testnet files.
func Cleanup(ctx context.Context, logger log.Logger, testnetDir string, ti infra.TestnetInfra) error {
	if testnetDir == "" {
		return errors.New("no testnet directory set")
	}
	return cleanupDir(logger, testnet.Dir)
}

// cleanupDocker removes all E2E resources (with label e2e=True), regardless
// of testnet.
func cleanupDocker(logger log.Logger) error {
	logger.Info("Removing Docker containers and networks")

	// GNU xargs requires the -r flag to not run when input is empty, macOS
	// does this by default. Ugly, but works.
	xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)`

	err := exec("bash", "-c", fmt.Sprintf(
		"docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR))
	if err != nil {
	if err := ti.Cleanup(ctx); err != nil {
		return err
	}

	return exec("bash", "-c", fmt.Sprintf(
		"docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR))
}

// cleanupDir cleans up a testnet directory
func cleanupDir(logger log.Logger, dir string) error {
	if dir == "" {
		return errors.New("no directory set")
	}

	_, err := os.Stat(dir)
	_, err := os.Stat(testnetDir)
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return err
	}

	logger.Info(fmt.Sprintf("Removing testnet directory %q", dir))

	// On Linux, some local files in the volume will be owned by root since Tendermint
	// runs as root inside the container, so we need to clean them up from within a
	// container running as root too.
	absDir, err := filepath.Abs(dir)
	if err != nil {
		return err
	}
	err = execDocker("run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir),
		"tendermint/e2e-node", "sh", "-c", "rm -rf /network/*/")
	if err != nil {
		return err
	}

	return os.RemoveAll(dir)
	logger.Info(fmt.Sprintf("Removing testnet directory %q", testnetDir))
	return os.RemoveAll(testnetDir)
}

@@ -1,50 +0,0 @@
//nolint: gosec
package main

import (
	"fmt"
	"os"
	osexec "os/exec"
	"path/filepath"
)

// execute executes a shell command.
func exec(args ...string) error {
	cmd := osexec.Command(args[0], args[1:]...)
	out, err := cmd.CombinedOutput()
	switch err := err.(type) {
	case nil:
		return nil
	case *osexec.ExitError:
		return fmt.Errorf("failed to run %q:\n%v", args, string(out))
	default:
		return err
	}
}

// execVerbose executes a shell command while displaying its output.
func execVerbose(args ...string) error {
	cmd := osexec.Command(args[0], args[1:]...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

// execCompose runs a Docker Compose command for a testnet.
func execCompose(dir string, args ...string) error {
	return exec(append(
		[]string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")},
		args...)...)
}

// execComposeVerbose runs a Docker Compose command for a testnet and displays its output.
func execComposeVerbose(dir string, args ...string) error {
	return execVerbose(append(
		[]string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")},
		args...)...)
}

// execDocker runs a Docker command.
func execDocker(args ...string) error {
	return exec(append([]string{"docker"}, args...)...)
}
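These helpers are not simply deleted; they reappear as a shared, context-aware package under test/e2e/pkg/exec. That package's full API is not shown in this excerpt, so the following is only a sketch inferred from the call sites elsewhere in the diff (the arguments are made up):

	err := exec.Command(ctx, "docker", "container", "ls", "-qa")     // run, return an error on failure
	err = exec.CommandVerbose(ctx, "./build/tests", "-test.count=1") // run, streaming output to the console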
@@ -13,6 +13,8 @@ import (

	"github.com/tendermint/tendermint/libs/log"
	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/test/e2e/pkg/infra"
	"github.com/tendermint/tendermint/test/e2e/pkg/infra/docker"
)

const randomSeed = 2308084734268
@@ -33,6 +35,7 @@ func main() {
type CLI struct {
	root     *cobra.Command
	testnet  *e2e.Testnet
	infra    infra.TestnetInfra
	preserve bool
}

@@ -53,12 +56,23 @@ func NewCLI(logger log.Logger) *CLI {
			if err != nil {
				return err
			}
			providerID, err := cmd.Flags().GetString("provider")
			if err != nil {
				return err
			}
			switch providerID {
			case "docker":
				cli.infra = docker.NewTestnetInfra(logger, testnet)
				logger.Info("Using Docker-based infrastructure provider")
			default:
				return fmt.Errorf("unrecognized infrastructure provider ID: %s", providerID)
			}

			cli.testnet = testnet
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) (err error) {
			if err = Cleanup(logger, cli.testnet); err != nil {
			if err = Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil {
				return err
			}
			defer func() {
@@ -67,11 +81,11 @@ func NewCLI(logger log.Logger) *CLI {
				} else if err != nil {
					logger.Info("Preserving testnet that encountered error",
						"err", err)
				} else if err := Cleanup(logger, cli.testnet); err != nil {
				} else if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil {
					logger.Error("error cleaning up testnet contents", "err", err)
				}
			}()
			if err = Setup(logger, cli.testnet); err != nil {
			if err = Setup(cmd.Context(), logger, cli.testnet, cli.infra); err != nil {
				return err
			}

@@ -87,7 +101,7 @@ func NewCLI(logger log.Logger) *CLI {
				chLoadResult <- Load(lctx, logger, r, cli.testnet)
			}()
			startAt := time.Now()
			if err = Start(ctx, logger, cli.testnet); err != nil {
			if err = Start(ctx, logger, cli.testnet, cli.infra); err != nil {
				return err
			}

@@ -96,7 +110,7 @@ func NewCLI(logger log.Logger) *CLI {
			}

			if cli.testnet.HasPerturbations() {
				if err = Perturb(ctx, logger, cli.testnet); err != nil {
				if err = Perturb(ctx, logger, cli.testnet, cli.infra); err != nil {
					return err
				}
				if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // allow some txs to go through
@@ -134,7 +148,7 @@ func NewCLI(logger log.Logger) *CLI {
			if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // wait for network to settle before tests
				return err
			}
			if err := Test(cli.testnet); err != nil {
			if err := Test(ctx, cli.testnet); err != nil {
				return err
			}
			return nil
@@ -144,6 +158,8 @@ func NewCLI(logger log.Logger) *CLI {
	cli.root.PersistentFlags().StringP("file", "f", "", "Testnet TOML manifest")
	_ = cli.root.MarkPersistentFlagRequired("file")

	cli.root.PersistentFlags().String("provider", "docker", "Which infrastructure provider to use")

	cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false,
		"Preserves the running of the test net after tests are completed")

@@ -156,7 +172,7 @@ func NewCLI(logger log.Logger) *CLI {
		Use:   "setup",
		Short: "Generates the testnet directory and configuration",
		RunE: func(cmd *cobra.Command, args []string) error {
			return Setup(logger, cli.testnet)
			return Setup(cmd.Context(), logger, cli.testnet, cli.infra)
		},
	})

@@ -166,12 +182,12 @@ func NewCLI(logger log.Logger) *CLI {
		RunE: func(cmd *cobra.Command, args []string) error {
			_, err := os.Stat(cli.testnet.Dir)
			if os.IsNotExist(err) {
				err = Setup(logger, cli.testnet)
				err = Setup(cmd.Context(), logger, cli.testnet, cli.infra)
			}
			if err != nil {
				return err
			}
			return Start(cmd.Context(), logger, cli.testnet)
			return Start(cmd.Context(), logger, cli.testnet, cli.infra)
		},
	})

@@ -179,7 +195,7 @@ func NewCLI(logger log.Logger) *CLI {
		Use:   "perturb",
		Short: "Perturbs the Docker testnet, e.g. by restarting or disconnecting nodes",
		RunE: func(cmd *cobra.Command, args []string) error {
			return Perturb(cmd.Context(), logger, cli.testnet)
			return Perturb(cmd.Context(), logger, cli.testnet, cli.infra)
		},
	})

@@ -196,7 +212,7 @@ func NewCLI(logger log.Logger) *CLI {
		Short: "Stops the Docker testnet",
		RunE: func(cmd *cobra.Command, args []string) error {
			logger.Info("Stopping testnet")
			return execCompose(cli.testnet.Dir, "down")
			return cli.infra.Stop(cmd.Context())
		},
	})

@@ -205,7 +221,7 @@ func NewCLI(logger log.Logger) *CLI {
		Short: "Pauses the Docker testnet",
		RunE: func(cmd *cobra.Command, args []string) error {
			logger.Info("Pausing testnet")
			return execCompose(cli.testnet.Dir, "pause")
			return cli.infra.Pause(cmd.Context())
		},
	})

@@ -214,7 +230,7 @@ func NewCLI(logger log.Logger) *CLI {
		Short: "Resumes the Docker testnet",
		RunE: func(cmd *cobra.Command, args []string) error {
			logger.Info("Resuming testnet")
			return execCompose(cli.testnet.Dir, "unpause")
			return cli.infra.Unpause(cmd.Context())
		},
	})

@@ -259,7 +275,7 @@ func NewCLI(logger log.Logger) *CLI {
		Use:   "test",
		Short: "Runs test cases against a running testnet",
		RunE: func(cmd *cobra.Command, args []string) error {
			return Test(cli.testnet)
			return Test(cmd.Context(), cli.testnet)
		},
	})

@@ -267,17 +283,24 @@ func NewCLI(logger log.Logger) *CLI {
		Use:   "cleanup",
		Short: "Removes the testnet directory",
		RunE: func(cmd *cobra.Command, args []string) error {
			return Cleanup(logger, cli.testnet)
			return Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra)
		},
	})

	cli.root.AddCommand(&cobra.Command{
		Use:     "logs [node]",
		Short:   "Shows the testnet or a specefic node's logs",
		Short:   "Shows the testnet or a specific node's logs",
		Example: "runner logs validator03",
		Args:    cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return execComposeVerbose(cli.testnet.Dir, append([]string{"logs", "--no-color"}, args...)...)
			if len(args) > 0 {
				node := cli.testnet.LookupNode(args[0])
				if node == nil {
					return fmt.Errorf("no such node: %s", args[0])
				}
				return cli.infra.ShowNodeLogs(cmd.Context(), node)
			}
			return cli.infra.ShowLogs(cmd.Context())
		},
	})

@@ -287,9 +310,13 @@ func NewCLI(logger log.Logger) *CLI {
		Args: cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 1 {
				return execComposeVerbose(cli.testnet.Dir, "logs", "--follow", args[0])
				node := cli.testnet.LookupNode(args[0])
				if node == nil {
					return fmt.Errorf("no such node: %s", args[0])
				}
				return cli.infra.TailNodeLogs(cmd.Context(), node)
			}
			return execComposeVerbose(cli.testnet.Dir, "logs", "--follow")
			return cli.infra.TailLogs(cmd.Context())
		},
	})

@@ -302,20 +329,20 @@ func NewCLI(logger log.Logger) *CLI {
Min Block Interval
Max Block Interval
over a 100 block sampling period.


Does not run any perbutations.
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			if err := Cleanup(logger, cli.testnet); err != nil {
			if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil {
				return err
			}
			defer func() {
				if err := Cleanup(logger, cli.testnet); err != nil {
				if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil {
					logger.Error("error cleaning up testnet contents", "err", err)
				}
			}()

			if err := Setup(logger, cli.testnet); err != nil {
			if err := Setup(cmd.Context(), logger, cli.testnet, cli.infra); err != nil {
				return err
			}

@@ -331,7 +358,7 @@ Does not run any perbutations.
				chLoadResult <- Load(lctx, logger, r, cli.testnet)
			}()

			if err := Start(ctx, logger, cli.testnet); err != nil {
			if err := Start(ctx, logger, cli.testnet, cli.infra); err != nil {
				return err
			}

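The new --provider flag leaves room for more back ends. A purely hypothetical sketch of how a second provider could slot into the switch above (the k8s package and its constructor do not exist in this changeset):

	switch providerID {
	case "docker":
		cli.infra = docker.NewTestnetInfra(logger, testnet)
	case "k8s": // illustrative only; not implemented here
		cli.infra = k8s.NewTestnetInfra(logger, testnet)
	default:
		return fmt.Errorf("unrecognized infrastructure provider ID: %s", providerID)
	}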
@@ -8,10 +8,11 @@ import (
	"github.com/tendermint/tendermint/libs/log"
	rpctypes "github.com/tendermint/tendermint/rpc/coretypes"
	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/test/e2e/pkg/infra"
)

// Perturbs a running testnet.
func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error {
func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error {
	timer := time.NewTimer(0) // first tick fires immediately; reset below
	defer timer.Stop()

@@ -21,7 +22,7 @@ func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error
			case <-ctx.Done():
				return ctx.Err()
			case <-timer.C:
				_, err := PerturbNode(ctx, logger, node, perturbation)
				_, err := PerturbNode(ctx, logger, node, perturbation, ti)
				if err != nil {
					return err
				}
@@ -36,46 +37,45 @@ func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error

// PerturbNode perturbs a node with a given perturbation, returning its status
// after recovering.
func PerturbNode(ctx context.Context, logger log.Logger, node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) {
	testnet := node.Testnet
func PerturbNode(ctx context.Context, logger log.Logger, node *e2e.Node, perturbation e2e.Perturbation, ti infra.TestnetInfra) (*rpctypes.ResultStatus, error) {
	switch perturbation {
	case e2e.PerturbationDisconnect:
		logger.Info(fmt.Sprintf("Disconnecting node %v...", node.Name))
		if err := execDocker("network", "disconnect", testnet.Name+"_"+testnet.Name, node.Name); err != nil {
		if err := ti.DisconnectNode(ctx, node); err != nil {
			return nil, err
		}
		time.Sleep(10 * time.Second)
		if err := execDocker("network", "connect", testnet.Name+"_"+testnet.Name, node.Name); err != nil {
		if err := ti.ConnectNode(ctx, node); err != nil {
			return nil, err
		}

	case e2e.PerturbationKill:
		logger.Info(fmt.Sprintf("Killing node %v...", node.Name))
		if err := execCompose(testnet.Dir, "kill", "-s", "SIGKILL", node.Name); err != nil {
		if err := ti.KillNodeProcess(ctx, node); err != nil {
			return nil, err
		}
		time.Sleep(10 * time.Second)
		if err := execCompose(testnet.Dir, "start", node.Name); err != nil {
		if err := ti.StartNodeProcess(ctx, node); err != nil {
			return nil, err
		}

	case e2e.PerturbationPause:
		logger.Info(fmt.Sprintf("Pausing node %v...", node.Name))
		if err := execCompose(testnet.Dir, "pause", node.Name); err != nil {
		if err := ti.PauseNodeProcess(ctx, node); err != nil {
			return nil, err
		}
		time.Sleep(10 * time.Second)
		if err := execCompose(testnet.Dir, "unpause", node.Name); err != nil {
		if err := ti.UnpauseNodeProcess(ctx, node); err != nil {
			return nil, err
		}

	case e2e.PerturbationRestart:
		logger.Info(fmt.Sprintf("Restarting node %v...", node.Name))
		if err := execCompose(testnet.Dir, "kill", "-s", "SIGTERM", node.Name); err != nil {
		if err := ti.TerminateNodeProcess(ctx, node); err != nil {
			return nil, err
		}
		time.Sleep(10 * time.Second)
		if err := execCompose(testnet.Dir, "start", node.Name); err != nil {
		if err := ti.StartNodeProcess(ctx, node); err != nil {
			return nil, err
		}

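With the provider threaded through, a single perturbation can be driven like this (hedged sketch; the chosen perturbation and the use of the returned status are illustrative):

	status, err := PerturbNode(ctx, logger, node, e2e.PerturbationRestart, ti)
	if err != nil {
		return err
	}
	logger.Info("node recovered", "height", status.SyncInfo.LatestBlockHeight)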
@@ -23,7 +23,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty
		clients         = map[string]*rpchttp.HTTP{}
		lastHeight      int64
		lastIncrease    = time.Now()
		nodesAtHeight   = map[string]struct{}{}
		nodesAtHeight   = map[string]int64{}
		numRunningNodes int
	)
	if height == 0 {
@@ -85,7 +85,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty

				// add this node to the set of target
				// height nodes
				nodesAtHeight[node.Name] = struct{}{}
				nodesAtHeight[node.Name] = result.SyncInfo.LatestBlockHeight

				// if not all of the nodes that we
				// have clients for have reached the
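The bookkeeping change is small but worth spelling out: the map no longer just marks a node as having reached the target, it remembers the latest height each node reported. A sketch (node name hypothetical):

	nodesAtHeight := map[string]int64{}
	nodesAtHeight["validator01"] = result.SyncInfo.LatestBlockHeight // record the height, not mere membership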
@@ -1,8 +1,8 @@
// nolint: gosec
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
@@ -12,7 +12,6 @@ import (
	"regexp"
	"sort"
	"strings"
	"text/template"
	"time"

	"github.com/BurntSushi/toml"
@@ -22,6 +21,7 @@ import (
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/test/e2e/pkg/infra"
	"github.com/tendermint/tendermint/types"
)

@@ -39,7 +39,7 @@ const (
)

// Setup sets up the testnet configuration.
func Setup(logger log.Logger, testnet *e2e.Testnet) error {
func Setup(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error {
	logger.Info(fmt.Sprintf("Generating testnet files in %q", testnet.Dir))

	err := os.MkdirAll(testnet.Dir, os.ModePerm)
@@ -47,15 +47,6 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error {
		return err
	}

	compose, err := MakeDockerCompose(testnet)
	if err != nil {
		return err
	}
	err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644)
	if err != nil {
		return err
	}

	genesis, err := MakeGenesis(testnet)
	if err != nil {
		return err
@@ -92,6 +83,8 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error {
		if err != nil {
			return err
		}
		// nolint: gosec
		// G306: Expect WriteFile permissions to be 0600 or less
		err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644)
		if err != nil {
			return err
@@ -131,70 +124,13 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error {
		}
	}

	if err := ti.Setup(ctx); err != nil {
		return err
	}

	return nil
}

// MakeDockerCompose generates a Docker Compose config for a testnet.
func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) {
	// Must use version 2 Docker Compose format, to support IPv6.
	tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{
		"addUint32": func(x, y uint32) uint32 {
			return x + y
		},
		"isBuiltin": func(protocol e2e.Protocol, mode e2e.Mode) bool {
			return mode == e2e.ModeLight || protocol == e2e.ProtocolBuiltin
		},
	}).Parse(`version: '2.4'

networks:
  {{ .Name }}:
    labels:
      e2e: true
    driver: bridge
{{- if .IPv6 }}
    enable_ipv6: true
{{- end }}
    ipam:
      driver: default
      config:
      - subnet: {{ .IP }}

services:
{{- range .Nodes }}
  {{ .Name }}:
    labels:
      e2e: true
    container_name: {{ .Name }}
    image: tendermint/e2e-node
{{- if isBuiltin $.ABCIProtocol .Mode }}
    entrypoint: /usr/bin/entrypoint-builtin
{{- else if .LogLevel }}
    command: start --log-level {{ .LogLevel }}
{{- end }}
    init: true
    ports:
    - 26656
    - {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660
    - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657
    - 6060
    volumes:
    - ./{{ .Name }}:/tendermint
    networks:
      {{ $.Name }}:
        ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }}

{{end}}`)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	err = tmpl.Execute(&buf, testnet)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// MakeGenesis generates a genesis document.
func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) {
	genesis := types.GenesisDoc{
@@ -421,5 +357,7 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error {
	}
	bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height)))
	bz = regexp.MustCompile(`(?m)^trust-hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash)))
	// nolint: gosec
	// G306: Expect WriteFile permissions to be 0600 or less
	return os.WriteFile(cfgPath, bz, 0644)
}

@@ -8,9 +8,10 @@ import (

	"github.com/tendermint/tendermint/libs/log"
	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/test/e2e/pkg/infra"
)

func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error {
func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error {
	if len(testnet.Nodes) == 0 {
		return fmt.Errorf("no nodes in testnet")
	}
@@ -44,7 +45,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error {
	for len(nodeQueue) > 0 && nodeQueue[0].StartAt == 0 {
		node := nodeQueue[0]
		nodeQueue = nodeQueue[1:]
		if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil {
		if err := ti.StartNode(ctx, node); err != nil {
			return err
		}

@@ -58,7 +59,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error {
			return err
		}
		node.HasStarted = true
		logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort))
		logger.Info(fmt.Sprintf("Node %v up on http://%v:%v", node.IP, node.Name, node.ProxyPort))
	}

	networkHeight := testnet.InitialHeight
@@ -106,7 +107,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error {
			}
		}

		if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil {
		if err := ti.StartNode(ctx, node); err != nil {
			return err
		}

@@ -128,8 +129,8 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error {
		} else {
			lastNodeHeight = status.SyncInfo.LatestBlockHeight
		}
		logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v at height %v",
			node.Name, node.ProxyPort, lastNodeHeight))
		logger.Info(fmt.Sprintf("Node %v up on http://%v:%v at height %v",
			node.IP, node.Name, node.ProxyPort, lastNodeHeight))
	}

	return nil

@@ -1,17 +1,19 @@
package main

import (
	"context"
	"os"

	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/test/e2e/pkg/exec"
)

// Test runs test cases under tests/
func Test(testnet *e2e.Testnet) error {
func Test(ctx context.Context, testnet *e2e.Testnet) error {
	err := os.Setenv("E2E_MANIFEST", testnet.File)
	if err != nil {
		return err
	}

	return execVerbose("./build/tests", "-test.count=1", "-test.v")
	return exec.CommandVerbose(ctx, "./build/tests", "-test.count=1")
}

@@ -7,37 +7,48 @@ import (
	"github.com/stretchr/testify/require"

	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
	"github.com/tendermint/tendermint/types"
)

// Tests that all nodes have peered with each other, regardless of discovery method.
func TestNet_Peers(t *testing.T) {
	// FIXME Skip test since nodes aren't always able to fully mesh
	t.SkipNow()

	testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) {
		client, err := node.Client()
		require.NoError(t, err)
		netInfo, err := client.NetInfo(ctx)
		require.NoError(t, err)

		require.Equal(t, len(node.Testnet.Nodes)-1, netInfo.NPeers,
			"node is not fully meshed with peers")

		// FIXME: https://github.com/tendermint/tendermint/issues/8848
		// We should be able to assert that we can discover all peers in a network
		expectedPeers := len(node.Testnet.Nodes)
		peers := make(map[string]*e2e.Node, 0)
		seen := map[string]bool{}
		for _, n := range node.Testnet.Nodes {
			seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself
		}
		for _, peerInfo := range netInfo.Peers {
			id := peerInfo.ID
			peer := node.Testnet.LookupNode(string(id))
			require.NotNil(t, peer, "unknown node %v", id)
			require.Contains(t, peerInfo.URL, peer.IP.String(),
				"unexpected IP address for peer %v", id)
			seen[string(id)] = true
			// we never save light client addresses as they use RPC or ourselves
			if n.Mode == e2e.ModeLight || n.Name == node.Name {
				expectedPeers--
				continue
			}
			peers[string(types.NodeIDFromPubKey(n.NodeKey.PubKey()))] = n
			seen[n.Name] = false
		}

		for name := range seen {
			require.True(t, seen[name], "node %v not peered with %v", node.Name, name)
		require.GreaterOrEqual(t, netInfo.NPeers, expectedPeers-1,
			"node is not fully meshed with peers")

		for _, peerInfo := range netInfo.Peers {
			id := string(peerInfo.ID)
			peer, ok := peers[id]
			require.True(t, ok, "unknown node %v", id)
			require.Contains(t, peerInfo.URL, peer.IP.String(),
				"unexpected IP address for peer %v", id)
			seen[peer.Name] = true
		}

		// FIXME: https://github.com/tendermint/tendermint/issues/8848
		// We should be able to assert that we can discover all peers in a network
		// for name := range seen {
		// 	require.True(t, seen[name], "node %v not peered with %v", node.Name, name)
		// }
	})
}

@@ -7,8 +7,8 @@ import (
	"strconv"
	"strings"

	tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
	"github.com/tendermint/tendermint/libs/bytes"
	tmstrings "github.com/tendermint/tendermint/libs/strings"
	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)


@@ -9,7 +9,7 @@ import (
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/secp256k1"
	"github.com/tendermint/tendermint/crypto/sr25519"
	tmstrings "github.com/tendermint/tendermint/libs/strings"
	tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

@@ -13,7 +13,8 @@ import (
)

const (
	nilVoteStr string = "nil-Vote"
	absentVoteStr string = "Vote{absent}"
	nilVoteStr    string = "nil"

	// The maximum supported number of bytes in a vote extension.
	MaxVoteExtensionSize int = 1024 * 1024
@@ -189,7 +190,7 @@ func (vote *Vote) Copy() *Vote {
// 10. timestamp
func (vote *Vote) String() string {
	if vote == nil {
		return nilVoteStr
		return absentVoteStr
	}

	var typeString string
@@ -202,16 +203,22 @@ func (vote *Vote) String() string {
		panic("Unknown vote type")
	}

	return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X %X @ %s}",
	var blockHashString string
	if len(vote.BlockID.Hash) > 0 {
		blockHashString = fmt.Sprintf("%X", tmbytes.Fingerprint(vote.BlockID.Hash))
	} else {
		blockHashString = nilVoteStr
	}

	return fmt.Sprintf("Vote{%v:%X %v/%d %s %s %X %d @ %s}",
		vote.ValidatorIndex,
		tmbytes.Fingerprint(vote.ValidatorAddress),
		vote.Height,
		vote.Round,
		vote.Type,
		typeString,
		tmbytes.Fingerprint(vote.BlockID.Hash),
		blockHashString,
		tmbytes.Fingerprint(vote.Signature),
		tmbytes.Fingerprint(vote.Extension),
		len(vote.Extension),
		CanonicalTime(vote.Timestamp),
	)
}

@@ -505,7 +505,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string {
	voteStrings := make([]string, len(voteSet.votes))
	for i, vote := range voteSet.votes {
		if vote == nil {
			voteStrings[i] = nilVoteStr
			voteStrings[i] = absentVoteStr
		} else {
			voteStrings[i] = vote.String()
		}
@@ -570,7 +570,7 @@ func (voteSet *VoteSet) voteStrings() []string {
	voteStrings := make([]string, len(voteSet.votes))
	for i, vote := range voteSet.votes {
		if vote == nil {
			voteStrings[i] = nilVoteStr
			voteStrings[i] = absentVoteStr
		} else {
			voteStrings[i] = vote.String()
		}

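For readers comparing the two formats: the old string printed the raw signed-message type plus a fingerprint of a possibly empty block hash, while the new one prints the short type name, a "nil" marker for empty block hashes, and the vote-extension length instead of its fingerprint. An example of the new rendering, taken from the updated test vectors below:

	// Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 0 @ 2017-12-25T03:00:01.234Z}
	// i.e. validatorIndex:validatorAddress height/round typeName blockHash signature extensionLen @ timestamp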
@@ -2,6 +2,7 @@ package types

import (
	"context"
	"fmt"
	"testing"
	"time"

@@ -16,6 +17,22 @@ import (
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

const (
	//nolint: lll
	preCommitTestStr = `Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 0 @ 2017-12-25T03:00:01.234Z}`
	//nolint: lll
	preVoteTestStr = `Vote{56789:6AF1F4111082 12345/2 Prevote 8B01023386C3 000000000000 0 @ 2017-12-25T03:00:01.234Z}`
)

var (
	// nolint: lll
	nilVoteTestStr = fmt.Sprintf(`Vote{56789:6AF1F4111082 12345/2 Precommit %s 000000000000 0 @ 2017-12-25T03:00:01.234Z}`, nilVoteStr)
	formatNonEmptyVoteExtensionFn = func(voteExtensionLength int) string {
		// nolint: lll
		return fmt.Sprintf(`Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 %d @ 2017-12-25T03:00:01.234Z}`, voteExtensionLength)
	}
)

func examplePrevote(t *testing.T) *Vote {
	t.Helper()
	return exampleVote(t, byte(tmproto.PrevoteType))
@@ -321,16 +338,43 @@ func TestVoteVerify(t *testing.T) {
}

func TestVoteString(t *testing.T) {
	str := examplePrecommit(t).String()
	expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests
	if str != expected {
		t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str)
	testcases := map[string]struct {
		vote           *Vote
		expectedResult string
	}{
		"pre-commit": {
			vote:           examplePrecommit(t),
			expectedResult: preCommitTestStr,
		},
		"pre-vote": {
			vote:           examplePrevote(t),
			expectedResult: preVoteTestStr,
		},
		"absent vote": {
			expectedResult: absentVoteStr,
		},
		"nil vote": {
			vote: func() *Vote {
				v := examplePrecommit(t)
				v.BlockID.Hash = nil
				return v
			}(),
			expectedResult: nilVoteTestStr,
		},
		"non-empty vote extension": {
			vote: func() *Vote {
				v := examplePrecommit(t)
				v.Extension = []byte{1, 2}
				return v
			}(),
			expectedResult: formatNonEmptyVoteExtensionFn(2),
		},
	}

	str2 := examplePrevote(t).String()
	expected = `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests
	if str2 != expected {
		t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str2)
	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.expectedResult, tc.vote.String())
		})
	}
}