Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-18 10:42:50 +00:00

Compare commits: v0.35.6...v035-testi (75 commits)
Commits:

cf3bcbaa4c, 222a25284d, cae81ce43d, 3e8daaeb44, aa2d6ee64a, 2b189852b0, 3790968156, 9e64c95e56,
cb93d3b587, f98de20f7e, b17f044a1c, 451e697331, a8c419f126, 20c1ffd03a, e3292a48e3, 6a354a1e8d,
2750cb26a9, a04759c4f6, 1daf7b939d, 09c54a8d5c, 156c305b08, bc49f66c35, 9b02094827, bf1ab9c3d8,
da83edc588, 25f6557174, 047d7c927b, 49788adde5, 91b32b93cd, 3940d64ba6, babae90f8f, 210e8a02f7,
e414d0a878, e66d76f6e9, fbcb965c75, 6a646f366e, dc0e77f41e, 815e611c68, 01984cb3b2, 11456f9edf,
b5f92f5d2e, 288cb31040, e2d2c04aac, 204281fa66, 486370ac68, 978f754ad3, c4ef566071, f19e52e6f2,
19b98c7005, 826f224c2d, 2df4c2b19d, 6f4ef72964, 3398f37979, 8ef63fe3d9, 9daea43375, df9363c67c,
24701cd587, e9c87a3c49, 034a9f8422, 4322f7d0b9, 83526cacbc, 25d724b920, 3945cec115, 74c6d8100d,
e2d01cdcff, bee6597b28, ce8284c027, d02f58e191, 28c38522e0, 0b63e293f1, af0590a819, 46c27b45ab,
3c29b6996b, 138be1f7b0, 98411962c6

.github/workflows/janitor.yml (vendored): 2 changes

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 3
     steps:
-      - uses: styfle/cancel-workflow-action@0.9.1
+      - uses: styfle/cancel-workflow-action@0.10.0
         with:
           workflow_id: 1041851,1401230,2837803
           access_token: ${{ github.token }}

CHANGELOG.md: 20 changes

@@ -2,9 +2,29 @@
 
 Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
 
+## v0.35.7
+
+June 16, 2022
+
+### BUG FIXES
+
+- [p2p] [\#8692](https://github.com/tendermint/tendermint/pull/8692) scale the number of stored peers by the configured maximum connections (#8684)
+- [rpc] [\#8715](https://github.com/tendermint/tendermint/pull/8715) always close http bodies (backport #8712)
+- [p2p] [\#8760](https://github.com/tendermint/tendermint/pull/8760) accept should not abort on first error (backport #8759)
+
+### BREAKING CHANGES
+
+- P2P Protocol
+
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)
+
 ## v0.35.6
 
 June 3, 2022
 
 ### FEATURES
 
 - [migrate] [\#8672](https://github.com/tendermint/tendermint/pull/8672) provide function for database production (backport #8614) (@tychoish)

@@ -2,7 +2,7 @@
 
 Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
 
-## v0.35.7
+## v0.35.8
 
 Month DD, YYYY
 
@@ -22,6 +22,10 @@ Special thanks to external contributors on this release:
 
 ### FEATURES
 
+- [cli] [\#8675] Add command to force compact goleveldb databases (@cmwaters)
+
 ### IMPROVEMENTS
 
 ### BUG FIXES
 
+- [mempool] \#8944 Fix unbounded heap growth in the priority mempool. (@creachadair)

@@ -801,3 +801,18 @@ func (_m *Client) String() string
 func (_m *Client) Wait() {
 	_m.Called()
 }
+
+type mockConstructorTestingTNewClient interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewClient(t mockConstructorTestingTNewClient) *Client {
+	mock := &Client{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
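
The generated constructor above ties mock verification to the test lifecycle via t.Cleanup. A minimal sketch of how such a mockery-generated constructor is typically used; the mocks import path and the expected method call are illustrative assumptions, not part of this diff:

```go
package example_test

import (
	"testing"

	// Hypothetical import path for the package containing the generated Client mock.
	"github.com/tendermint/tendermint/rpc/client/mocks"
)

func TestClientMock(t *testing.T) {
	// NewClient registers a cleanup that calls AssertExpectations, so any
	// unmet expectation fails the test without an explicit defer.
	c := mocks.NewClient(t)
	c.On("String").Return("mock-client")

	if got := c.String(); got != "mock-client" {
		t.Fatalf("unexpected result: %q", got)
	}
}
```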

cmd/tendermint/commands/compact.go: new file, 69 lines

@@ -0,0 +1,69 @@
+package commands
+
+import (
+	"errors"
+	"path/filepath"
+	"sync"
+
+	"github.com/spf13/cobra"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
+
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+func MakeCompactDBCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "experimental-compact-goleveldb",
+		Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
+		Long: `
+This is a temporary utility command that performs a force compaction on the state
+and blockstores to reduce disk space for a pruning node. This should only be run
+once the node has stopped. This command will likely be omitted in the future after
+the planned refactor to the storage engine.
+
+Currently, only GoLevelDB is supported.
+`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if config.DBBackend != "goleveldb" {
+				return errors.New("compaction is currently only supported with goleveldb")
+			}
+
+			compactGoLevelDBs(config.RootDir, logger)
+			return nil
+		},
+	}
+
+	return cmd
+}
+
+func compactGoLevelDBs(rootDir string, logger log.Logger) {
+	dbNames := []string{"state", "blockstore"}
+	o := &opt.Options{
+		DisableSeeksCompaction: true,
+	}
+	wg := sync.WaitGroup{}
+
+	for _, dbName := range dbNames {
+		dbName := dbName
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			dbPath := filepath.Join(rootDir, "data", dbName+".db")
+			store, err := leveldb.OpenFile(dbPath, o)
+			if err != nil {
+				logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
+				return
+			}
+			defer store.Close()
+
+			logger.Info("starting compaction...", "db", dbPath)
+
+			err = store.CompactRange(util.Range{Start: nil, Limit: nil})
+			if err != nil {
+				logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
+			}
+		}()
+	}
+	wg.Wait()
+}
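
Usage note (an assumption based on the root-command registration in the main.go hunk below, not something this diff documents): after the node process has been stopped, the state and blockstore databases would be compacted by running the new subcommand, e.g. `tendermint experimental-compact-goleveldb`, against the node's home directory.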

@@ -34,9 +34,6 @@ func AddNodeFlags(cmd *cobra.Command) {
 		config.PrivValidator.ListenAddr,
 		"socket address to listen on for connections from external priv-validator process")
 
-	// node flags
-	cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")
-
 	// TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
 	// This check was added to give users an upgrade prompt to use the new flag for syncing.
 	//

@@ -32,6 +32,7 @@ func main() {
 		cmd.InspectCmd,
 		cmd.RollbackStateCmd,
 		cmd.MakeKeyMigrateCommand(),
+		cmd.MakeCompactDBCommand(),
 		debug.DebugCmd,
 		cli.NewCompletionCmd(rootCmd, true),
 	)

@@ -712,6 +712,10 @@ type P2PConfig struct { //nolint: maligned
 	// outbound).
 	MaxConnections uint16 `mapstructure:"max-connections"`
 
+	// MaxOutgoingConnections defines the maximum number of connected peers (inbound and
+	// outbound).
+	MaxOutgoingConnections uint16 `mapstructure:"max-outgoing-connections"`
+
 	// MaxIncomingConnectionAttempts rate limits the number of incoming connection
 	// attempts per IP address.
 	MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`

@@ -774,6 +778,7 @@ func DefaultP2PConfig() *P2PConfig {
 		MaxNumInboundPeers:            40,
 		MaxNumOutboundPeers:           10,
 		MaxConnections:                64,
+		MaxOutgoingConnections:        12,
 		MaxIncomingConnectionAttempts: 100,
 		PersistentPeersMaxDialPeriod:  0 * time.Second,
 		FlushThrottleTimeout:          100 * time.Millisecond,

@@ -833,6 +838,9 @@ func (cfg *P2PConfig) ValidateBasic() error {
 	if cfg.RecvRate < 0 {
 		return errors.New("recv-rate can't be negative")
 	}
+	if cfg.MaxOutgoingConnections > cfg.MaxConnections {
+		return errors.New("max-outgoing-connections cannot be larger than max-connections")
+	}
 	return nil
 }
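
The new field composes with the existing validation as follows. A minimal sketch assuming only the config package API visible in these hunks (DefaultP2PConfig and ValidateBasic):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// Defaults from the hunk above: MaxConnections 64, MaxOutgoingConnections 12.
	cfg := config.DefaultP2PConfig()

	// Violate the invariant that ValidateBasic now checks.
	cfg.MaxOutgoingConnections = cfg.MaxConnections + 1
	if err := cfg.ValidateBasic(); err != nil {
		fmt.Println(err) // "max-outgoing-connections cannot be larger than max-connections"
	}
}
```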

@@ -355,6 +355,10 @@ max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
 
 # Maximum number of connections (inbound and outbound).
 max-connections = {{ .P2P.MaxConnections }}
 
+# Maximum number of connections reserved for outgoing
+# connections. Must be less than max-connections
+max-outgoing-connections = {{ .P2P.MaxOutgoingConnections }}
+
 # Rate limits the number of incoming connection attempts per IP address.
 max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}

go.mod: 20 changes

@@ -5,9 +5,10 @@ go 1.16
 require (
 	github.com/BurntSushi/toml v1.1.0
 	github.com/Workiva/go-datastructures v1.0.53
-	github.com/adlio/schema v1.3.0
+	github.com/adlio/schema v1.3.3
 	github.com/btcsuite/btcd v0.22.1
 	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
+	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
 	github.com/creachadair/atomicfile v0.2.6
 	github.com/creachadair/taskgroup v0.3.2
 	github.com/creachadair/tomledit v0.0.22

@@ -23,10 +24,11 @@ require (
 	github.com/google/orderedcode v0.0.1
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/websocket v1.5.0
+	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/lib/pq v1.10.6
-	github.com/libp2p/go-buffer-pool v0.0.2
+	github.com/libp2p/go-buffer-pool v0.1.0
 	github.com/minio/highwayhash v1.0.2
 	github.com/mroth/weightedrand v0.4.1
 	github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b

@@ -34,18 +36,20 @@ require (
 	github.com/prometheus/client_golang v1.12.2
 	github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
 	github.com/rs/cors v1.8.2
-	github.com/rs/zerolog v1.26.1
+	github.com/rs/zerolog v1.27.0
 	github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
 	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
-	github.com/spf13/cobra v1.4.0
+	github.com/spf13/cobra v1.5.0
 	github.com/spf13/viper v1.12.0
-	github.com/stretchr/testify v1.7.1
+	github.com/stretchr/testify v1.8.0
 	github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
 	github.com/tendermint/tm-db v0.6.6
-	github.com/vektra/mockery/v2 v2.12.3
-	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
-	golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2
+	github.com/vektra/mockery/v2 v2.14.0
+	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
+	golang.org/x/net v0.0.0-20220617184016-355a448f1bc9
 	golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
 	google.golang.org/grpc v1.47.0
+	gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
+	gotest.tools v2.2.0+incompatible // indirect
 	pgregory.net/rapid v0.4.7
 )

go.sum: 159 changes

@@ -1,6 +1,5 @@
4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0=
4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=

@@ -70,6 +69,7 @@ github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZ
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=

@@ -95,8 +95,8 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=

@@ -109,8 +109,8 @@ github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrd
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig=
github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A=
github.com/adlio/schema v1.3.0 h1:eSVYLxYWbm/6ReZBCkLw4Fz7uqC+ZNoPvA39bOwi52A=
github.com/adlio/schema v1.3.0/go.mod h1:51QzxkpeFs6lRY11kPye26IaFPOV+HqEj01t5aXXKfs=
github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I=
github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=

@@ -150,7 +150,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=

@@ -183,6 +182,8 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=

@@ -194,11 +195,11 @@ github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy
github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M=
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=

@@ -214,24 +215,24 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634=
github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg=
github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc=
github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY=

@@ -241,7 +242,8 @@ github.com/creachadair/tomledit v0.0.22 h1:lRtepmrwhzDq+g1gv5ftVn5itgo7CjYbm6abK
github.com/creachadair/tomledit v0.0.22/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4=
github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o=
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

@@ -261,8 +263,13 @@ github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KP
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/cli v20.10.14+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=

@@ -371,6 +378,7 @@ github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=

@@ -387,7 +395,6 @@ github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=

@@ -489,6 +496,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=

@@ -514,7 +523,6 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=

@@ -538,14 +546,12 @@ github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Rep
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=

@@ -602,6 +608,9 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=

@@ -680,14 +689,15 @@ github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3
github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg=
github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/lufeee/execinquery v1.0.0 h1:1XUTuLIVPDlFvUU3LXmmZwHDsolsxXnY67lzhpeqe0I=
github.com/lufeee/execinquery v1.0.0/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=

@@ -750,13 +760,17 @@ github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=

@@ -798,7 +812,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI=
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ=

@@ -828,15 +841,18 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM=
github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=

@@ -851,8 +867,9 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw=
github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI=
github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=

@@ -879,7 +896,6 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=

@@ -893,8 +909,6 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=

@@ -903,14 +917,12 @@ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=

@@ -943,8 +955,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs=
github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=

@@ -954,7 +966,6 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8
github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA=
github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=

@@ -962,7 +973,8 @@ github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dms
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4=
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI=
github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=

@@ -998,7 +1010,6 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=

@@ -1007,10 +1018,10 @@ github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=

@@ -1019,9 +1030,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk=
github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=

@@ -1033,8 +1042,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3
github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

@@ -1043,11 +1053,14 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI=
github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs=
github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo=
github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=

@@ -1080,8 +1093,6 @@ github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6l
github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=

@@ -1095,11 +1106,18 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vektra/mockery/v2 v2.12.3 h1:74h0R+p75tdr3QNwiNz3MXeCwSP/I5bYUbZY6oT4t20=
github.com/vektra/mockery/v2 v2.12.3/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U=
github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/yfqs=
github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=

@@ -1120,7 +1138,6 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w=
gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=

@@ -1190,11 +1207,10 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=

@@ -1240,8 +1256,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -1255,7 +1272,6 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=

@@ -1302,14 +1318,14 @@ golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y=
|
||||
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1377,7 +1393,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1400,8 +1415,8 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1421,7 +1436,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1433,12 +1447,15 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1451,16 +1468,20 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c h1:aFV+BgZ4svzjfabn8ERpuB4JI4N6/rdy1iusx77G3oU=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1498,6 +1519,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@@ -1579,14 +1601,14 @@ golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II=
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||
@@ -1732,7 +1754,6 @@ google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP
|
||||
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
@@ -1761,7 +1782,6 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
@@ -1797,9 +1817,9 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
|
||||
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=
|
||||
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
@@ -1817,10 +1837,13 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@@ -26,3 +26,18 @@ func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) {
func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) {
    _m.Called(_a0, _a1)
}

type mockConstructorTestingTNewConsSyncReactor interface {
    mock.TestingT
    Cleanup(func())
}

// NewConsSyncReactor creates a new instance of ConsSyncReactor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewConsSyncReactor(t mockConstructorTestingTNewConsSyncReactor) *ConsSyncReactor {
    mock := &ConsSyncReactor{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
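The hunk above adds mockery's constructor-injection pattern: the generated constructor binds the mock to the test and defers AssertExpectations to a cleanup hook. As a rough usage sketch (the test name is hypothetical, and the import path for the mocks package is an assumption based on the file shown above):

package consensus_test

import (
    "testing"

    // Assumed package path for the generated mock; adjust to the real one.
    "github.com/tendermint/tendermint/internal/consensus/mocks"
)

func TestSetStateSyncingMetrics(t *testing.T) {
    // The constructor wires the mock to t and registers a cleanup that
    // calls AssertExpectations when the test finishes, so a forgotten
    // expectation fails the test automatically.
    m := mocks.NewConsSyncReactor(t)

    // Declare the call we expect on the mocked method from the hunk header.
    m.On("SetStateSyncingMetrics", float64(1)).Return()

    m.SetStateSyncingMetrics(1)
}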
@@ -119,9 +119,9 @@ func (t *timeoutTicker) timeoutRoutine() {
            // NOTE time.Timer allows duration to be non-positive
            ti = newti
            t.timer.Reset(ti.Duration)
            t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
            t.Logger.Debug("Internal state machine timeout scheduled", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
        case <-t.timer.C:
            t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
            t.Logger.Debug("Internal state machine timeout elapsed", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
            // go routine here guarantees timeoutRoutine doesn't block.
            // Determinism comes from playback in the receiveRoutine.
            // We can eliminate it by merging the timeoutRoutine into receiveRoutine

@@ -57,3 +57,18 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {

    return r0
}

type mockConstructorTestingTNewBlockStore interface {
    mock.TestingT
    Cleanup(func())
}

// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore {
    mock := &BlockStore{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
@@ -242,17 +242,13 @@ func (mem *CListMempool) CheckTx(
    // so we only record the sender for txs still in the mempool.
    if e, ok := mem.txsMap.Load(tx.Key()); ok {
        memTx := e.(*clist.CElement).Value.(*mempoolTx)
        _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true)
        memTx.senders.LoadOrStore(txInfo.SenderID, true)
        // TODO: consider punishing peer for dups,
        // it's non-trivial since invalid txs can become valid,
        // but they can spam the same tx with little cost to them atm.
        if loaded {
            return types.ErrTxInCache
        }
    }

    mem.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash())
    return nil
    return types.ErrTxInCache
}

if ctx == nil {
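The old branch in the hunk above used the second return value of sync.Map.LoadOrStore to detect a duplicate sender; the new code records the sender but no longer branches on it. A minimal, standalone sketch of that detection idiom (the senders map here is an illustrative stand-in for mempoolTx.senders):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var senders sync.Map // peer ID -> seen marker

    // LoadOrStore stores the value if the key is absent, and reports via
    // "loaded" whether the key was already present.
    _, loaded := senders.LoadOrStore(uint16(7), true)
    fmt.Println(loaded) // false: first time we saw peer 7

    _, loaded = senders.LoadOrStore(uint16(7), true)
    fmt.Println(loaded) // true: a duplicate, which the old code mapped to ErrTxInCache
}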
@@ -200,7 +200,7 @@ func TestMempoolUpdate(t *testing.T) {
        err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
        require.NoError(t, err)
        err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{})
        require.NoError(t, err)
        assert.Error(t, err)
    }
// 2. Removes valid txs from the mempool
@@ -305,11 +305,15 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {

    // a must be added to the cache
    err = mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{})
    require.NoError(t, err)
    if assert.Error(t, err) {
        assert.Equal(t, types.ErrTxInCache, err)
    }

    // b must remain in the cache
    err = mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{})
    require.NoError(t, err)
    if assert.Error(t, err) {
        assert.Equal(t, types.ErrTxInCache, err)
    }
}

// 2. An invalid transaction must remain in the cache

@@ -156,6 +156,15 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {

    for _, tx := range protoTxs {
        if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
            if errors.Is(err, types.ErrTxInCache) {
                // if the tx is in the cache,
                // then we've been gossiped a
                // Tx that we've already
                // got. Gossip should be
                // smarter, but it's not a
                // problem.
                continue
            }
            logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err)
        }
    }
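The reactor hunk above treats a sentinel error as a benign signal rather than a failure. A self-contained sketch of the same idiom (ErrAlreadySeen and checkTx are illustrative stand-ins, not the reactor's actual types):

package main

import (
    "errors"
    "fmt"
)

// ErrAlreadySeen plays the role of types.ErrTxInCache: a sentinel that
// marks a duplicate rather than a real failure.
var ErrAlreadySeen = errors.New("tx already in cache")

func checkTx(tx string, seen map[string]bool) error {
    if seen[tx] {
        return ErrAlreadySeen
    }
    seen[tx] = true
    return nil
}

func main() {
    seen := map[string]bool{}
    for _, tx := range []string{"a", "b", "a"} {
        if err := checkTx(tx, seen); err != nil {
            if errors.Is(err, ErrAlreadySeen) {
                continue // duplicate gossip: skip silently
            }
            fmt.Println("checktx failed:", err)
        }
    }
}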
[File diff suppressed because it is too large]
@@ -95,6 +95,18 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
    return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}

// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until
// its callback has finished executing. It fails t if CheckTx fails.
func mustCheckTx(t *testing.T, txmp *TxMempool, spec string) {
    done := make(chan struct{})
    if err := txmp.CheckTx(context.Background(), []byte(spec), func(*abci.Response) {
        close(done)
    }, mempool.TxInfo{}); err != nil {
        t.Fatalf("CheckTx for %q failed: %v", spec, err)
    }
    <-done
}
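The helper's channel dance is a generic way to block until an asynchronous callback has fired exactly once. The same pattern in isolation (self-contained; runAsync is an illustrative stand-in for CheckTx):

package main

import "fmt"

// runAsync stands in for CheckTx: it accepts a callback that fires when
// the asynchronous work completes.
func runAsync(cb func(result string)) {
    go cb("done")
}

func main() {
    done := make(chan struct{})
    var got string
    runAsync(func(result string) {
        got = result
        close(done) // signal completion exactly once
    })
    <-done // block until the callback has run
    fmt.Println(got)
}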
func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
    txs := make([]testTx, numTxs)
    txInfo := mempool.TxInfo{SenderID: peerID}
@@ -196,6 +208,76 @@ func TestTxMempool_Size(t *testing.T) {
    require.Equal(t, int64(2850), txmp.SizeBytes())
}

func TestTxMempool_Eviction(t *testing.T) {
    txmp := setup(t, 0)
    txmp.config.Size = 5
    txmp.config.MaxTxsBytes = 60
    txExists := func(spec string) bool {
        txmp.Lock()
        defer txmp.Unlock()
        key := types.Tx(spec).Key()
        _, ok := txmp.txByKey[key]
        return ok
    }

    // A transaction bigger than the mempool should be rejected even when there
    // are slots available.
    mustCheckTx(t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1")
    require.Equal(t, 0, txmp.Size())

    // Nearly-fill the mempool with a low-priority transaction, to show that it
    // is evicted even when slots are available for a higher-priority tx.
    const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"
    mustCheckTx(t, txmp, bigTx)
    require.Equal(t, 1, txmp.Size()) // bigTx is the only element
    require.True(t, txExists(bigTx))
    require.Equal(t, int64(len(bigTx)), txmp.SizeBytes())

    // The next transaction should evict bigTx, because it is higher priority
    // but does not fit on size.
    mustCheckTx(t, txmp, "key1=0000=25")
    require.True(t, txExists("key1=0000=25"))
    require.False(t, txExists(bigTx))
    require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes())

    // Now fill up the rest of the slots with other transactions.
    mustCheckTx(t, txmp, "key2=0001=5")
    mustCheckTx(t, txmp, "key3=0002=10")
    mustCheckTx(t, txmp, "key4=0003=3")
    mustCheckTx(t, txmp, "key5=0004=3")

    // A new transaction with low priority should be discarded.
    mustCheckTx(t, txmp, "key6=0005=1")
    require.False(t, txExists("key6=0005=1"))

    // A new transaction with higher priority should evict key5, which is the
    // newest of the two transactions with lowest priority.
    mustCheckTx(t, txmp, "key7=0006=7")
    require.True(t, txExists("key7=0006=7"))  // new transaction added
    require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted
    require.True(t, txExists("key4=0003=3"))  // older low-priority tx retained

    // Another new transaction evicts the other low-priority element.
    mustCheckTx(t, txmp, "key8=0007=20")
    require.True(t, txExists("key8=0007=20"))
    require.False(t, txExists("key4=0003=3"))

    // Now the lowest-priority tx is 5, so that should be the next to go.
    mustCheckTx(t, txmp, "key9=0008=9")
    require.True(t, txExists("key9=0008=9"))
    require.False(t, txExists("key2=0001=5"))

    // Add a transaction that requires eviction of multiple lower-priority
    // entries, in order to fit the size of the element.
    mustCheckTx(t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11
    require.True(t, txExists("key1=0000=25"))
    require.True(t, txExists("key8=0007=20"))
    require.True(t, txExists("key10=0123456789abcdef=11"))
    require.False(t, txExists("key3=0002=10"))
    require.False(t, txExists("key9=0008=9"))
    require.False(t, txExists("key7=0006=7"))
}
func TestTxMempool_Flush(t *testing.T) {
    txmp := setup(t, 0)
    txs := checkTxs(t, txmp, 100, 0)
@@ -438,6 +520,51 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
    require.Zero(t, txmp.SizeBytes())
}

func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) {
    txmp := setup(t, 50)
    txmp.config.TTLDuration = 5 * time.Millisecond

    added1 := checkTxs(t, txmp, 25, 0)
    require.Equal(t, len(added1), txmp.Size())

    // Wait a while, then add some more transactions that should not be expired
    // when the first batch TTLs out.
    //
    // ms: 0    1    2    3    4    5    6
    //     ^              ^         ^    ^
    //     |              |         |    +-- Update (triggers pruning)
    //     |              |         +------ first batch expires
    //     |              +-------------- second batch added
    //     +-------------------------- first batch added
    //
    // The exact intervals are not important except that the delta should be
    // large relative to the cost of CheckTx (ms vs. ns is fine here).
    time.Sleep(3 * time.Millisecond)
    added2 := checkTxs(t, txmp, 25, 1)

    // Wait a while longer, so that the first batch will expire.
    time.Sleep(3 * time.Millisecond)

    // Trigger an update so that pruning will occur.
    txmp.Lock()
    defer txmp.Unlock()
    require.NoError(t, txmp.Update(txmp.height+1, nil, nil, nil, nil))

    // All the transactions in the original set should have been purged.
    for _, tx := range added1 {
        if _, ok := txmp.txByKey[tx.tx.Key()]; ok {
            t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key())
        }
    }

    // All the transactions added later should still be around.
    for _, tx := range added2 {
        if _, ok := txmp.txByKey[tx.tx.Key()]; !ok {
            t.Errorf("Transaction %X should still be in the mempool, but is not", tx.tx.Key())
        }
    }
}

func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
    txmp := setup(t, 500)
    txmp.height = 100
@@ -445,7 +572,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {

    tTxs := checkTxs(t, txmp, 100, 0)
    require.Equal(t, len(tTxs), txmp.Size())
    require.Equal(t, 100, txmp.heightIndex.Size())

    // reap 5 txs at the next height -- no txs should expire
    reapedTxs := txmp.ReapMaxTxs(5)
@@ -459,12 +585,10 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
    txmp.Unlock()

    require.Equal(t, 95, txmp.Size())
    require.Equal(t, 95, txmp.heightIndex.Size())

    // check more txs at height 101
    _ = checkTxs(t, txmp, 50, 1)
    require.Equal(t, 145, txmp.Size())
    require.Equal(t, 145, txmp.heightIndex.Size())

    // Reap 5 txs at a height that would expire all the transactions from before
    // the previous Update (height 100).
@@ -485,7 +609,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
    txmp.Unlock()

    require.GreaterOrEqual(t, txmp.Size(), 45)
    require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
}

func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
@@ -1,159 +0,0 @@
package v1

import (
    "container/heap"
    "sort"

    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
)

var _ heap.Interface = (*TxPriorityQueue)(nil)

// TxPriorityQueue defines a thread-safe priority queue for valid transactions.
type TxPriorityQueue struct {
    mtx tmsync.RWMutex
    txs []*WrappedTx
}

func NewTxPriorityQueue() *TxPriorityQueue {
    pq := &TxPriorityQueue{
        txs: make([]*WrappedTx, 0),
    }

    heap.Init(pq)

    return pq
}

// GetEvictableTxs attempts to find and return a list of *WrappedTx that can be
// evicted to make room for another *WrappedTx with higher priority. If no such
// list of *WrappedTx exists, nil will be returned. The returned list of *WrappedTx
// indicates that these transactions can be removed due to them being of lower
// priority and that their total sum in size allows room for the incoming
// transaction according to the mempool's configured limits.
func (pq *TxPriorityQueue) GetEvictableTxs(priority, txSize, totalSize, cap int64) []*WrappedTx {
    pq.mtx.RLock()
    defer pq.mtx.RUnlock()

    txs := make([]*WrappedTx, len(pq.txs))
    copy(txs, pq.txs)

    sort.Slice(txs, func(i, j int) bool {
        return txs[i].priority < txs[j].priority
    })

    var (
        toEvict []*WrappedTx
        i       int
    )

    currSize := totalSize

    // Loop over all transactions in ascending priority order evaluating those
    // that are only of less priority than the provided argument. We continue
    // evaluating transactions until there is sufficient capacity for the new
    // transaction (size) as defined by txSize.
    for i < len(txs) && txs[i].priority < priority {
        toEvict = append(toEvict, txs[i])
        currSize -= int64(txs[i].Size())

        if currSize+txSize <= cap {
            return toEvict
        }

        i++
    }

    return nil
}
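To make the eviction arithmetic concrete, a hedged usage sketch against the (now removed) v1 package's own types, with invented priorities and sizes: three queued 5-byte txs of priority 1, 2, and 3 give totalSize 15; asking for room for a 5-byte tx of priority 4 with cap 15 evicts only the priority-1 tx, since removing it already frees enough capacity.

pq := NewTxPriorityQueue()
for p := int64(1); p <= 3; p++ {
    pq.PushTx(&WrappedTx{tx: make([]byte, 5), priority: p})
}

// currSize starts at 15; evicting the priority-1 tx drops it to 10, and
// 10 + txSize(5) <= cap(15), so the loop returns after one eviction.
victims := pq.GetEvictableTxs(4, 5, 15, 15)
fmt.Println(len(victims)) // 1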
// NumTxs returns the number of transactions in the priority queue. It is
// thread safe.
func (pq *TxPriorityQueue) NumTxs() int {
    pq.mtx.RLock()
    defer pq.mtx.RUnlock()

    return len(pq.txs)
}

// RemoveTx removes a specific transaction from the priority queue.
func (pq *TxPriorityQueue) RemoveTx(tx *WrappedTx) {
    pq.mtx.Lock()
    defer pq.mtx.Unlock()

    if tx.heapIndex < len(pq.txs) {
        heap.Remove(pq, tx.heapIndex)
    }
}

// PushTx adds a valid transaction to the priority queue. It is thread safe.
func (pq *TxPriorityQueue) PushTx(tx *WrappedTx) {
    pq.mtx.Lock()
    defer pq.mtx.Unlock()

    heap.Push(pq, tx)
}

// PopTx removes the top priority transaction from the queue. It is thread safe.
func (pq *TxPriorityQueue) PopTx() *WrappedTx {
    pq.mtx.Lock()
    defer pq.mtx.Unlock()

    x := heap.Pop(pq)
    if x != nil {
        return x.(*WrappedTx)
    }

    return nil
}

// Push implements the Heap interface.
//
// NOTE: A caller should never call Push. Use PushTx instead.
func (pq *TxPriorityQueue) Push(x interface{}) {
    n := len(pq.txs)
    item := x.(*WrappedTx)
    item.heapIndex = n
    pq.txs = append(pq.txs, item)
}

// Pop implements the Heap interface.
//
// NOTE: A caller should never call Pop. Use PopTx instead.
func (pq *TxPriorityQueue) Pop() interface{} {
    old := pq.txs
    n := len(old)
    item := old[n-1]
    old[n-1] = nil      // avoid memory leak
    item.heapIndex = -1 // for safety
    pq.txs = old[0 : n-1]
    return item
}

// Len implements the Heap interface.
//
// NOTE: A caller should never call Len. Use NumTxs instead.
func (pq *TxPriorityQueue) Len() int {
    return len(pq.txs)
}

// Less implements the Heap interface. It returns true if the transaction at
// position i in the queue is of less priority than the transaction at position j.
func (pq *TxPriorityQueue) Less(i, j int) bool {
    // If there exists two transactions with the same priority, consider the one
    // that we saw the earliest as the higher priority transaction.
    if pq.txs[i].priority == pq.txs[j].priority {
        return pq.txs[i].timestamp.Before(pq.txs[j].timestamp)
    }

    // We want Pop to give us the highest, not lowest, priority so we use greater
    // than here.
    return pq.txs[i].priority > pq.txs[j].priority
}

// Swap implements the Heap interface. It swaps two transactions in the queue.
func (pq *TxPriorityQueue) Swap(i, j int) {
    pq.txs[i], pq.txs[j] = pq.txs[j], pq.txs[i]
    pq.txs[i].heapIndex = i
    pq.txs[j].heapIndex = j
}
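The Less above inverts the comparison so container/heap's min-heap machinery behaves as a max-heap. A standalone sketch of that inversion on plain ints, independent of the mempool types:

package main

import (
    "container/heap"
    "fmt"
)

// maxHeap flips Less so heap.Pop yields the largest value first.
type maxHeap []int

func (h maxHeap) Len() int            { return len(h) }
func (h maxHeap) Less(i, j int) bool  { return h[i] > h[j] } // greater-than => max-heap
func (h maxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *maxHeap) Pop() interface{} {
    old := *h
    n := len(old)
    item := old[n-1]
    *h = old[:n-1]
    return item
}

func main() {
    h := &maxHeap{}
    heap.Init(h)
    for _, p := range []int{5, 1, 9, 3} {
        heap.Push(h, p)
    }
    for h.Len() > 0 {
        fmt.Println(heap.Pop(h)) // 9, 5, 3, 1
    }
}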
@@ -1,176 +0,0 @@
package v1

import (
    "math/rand"
    "sort"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

func TestTxPriorityQueue(t *testing.T) {
    pq := NewTxPriorityQueue()
    numTxs := 1000

    priorities := make([]int, numTxs)

    var wg sync.WaitGroup
    for i := 1; i <= numTxs; i++ {
        priorities[i-1] = i
        wg.Add(1)

        go func(i int) {
            pq.PushTx(&WrappedTx{
                priority:  int64(i),
                timestamp: time.Now(),
            })

            wg.Done()
        }(i)
    }

    sort.Sort(sort.Reverse(sort.IntSlice(priorities)))

    wg.Wait()
    require.Equal(t, numTxs, pq.NumTxs())

    // Wait a second and push a tx with a duplicate priority
    time.Sleep(time.Second)
    now := time.Now()
    pq.PushTx(&WrappedTx{
        priority:  1000,
        timestamp: now,
    })
    require.Equal(t, 1001, pq.NumTxs())

    tx := pq.PopTx()
    require.Equal(t, 1000, pq.NumTxs())
    require.Equal(t, int64(1000), tx.priority)
    require.NotEqual(t, now, tx.timestamp)

    gotPriorities := make([]int, 0)
    for pq.NumTxs() > 0 {
        gotPriorities = append(gotPriorities, int(pq.PopTx().priority))
    }

    require.Equal(t, priorities, gotPriorities)
}

func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) {
    pq := NewTxPriorityQueue()
    rng := rand.New(rand.NewSource(time.Now().UnixNano()))

    values := make([]int, 1000)

    for i := 0; i < 1000; i++ {
        tx := make([]byte, 5) // each tx is 5 bytes
        _, err := rng.Read(tx)
        require.NoError(t, err)

        x := rng.Intn(100000)
        pq.PushTx(&WrappedTx{
            tx:       tx,
            priority: int64(x),
        })

        values[i] = x
    }

    sort.Ints(values)

    max := values[len(values)-1]
    min := values[0]
    totalSize := int64(len(values) * 5)

    testCases := []struct {
        name                             string
        priority, txSize, totalSize, cap int64
        expectedLen                      int
    }{
        {
            name:        "largest priority; single tx",
            priority:    int64(max + 1),
            txSize:      5,
            totalSize:   totalSize,
            cap:         totalSize,
            expectedLen: 1,
        },
        {
            name:        "largest priority; multi tx",
            priority:    int64(max + 1),
            txSize:      17,
            totalSize:   totalSize,
            cap:         totalSize,
            expectedLen: 4,
        },
        {
            name:        "largest priority; out of capacity",
            priority:    int64(max + 1),
            txSize:      totalSize + 1,
            totalSize:   totalSize,
            cap:         totalSize,
            expectedLen: 0,
        },
        {
            name:        "smallest priority; no tx",
            priority:    int64(min - 1),
            txSize:      5,
            totalSize:   totalSize,
            cap:         totalSize,
            expectedLen: 0,
        },
        {
            name:        "small priority; no tx",
            priority:    int64(min),
            txSize:      5,
            totalSize:   totalSize,
            cap:         totalSize,
            expectedLen: 0,
        },
    }

    for _, tc := range testCases {
        tc := tc

        t.Run(tc.name, func(t *testing.T) {
            evictTxs := pq.GetEvictableTxs(tc.priority, tc.txSize, tc.totalSize, tc.cap)
            require.Len(t, evictTxs, tc.expectedLen)
        })
    }
}

func TestTxPriorityQueue_RemoveTx(t *testing.T) {
    pq := NewTxPriorityQueue()
    rng := rand.New(rand.NewSource(time.Now().UnixNano()))
    numTxs := 1000

    values := make([]int, numTxs)

    for i := 0; i < numTxs; i++ {
        x := rng.Intn(100000)
        pq.PushTx(&WrappedTx{
            priority: int64(x),
        })

        values[i] = x
    }

    require.Equal(t, numTxs, pq.NumTxs())

    sort.Ints(values)
    max := values[len(values)-1]

    wtx := pq.txs[pq.NumTxs()/2]
    pq.RemoveTx(wtx)
    require.Equal(t, numTxs-1, pq.NumTxs())
    require.Equal(t, int64(max), pq.PopTx().priority)
    require.Equal(t, numTxs-2, pq.NumTxs())

    require.NotPanics(t, func() {
        pq.RemoveTx(&WrappedTx{heapIndex: numTxs})
        pq.RemoveTx(&WrappedTx{heapIndex: numTxs + 1})
    })
    require.Equal(t, numTxs-2, pq.NumTxs())
}
@@ -163,6 +163,15 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {

    for _, tx := range protoTxs {
        if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
            if errors.Is(err, types.ErrTxInCache) {
                // if the tx is in the cache,
                // then we've been gossiped a
                // Tx that we've already
                // got. Gossip should be
                // smarter, but it's not a
                // problem.
                continue
            }
            logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err)
        }
    }
@@ -299,9 +308,6 @@ func (r *Reactor) processPeerUpdates() {
}

func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
    peerMempoolID := r.ids.GetForPeer(peerID)
    var nextGossipTx *clist.CElement

    // remove the peer ID from the map of routines and mark the waitgroup as done
    defer func() {
        r.mtx.Lock()
@@ -320,6 +326,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
        }
    }()

    peerMempoolID := r.ids.GetForPeer(peerID)
    var nextGossipTx *clist.CElement
    for {
        if !r.IsRunning() {
            return
@@ -330,8 +338,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
        // start from the beginning.
        if nextGossipTx == nil {
            select {
            case <-r.mempool.WaitForNextTx(): // wait until a tx is available
                if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil {
            case <-r.mempool.TxsWaitChan(): // wait until a tx is available
                if nextGossipTx = r.mempool.TxsFront(); nextGossipTx == nil {
                    continue
                }

@@ -349,9 +357,11 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)

        memTx := nextGossipTx.Value.(*WrappedTx)

        // Send the transaction to a peer if we didn't receive it from that peer.
        //
        // NOTE: Transaction batching was disabled due to:
        // https://github.com/tendermint/tendermint/issues/5796
        if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok {
        if !memTx.HasPeer(peerMempoolID) {
            // Send the mempool tx to the corresponding peer. Note, the peer may be
            // behind and thus would not be able to process the mempool tx correctly.
            r.mempoolCh.Out <- p2p.Envelope{

@@ -134,7 +134,9 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
        wg.Add(1)
        go func() {
            defer wg.Done()
            primaryMempool.Lock()
            primaryMempool.insertTx(next)
            primaryMempool.Unlock()
        }()
    }
@@ -1,281 +1,87 @@
package v1

import (
    "sort"
    "sync"
    "time"

    "github.com/tendermint/tendermint/internal/libs/clist"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    "github.com/tendermint/tendermint/types"
)

// WrappedTx defines a wrapper around a raw transaction with additional metadata
// that is used for indexing.
type WrappedTx struct {
    // tx represents the raw binary transaction data
    tx types.Tx
    tx        types.Tx    // the original transaction data
    hash      types.TxKey // the transaction hash
    height    int64       // height when this transaction was initially checked (for expiry)
    timestamp time.Time   // time when transaction was entered (for TTL)

    // hash defines the transaction hash and the primary key used in the mempool
    hash types.TxKey

    // height defines the height at which the transaction was validated at
    height int64

    // gasWanted defines the amount of gas the transaction sender requires
    gasWanted int64

    // priority defines the transaction's priority as specified by the application
    // in the ResponseCheckTx response.
    priority int64

    // sender defines the transaction's sender as specified by the application in
    // the ResponseCheckTx response.
    sender string

    // timestamp is the time at which the node first received the transaction from
    // a peer. It is used as a second dimension in prioritizing transactions when
    // two transactions have the same priority.
    timestamp time.Time

    // peers records a mapping of all peers that sent a given transaction
    peers map[uint16]struct{}

    // heapIndex defines the index of the item in the heap
    heapIndex int

    // gossipEl references the linked-list element in the gossip index
    gossipEl *clist.CElement

    // removed marks the transaction as removed from the mempool. This is set
    // during RemoveTx and is needed due to the fact that a given existing
    // transaction in the mempool can be evicted when it is simultaneously having
    // a reCheckTx callback executed.
    removed bool
    mtx       sync.Mutex
    gasWanted int64           // app: gas required to execute this transaction
    priority  int64           // app: priority value for this transaction
    sender    string          // app: assigned sender label
    peers     map[uint16]bool // peer IDs who have sent us this transaction
}

func (wtx *WrappedTx) Size() int {
    return len(wtx.tx)
}
// Size reports the size of the raw transaction in bytes.
func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) }

// TxStore implements a thread-safe mapping of valid transaction(s).
//
// NOTE:
// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative
//   access is not allowed. Regardless, it is not expected for the mempool to
//   need mutative access.
type TxStore struct {
    mtx       tmsync.RWMutex
    hashTxs   map[types.TxKey]*WrappedTx // primary index
    senderTxs map[string]*WrappedTx      // sender is defined by the ABCI application
}

func NewTxStore() *TxStore {
    return &TxStore{
        senderTxs: make(map[string]*WrappedTx),
        hashTxs:   make(map[types.TxKey]*WrappedTx),
// SetPeer adds the specified peer ID as a sender of w.
func (w *WrappedTx) SetPeer(id uint16) {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    if w.peers == nil {
        w.peers = map[uint16]bool{id: true}
    } else {
        w.peers[id] = true
    }
}

// Size returns the total number of transactions in the store.
func (txs *TxStore) Size() int {
    txs.mtx.RLock()
    defer txs.mtx.RUnlock()

    return len(txs.hashTxs)
}

// GetAllTxs returns all the transactions currently in the store.
func (txs *TxStore) GetAllTxs() []*WrappedTx {
    txs.mtx.RLock()
    defer txs.mtx.RUnlock()

    wTxs := make([]*WrappedTx, len(txs.hashTxs))
    i := 0
    for _, wtx := range txs.hashTxs {
        wTxs[i] = wtx
        i++
    }

    return wTxs
}

// GetTxBySender returns a *WrappedTx by the transaction's sender property
// defined by the ABCI application.
func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
    txs.mtx.RLock()
    defer txs.mtx.RUnlock()

    return txs.senderTxs[sender]
}

// GetTxByHash returns a *WrappedTx by the transaction's hash.
func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
    txs.mtx.RLock()
    defer txs.mtx.RUnlock()

    return txs.hashTxs[hash]
}

// IsTxRemoved returns true if a transaction by hash is marked as removed and
// false otherwise.
func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
    txs.mtx.RLock()
    defer txs.mtx.RUnlock()

    wtx, ok := txs.hashTxs[hash]
    if ok {
        return wtx.removed
    }

    return false
}

// SetTx stores a *WrappedTx by its hash. If the transaction also contains a
// non-empty sender, we additionally store the transaction by the sender as
// defined by the ABCI application.
func (txs *TxStore) SetTx(wtx *WrappedTx) {
    txs.mtx.Lock()
    defer txs.mtx.Unlock()

    if len(wtx.sender) > 0 {
        txs.senderTxs[wtx.sender] = wtx
    }

    txs.hashTxs[wtx.tx.Key()] = wtx
}

// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
// indexes of the transaction.
func (txs *TxStore) RemoveTx(wtx *WrappedTx) {
    txs.mtx.Lock()
    defer txs.mtx.Unlock()

    if len(wtx.sender) > 0 {
        delete(txs.senderTxs, wtx.sender)
    }

    delete(txs.hashTxs, wtx.tx.Key())
    wtx.removed = true
}

// TxHasPeer returns true if a transaction by hash has a given peer ID and false
// otherwise. If the transaction does not exist, false is returned.
func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool {
    txs.mtx.RLock()
    defer txs.mtx.RUnlock()

    wtx := txs.hashTxs[hash]
    if wtx == nil {
        return false
    }

    _, ok := wtx.peers[peerID]
// HasPeer reports whether the specified peer ID is a sender of w.
func (w *WrappedTx) HasPeer(id uint16) bool {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    _, ok := w.peers[id]
    return ok
}
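This refactor replaces TxStore's centralized peer index with per-transaction, mutex-guarded bookkeeping. A hedged fragment showing how the new methods compose (WrappedTx as defined in the hunk above; the peer IDs are invented):

wtx := &WrappedTx{tx: []byte("demo-tx")}

// Record that peer 7 gossiped this transaction to us.
wtx.SetPeer(7)

// Later, the broadcast routine skips peers that already sent us the tx.
if !wtx.HasPeer(9) {
    // safe to send to peer 9 ...
}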

// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the
// given peerID to the WrappedTx's set of peers that sent us this transaction.
// We return true if we've already recorded the given peer for this transaction
// and false otherwise. If the transaction does not exist by hash, we return
// (nil, false).
func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) {
    txs.mtx.Lock()
    defer txs.mtx.Unlock()

    wtx := txs.hashTxs[hash]
    if wtx == nil {
        return nil, false
    }

    if wtx.peers == nil {
        wtx.peers = make(map[uint16]struct{})
    }

    if _, ok := wtx.peers[peerID]; ok {
        return wtx, true
    }

    wtx.peers[peerID] = struct{}{}
    return wtx, false
// SetGasWanted sets the application-assigned gas requirement of w.
func (w *WrappedTx) SetGasWanted(gas int64) {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    w.gasWanted = gas
}

// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
// used to build generic transaction indexes in the mempool. It accepts a
// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
// references which is used during Insert in order to determine sorted order. If
// less returns true, a <= b.
type WrappedTxList struct {
    mtx  tmsync.RWMutex
    txs  []*WrappedTx
    less func(*WrappedTx, *WrappedTx) bool
// GasWanted reports the application-assigned gas requirement of w.
func (w *WrappedTx) GasWanted() int64 {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    return w.gasWanted
}

func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
    return &WrappedTxList{
        txs:  make([]*WrappedTx, 0),
        less: less,
    }
// SetSender sets the application-assigned sender of w.
func (w *WrappedTx) SetSender(sender string) {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    w.sender = sender
}

// Size returns the number of WrappedTx objects in the list.
func (wtl *WrappedTxList) Size() int {
    wtl.mtx.RLock()
    defer wtl.mtx.RUnlock()

    return len(wtl.txs)
// Sender reports the application-assigned sender of w.
func (w *WrappedTx) Sender() string {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    return w.sender
}

// Reset resets the list of transactions to an empty list.
func (wtl *WrappedTxList) Reset() {
    wtl.mtx.Lock()
    defer wtl.mtx.Unlock()

    wtl.txs = make([]*WrappedTx, 0)
// SetPriority sets the application-assigned priority of w.
func (w *WrappedTx) SetPriority(p int64) {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    w.priority = p
}

// Insert inserts a WrappedTx reference into the sorted list based on the list's
// comparator function.
func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
    wtl.mtx.Lock()
    defer wtl.mtx.Unlock()

    i := sort.Search(len(wtl.txs), func(i int) bool {
        return wtl.less(wtl.txs[i], wtx)
    })

    if i == len(wtl.txs) {
        // insert at the end
        wtl.txs = append(wtl.txs, wtx)
        return
    }

    // Make space for the inserted element by shifting values at the insertion
    // index up one index.
    //
    // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
    wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
    wtl.txs[i] = wtx
}

// Remove attempts to remove a WrappedTx from the sorted list.
func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
    wtl.mtx.Lock()
    defer wtl.mtx.Unlock()

    i := sort.Search(len(wtl.txs), func(i int) bool {
        return wtl.less(wtl.txs[i], wtx)
    })

    // Since the list is sorted, we evaluate all elements starting at i. Note, if
    // the element does not exist, we may potentially evaluate the entire remainder
    // of the list. However, a caller should not be expected to call Remove with a
    // non-existing element.
    for i < len(wtl.txs) {
        if wtl.txs[i] == wtx {
            wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
            return
        }

        i++
    }
// Priority reports the application-assigned priority of w.
func (w *WrappedTx) Priority() int64 {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    return w.priority
}
@@ -1,230 +0,0 @@
package v1

import (
    "fmt"
    "math/rand"
    "sort"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "github.com/tendermint/tendermint/types"
)

func TestTxStore_GetTxBySender(t *testing.T) {
    txs := NewTxStore()
    wtx := &WrappedTx{
        tx:        []byte("test_tx"),
        sender:    "foo",
        priority:  1,
        timestamp: time.Now(),
    }

    res := txs.GetTxBySender(wtx.sender)
    require.Nil(t, res)

    txs.SetTx(wtx)

    res = txs.GetTxBySender(wtx.sender)
    require.NotNil(t, res)
    require.Equal(t, wtx, res)
}

func TestTxStore_GetTxByHash(t *testing.T) {
    txs := NewTxStore()
    wtx := &WrappedTx{
        tx:        []byte("test_tx"),
        sender:    "foo",
        priority:  1,
        timestamp: time.Now(),
    }

    key := wtx.tx.Key()
    res := txs.GetTxByHash(key)
    require.Nil(t, res)

    txs.SetTx(wtx)

    res = txs.GetTxByHash(key)
    require.NotNil(t, res)
    require.Equal(t, wtx, res)
}

func TestTxStore_SetTx(t *testing.T) {
    txs := NewTxStore()
    wtx := &WrappedTx{
        tx:        []byte("test_tx"),
        priority:  1,
        timestamp: time.Now(),
    }

    key := wtx.tx.Key()
    txs.SetTx(wtx)

    res := txs.GetTxByHash(key)
    require.NotNil(t, res)
    require.Equal(t, wtx, res)

    wtx.sender = "foo"
    txs.SetTx(wtx)

    res = txs.GetTxByHash(key)
    require.NotNil(t, res)
    require.Equal(t, wtx, res)
}

func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) {
    txs := NewTxStore()
    wtx := &WrappedTx{
        tx:        []byte("test_tx"),
        priority:  1,
        timestamp: time.Now(),
    }

    key := wtx.tx.Key()
    txs.SetTx(wtx)

    res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15)
    require.Nil(t, res)
    require.False(t, ok)

    res, ok = txs.GetOrSetPeerByTxHash(key, 15)
    require.NotNil(t, res)
    require.False(t, ok)

    res, ok = txs.GetOrSetPeerByTxHash(key, 15)
    require.NotNil(t, res)
    require.True(t, ok)

    require.True(t, txs.TxHasPeer(key, 15))
    require.False(t, txs.TxHasPeer(key, 16))
}

func TestTxStore_RemoveTx(t *testing.T) {
    txs := NewTxStore()
    wtx := &WrappedTx{
        tx:        []byte("test_tx"),
        priority:  1,
        timestamp: time.Now(),
    }

    txs.SetTx(wtx)

    key := wtx.tx.Key()
    res := txs.GetTxByHash(key)
    require.NotNil(t, res)

    txs.RemoveTx(res)

    res = txs.GetTxByHash(key)
    require.Nil(t, res)
}

func TestTxStore_Size(t *testing.T) {
    txStore := NewTxStore()
    numTxs := 1000

    for i := 0; i < numTxs; i++ {
        txStore.SetTx(&WrappedTx{
            tx:        []byte(fmt.Sprintf("test_tx_%d", i)),
            priority:  int64(i),
            timestamp: time.Now(),
        })
    }

    require.Equal(t, numTxs, txStore.Size())
}

func TestWrappedTxList_Reset(t *testing.T) {
    list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
        return wtx1.height >= wtx2.height
    })

    require.Zero(t, list.Size())

    for i := 0; i < 100; i++ {
        list.Insert(&WrappedTx{height: int64(i)})
    }

    require.Equal(t, 100, list.Size())

    list.Reset()
    require.Zero(t, list.Size())
}

func TestWrappedTxList_Insert(t *testing.T) {
    list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
        return wtx1.height >= wtx2.height
    })

    rng := rand.New(rand.NewSource(time.Now().UnixNano()))

    var expected []int
    for i := 0; i < 100; i++ {
        height := rng.Int63n(10000)
        expected = append(expected, int(height))
        list.Insert(&WrappedTx{height: height})

        if i%10 == 0 {
            list.Insert(&WrappedTx{height: height})
            expected = append(expected, int(height))
        }
    }

    got := make([]int, list.Size())
    for i, wtx := range list.txs {
        got[i] = int(wtx.height)
    }

    sort.Ints(expected)
    require.Equal(t, expected, got)
}

func TestWrappedTxList_Remove(t *testing.T) {
    list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
        return wtx1.height >= wtx2.height
    })

    rng := rand.New(rand.NewSource(time.Now().UnixNano()))

    var txs []*WrappedTx
    for i := 0; i < 100; i++ {
        height := rng.Int63n(10000)
        tx := &WrappedTx{height: height}

        txs = append(txs, tx)
        list.Insert(tx)

        if i%10 == 0 {
            tx = &WrappedTx{height: height}
            list.Insert(tx)
            txs = append(txs, tx)
        }
    }

    // remove a tx that does not exist
    list.Remove(&WrappedTx{height: 20000})

    // remove a tx that exists (by height) but not referenced
    list.Remove(&WrappedTx{height: txs[0].height})
|
||||
|
||||
// remove a few existing txs
|
||||
for i := 0; i < 25; i++ {
|
||||
j := rng.Intn(len(txs))
|
||||
list.Remove(txs[j])
|
||||
txs = append(txs[:j], txs[j+1:]...)
|
||||
}
|
||||
|
||||
expected := make([]int, len(txs))
|
||||
for i, tx := range txs {
|
||||
expected[i] = int(tx.height)
|
||||
}
|
||||
|
||||
got := make([]int, list.Size())
|
||||
for i, wtx := range list.txs {
|
||||
got[i] = int(wtx.height)
|
||||
}
|
||||
|
||||
sort.Ints(expected)
|
||||
require.Equal(t, expected, got)
|
||||
}

@@ -807,6 +807,8 @@ func (ch *Channel) sendBytes(bytes []byte) bool {
        return true
    case <-time.After(defaultSendTimeout):
        return false
    case <-ch.conn.Quit():
        return false
    }
}
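
A tiny standalone illustration of the bounded-send pattern in this hunk: a send either completes, times out, or aborts when a quit channel closes (the names below are illustrative, not from this file):

// trySend attempts to enqueue msg, giving up after timeout or when quit closes.
func trySend(out chan<- []byte, msg []byte, quit <-chan struct{}, timeout time.Duration) bool {
    select {
    case out <- msg:
        return true
    case <-time.After(timeout):
        return false // receiver too slow; fail rather than block forever
    case <-quit:
        return false // connection shut down while waiting
    }
}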

@@ -27,8 +27,13 @@ var (

// Metrics contains metrics exposed by this package.
type Metrics struct {
    // Number of peers.
    // Number of peers connected.
    Peers metrics.Gauge
    // Number of peers in the peer store database.
    PeersStored metrics.Gauge
    // Number of inactive peers stored.
    PeersInactivated metrics.Gauge

    // Number of bytes received from a given peer.
    PeerReceiveBytesTotal metrics.Counter
    // Number of bytes sent to a given peer.
@@ -36,6 +41,21 @@ type Metrics struct {
    // Pending bytes to be sent to a given peer.
    PeerPendingSendBytes metrics.Gauge

    // Number of successful connection attempts
    PeersConnectedSuccess metrics.Counter
    // Number of failed connection attempts
    PeersConnectedFailure metrics.Counter

    // Number of peers connected as a result of dialing the
    // peer.
    PeersConnectedOutgoing metrics.Gauge
    // Number of peers connected as a result of the peer dialing
    // this node.
    PeersConnectedIncoming metrics.Gauge

    // Number of peers evicted by this node.
    PeersEvicted metrics.Counter

    // RouterPeerQueueRecv defines the time taken to read off of a peer's queue
    // before sending on the connection.
    RouterPeerQueueRecv metrics.Histogram

@@ -73,7 +93,49 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers",
            Help:      "Number of peers.",
            Help:      "Number of peers connected.",
        }, labels).With(labelsAndValues...),
        PeersStored: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers_stored",
            Help:      "Number of peers in the peer store",
        }, labels).With(labelsAndValues...),
        PeersInactivated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers_inactivated",
            Help:      "Number of peers inactivated",
        }, labels).With(labelsAndValues...),
        PeersConnectedSuccess: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers_connected_success",
            Help:      "Number of successful peer connection attempts",
        }, labels).With(labelsAndValues...),
        PeersEvicted: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers_evicted",
            Help:      "Number of connected peers evicted",
        }, labels).With(labelsAndValues...),
        PeersConnectedFailure: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers_connected_failure",
            Help:      "Number of unsuccessful peer connection attempts",
        }, labels).With(labelsAndValues...),
        PeersConnectedIncoming: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers_connected_incoming",
            Help:      "Number of peers connected by peer dialing this node",
        }, labels).With(labelsAndValues...),
        PeersConnectedOutgoing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "peers_connected_outgoing",
            Help:      "Number of peers connected by this node dialing the peer",
        }, labels).With(labelsAndValues...),

        PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
@@ -141,6 +203,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
func NopMetrics() *Metrics {
    return &Metrics{
        Peers:                  discard.NewGauge(),
        PeersStored:            discard.NewGauge(),
        PeersConnectedSuccess:  discard.NewCounter(),
        PeersConnectedFailure:  discard.NewCounter(),
        PeersConnectedIncoming: discard.NewGauge(),
        PeersConnectedOutgoing: discard.NewGauge(),
        PeersInactivated:       discard.NewGauge(),
        PeersEvicted:           discard.NewCounter(),
        PeerReceiveBytesTotal:  discard.NewCounter(),
        PeerSendBytesTotal:     discard.NewCounter(),
        PeerPendingSendBytes:   discard.NewGauge(),
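
A hedged wiring sketch for these two constructors: nodes not scraped by Prometheus keep the no-op implementation, while instrumented nodes build the real one. The config gate and chainID variable below are assumptions, not part of this diff:

// Assumption: cfg.Instrumentation.Prometheus and chainID come from node setup.
m := p2p.NopMetrics()
if cfg.Instrumentation.Prometheus {
    m = p2p.PrometheusMetrics("tendermint", "chain_id", chainID)
}
// m is then handed to the peer manager via PeerManagerOptions.Metrics.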

@@ -13,6 +13,8 @@ import (

    p2p "github.com/tendermint/tendermint/internal/p2p"

    time "time"

    types "github.com/tendermint/tendermint/types"
)

@@ -49,20 +51,20 @@ func (_m *Connection) FlushClose() error {
    return r0
}

// Handshake provides a mock function with given fields: _a0, _a1, _a2
func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
    ret := _m.Called(_a0, _a1, _a2)
// Handshake provides a mock function with given fields: _a0, _a1, _a2, _a3
func (_m *Connection) Handshake(_a0 context.Context, _a1 time.Duration, _a2 types.NodeInfo, _a3 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
    ret := _m.Called(_a0, _a1, _a2, _a3)

    var r0 types.NodeInfo
    if rf, ok := ret.Get(0).(func(context.Context, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok {
        r0 = rf(_a0, _a1, _a2)
    if rf, ok := ret.Get(0).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok {
        r0 = rf(_a0, _a1, _a2, _a3)
    } else {
        r0 = ret.Get(0).(types.NodeInfo)
    }

    var r1 crypto.PubKey
    if rf, ok := ret.Get(1).(func(context.Context, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
        r1 = rf(_a0, _a1, _a2)
    if rf, ok := ret.Get(1).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
        r1 = rf(_a0, _a1, _a2, _a3)
    } else {
        if ret.Get(1) != nil {
            r1 = ret.Get(1).(crypto.PubKey)
@@ -70,8 +72,8 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 cry
    }

    var r2 error
    if rf, ok := ret.Get(2).(func(context.Context, types.NodeInfo, crypto.PrivKey) error); ok {
        r2 = rf(_a0, _a1, _a2)
    if rf, ok := ret.Get(2).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) error); ok {
        r2 = rf(_a0, _a1, _a2, _a3)
    } else {
        r2 = ret.Error(2)
    }
@@ -206,3 +208,18 @@ func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error

    return r0, r1
}

type mockConstructorTestingTNewConnection interface {
    mock.TestingT
    Cleanup(func())
}

// NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewConnection(t mockConstructorTestingTNewConnection) *Connection {
    mock := &Connection{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
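
These generated constructors tie the mock to the test lifecycle, so expectations are asserted automatically on cleanup. A hedged sketch of test usage (the expectation values are placeholders; imports for mocks, mock, require, types, context, and time are assumed):

func TestHandshakeTimeout(t *testing.T) {
    conn := mocks.NewConnection(t) // expectations auto-asserted via t.Cleanup
    conn.On("Handshake", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
        Return(types.NodeInfo{}, nil, context.DeadlineExceeded)

    _, _, err := conn.Handshake(context.Background(), time.Second, types.NodeInfo{}, nil)
    require.Error(t, err)
}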

@@ -332,3 +332,18 @@ func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool {
func (_m *Peer) Wait() {
    _m.Called()
}

type mockConstructorTestingTNewPeer interface {
    mock.TestingT
    Cleanup(func())
}

// NewPeer creates a new instance of Peer. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewPeer(t mockConstructorTestingTNewPeer) *Peer {
    mock := &Peer{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -119,3 +119,18 @@ func (_m *Transport) String() string {

    return r0
}

type mockConstructorTestingTNewTransport interface {
    mock.TestingT
    Cleanup(func())
}

// NewTransport creates a new instance of Transport. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewTransport(t mockConstructorTestingTNewTransport) *Transport {
    mock := &Transport{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -1,7 +1,6 @@
package p2ptest

import (
    "context"
    "math/rand"
    "testing"
    "time"
@@ -238,11 +237,13 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
    require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")

    peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        MinRetryTime:    10 * time.Millisecond,
        MaxRetryTime:    100 * time.Millisecond,
        RetryTimeJitter: time.Millisecond,
        MaxPeers:        opts.MaxPeers,
        MaxConnected:    opts.MaxConnected,
        MinRetryTime:             10 * time.Millisecond,
        DisconnectCooldownPeriod: 10 * time.Millisecond,
        MaxRetryTime:             100 * time.Millisecond,
        RetryTimeJitter:          time.Millisecond,
        MaxPeers:                 opts.MaxPeers,
        MaxConnected:             opts.MaxConnected,
        Metrics:                  p2p.NopMetrics(),
    })
    require.NoError(t, err)

@@ -253,7 +254,7 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
        privKey,
        peerManager,
        []p2p.Transport{transport},
        p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
        p2p.RouterOptions{},
    )
    require.NoError(t, err)
    require.NoError(t, router.Start())

@@ -90,7 +90,7 @@ func createOutboundPeerAndPerformHandshake(
    if err != nil {
        return nil, err
    }
    peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk)
    peerInfo, _, err := pc.conn.Handshake(context.Background(), 0, ourNodeInfo, pk)
    if err != nil {
        return nil, err
    }
@@ -187,7 +187,7 @@ func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) {
    if err != nil {
        return nil, err
    }
    _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
    _, _, err = pc.conn.Handshake(context.Background(), 0, rp.nodeInfo(), rp.PrivKey)
    if err != nil {
        return nil, err
    }
@@ -213,7 +213,7 @@ func (rp *remotePeer) accept() {
    if err != nil {
        golog.Printf("Failed to create a peer: %+v", err)
    }
    _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
    _, _, err = pc.conn.Handshake(context.Background(), 0, rp.nodeInfo(), rp.PrivKey)
    if err != nil {
        golog.Printf("Failed to handshake a peer: %+v", err)
    }
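
Connection.Handshake gained a time.Duration as its second argument; these call sites pass 0, which (reading the mock and the call sites, an assumption rather than a documented fact) appears to mean no handshake deadline. A hedged sketch of a caller that does bound the handshake:

// Assumption: a positive duration bounds the handshake; 0 disables the bound.
peerInfo, peerKey, err := pc.conn.Handshake(ctx, 5*time.Second, ourNodeInfo, privKey)
if err != nil {
    return fmt.Errorf("handshake failed or timed out: %w", err)
}
_, _ = peerInfo, peerKey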

@@ -38,11 +38,19 @@ const (
    PeerStatusBad PeerStatus = "bad" // peer observed as bad
)

// PeerScore is a numeric score assigned to a peer (higher is better).
type PeerScore uint8
type peerConnectionDirection int

const (
    PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers
    peerConnectionIncoming peerConnectionDirection = iota + 1
    peerConnectionOutgoing
)

// PeerScore is a numeric score assigned to a peer (higher is better).
type PeerScore int16

const (
    PeerScorePersistent       PeerScore = math.MaxInt16 // persistent peers
    MaxPeerScoreNotPersistent PeerScore = PeerScorePersistent - 1
)

// PeerUpdate is a peer update event sent via PeerUpdates.

@@ -118,6 +126,13 @@ type PeerManagerOptions struct {
    // outbound). 0 means no limit.
    MaxConnected uint16

    // MaxOutgoingConnections specifies the maximum number of outgoing
    // connections. It must be lower than MaxConnected. If it is
    // 0, then all connections can be outgoing. Once this limit is
    // reached, the node will not dial peers, allowing the
    // remaining peer connections to be used by incoming connections.
    MaxOutgoingConnections uint16

    // MaxConnectedUpgrade is the maximum number of additional connections to
    // use for probing any better-scored peers to upgrade to when all connection
    // slots are full. 0 disables peer upgrading.
@@ -147,6 +162,10 @@ type PeerManagerOptions struct {
    // retry times, to avoid thundering herds. 0 disables jitter.
    RetryTimeJitter time.Duration

    // DisconnectCooldownPeriod is the amount of time after we
    // disconnect from a peer before we'll consider dialing it again.
    DisconnectCooldownPeriod time.Duration

    // PeerScores sets fixed scores for specific peers. It is mainly used
    // for testing. A score of 0 is ignored.
    PeerScores map[types.NodeID]PeerScore
@@ -162,6 +181,9 @@ type PeerManagerOptions struct {
    // persistentPeers provides fast PersistentPeers lookups. It is built
    // by optimize().
    persistentPeers map[types.NodeID]bool

    // Peer metrics.
    Metrics *Metrics
}

// Validate validates the options.
@@ -212,6 +234,10 @@ func (o *PeerManagerOptions) Validate() error {
        }
    }

    if o.MaxOutgoingConnections > 0 && o.MaxConnected < o.MaxOutgoingConnections {
        return errors.New("cannot set MaxOutgoingConnections to a value larger than MaxConnected")
    }

    return nil
}
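
A small illustration of the new validation rule (the option values are arbitrary):

opts := PeerManagerOptions{
    MaxConnected:           16,
    MaxOutgoingConnections: 32, // larger than MaxConnected: invalid
}
if err := opts.Validate(); err != nil {
    // err: "cannot set MaxOutgoingConnections to a value larger than MaxConnected"
}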

@@ -280,6 +306,7 @@ type PeerManager struct {
    selfID  types.NodeID
    options PeerManagerOptions
    metrics *Metrics
    rand    *rand.Rand
    dialWaker  *tmsync.Waker // wakes up DialNext() on relevant peer changes
    evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes
@@ -288,13 +315,13 @@ type PeerManager struct {

    mtx           sync.Mutex
    store         *peerStore
    subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address)
    dialing       map[types.NodeID]bool         // peers being dialed (DialNext → Dialed/DialFail)
    upgrading     map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail)
    connected     map[types.NodeID]bool         // connected peers (Dialed/Accepted → Disconnected)
    ready         map[types.NodeID]bool         // ready peers (Ready → Disconnected)
    evict         map[types.NodeID]bool         // peers scheduled for eviction (Connected → EvictNext)
    evicting      map[types.NodeID]bool         // peers being evicted (EvictNext → Disconnected)
    subscriptions map[*PeerUpdates]*PeerUpdates            // keyed by struct identity (address)
    dialing       map[types.NodeID]bool                    // peers being dialed (DialNext → Dialed/DialFail)
    upgrading     map[types.NodeID]types.NodeID            // peers claimed for upgrade (DialNext → Dialed/DialFail)
    connected     map[types.NodeID]peerConnectionDirection // connected peers (Dialed/Accepted → Disconnected)
    ready         map[types.NodeID]bool                    // ready peers (Ready → Disconnected)
    evict         map[types.NodeID]bool                    // peers scheduled for eviction (Connected → EvictNext)
    evicting      map[types.NodeID]bool                    // peers being evicted (EvictNext → Disconnected)
}

// NewPeerManager creates a new peer manager.
@@ -314,28 +341,34 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio
    }

    peerManager := &PeerManager{
        selfID:     selfID,
        options:    options,
        rand:       rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec
        dialWaker:  tmsync.NewWaker(),
        evictWaker: tmsync.NewWaker(),
        closeCh:    make(chan struct{}),

        selfID:     selfID,
        options:    options,
        rand:       rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec
        dialWaker:  tmsync.NewWaker(),
        evictWaker: tmsync.NewWaker(),
        closeCh:    make(chan struct{}),
        metrics:    NopMetrics(),

        store:         store,
        dialing:       map[types.NodeID]bool{},
        upgrading:     map[types.NodeID]types.NodeID{},
        connected:     map[types.NodeID]bool{},
        connected:     map[types.NodeID]peerConnectionDirection{},
        ready:         map[types.NodeID]bool{},
        evict:         map[types.NodeID]bool{},
        evicting:      map[types.NodeID]bool{},
        subscriptions: map[*PeerUpdates]*PeerUpdates{},
    }

    if options.Metrics != nil {
        peerManager.metrics = options.Metrics
    }

    if err = peerManager.configurePeers(); err != nil {
        return nil, err
    }
    if err = peerManager.prunePeers(); err != nil {
        return nil, err
    }

    return peerManager, nil
}

@@ -361,6 +394,7 @@ func (m *PeerManager) configurePeers() error {
            }
        }
    }
    m.metrics.PeersStored.Add(float64(m.store.Size()))
    return nil
}

@@ -390,20 +424,45 @@ func (m *PeerManager) prunePeers() error {
    ranked := m.store.Ranked()
    for i := len(ranked) - 1; i >= 0; i-- {
        peerID := ranked[i].ID

        switch {
        case m.store.Size() <= int(m.options.MaxPeers):
            return nil
        case m.dialing[peerID]:
        case m.connected[peerID]:
        case m.isConnected(peerID):
        default:
            if err := m.store.Delete(peerID); err != nil {
                return err
            }
            m.metrics.PeersStored.Add(-1)
        }
    }
    return nil
}

func (m *PeerManager) isConnected(peerID types.NodeID) bool {
    _, ok := m.connected[peerID]
    return ok
}

type connectionStats struct {
    incoming uint16
    outgoing uint16
}

func (m *PeerManager) getConnectedInfo() connectionStats {
    out := connectionStats{}
    for _, direction := range m.connected {
        switch direction {
        case peerConnectionIncoming:
            out.incoming++
        case peerConnectionOutgoing:
            out.outgoing++
        }
    }
    return out
}
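
This direction tally is what drives the new outgoing-connection cap. A sketch of the policy it enables, mirroring the check added to TryDialNext further down:

stats := m.getConnectedInfo()
if m.options.MaxOutgoingConnections > 0 && stats.outgoing >= m.options.MaxOutgoingConnections {
    // Stop dialing: the remaining MaxConnected slots stay open for
    // peers that dial us, keeping incoming capacity available.
    return NodeAddress{}
}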

// Add adds a peer to the manager, given as an address. If the peer already
// exists, the address is added to it if it isn't already present. This will push
// low-scoring peers out of the address book if it exceeds the maximum size.
@@ -427,12 +486,17 @@ func (m *PeerManager) Add(address NodeAddress) (bool, error) {
    if ok {
        return false, nil
    }
    if peer.Inactive {
        return false, nil
    }

    // else add the new address
    peer.AddressInfo[address] = &peerAddressInfo{Address: address}
    if err := m.store.Set(peer); err != nil {
        return false, err
    }

    m.metrics.PeersStored.Add(1)
    if err := m.prunePeers(); err != nil {
        return true, err
    }
@@ -459,18 +523,28 @@ func (m *PeerManager) HasMaxPeerCapacity() bool {
    return len(m.connected) >= int(m.options.MaxConnected)
}

func (m *PeerManager) HasDialedMaxPeers() bool {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    stats := m.getConnectedInfo()

    return stats.outgoing >= m.options.MaxOutgoingConnections
}
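
A hedged sketch of how a dial loop could consult this accessor; the surrounding router code is not part of this diff, so the call site is an assumption. Note that the check only makes sense when MaxOutgoingConnections is configured, since with the zero value the comparison is trivially true:

for {
    if peerManager.HasDialedMaxPeers() {
        // All outgoing slots are in use; idle instead of dialing so the
        // remaining connection slots can be filled by inbound peers.
        continue
    }
    address := peerManager.TryDialNext()
    // ... dial the address, or sleep if it is the zero value ...
}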

// DialNext finds an appropriate peer address to dial, and marks it as dialing.
// If no peer is found, or all connection slots are full, it blocks until one
// becomes available. The caller must call Dialed() or DialFailed() for the
// returned peer.
func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) {
    for {
        address, err := m.TryDialNext()
        if err != nil || (address != NodeAddress{}) {
            return address, err
        if address := m.TryDialNext(); (address != NodeAddress{}) {
            return address, nil
        }

        select {
        case <-m.dialWaker.Sleep():
            continue
        case <-ctx.Done():
            return NodeAddress{}, ctx.Err()
        }
@@ -479,20 +553,28 @@ func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) {

// TryDialNext is equivalent to DialNext(), but immediately returns an empty
// address if no peers or connection slots are available.
func (m *PeerManager) TryDialNext() (NodeAddress, error) {
func (m *PeerManager) TryDialNext() NodeAddress {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    // We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including
    // MaxConnectedUpgrade allows us to probe additional peers that have a
    // higher score than any other peers, and if successful evict it.
    if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >=
        int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
        return NodeAddress{}, nil
    if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
        return NodeAddress{}
    }

    cinfo := m.getConnectedInfo()
    if m.options.MaxOutgoingConnections > 0 && cinfo.outgoing >= m.options.MaxOutgoingConnections {
        return NodeAddress{}
    }

    for _, peer := range m.store.Ranked() {
        if m.dialing[peer.ID] || m.connected[peer.ID] {
        if m.dialing[peer.ID] || m.isConnected(peer.ID) {
            continue
        }

        if !peer.LastDisconnected.IsZero() && time.Since(peer.LastDisconnected) < m.options.DisconnectCooldownPeriod {
            continue
        }

@@ -501,6 +583,10 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) {
            continue
        }

        if id, ok := m.store.Resolve(addressInfo.Address); ok && (m.isConnected(id) || m.dialing[id]) {
            continue
        }

        // We now have an eligible address to dial. If we're full but have
        // upgrade capacity (as checked above), we find a lower-scored peer
        // we can replace and mark it as upgrading so no one else claims it.
@@ -511,25 +597,24 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) {
        if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
            upgradeFromPeer := m.findUpgradeCandidate(peer.ID, peer.Score())
            if upgradeFromPeer == "" {
                return NodeAddress{}, nil
                return NodeAddress{}
            }
            m.upgrading[upgradeFromPeer] = peer.ID
        }

        m.dialing[peer.ID] = true
        return addressInfo.Address, nil
        return addressInfo.Address
    }
    return NodeAddress{}, nil
    return NodeAddress{}
}
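
Since TryDialNext no longer returns an error, callers distinguish "nothing to dial" by comparing against the zero value, exactly as DialNext above now does:

if address := peerManager.TryDialNext(); (address != NodeAddress{}) {
    go connectPeer(ctx, address) // connectPeer is illustrative, not in this diff
}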

// DialFailed reports a failed dial attempt. This will make the peer available
// for dialing again when appropriate (possibly after a retry timeout).
//
// FIXME: This should probably delete or mark bad addresses/peers after some time.
func (m *PeerManager) DialFailed(address NodeAddress) error {
    m.mtx.Lock()
    defer m.mtx.Unlock()
    m.metrics.PeersConnectedFailure.Add(1)

    delete(m.dialing, address.NodeID)
    for from, to := range m.upgrading {
@@ -549,6 +634,7 @@ func (m *PeerManager) DialFailed(address NodeAddress) error {

    addressInfo.LastDialFailure = time.Now().UTC()
    addressInfo.DialFailures++

    if err := m.store.Set(peer); err != nil {
        return err
    }
@@ -582,6 +668,8 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    m.metrics.PeersConnectedSuccess.Add(1)

    delete(m.dialing, address.NodeID)

    var upgradeFromPeer types.NodeID
@@ -596,12 +684,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
    if address.NodeID == m.selfID {
        return fmt.Errorf("rejecting connection to self (%v)", address.NodeID)
    }
    if m.connected[address.NodeID] {
    if m.isConnected(address.NodeID) {
        return fmt.Errorf("peer %v is already connected", address.NodeID)
    }
    if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
        if upgradeFromPeer == "" || len(m.connected) >=
            int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
        if upgradeFromPeer == "" || len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
            return fmt.Errorf("already connected to maximum number of peers")
        }
    }
@@ -611,6 +698,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
        return fmt.Errorf("peer %q was removed while dialing", address.NodeID)
    }
    now := time.Now().UTC()
    if peer.Inactive {
        m.metrics.PeersInactivated.Add(-1)
    }
    peer.Inactive = false

    peer.LastConnected = now
    if addressInfo, ok := peer.AddressInfo[address]; ok {
        addressInfo.DialFailures = 0
@@ -622,8 +714,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
        return err
    }

    if upgradeFromPeer != "" && m.options.MaxConnected > 0 &&
        len(m.connected) >= int(m.options.MaxConnected) {
    if upgradeFromPeer != "" && m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
        // Look for an even lower-scored peer that may have appeared since we
        // started the upgrade.
        if p, ok := m.store.Get(upgradeFromPeer); ok {
@@ -632,9 +723,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
        }
    }
        m.evict[upgradeFromPeer] = true
        m.evictWaker.Wake()
    }
    m.connected[peer.ID] = true
    m.evictWaker.Wake()

    m.metrics.PeersConnectedOutgoing.Add(1)
    m.connected[peer.ID] = peerConnectionOutgoing

    return nil
}

@@ -663,11 +756,10 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error {
    if peerID == m.selfID {
        return fmt.Errorf("rejecting connection from self (%v)", peerID)
    }
    if m.connected[peerID] {
    if m.isConnected(peerID) {
        return fmt.Errorf("peer %q is already connected", peerID)
    }
    if m.options.MaxConnected > 0 &&
        len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
    if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
        return fmt.Errorf("already connected to maximum number of peers")
    }

@@ -692,12 +784,17 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error {
        }
    }

    if peer.Inactive {
        m.metrics.PeersInactivated.Add(-1)
    }
    peer.Inactive = false

    peer.LastConnected = time.Now().UTC()
    if err := m.store.Set(peer); err != nil {
        return err
    }

    m.connected[peerID] = true
    m.metrics.PeersConnectedIncoming.Add(1)
    m.connected[peerID] = peerConnectionIncoming
    if upgradeFromPeer != "" {
        m.evict[upgradeFromPeer] = true
    }
@@ -716,7 +813,7 @@ func (m *PeerManager) Ready(peerID types.NodeID, channels ChannelIDSet) {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    if m.connected[peerID] {
    if m.isConnected(peerID) {
        m.ready[peerID] = true
        m.broadcast(PeerUpdate{
            NodeID: peerID,
@@ -752,7 +849,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) {
    // random one.
    for peerID := range m.evict {
        delete(m.evict, peerID)
        if m.connected[peerID] && !m.evicting[peerID] {
        if m.isConnected(peerID) && !m.evicting[peerID] {
            m.evicting[peerID] = true
            return peerID, nil
        }
@@ -769,7 +866,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) {
    ranked := m.store.Ranked()
    for i := len(ranked) - 1; i >= 0; i-- {
        peer := ranked[i]
        if m.connected[peer.ID] && !m.evicting[peer.ID] {
        if m.isConnected(peer.ID) && !m.evicting[peer.ID] {
            m.evicting[peer.ID] = true
            return peer.ID, nil
        }
@@ -784,6 +881,13 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    switch m.connected[peerID] {
    case peerConnectionIncoming:
        m.metrics.PeersConnectedIncoming.Add(-1)
    case peerConnectionOutgoing:
        m.metrics.PeersConnectedOutgoing.Add(-1)
    }

    ready := m.ready[peerID]

    delete(m.connected, peerID)
@@ -792,6 +896,22 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) {
    delete(m.evicting, peerID)
    delete(m.ready, peerID)

    if peer, ok := m.store.Get(peerID); ok {
        peer.LastDisconnected = time.Now()
        _ = m.store.Set(peer)
        // launch a goroutine to ping the dialWaker when the
        // disconnected peer can be dialed again.
        go func() {
            timer := time.NewTimer(m.options.DisconnectCooldownPeriod)
            defer timer.Stop()
            select {
            case <-timer.C:
                m.dialWaker.Wake()
            case <-m.closeCh:
            }
        }()
    }

    if ready {
        m.broadcast(PeerUpdate{
            NodeID: peerID,
@@ -814,17 +934,34 @@ func (m *PeerManager) Errored(peerID types.NodeID, err error) {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    if m.connected[peerID] {
    if m.isConnected(peerID) {
        m.evict[peerID] = true
    }

    m.evictWaker.Wake()
}

// Inactivate marks a peer as inactive, which means we won't attempt to
// dial this peer again. A peer can be reactivated by successfully
// dialing and connecting to the node.
func (m *PeerManager) Inactivate(peerID types.NodeID) error {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    peer, ok := m.store.peers[peerID]
    if !ok {
        return nil
    }

    peer.Inactive = true
    m.metrics.PeersInactivated.Add(1)
    return m.store.Set(*peer)
}
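
Per the v0.35.7 notes, the inactive label exists to stop re-dialing incompatible peers. A hedged sketch of the intended call site; the handshake-failure handling around it is not part of this diff, and isIncompatible is illustrative:

// Assumption: some caller detects an incompatible peer (e.g. wrong network)
// and marks it inactive so the manager stops re-dialing it.
if isIncompatible(err) {
    if ierr := peerManager.Inactivate(peerID); ierr != nil {
        logger.Error("failed to inactivate peer", "peer", peerID, "err", ierr)
    }
}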

// Advertise returns a list of peer addresses to advertise to a peer.
//
// FIXME: This is fairly naïve and only returns the addresses of the
// highest-ranked peers.
// It sorts all peers in the peer store, and assembles a list of peers
// that is most likely to include the highest-priority peers.
func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress {
    m.mtx.Lock()
    defer m.mtx.Unlock()
@@ -837,19 +974,98 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress
        addresses = append(addresses, m.options.SelfAddress)
    }

    for _, peer := range m.store.Ranked() {
    var numAddresses int
    var totalAbsScore int
    ranked := m.store.Ranked()
    seenAddresses := map[NodeAddress]struct{}{}
    scores := map[types.NodeID]int{}

    // get the total number of possible addresses
    for _, peer := range ranked {
        if peer.ID == peerID {
            continue
        }
        score := int(peer.Score())
        if score < 0 {
            totalAbsScore += -score
        } else {
            totalAbsScore += score
        }

        for nodeAddr, addressInfo := range peer.AddressInfo {
            if len(addresses) >= int(limit) {
                return addresses
        scores[peer.ID] = score
        for addr := range peer.AddressInfo {
            if _, ok := m.options.PrivatePeers[addr.NodeID]; !ok {
                numAddresses++
            }
        }
    }

    meanAbsScore := (totalAbsScore + 1) / (len(scores) + 1)

    var attempts uint16
    var addedLastIteration bool

    // if the number of addresses is less than the number of peers
    // to advertise, adjust the limit downwards
    if numAddresses < int(limit) {
        limit = uint16(numAddresses)
    }

    // collect addresses until we have the number requested
    // (limit), or we've added all known addresses, or we've tried
    // at least 2*limit times and the last time we iterated over
    // the remaining addresses we added no new candidates.
    for len(addresses) < int(limit) && (attempts < (limit*2) || !addedLastIteration) {
        attempts++
        addedLastIteration = false

        for idx, peer := range ranked {
            if peer.ID == peerID {
                continue
            }

            // only add non-private NodeIDs
            if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
                addresses = append(addresses, addressInfo.Address)
                if len(addresses) >= int(limit) {
                    break
                }

            for nodeAddr, addressInfo := range peer.AddressInfo {
                if len(addresses) >= int(limit) {
                    break
                }

                // only look at each address once, by
                // tracking a set of addresses seen
                if _, ok := seenAddresses[addressInfo.Address]; ok {
                    continue
                }

                // only add non-private NodeIDs
                if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
                    // add the peer if the total number of ranked addresses
                    // will fit within the limit, or otherwise add
                    // addresses based on a coin flip.

                    // the coin flip is based on the score, commonly, but
                    // 10% of the time we'll randomly insert a "losing"
                    // peer.

                    // nolint:gosec // G404: Use of weak random number generator
                    if numAddresses <= int(limit) || rand.Intn((meanAbsScore*2)+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 {
                        addresses = append(addresses, addressInfo.Address)
                        addedLastIteration = true
                        seenAddresses[addressInfo.Address] = struct{}{}
                    }
                } else {
                    seenAddresses[addressInfo.Address] = struct{}{}
                    // if the number of addresses
                    // is the same as the limit,
                    // we should remove private
                    // addresses from the limit so
                    // we can still return early.
                    if numAddresses == int(limit) {
                        limit--
                    }
                }
            }
        }
    }
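
A rough reading of the coin flip above (illustrative reasoning, not from the source): a peer's address is accepted with probability around (score+2)/(2*meanAbsScore+1), clamped to [0,1], plus a small rank-based chance that keeps low-scored peers in the gossip mix. The condition in isolation, as a toy helper:

// acceptForGossip mirrors the coin-flip condition in Advertise for a single
// peer; score, meanAbsScore, and idx are illustrative inputs.
func acceptForGossip(rng *rand.Rand, score, meanAbsScore, idx int) bool {
    return rng.Intn((meanAbsScore*2)+1) <= score+1 || rng.Intn((idx+1)*10) <= idx+1
}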

@@ -919,8 +1135,14 @@ func (m *PeerManager) processPeerEvent(pu PeerUpdate) {

    switch pu.Status {
    case PeerStatusBad:
        if m.store.peers[pu.NodeID].MutableScore == math.MinInt16 {
            return
        }
        m.store.peers[pu.NodeID].MutableScore--
    case PeerStatusGood:
        if m.store.peers[pu.NodeID].MutableScore == math.MaxInt16 {
            return
        }
        m.store.peers[pu.NodeID].MutableScore++
    }
}
@@ -1021,9 +1243,11 @@ func (m *PeerManager) findUpgradeCandidate(id types.NodeID, score PeerScore) typ
    for i := len(ranked) - 1; i >= 0; i-- {
        candidate := ranked[i]
        switch {
        case candidate.ID == id:
            continue
        case candidate.Score() >= score:
            return "" // no further peers can be scored lower, due to sorting
        case !m.connected[candidate.ID]:
        case !m.isConnected(candidate.ID):
        case m.evict[candidate.ID]:
        case m.evicting[candidate.ID]:
        case m.upgrading[candidate.ID] != "":

@@ -1072,6 +1296,7 @@ func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration
type peerStore struct {
    db     dbm.DB
    peers  map[types.NodeID]*peerInfo
    index  map[NodeAddress]types.NodeID
    ranked []*peerInfo // cache for Ranked(), nil invalidates cache
}

@@ -1091,6 +1316,7 @@ func newPeerStore(db dbm.DB) (*peerStore, error) {
// loadPeers loads all peers from the database into memory.
func (s *peerStore) loadPeers() error {
    peers := map[types.NodeID]*peerInfo{}
    addrs := map[NodeAddress]types.NodeID{}

    start, end := keyPeerInfoRange()
    iter, err := s.db.Iterator(start, end)
@@ -1110,11 +1336,18 @@ func (s *peerStore) loadPeers() error {
        return fmt.Errorf("invalid peer data: %w", err)
    }
    peers[peer.ID] = peer
    for addr := range peer.AddressInfo {
        // TODO: maybe check to see if we've seen this
        // addr before for a different peer; there
        // could be duplicates.
        addrs[addr] = peer.ID
    }
}
if iter.Error() != nil {
    return iter.Error()
}
s.peers = peers
s.index = addrs
s.ranked = nil // invalidate cache if populated
return nil
}
@@ -1126,6 +1359,12 @@ func (s *peerStore) Get(id types.NodeID) (peerInfo, bool) {
    return peer.Copy(), ok
}

// Resolve returns the peer ID for a given node address if known.
func (s *peerStore) Resolve(addr NodeAddress) (types.NodeID, bool) {
    id, ok := s.index[addr]
    return id, ok
}

// Set stores peer data. The input data will be copied, and can safely be reused
// by the caller.
func (s *peerStore) Set(peer peerInfo) error {
@@ -1154,20 +1393,29 @@ func (s *peerStore) Set(peer peerInfo) error {
        // update the existing pointer address.
        *current = peer
    }
    for addr := range peer.AddressInfo {
        s.index[addr] = peer.ID
    }

    return nil
}

// Delete deletes a peer, or does nothing if it does not exist.
func (s *peerStore) Delete(id types.NodeID) error {
    if _, ok := s.peers[id]; !ok {
    peer, ok := s.peers[id]
    if !ok {
        return nil
    }
    if err := s.db.Delete(keyPeerInfo(id)); err != nil {
        return err
    for _, addr := range peer.AddressInfo {
        delete(s.index, addr.Address)
    }
    delete(s.peers, id)
    s.ranked = nil

    if err := s.db.Delete(keyPeerInfo(id)); err != nil {
        return err
    }

    return nil
}

@@ -1203,8 +1451,6 @@ func (s *peerStore) Ranked() []*peerInfo {
        s.ranked = append(s.ranked, peer)
    }
    sort.Slice(s.ranked, func(i, j int) bool {
        // FIXME: If necessary, consider precomputing scores before sorting,
        // to reduce the number of Score() calls.
        return s.ranked[i].Score() > s.ranked[j].Score()
    })
    return s.ranked

@@ -1217,17 +1463,18 @@ func (s *peerStore) Size() int {

// peerInfo contains peer information stored in a peerStore.
type peerInfo struct {
    ID            types.NodeID
    AddressInfo   map[NodeAddress]*peerAddressInfo
    LastConnected time.Time
    ID               types.NodeID
    AddressInfo      map[NodeAddress]*peerAddressInfo
    LastConnected    time.Time
    LastDisconnected time.Time

    // These fields are ephemeral, i.e. not persisted to the database.
    Persistent bool
    Seed       bool
    Height     int64
    FixedScore PeerScore // mainly for tests

    MutableScore int64 // updated by router
    Inactive     bool
}

// peerInfoFromProto converts a Protobuf PeerInfo message to a peerInfo,
@@ -1236,6 +1483,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
    p := &peerInfo{
        ID:          types.NodeID(msg.ID),
        AddressInfo: map[NodeAddress]*peerAddressInfo{},
        Inactive:    msg.Inactive,
    }
    if msg.LastConnected != nil {
        p.LastConnected = *msg.LastConnected
@@ -1258,6 +1506,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
func (p *peerInfo) ToProto() *p2pproto.PeerInfo {
    msg := &p2pproto.PeerInfo{
        ID:            string(p.ID),
        Inactive:      p.Inactive,
        LastConnected: &p.LastConnected,
    }
    for _, addressInfo := range p.AddressInfo {
@@ -1266,6 +1515,7 @@ func (p *peerInfo) ToProto() *p2pproto.PeerInfo {
    if msg.LastConnected.IsZero() {
        msg.LastConnected = nil
    }

    return msg
}

@@ -1282,6 +1532,45 @@ func (p *peerInfo) Copy() peerInfo {
    return c
}

// LastDialed returns when the peer was last dialed, and whether that dial
// attempt was successful. If the peer was never dialed, the timestamp
// is the zero time.
func (p *peerInfo) LastDialed() (time.Time, bool) {
    var (
        last    time.Time
        success bool
    )
    last = last.Add(-1) // so it's after the epoch

    for _, addr := range p.AddressInfo {
        if addr.LastDialFailure.Equal(addr.LastDialSuccess) {
            if addr.LastDialFailure.IsZero() {
                continue
            }
            if last.After(addr.LastDialSuccess) {
                continue
            }
            success = true
            last = addr.LastDialSuccess
        }
        if addr.LastDialFailure.After(last) {
            success = false
            last = addr.LastDialFailure
        }
        if addr.LastDialSuccess.After(last) || last.Equal(addr.LastDialSuccess) {
            success = true
            last = addr.LastDialSuccess
        }
    }

    // if we never modified last, return the zero time
    if last.Add(1).IsZero() {
        return time.Time{}, success
    }

    return last, success
}
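
A brief in-package sketch of using LastDialed when deciding whether to back off before retrying a peer (illustrative, not from the source):

if last, ok := p.LastDialed(); !last.IsZero() && !ok {
    // The most recent dial attempt failed; a caller could use the time
    // elapsed since `last` to apply a retry delay.
    _ = time.Since(last)
}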

// Score calculates a score for the peer. Higher-scored peers will be
// preferred over lower scores.
func (p *peerInfo) Score() PeerScore {
@@ -1300,12 +1589,8 @@ func (p *peerInfo) Score() PeerScore {
        score -= int64(addr.DialFailures)
    }

    if score <= 0 {
        return 0
    }

    if score >= math.MaxUint8 {
        return PeerScore(math.MaxUint8)
    if score < math.MinInt16 {
        score = math.MinInt16
    }

    return PeerScore(score)

@@ -31,7 +31,7 @@ func TestPeerScoring(t *testing.T) {

    t.Run("Synchronous", func(t *testing.T) {
        // update the manager and make sure it's correct
        require.EqualValues(t, 0, peerManager.Scores()[id])
        require.Zero(t, peerManager.Scores()[id])

        // add a bunch of good status updates and watch things increase.
        for i := 1; i < 10; i++ {
@@ -80,3 +80,173 @@ func TestPeerScoring(t *testing.T) {
            "startAt=%d score=%d", start, peerManager.Scores()[id])
    })
}

func makeMockPeerStore(t *testing.T, peers ...peerInfo) *peerStore {
    t.Helper()
    s, err := newPeerStore(dbm.NewMemDB())
    if err != nil {
        t.Fatal(err)
    }
    for idx := range peers {
        if err := s.Set(peers[idx]); err != nil {
            t.Fatal(err)
        }
    }
    return s
}

func TestPeerRanking(t *testing.T) {
    t.Run("InactiveSecond", func(t *testing.T) {
        t.Skip("inactive status is not currently factored into peer rank.")

        store := makeMockPeerStore(t,
            peerInfo{ID: "second", Inactive: true},
            peerInfo{ID: "first", Inactive: false},
        )

        ranked := store.Ranked()
        if len(ranked) != 2 {
            t.Fatal("missing peer in ranked output")
        }
        if ranked[0].ID != "first" {
            t.Error("inactive peer is first")
        }
        if ranked[1].ID != "second" {
            t.Error("active peer is second")
        }
    })
    t.Run("ScoreOrder", func(t *testing.T) {
        for _, test := range []struct {
            Name   string
            First  int64
            Second int64
        }{
            {
                Name:   "Mirror",
                First:  100,
                Second: -100,
            },
            {
                Name:   "VeryLow",
                First:  0,
                Second: -100,
            },
            {
                Name:   "High",
                First:  300,
                Second: 256,
            },
        } {
            t.Run(test.Name, func(t *testing.T) {
                store := makeMockPeerStore(t,
                    peerInfo{
                        ID:           "second",
                        MutableScore: test.Second,
                    },
                    peerInfo{
                        ID:           "first",
                        MutableScore: test.First,
                    })

                ranked := store.Ranked()
                if len(ranked) != 2 {
                    t.Fatal("missing peer in ranked output")
                }
                if ranked[0].ID != "first" {
                    t.Error("higher peer is first")
                }
                if ranked[1].ID != "second" {
                    t.Error("higher peer is second")
                }
            })
        }
    })
}

func TestLastDialed(t *testing.T) {
    t.Run("Zero", func(t *testing.T) {
        p := &peerInfo{}
        ts, ok := p.LastDialed()
        if !ts.IsZero() {
            t.Error("timestamp should be zero:", ts)
        }
        if ok {
            t.Error("peer reported success, despite none")
        }
    })
    t.Run("NeverDialed", func(t *testing.T) {
        p := &peerInfo{
            AddressInfo: map[NodeAddress]*peerAddressInfo{
                {NodeID: "kip"}:    {},
                {NodeID: "merlin"}: {},
            },
        }
        ts, ok := p.LastDialed()
        if !ts.IsZero() {
            t.Error("timestamp should be zero:", ts)
        }
        if ok {
            t.Error("peer reported success, despite none")
        }
    })
    t.Run("Ordered", func(t *testing.T) {
        base := time.Now()
        for _, test := range []struct {
            Name            string
            SuccessTime     time.Time
            FailTime        time.Time
            ExpectedSuccess bool
        }{
            {
                Name: "Zero",
            },
            {
                Name:            "Success",
                SuccessTime:     base.Add(time.Hour),
                FailTime:        base,
                ExpectedSuccess: true,
            },
            {
                Name:            "Equal",
                SuccessTime:     base,
                FailTime:        base,
                ExpectedSuccess: true,
            },
            {
                Name:            "Failure",
                SuccessTime:     base,
                FailTime:        base.Add(time.Hour),
                ExpectedSuccess: false,
            },
        } {
            t.Run(test.Name, func(t *testing.T) {
                p := &peerInfo{
                    AddressInfo: map[NodeAddress]*peerAddressInfo{
                        {NodeID: "kip"}:    {LastDialSuccess: test.SuccessTime},
                        {NodeID: "merlin"}: {LastDialFailure: test.FailTime},
                    },
                }
                ts, ok := p.LastDialed()
                if test.ExpectedSuccess && !ts.Equal(test.SuccessTime) {
                    if !ts.Equal(test.FailTime) {
                        t.Fatal("got unexpected timestamp:", ts)
                    }

                    t.Error("last dialed time reported incorrect value:", ts)
                }
                if !test.ExpectedSuccess && !ts.Equal(test.FailTime) {
                    if !ts.Equal(test.SuccessTime) {
                        t.Fatal("got unexpected timestamp:", ts)
                    }

                    t.Error("last dialed time reported incorrect value:", ts)
                }
                if test.ExpectedSuccess != ok {
                    t.Error("test reported incorrect outcome for last dialed type")
                }
            })
        }
    })
}
|
||||
|
||||
@@ -378,16 +378,14 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
|
||||
// Add b. We shouldn't be able to dial it, due to MaxConnected.
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
// Spawn a goroutine to fail a's dial attempt.
|
||||
@@ -415,8 +413,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.DialFailed(dial))
|
||||
failed := time.Now()
|
||||
@@ -443,8 +440,7 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) {
|
||||
err = peerManager.Accepted(a.NodeID)
|
||||
require.NoError(t, err)
|
||||
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
|
||||
go func() {
|
||||
@@ -473,8 +469,7 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial := peerManager.TryDialNext()
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
|
||||
@@ -482,16 +477,14 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
// At this point, adding c will not allow dialing it.
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Zero(t, dial)
|
||||
}
|
||||
|
||||
@@ -504,11 +497,11 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{
|
||||
a.NodeID: 0,
|
||||
b.NodeID: 1,
|
||||
c.NodeID: 2,
|
||||
d.NodeID: 3,
|
||||
e.NodeID: 0,
|
||||
a.NodeID: p2p.PeerScore(0),
|
||||
b.NodeID: p2p.PeerScore(1),
|
||||
c.NodeID: p2p.PeerScore(2),
|
||||
d.NodeID: p2p.PeerScore(3),
|
||||
e.NodeID: p2p.PeerScore(0),
|
||||
},
|
||||
PersistentPeers: []types.NodeID{c.NodeID, d.NodeID},
|
||||
MaxConnected: 2,
|
||||
@@ -520,7 +513,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err := peerManager.Add(a)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err := peerManager.TryDialNext()
|
||||
dial := peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, a, dial)
|
||||
require.NoError(t, peerManager.Dialed(a))
|
||||
@@ -529,8 +522,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(b)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, b, dial)
|
||||
|
||||
// Even though we are at capacity, we should be allowed to dial c for an
|
||||
@@ -538,8 +530,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
|
||||
added, err = peerManager.Add(c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
dial, err = peerManager.TryDialNext()
|
||||
require.NoError(t, err)
|
||||
dial = peerManager.TryDialNext()
|
||||
require.Equal(t, c, dial)
|
||||
|
||||
// However, since we're using all upgrade slots now, we can't add and dial
|
||||
@@ -547,24 +538,20 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
    added, err = peerManager.Add(d)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Zero(t, dial)

    // We go through with c's upgrade.
    require.NoError(t, peerManager.Dialed(c))

    // Still can't dial d.
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Zero(t, dial)

    // Now, if we disconnect a, we should be allowed to dial d because we have a
    // free upgrade slot.
    require.Error(t, peerManager.Dialed(d))
    peerManager.Disconnected(a.NodeID)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, d, dial)
    require.NoError(t, peerManager.Dialed(d))

    // However, if we disconnect b (such that only c and d are connected), we
@@ -574,8 +561,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
    added, err = peerManager.Add(e)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Zero(t, dial)
}

@@ -585,7 +571,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
    c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
-       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
+       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1},
        MaxConnected:        1,
        MaxConnectedUpgrade: 2,
    })
@@ -595,8 +581,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
    added, err := peerManager.Add(a)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)
    require.NoError(t, peerManager.Dialed(a))

@@ -604,8 +589,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
    added, err = peerManager.Add(b)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, b, dial)

    // Adding c and dialing it will fail, because a is the only connected
@@ -613,8 +597,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
    added, err = peerManager.Add(c)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Empty(t, dial)
}

@@ -635,22 +618,19 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
    added, err := peerManager.Add(a)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)

    // Adding a's TCP address will not dispense a, since it's already dialing.
    added, err = peerManager.Add(aTCP)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Zero(t, dial)

    // Marking a as dialed will still not dispense it.
    require.NoError(t, peerManager.Dialed(a))
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Zero(t, dial)

    // Adding b and accepting a connection from it will not dispense it either.
@@ -658,8 +638,7 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
    require.NoError(t, err)
    require.True(t, added)
    require.NoError(t, peerManager.Accepted(bID))
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Zero(t, dial)
}

@@ -685,16 +664,14 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) {
    // All addresses should be dispensed as long as dialing them has failed.
    dial := []p2p.NodeAddress{}
    for range addresses {
-       address, err := peerManager.TryDialNext()
-       require.NoError(t, err)
+       address := peerManager.TryDialNext()
        require.NotZero(t, address)
        require.NoError(t, peerManager.DialFailed(address))
        dial = append(dial, address)
    }
    require.ElementsMatch(t, dial, addresses)

-   address, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   address := peerManager.TryDialNext()
    require.Zero(t, address)
}
@@ -716,15 +693,14 @@ func TestPeerManager_DialFailed(t *testing.T) {
    // Dialing and then calling DialFailed with a different address (same
    // NodeID) should unmark as dialing and allow us to dial the other address
    // again, but not register the failed address.
-   dial, err := peerManager.TryDialNext()
+   dial := peerManager.TryDialNext()
    require.NoError(t, err)
    require.Equal(t, a, dial)
    require.NoError(t, peerManager.DialFailed(p2p.NodeAddress{
        Protocol: "tcp", NodeID: aID, Hostname: "localhost"}))
    require.Equal(t, []p2p.NodeAddress{a}, peerManager.Addresses(aID))

-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, a, dial)

    // Calling DialFailed on same address twice should be fine.
@@ -742,7 +718,10 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
    c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
-       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
+       PeerScores: map[types.NodeID]p2p.PeerScore{
+           b.NodeID: p2p.PeerScore(1),
+           c.NodeID: p2p.PeerScore(2),
+       },
        MaxConnected:        1,
        MaxConnectedUpgrade: 2,
    })
@@ -752,8 +731,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
    added, err := peerManager.Add(a)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)
    require.NoError(t, peerManager.Dialed(a))

@@ -761,8 +739,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
    added, err = peerManager.Add(b)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, b, dial)

    // Adding c and dialing it will fail, even though it could upgrade a and we
@@ -771,14 +748,12 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
    added, err = peerManager.Add(c)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Empty(t, dial)

    // Failing b's dial will now make c available for dialing.
    require.NoError(t, peerManager.DialFailed(b))
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, c, dial)
}

@@ -793,8 +768,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
    added, err := peerManager.Add(a)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)

    require.NoError(t, peerManager.Dialed(a))
@@ -804,8 +778,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
    added, err = peerManager.Add(b)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, b, dial)

    require.NoError(t, peerManager.Accepted(b.NodeID))
@@ -834,8 +807,7 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) {
    added, err := peerManager.Add(a)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)

    // Marking b as dialed in the meanwhile (even without TryDialNext)
@@ -858,7 +830,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        MaxConnected:        2,
        MaxConnectedUpgrade: 1,
-       PeerScores:          map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1},
+       PeerScores:          map[types.NodeID]p2p.PeerScore{c.NodeID: p2p.PeerScore(1), d.NodeID: 1},
    })
    require.NoError(t, err)

@@ -877,8 +849,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
    added, err = peerManager.Add(c)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, c, dial)
    require.NoError(t, peerManager.Dialed(c))

@@ -908,7 +879,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        MaxConnected:        1,
        MaxConnectedUpgrade: 2,
-       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
+       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1},
    })
    require.NoError(t, err)

@@ -922,8 +893,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
    added, err = peerManager.Add(b)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, b, dial)
    require.NoError(t, peerManager.Dialed(b))

@@ -932,8 +902,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
    added, err = peerManager.Add(c)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Empty(t, dial)

    // a should now be evicted.
@@ -952,10 +921,10 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
        MaxConnected:        2,
        MaxConnectedUpgrade: 1,
        PeerScores: map[types.NodeID]p2p.PeerScore{
-           a.NodeID: 3,
-           b.NodeID: 2,
-           c.NodeID: 10,
-           d.NodeID: 1,
+           a.NodeID: p2p.PeerScore(3),
+           b.NodeID: p2p.PeerScore(2),
+           c.NodeID: p2p.PeerScore(10),
+           d.NodeID: p2p.PeerScore(1),
        },
    })
    require.NoError(t, err)
@@ -976,8 +945,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
    added, err = peerManager.Add(c)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, c, dial)

    // In the meanwhile, a disconnects and d connects. d is even lower-scored
@@ -1005,9 +973,9 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
        MaxConnected:        2,
        MaxConnectedUpgrade: 1,
        PeerScores: map[types.NodeID]p2p.PeerScore{
-           a.NodeID: 1,
-           b.NodeID: 2,
-           c.NodeID: 3,
+           a.NodeID: p2p.PeerScore(1),
+           b.NodeID: p2p.PeerScore(2),
+           c.NodeID: p2p.PeerScore(3),
        },
    })
    require.NoError(t, err)
@@ -1027,7 +995,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
    added, err = peerManager.Add(c)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
+   dial := peerManager.TryDialNext()
    require.NoError(t, err)
    require.Equal(t, c, dial)

@@ -1073,8 +1041,7 @@ func TestPeerManager_Accepted(t *testing.T) {
    added, err = peerManager.Add(c)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, c, dial)
    require.NoError(t, peerManager.Accepted(c.NodeID))
    require.Error(t, peerManager.Dialed(c))
@@ -1083,8 +1050,7 @@ func TestPeerManager_Accepted(t *testing.T) {
    added, err = peerManager.Add(d)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, d, dial)
    require.NoError(t, peerManager.Dialed(d))
    require.Error(t, peerManager.Accepted(d.NodeID))
@@ -1126,8 +1092,8 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) {

    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        PeerScores: map[types.NodeID]p2p.PeerScore{
-           c.NodeID: 1,
-           d.NodeID: 2,
+           c.NodeID: p2p.PeerScore(1),
+           d.NodeID: p2p.PeerScore(2),
        },
        MaxConnected:        1,
        MaxConnectedUpgrade: 1,
@@ -1171,8 +1137,8 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) {

    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        PeerScores: map[types.NodeID]p2p.PeerScore{
-           b.NodeID: 1,
-           c.NodeID: 1,
+           b.NodeID: p2p.PeerScore(1),
+           c.NodeID: p2p.PeerScore(1),
        },
        MaxConnected:        1,
        MaxConnectedUpgrade: 2,
@@ -1214,8 +1180,8 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {

    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        PeerScores: map[types.NodeID]p2p.PeerScore{
-           b.NodeID: 1,
-           c.NodeID: 1,
+           b.NodeID: p2p.PeerScore(1),
+           c.NodeID: p2p.PeerScore(1),
        },
        MaxConnected:        1,
        MaxConnectedUpgrade: 2,
@@ -1232,8 +1198,7 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
    added, err = peerManager.Add(b)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, b, dial)

    // a has already been claimed as an upgrade of a, so accepting
@@ -1376,7 +1341,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        MaxConnected:        1,
        MaxConnectedUpgrade: 1,
-       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
+       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1)},
    })
    require.NoError(t, err)

@@ -1393,8 +1358,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
        added, err := peerManager.Add(b)
        require.NoError(t, err)
        require.True(t, added)
-       dial, err := peerManager.TryDialNext()
-       require.NoError(t, err)
+       dial := peerManager.TryDialNext()
        require.Equal(t, b, dial)
        require.NoError(t, peerManager.Dialed(b))
    }()
@@ -1414,7 +1378,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) {
    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        MaxConnected:        1,
        MaxConnectedUpgrade: 1,
-       PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
+       PeerScores: map[types.NodeID]p2p.PeerScore{
+           b.NodeID: p2p.PeerScore(1),
+       },
    })
    require.NoError(t, err)

@@ -1518,13 +1484,11 @@ func TestPeerManager_Disconnected(t *testing.T) {

    // Disconnecting a dialing peer does not unmark it as dialing, to avoid
    // dialing it multiple times in parallel.
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)

    peerManager.Disconnected(a.NodeID)
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Zero(t, dial)
}

@@ -1592,8 +1556,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
    require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates())

    // Outbound connection with peer error and eviction.
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)
    require.Empty(t, sub.Updates())

@@ -1616,8 +1579,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
    require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates())

    // Outbound connection with dial failure.
-   dial, err = peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial = peerManager.TryDialNext()
    require.Equal(t, a, dial)
    require.Empty(t, sub.Updates())

@@ -1713,8 +1675,7 @@ func TestPeerManager_Close(t *testing.T) {
    added, err := peerManager.Add(a)
    require.NoError(t, err)
    require.True(t, added)
-   dial, err := peerManager.TryDialNext()
-   require.NoError(t, err)
+   dial := peerManager.TryDialNext()
    require.Equal(t, a, dial)
    require.NoError(t, peerManager.DialFailed(a))

@@ -1763,6 +1724,7 @@ func TestPeerManager_Advertise(t *testing.T) {
    require.NoError(t, err)
    require.True(t, added)

+   require.Len(t, peerManager.Advertise(dID, 100), 6)
    // d should get all addresses.
    require.ElementsMatch(t, []p2p.NodeAddress{
        aTCP, aMem, bTCP, bMem, cTCP, cMem,
@@ -1776,10 +1738,24 @@ func TestPeerManager_Advertise(t *testing.T) {
    // Asking for 0 addresses should return, well, 0.
    require.Empty(t, peerManager.Advertise(aID, 0))

-   // Asking for 2 addresses should get the highest-rated ones, i.e. a.
-   require.ElementsMatch(t, []p2p.NodeAddress{
-       aTCP, aMem,
-   }, peerManager.Advertise(dID, 2))
+   // Asking for 2 addresses should get two addresses
+   // and usually not the lowest ranked one
+   numLowestRanked := 0
+   for i := 0; i < 100; i++ {
+       addrs := peerManager.Advertise(dID, 2)
+       require.Len(t, addrs, 2)
+       for _, addr := range addrs {
+           if dID == addr.NodeID {
+               t.Fatal("never advertise self")
+           }
+           if cID == addr.NodeID {
+               numLowestRanked++
+           }
+       }
+   }
+   if numLowestRanked > 20 {
+       t.Errorf("lowest ranked peer returned in results too often: %d", numLowestRanked)
+   }
}

func TestPeerManager_Advertise_Self(t *testing.T) {
@@ -29,8 +29,16 @@ func (pq priorityQueue) get(i int) *pqEnvelope { return pq[i] }
func (pq priorityQueue) Len() int { return len(pq) }

func (pq priorityQueue) Less(i, j int) bool {
-   // if both elements have the same priority, prioritize based on most recent
+   // if both elements have the same priority, prioritize based
+   // on most recent and largest
    if pq[i].priority == pq[j].priority {
+       diff := pq[i].timestamp.Sub(pq[j].timestamp)
+       if diff < 0 {
+           diff *= -1
+       }
+       if diff < 10*time.Millisecond {
+           return pq[i].size > pq[j].size
+       }
        return pq[i].timestamp.After(pq[j].timestamp)
    }
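Read on its own, the new tie-break does two things: envelopes of equal priority whose timestamps fall within 10ms of each other are ordered by message size (larger first), and anything further apart is ordered by recency. A minimal standalone sketch of that rule (the helper name and parameters are hypothetical; in the diff the logic lives in priorityQueue.Less):

package main

import (
    "fmt"
    "time"
)

// newerOrLarger restates the equal-priority tie-break from Less:
// timestamps within 10ms are treated as simultaneous and the larger
// message wins; otherwise the more recent message wins.
func newerOrLarger(ti, tj time.Time, si, sj uint) bool {
    diff := ti.Sub(tj)
    if diff < 0 {
        diff = -diff
    }
    if diff < 10*time.Millisecond {
        return si > sj
    }
    return ti.After(tj)
}

func main() {
    now := time.Now()
    fmt.Println(newerOrLarger(now, now.Add(5*time.Millisecond), 100, 10))  // true: sizes decide
    fmt.Println(newerOrLarger(now, now.Add(50*time.Millisecond), 100, 10)) // false: recency decides
}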
@@ -272,12 +280,10 @@ func (s *pqScheduler) process() {
}

func (s *pqScheduler) push(pqEnv *pqEnvelope) {
-   chIDStr := strconv.Itoa(int(pqEnv.envelope.channelID))
-
    // enqueue the incoming Envelope
    heap.Push(s.pq, pqEnv)
    s.size += pqEnv.size
-   s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Add(float64(pqEnv.size))
+   s.metrics.PeerQueueMsgSize.With("ch_id", strconv.Itoa(int(pqEnv.envelope.channelID))).Add(float64(pqEnv.size))

    // Update the cumulative sizes by adding the Envelope's size to every
    // priority less than or equal to it.
@@ -5,7 +5,6 @@ import (
    "errors"
    "fmt"
    "io"
-   "math/rand"
    "net"
    "runtime"
    "sync"
@@ -41,6 +40,10 @@ type Envelope struct {
    channelID ChannelID
}

+func (e Envelope) IsZero() bool {
+   return e.From == "" && e.To == "" && e.Message == nil
+}
+
// PeerError is a peer error reported via Channel.Error.
//
// FIXME: This currently just disconnects the peer, which is too simplistic.
@@ -160,12 +163,6 @@ type RouterOptions struct {
    // return an error to reject the peer.
    FilterPeerByID func(context.Context, types.NodeID) error

-   // DialSleep controls the amount of time that the router
-   // sleeps between dialing peers. If not set, a default value
-   // is used that sleeps for a (random) amount of time up to 3
-   // seconds between submitting each peer to be dialed.
-   DialSleep func(context.Context)
-
    // NumConcurrentDials controls how many parallel go routines
    // are used to dial peers. This defaults to the value of
    // runtime.NumCPU.
@@ -173,9 +170,10 @@ type RouterOptions struct {
}

const (
-   queueTypeFifo     = "fifo"
-   queueTypePriority = "priority"
-   queueTypeWDRR     = "wdrr"
+   queueTypeFifo           = "fifo"
+   queueTypePriority       = "priority"
+   queueTypeWDRR           = "wdrr"
+   queueTypeSimplePriority = "simple-priority"
)

// Validate validates router options.
@@ -183,8 +181,8 @@ func (o *RouterOptions) Validate() error {
    switch o.QueueType {
    case "":
        o.QueueType = queueTypeFifo
-   case queueTypeFifo, queueTypeWDRR, queueTypePriority:
-       // pass
+   case queueTypeFifo, queueTypeWDRR, queueTypePriority, queueTypeSimplePriority:
+       // pass
    default:
        return fmt.Errorf("queue type %q is not supported", o.QueueType)
    }
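For callers, the new queue is opt-in: leaving QueueType empty still defaults to "fifo". A minimal sketch of selecting it via RouterOptions (hedged: p2p is an internal package, so this only compiles from inside the tendermint module itself; only QueueType and Validate appear in this diff):

package p2p_test

import (
    "testing"

    "github.com/tendermint/tendermint/internal/p2p"
)

func TestQueueTypeOption(t *testing.T) {
    opts := p2p.RouterOptions{QueueType: "simple-priority"}
    // Validate accepts the new name; unknown names fail with
    // `queue type %q is not supported`.
    if err := opts.Validate(); err != nil {
        t.Fatal(err)
    }
}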
@@ -291,7 +289,7 @@ func NewRouter(

    router := &Router{
        logger:  logger,
-       metrics: metrics,
+       metrics: NopMetrics(),
        nodeInfo: nodeInfo,
        privKey:  privKey,
        connTracker: newConnTracker(
@@ -312,6 +310,10 @@ func NewRouter(

    router.BaseService = service.NewBaseService(logger, "router", router)

+   if metrics != nil {
+       router.metrics = metrics
+   }
+
    qf, err := router.createQueueFactory()
    if err != nil {
        return nil, err
@@ -357,6 +359,9 @@ func (r *Router) createQueueFactory() (func(int) queue, error) {
            return q
        }, nil

+   case queueTypeSimplePriority:
+       return func(size int) queue { return newSimplePriorityQueue(r.stopCtx(), size, r.chDescs) }, nil
+
    default:
        return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType)
    }
@@ -425,8 +430,9 @@ func (r *Router) routeChannel(
        case envelope, ok := <-outCh:
            if !ok {
                return
+           } else if envelope.IsZero() {
+               continue
            }

            // Mark the envelope with the channel ID to allow sendPeer() to pass
            // it on to Transport.SendMessage().
            envelope.channelID = chID
@@ -507,16 +513,21 @@ func (r *Router) routeChannel(
            if !ok {
                return
            }

-           shouldEvict := peerError.Fatal || r.peerManager.HasMaxPeerCapacity()
+           maxPeerCapacity := r.peerManager.HasMaxPeerCapacity()
            r.logger.Error("peer error",
                "peer", peerError.NodeID,
                "err", peerError.Err,
-               "evicting", shouldEvict,
+               "disconnecting", peerError.Fatal || maxPeerCapacity,
            )
-           if shouldEvict {
+
+           if peerError.Fatal || maxPeerCapacity {
+               // if the error is fatal or all peer
+               // slots are in use, we can error
+               // (disconnect) from the peer.
                r.peerManager.Errored(peerError.NodeID, peerError.Err)
+           } else {
+               // this just decrements the peer
+               // score.
+               r.peerManager.processPeerEvent(PeerUpdate{
+                   NodeID: peerError.NodeID,
+                   Status: PeerStatusBad,
@@ -528,9 +539,9 @@ func (r *Router) routeChannel(
        }
    }
}

-func (r *Router) numConccurentDials() int {
+func (r *Router) numConcurrentDials() int {
    if r.options.NumConcurrentDials == nil {
-       return runtime.NumCPU()
+       return runtime.NumCPU() * 32
    }

    return r.options.NumConcurrentDials()
@@ -552,23 +563,6 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error {
    return r.options.FilterPeerByID(ctx, id)
}

-func (r *Router) dialSleep(ctx context.Context) {
-   if r.options.DialSleep == nil {
-       // nolint:gosec // G404: Use of weak random number generator
-       timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond)
-       defer timer.Stop()
-
-       select {
-       case <-ctx.Done():
-       case <-timer.C:
-       }
-
-       return
-   }
-
-   r.options.DialSleep(ctx)
-}
-
// acceptPeers accepts inbound connections from peers on the given transport,
// and spawns goroutines that route messages to/from them.
func (r *Router) acceptPeers(transport Transport) {
@@ -576,14 +570,14 @@ func (r *Router) acceptPeers(transport Transport) {
    ctx := r.stopCtx()
    for {
        conn, err := transport.Accept()
-       switch err {
-       case nil:
-       case io.EOF:
-           r.logger.Debug("stopping accept routine", "transport", transport)
+       switch {
+       case errors.Is(err, io.EOF):
+           r.logger.Debug("stopping accept routine", "transport", transport, "err", "EOF")
            return
-       default:
-           r.logger.Error("failed to accept connection", "transport", transport, "err", err)
-           return
+       case err != nil:
+           // in this case we got an error from the net.Listener.
+           r.logger.Error("failed to accept connection", "transport", transport, "err", err)
+           continue
        }

        incomingIP := conn.RemoteEndpoint().IP
@@ -595,7 +589,7 @@ func (r *Router) acceptPeers(transport Transport) {
                "close_err", closeErr,
            )

-           return
+           continue
        }

        // Spawn a goroutine for the handshake, to avoid head-of-line blocking.
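The accept-loop change above is behavioral, not cosmetic: only io.EOF (the listener closing) now ends the loop, while any other accept error is logged and the loop keeps accepting. A tiny standalone restatement of that policy (hypothetical helper name; the real logic is the switch in acceptPeers):

package main

import (
    "errors"
    "fmt"
    "io"
)

// shouldStopAccepting mirrors the new accept-loop policy: stop only on
// io.EOF; treat every other error as transient and continue.
func shouldStopAccepting(err error) bool {
    return errors.Is(err, io.EOF)
}

func main() {
    fmt.Println(shouldStopAccepting(io.EOF))             // true: listener closed, stop
    fmt.Println(shouldStopAccepting(errors.New("boom"))) // false: log and keep accepting
}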
@@ -667,7 +661,7 @@ func (r *Router) dialPeers() {
    // able to add peers at a reasonable pace, though the number
    // is somewhat arbitrary. The action is further throttled by a
    // sleep after sending to the addresses channel.
-   for i := 0; i < r.numConccurentDials(); i++ {
+   for i := 0; i < r.numConcurrentDials(); i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
@@ -690,19 +684,13 @@ LOOP:
        case errors.Is(err, context.Canceled):
            r.logger.Debug("stopping dial routine")
            break LOOP
        case err != nil:
            r.logger.Error("failed to find next peer to dial", "err", err)
            break LOOP
        case address == NodeAddress{}:
            continue LOOP
        }

        select {
        case addresses <- address:
            // this jitters the frequency that we call
            // DialNext and prevents us from attempting to
            // create connections too quickly.
-
-           r.dialSleep(ctx)
-           continue
+           continue LOOP
        case <-ctx.Done():
            close(addresses)
            break LOOP
@@ -718,7 +706,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
    case errors.Is(err, context.Canceled):
        return
    case err != nil:
-       r.logger.Error("failed to dial peer", "peer", address, "err", err)
+       r.logger.Debug("failed to dial peer", "peer", address, "err", err)
        if err = r.peerManager.DialFailed(address); err != nil {
            r.logger.Error("failed to report dial failure", "peer", address, "err", err)
        }
@@ -740,8 +728,8 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
    }

    if err := r.runWithPeerMutex(func() error { return r.peerManager.Dialed(address) }); err != nil {
-       r.logger.Error("failed to dial peer",
-           "op", "outgoing/dialing", "peer", address.NodeID, "err", err)
+       r.logger.Error("failed to dial peer", "op", "outgoing/dialing", "peer", address.NodeID, "err", err)
+       r.peerManager.dialWaker.Wake()
        conn.Close()
        return
    }
@@ -805,12 +793,13 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection,
        // Internet can't and needs a different public address.
        conn, err := transport.Dial(dialCtx, endpoint)
        if err != nil {
-           r.logger.Error("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err)
+           r.logger.Debug("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err)
        } else {
            r.logger.Debug("dialed peer", "peer", address.NodeID, "endpoint", endpoint)
            return conn, nil
        }
    }

    return nil, errors.New("all endpoints failed")
}

@@ -822,13 +811,7 @@ func (r *Router) handshakePeer(
    expectID types.NodeID,
) (types.NodeInfo, crypto.PubKey, error) {

-   if r.options.HandshakeTimeout > 0 {
-       var cancel context.CancelFunc
-       ctx, cancel = context.WithTimeout(ctx, r.options.HandshakeTimeout)
-       defer cancel()
-   }
-
-   peerInfo, peerKey, err := conn.Handshake(ctx, r.nodeInfo, r.privKey)
+   peerInfo, peerKey, err := conn.Handshake(ctx, r.options.HandshakeTimeout, r.nodeInfo, r.privKey)
    if err != nil {
        return peerInfo, peerKey, err
    }
@@ -836,14 +819,6 @@ func (r *Router) handshakePeer(
        return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err)
    }

-   if peerInfo.Network != r.nodeInfo.Network {
-       if err := r.peerManager.store.Delete(peerInfo.NodeID); err != nil {
-           return peerInfo, peerKey, fmt.Errorf("problem removing peer from store from incorrect network [%s]: %w", peerInfo.Network, err)
-       }
-
-       return peerInfo, peerKey, fmt.Errorf("connected to peer from wrong network, %q, removed from peer store", peerInfo.Network)
-   }
-
    if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID {
        return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)",
            peerInfo.NodeID, types.NodeIDFromPubKey(peerKey))
@@ -854,6 +829,10 @@ func (r *Router) handshakePeer(
    }

    if err := r.nodeInfo.CompatibleWith(peerInfo); err != nil {
+       if err := r.peerManager.Inactivate(peerInfo.NodeID); err != nil {
+           return peerInfo, peerKey, fmt.Errorf("problem inactivating peer %q: %w", peerInfo.ID(), err)
+       }
+
        return peerInfo, peerKey, ErrRejected{
            err: err,
            id:  peerInfo.ID(),
@@ -1032,6 +1011,8 @@ func (r *Router) evictPeers() {
        queue, ok := r.peerQueues[peerID]
        r.peerMtx.RUnlock()

+       r.metrics.PeersEvicted.Add(1)
+
        if ok {
            queue.close()
        }
@@ -1,7 +1,6 @@
package p2p_test

import (
    "context"
-   "errors"
    "fmt"
    "io"
@@ -133,13 +132,6 @@ func TestRouter_Channel_Basic(t *testing.T) {
    require.NoError(t, err)
    require.Contains(t, router.NodeInfo().Channels, chDesc2.ID)

-   // Closing the channel, then opening it again should be fine.
-   channel.Close()
-   time.Sleep(100 * time.Millisecond) // yes yes, but Close() is async...
-
-   channel, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0)
-   require.NoError(t, err)
-
    // We should be able to send on the channel, even though there are no peers.
    p2ptest.RequireSend(t, channel, p2p.Envelope{
        To: types.NodeID(strings.Repeat("a", 40)),
@@ -352,7 +344,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
            closer := tmsync.NewCloser()
            mockConnection := &mocks.Connection{}
            mockConnection.On("String").Maybe().Return("mock")
-           mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+           mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
                Return(tc.peerInfo, tc.peerKey, nil)
            mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil)
            mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
@@ -413,72 +405,42 @@ func TestRouter_AcceptPeers(t *testing.T) {
    }
}

-func TestRouter_AcceptPeers_Error(t *testing.T) {
-   t.Cleanup(leaktest.Check(t))
+func TestRouter_AcceptPeers_Errors(t *testing.T) {
+   for _, err := range []error{io.EOF} {
+       t.Run(err.Error(), func(t *testing.T) {
+           t.Cleanup(leaktest.Check(t))

-   // Set up a mock transport that returns an error, which should prevent
-   // the router from calling Accept again.
-   mockTransport := &mocks.Transport{}
-   mockTransport.On("String").Maybe().Return("mock")
-   mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
-   mockTransport.On("Accept").Once().Return(nil, errors.New("boom"))
-   mockTransport.On("Close").Return(nil)
+           // Set up a mock transport that returns io.EOF once, which should prevent
+           // the router from calling Accept again.
+           mockTransport := &mocks.Transport{}
+           mockTransport.On("String").Maybe().Return("mock")
+           mockTransport.On("Accept", mock.Anything).Once().Return(nil, err)
+           mockTransport.On("Listen", mock.Anything).Return(nil).Maybe()
+           mockTransport.On("Close").Return(nil)
+           mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})

-   // Set up and start the router.
-   peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
-   require.NoError(t, err)
-   defer peerManager.Close()
+           // Set up and start the router.
+           peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+           require.NoError(t, err)

-   router, err := p2p.NewRouter(
-       log.TestingLogger(),
-       p2p.NopMetrics(),
-       selfInfo,
-       selfKey,
-       peerManager,
-       []p2p.Transport{mockTransport},
-       p2p.RouterOptions{},
-   )
-   require.NoError(t, err)
+           router, err := p2p.NewRouter(
+               log.TestingLogger(),
+               p2p.NopMetrics(),
+               selfInfo,
+               selfKey,
+               peerManager,
+               []p2p.Transport{mockTransport},
+               p2p.RouterOptions{},
+           )
+           require.NoError(t, err)

-   require.NoError(t, router.Start())
-   time.Sleep(time.Second)
-   require.NoError(t, router.Stop())
+           require.NoError(t, router.Start())
+           time.Sleep(time.Second)
+           require.NoError(t, router.Stop())

-   mockTransport.AssertExpectations(t)
-}
+           mockTransport.AssertExpectations(t)
+       })
+   }
+}

-func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) {
-   t.Cleanup(leaktest.Check(t))
-
-   // Set up a mock transport that returns io.EOF once, which should prevent
-   // the router from calling Accept again.
-   mockTransport := &mocks.Transport{}
-   mockTransport.On("String").Maybe().Return("mock")
-   mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
-   mockTransport.On("Accept").Once().Return(nil, io.EOF)
-   mockTransport.On("Close").Return(nil)
-
-   // Set up and start the router.
-   peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
-   require.NoError(t, err)
-   defer peerManager.Close()
-
-   router, err := p2p.NewRouter(
-       log.TestingLogger(),
-       p2p.NopMetrics(),
-       selfInfo,
-       selfKey,
-       peerManager,
-       []p2p.Transport{mockTransport},
-       p2p.RouterOptions{},
-   )
-   require.NoError(t, err)
-
-   require.NoError(t, router.Start())
-   time.Sleep(time.Second)
-   require.NoError(t, router.Stop())
-
-   mockTransport.AssertExpectations(t)
-}

func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
@@ -492,7 +454,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {

    mockConnection := &mocks.Connection{}
    mockConnection.On("String").Maybe().Return("mock")
-   mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+   mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
        WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF)
    mockConnection.On("Close").Return(nil)
    mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
@@ -573,7 +535,7 @@ func TestRouter_DialPeers(t *testing.T) {
            mockConnection := &mocks.Connection{}
            mockConnection.On("String").Maybe().Return("mock")
            if tc.dialErr == nil {
-               mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+               mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
                    Return(tc.peerInfo, tc.peerKey, nil)
                mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil)
            }
@@ -660,7 +622,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {

    mockConnection := &mocks.Connection{}
    mockConnection.On("String").Maybe().Return("mock")
-   mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+   mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
        WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF)
    mockConnection.On("Close").Return(nil)

@@ -701,7 +663,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
        peerManager,
        []p2p.Transport{mockTransport},
        p2p.RouterOptions{
-           DialSleep: func(_ context.Context) {},
            NumConcurrentDials: func() int {
                ncpu := runtime.NumCPU()
                if ncpu <= 3 {
@@ -740,7 +701,7 @@ func TestRouter_EvictPeers(t *testing.T) {

    mockConnection := &mocks.Connection{}
    mockConnection.On("String").Maybe().Return("mock")
-   mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+   mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
        Return(peerInfo, peerKey.PubKey(), nil)
    mockConnection.On("ReceiveMessage").WaitUntil(closeCh).Return(chID, nil, io.EOF)
    mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
@@ -809,7 +770,7 @@ func TestRouter_ChannelCompatability(t *testing.T) {

    mockConnection := &mocks.Connection{}
    mockConnection.On("String").Maybe().Return("mock")
-   mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+   mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
        Return(incompatiblePeer, peerKey.PubKey(), nil)
    mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
    mockConnection.On("Close").Return(nil)
@@ -858,7 +819,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {

    mockConnection := &mocks.Connection{}
    mockConnection.On("String").Maybe().Return("mock")
-   mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+   mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
        Return(peer, peerKey.PubKey(), nil)
    mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
    mockConnection.On("Close").Return(nil)
112	internal/p2p/rqueue.go	Normal file
@@ -0,0 +1,112 @@
package p2p

import (
    "container/heap"
    "context"
    "sort"
    "time"

    "github.com/gogo/protobuf/proto"
)

type simpleQueue struct {
    input   chan Envelope
    output  chan Envelope
    closeFn func()
    closeCh <-chan struct{}

    maxSize int
    chDescs []ChannelDescriptor
}

func newSimplePriorityQueue(ctx context.Context, size int, chDescs []ChannelDescriptor) *simpleQueue {
    if size%2 != 0 {
        size++
    }

    ctx, cancel := context.WithCancel(ctx)
    q := &simpleQueue{
        input:   make(chan Envelope, size*2),
        output:  make(chan Envelope, size/2),
        maxSize: size * size,
        closeCh: ctx.Done(),
        closeFn: cancel,
    }

    go q.run(ctx)
    return q
}

func (q *simpleQueue) enqueue() chan<- Envelope { return q.input }
func (q *simpleQueue) dequeue() <-chan Envelope { return q.output }
func (q *simpleQueue) close()                   { q.closeFn() }
func (q *simpleQueue) closed() <-chan struct{}  { return q.closeCh }

func (q *simpleQueue) run(ctx context.Context) {
    defer q.closeFn()

    var chPriorities = make(map[ChannelID]uint, len(q.chDescs))
    for _, chDesc := range q.chDescs {
        chID := ChannelID(chDesc.ID)
        chPriorities[chID] = uint(chDesc.Priority)
    }

    pq := make(priorityQueue, 0, q.maxSize)
    heap.Init(&pq)
    ticker := time.NewTicker(10 * time.Millisecond)
    // must have a buffer of exactly one because both sides of
    // this channel are used in this loop, and it simply signals adds
    // to the heap
    signal := make(chan struct{}, 1)
    for {
        select {
        case <-ctx.Done():
            return
        case <-q.closeCh:
            return
        case e := <-q.input:
            // enqueue the incoming Envelope
            heap.Push(&pq, &pqEnvelope{
                envelope:  e,
                size:      uint(proto.Size(e.Message)),
                priority:  chPriorities[e.channelID],
                timestamp: time.Now().UTC(),
            })

            select {
            case signal <- struct{}{}:
            default:
                if len(pq) > q.maxSize {
                    sort.Sort(pq)
                    pq = pq[:q.maxSize]
                }
            }

        case <-ticker.C:
            if len(pq) > q.maxSize {
                sort.Sort(pq)
                pq = pq[:q.maxSize]
            }
            if len(pq) > 0 {
                select {
                case signal <- struct{}{}:
                default:
                }
            }
        case <-signal:
        SEND:
            for len(pq) > 0 {
                select {
                case <-ctx.Done():
                    return
                case <-q.closeCh:
                    return
                case q.output <- heap.Pop(&pq).(*pqEnvelope).envelope:
                    continue SEND
                default:
                    break SEND
                }
            }
        }
    }
}
47	internal/p2p/rqueue_test.go	Normal file
@@ -0,0 +1,47 @@
package p2p

import (
    "context"
    "testing"
    "time"
)

func TestSimpleQueue(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // set up a small queue with very small buffers so we can
    // watch it shed load, then send a bunch of messages to the
    // queue, most of which we'll watch it drop.
    sq := newSimplePriorityQueue(ctx, 1, nil)
    for i := 0; i < 100; i++ {
        sq.enqueue() <- Envelope{From: "merlin"}
    }

    seen := 0

RETRY:
    for seen <= 2 {
        select {
        case e := <-sq.dequeue():
            if e.From != "merlin" {
                continue
            }
            seen++
        case <-time.After(10 * time.Millisecond):
            break RETRY
        }
    }
    // if we don't see any messages, then it's just broken.
    if seen == 0 {
        t.Errorf("seen %d messages, should have seen at least one", seen)
    }
    // ensure that load shedding happens: there can be at most 3
    // messages that we get out of this, one that was buffered
    // plus 2 that were under the cap, everything else gets
    // dropped.
    if seen > 3 {
        t.Errorf("saw %d messages, should have seen 3 or fewer", seen)
    }
}
@@ -417,7 +417,7 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
    // RemovePeer is finished.
    // https://github.com/tendermint/tendermint/issues/3338
    if sw.peers.Remove(peer) {
-       sw.metrics.Peers.Add(float64(-1))
+       sw.metrics.Peers.Add(-1)
    }

    sw.conns.RemoveAddr(peer.RemoteAddr())
@@ -865,11 +865,11 @@ func (sw *Switch) handshakePeer(
    c Connection,
    expectPeerID types.NodeID,
) (types.NodeInfo, crypto.PubKey, error) {
-   // Moved from transport and hardcoded until legacy P2P stack removal.
-   ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+   ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

-   peerInfo, peerKey, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
+   // Moved timeout from transport and hardcoded until legacy P2P stack removal.
+   peerInfo, peerKey, err := c.Handshake(ctx, 5*time.Second, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        return peerInfo, peerKey, ErrRejected{
            conn: c.(*mConnConnection).conn,
@@ -1035,7 +1035,7 @@ func (sw *Switch) addPeer(p Peer) error {
    if err := sw.peers.Add(p); err != nil {
        return err
    }
-   sw.metrics.Peers.Add(float64(1))
+   sw.metrics.Peers.Add(1)

    // Start all the reactor protocols on the peer.
    for _, reactor := range sw.reactors {

@@ -267,7 +267,7 @@ func TestSwitchPeerFilter(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
-   peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
+   peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }
@@ -324,7 +324,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
-   peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
+   peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }
@@ -360,7 +360,7 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
-   peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
+   peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }
@@ -415,7 +415,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
-   peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
+   peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }

@@ -126,7 +126,7 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
        }
        return err
    }
-   peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey)
+   peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        if err := conn.Close(); err != nil {
            sw.Logger.Error("Error closing connection", "err", err)
@@ -5,6 +5,7 @@ import (
    "errors"
    "fmt"
    "net"
+   "time"

    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/internal/p2p/conn"
@@ -84,7 +85,7 @@ type Connection interface {
    // FIXME: The handshake should really be the Router's responsibility, but
    // that requires the connection interface to be byte-oriented rather than
    // message-oriented (see comment above).
-   Handshake(context.Context, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error)
+   Handshake(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error)

    // ReceiveMessage returns the next message received on the connection,
    // blocking until one is available. Returns io.EOF if closed.
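With the widened interface, the handshake timeout becomes an argument of the call itself rather than something each caller wires into its context; zero disables it, as the updated call sites above (Router, Switch, and the tests) show. A short sketch of a caller under the new signature (the helper is hypothetical; it only compiles inside the tendermint module since p2p is internal):

package p2p_test

import (
    "context"
    "time"

    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/types"
)

// handshakeWithTimeout shows the new calling convention: the timeout
// is passed directly, and 0 would fall back to relying only on ctx.
func handshakeWithTimeout(
    ctx context.Context,
    conn p2p.Connection,
    nodeInfo types.NodeInfo,
    privKey crypto.PrivKey,
) (types.NodeInfo, crypto.PubKey, error) {
    // 5s budget, matching the hardcoded value the legacy Switch passes.
    return conn.Handshake(ctx, 5*time.Second, nodeInfo, privKey)
}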
@@ -9,6 +9,7 @@ import (
    "net"
    "strconv"
    "sync"
+   "time"

    "golang.org/x/net/netutil"

@@ -255,6 +256,7 @@ func newMConnConnection(
// Handshake implements Connection.
func (c *mConnConnection) Handshake(
    ctx context.Context,
+   timeout time.Duration,
    nodeInfo types.NodeInfo,
    privKey crypto.PrivKey,
) (types.NodeInfo, crypto.PubKey, error) {
@@ -264,6 +266,12 @@ func (c *mConnConnection) Handshake(
        peerKey  crypto.PubKey
        errCh    = make(chan error, 1)
    )
+   handshakeCtx := ctx
+   if timeout > 0 {
+       var cancel context.CancelFunc
+       handshakeCtx, cancel = context.WithTimeout(ctx, timeout)
+       defer cancel()
+   }
    // To handle context cancellation, we need to do the handshake in a
    // goroutine and abort the blocking network calls by closing the connection
    // when the context is canceled.
@@ -276,14 +284,19 @@ func (c *mConnConnection) Handshake(
            }
        }()
        var err error
-       mconn, peerInfo, peerKey, err = c.handshake(ctx, nodeInfo, privKey)
-       errCh <- err
+       mconn, peerInfo, peerKey, err = c.handshake(handshakeCtx, nodeInfo, privKey)
+
+       select {
+       case errCh <- err:
+       case <-handshakeCtx.Done():
+       }
    }()

    select {
-   case <-ctx.Done():
+   case <-handshakeCtx.Done():
        _ = c.Close()
-       return types.NodeInfo{}, nil, ctx.Err()
+       return types.NodeInfo{}, nil, handshakeCtx.Err()

    case err := <-errCh:
        if err != nil {
@@ -7,6 +7,7 @@ import (
    "io"
    "net"
    "sync"
+   "time"

    "github.com/tendermint/tendermint/crypto"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
@@ -270,9 +271,16 @@ func (c *MemoryConnection) Status() conn.ConnectionStatus {
// Handshake implements Connection.
func (c *MemoryConnection) Handshake(
    ctx context.Context,
+   timeout time.Duration,
    nodeInfo types.NodeInfo,
    privKey crypto.PrivKey,
) (types.NodeInfo, crypto.PubKey, error) {
+   if timeout > 0 {
+       var cancel context.CancelFunc
+       ctx, cancel = context.WithTimeout(ctx, timeout)
+       defer cancel()
+   }
+
    select {
    case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}:
        c.logger.Debug("sent handshake", "nodeInfo", nodeInfo)
@@ -265,7 +265,7 @@ func TestConnection_Handshake(t *testing.T) {
    errCh := make(chan error, 1)
    go func() {
        // Must use assert due to goroutine.
-       peerInfo, peerKey, err := ba.Handshake(ctx, bInfo, bKey)
+       peerInfo, peerKey, err := ba.Handshake(ctx, 0, bInfo, bKey)
        if err == nil {
            assert.Equal(t, aInfo, peerInfo)
            assert.Equal(t, aKey.PubKey(), peerKey)
@@ -273,7 +273,7 @@ func TestConnection_Handshake(t *testing.T) {
        errCh <- err
    }()

-   peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey)
+   peerInfo, peerKey, err := ab.Handshake(ctx, 0, aInfo, aKey)
    require.NoError(t, err)
    require.Equal(t, bInfo, peerInfo)
    require.Equal(t, bKey.PubKey(), peerKey)
@@ -291,7 +291,7 @@ func TestConnection_HandshakeCancel(t *testing.T) {
    ab, ba := dialAccept(t, a, b)
    timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
    cancel()
-   _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey())
+   _, _, err := ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey())
    require.Error(t, err)
    require.Equal(t, context.Canceled, err)
    _ = ab.Close()
@@ -301,7 +301,7 @@ func TestConnection_HandshakeCancel(t *testing.T) {
    ab, ba = dialAccept(t, a, b)
    timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond)
    defer cancel()
-   _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey())
+   _, _, err = ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey())
    require.Error(t, err)
    require.Equal(t, context.DeadlineExceeded, err)
    _ = ab.Close()
@@ -630,13 +630,13 @@ func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.
    go func() {
        privKey := ed25519.GenPrivKey()
        nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
-       _, _, err := ba.Handshake(ctx, nodeInfo, privKey)
+       _, _, err := ba.Handshake(ctx, 0, nodeInfo, privKey)
        errCh <- err
    }()

    privKey := ed25519.GenPrivKey()
    nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
-   _, _, err := ab.Handshake(ctx, nodeInfo, privKey)
+   _, _, err := ab.Handshake(ctx, 0, nodeInfo, privKey)
    require.NoError(t, err)

    timer := time.NewTimer(2 * time.Second)
@@ -150,3 +150,18 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request
|
||||
func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) {
|
||||
_m.Called(_a0)
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewAppConnConsensus interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewAppConnConsensus creates a new instance of AppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewAppConnConsensus(t mockConstructorTestingTNewAppConnConsensus) *AppConnConsensus {
|
||||
mock := &AppConnConsensus{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -118,3 +118,18 @@ func (_m *AppConnMempool) FlushSync(_a0 context.Context) error {
|
||||
func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) {
|
||||
_m.Called(_a0)
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewAppConnMempool interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewAppConnMempool(t mockConstructorTestingTNewAppConnMempool) *AppConnMempool {
|
||||
mock := &AppConnMempool{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -97,3 +97,18 @@ func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewAppConnQuery interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewAppConnQuery creates a new instance of AppConnQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewAppConnQuery(t mockConstructorTestingTNewAppConnQuery) *AppConnQuery {
|
||||
mock := &AppConnQuery{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -120,3 +120,18 @@ func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 types.Requ
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewAppConnSnapshot interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewAppConnSnapshot(t mockConstructorTestingTNewAppConnSnapshot) *AppConnSnapshot {
|
||||
mock := &AppConnSnapshot{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -165,3 +165,18 @@ func (_m *EventSink) Type() indexer.EventSinkType {
 
 	return r0
 }
+
+type mockConstructorTestingTNewEventSink interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewEventSink creates a new instance of EventSink. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewEventSink(t mockConstructorTestingTNewEventSink) *EventSink {
+	mock := &EventSink{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -208,3 +208,18 @@ func (_m *BlockStore) Size() int64 {
 
 	return r0
 }
+
+type mockConstructorTestingTNewBlockStore interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore {
+	mock := &BlockStore{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -68,3 +68,18 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64
 func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) {
 	_m.Called(_a0, _a1)
 }
+
+type mockConstructorTestingTNewEvidencePool interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewEvidencePool(t mockConstructorTestingTNewEvidencePool) *EvidencePool {
+	mock := &EvidencePool{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -186,3 +186,18 @@ func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet
 
	return r0
 }
+
+type mockConstructorTestingTNewStore interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewStore(t mockConstructorTestingTNewStore) *Store {
+	mock := &Store{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -82,3 +82,18 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State,
 
 	return r0, r1
 }
+
+type mockConstructorTestingTNewStateProvider interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewStateProvider(t mockConstructorTestingTNewStateProvider) *StateProvider {
+	mock := &StateProvider{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -75,7 +75,7 @@ func MustNewDefaultLogger(format, level string, trace bool) Logger {
 }
 
 func (l defaultLogger) Info(msg string, keyVals ...interface{}) {
-	l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg)
+	l.Logger.Info().Fields(keyVals).Msg(msg)
 }
 
 func (l defaultLogger) Error(msg string, keyVals ...interface{}) {
@@ -84,29 +84,16 @@ func (l defaultLogger) Error(msg string, keyVals ...interface{}) {
 		e = e.Stack()
 	}
 
-	e.Fields(getLogFields(keyVals...)).Msg(msg)
+	e.Fields(keyVals).Msg(msg)
 }
 
 func (l defaultLogger) Debug(msg string, keyVals ...interface{}) {
-	l.Logger.Debug().Fields(getLogFields(keyVals...)).Msg(msg)
+	l.Logger.Debug().Fields(keyVals).Msg(msg)
 }
 
 func (l defaultLogger) With(keyVals ...interface{}) Logger {
 	return defaultLogger{
-		Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger(),
+		Logger: l.Logger.With().Fields(keyVals).Logger(),
 		trace:  l.trace,
 	}
 }
-
-func getLogFields(keyVals ...interface{}) map[string]interface{} {
-	if len(keyVals)%2 != 0 {
-		return nil
-	}
-
-	fields := make(map[string]interface{}, len(keyVals))
-	for i := 0; i < len(keyVals); i += 2 {
-		fields[fmt.Sprint(keyVals[i])] = keyVals[i+1]
-	}
-
-	return fields
-}
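The logging change passes the alternating key/value slice straight through to zerolog instead of first folding it into a map. This leans on zerolog's `Fields` accepting a slice as well as a map (true in recent zerolog releases, v1.22 and later as far as I can tell), and it drops the old `getLogFields` behavior of silently discarding all fields when handed an odd-length slice. A standalone sketch of the call shape now relied on:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)

	// Fields accepts an alternating key/value slice directly; this is
	// what the diff above now passes through as keyVals.
	logger.Info().
		Fields([]interface{}{"height", int64(100), "peer", "abcd1234"}).
		Msg("committed block")
	// {"level":"info","height":100,"peer":"abcd1234","message":"committed block"}
}
```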
@@ -1018,7 +1018,12 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)
 
 	// process all the responses as they come in
 	for i := 0; i < cap(witnessResponsesC); i++ {
-		response := <-witnessResponsesC
+		var response witnessResponse
+		select {
+		case response = <-witnessResponsesC:
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 		switch response.err {
 		// success! We have found a new primary
 		case nil:
@@ -1047,10 +1052,6 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)
 			// return the light block that new primary responded with
 			return response.lb, nil
 
-		// catch canceled contexts or deadlines
-		case context.Canceled, context.DeadlineExceeded:
-			return nil, response.err
-
 		// process benign errors by logging them only
 		case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh:
 			lastError = response.err
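The first hunk replaces a bare channel receive with a select on `ctx.Done()`, so findNewPrimary can no longer block forever on witness responses that will never arrive once the context is canceled; the second hunk can then drop the explicit `context.Canceled`/`context.DeadlineExceeded` case, because cancellation is now handled before the error switch. The pattern in isolation (generic names, not from the file):

```go
package light

import "context"

type response struct{ err error }

// collect drains n responses but aborts promptly on cancellation,
// mirroring the loop structure added to findNewPrimary above.
func collect(ctx context.Context, ch <-chan response, n int) ([]response, error) {
	out := make([]response, 0, n)
	for i := 0; i < n; i++ {
		var r response
		select {
		case r = <-ch:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		out = append(out, r)
	}
	return out, nil
}
```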
@@ -51,3 +51,18 @@ func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) erro
 
 	return r0
 }
+
+type mockConstructorTestingTNewProvider interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewProvider(t mockConstructorTestingTNewProvider) *Provider {
+	mock := &Provider{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -99,3 +99,18 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6
 
 	return r0, r1
 }
+
+type mockConstructorTestingTNewLightClient interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewLightClient(t mockConstructorTestingTNewLightClient) *LightClient {
+	mock := &LightClient{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
node/node.go (51 lines changed)
@@ -250,7 +250,7 @@ func makeNode(cfg *config.Config,
 
 	// Determine whether we should do block sync. This must happen after the handshake, since the
 	// app may modify the validator set, specifying ourself as the only validator.
-	blockSync := cfg.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey)
+	blockSync := !onlyValidatorIsUs(state, pubKey)
 
 	logNodeStartupInfo(state, pubKey, logger, consensusLogger, cfg.Mode)
 
@@ -265,7 +265,7 @@ func makeNode(cfg *config.Config,
 	p2pLogger := logger.With("module", "p2p")
 	transport := createTransport(p2pLogger, cfg)
 
-	peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID)
+	peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID, nodeMetrics.p2p)
 	closers = append(closers, peerCloser)
 	if err != nil {
 		return nil, combineCloseError(
@@ -561,7 +561,7 @@ func makeSeedNode(cfg *config.Config,
 	p2pLogger := logger.With("module", "p2p")
 	transport := createTransport(p2pLogger, cfg)
 
-	peerManager, closer, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID)
+	peerManager, closer, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID, p2pMetrics)
 	if err != nil {
 		return nil, combineCloseError(
 			fmt.Errorf("failed to create peer manager: %w", err),
@@ -700,10 +700,8 @@ func (n *nodeImpl) OnStart() error {
 	}
 
 	if n.config.Mode != config.ModeSeed {
-		if n.config.BlockSync.Version == config.BlockSyncV0 {
-			if err := n.bcReactor.Start(); err != nil {
-				return err
-			}
+		if err := n.bcReactor.Start(); err != nil {
+			return err
 		}
 
 		// Start the real consensus reactor separately since the switch uses the shim.
@@ -787,22 +785,18 @@ func (n *nodeImpl) OnStart() error {
 			// TODO: Some form of orchestrator is needed here between the state
 			// advancing reactors to be able to control which one of the three
 			// is running
-			if n.config.BlockSync.Enable {
-				// FIXME Very ugly to have these metrics bleed through here.
-				n.consensusReactor.SetBlockSyncingMetrics(1)
-				if err := bcR.SwitchToBlockSync(state); err != nil {
-					n.Logger.Error("failed to switch to block sync", "err", err)
-					return
-				}
-
-				d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight}
-				if err := n.eventBus.PublishEventBlockSyncStatus(d); err != nil {
-					n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err)
-				}
-
-			} else {
-				n.consensusReactor.SwitchToConsensus(state, true)
+
+			// FIXME Very ugly to have these metrics bleed through here.
+			n.consensusReactor.SetBlockSyncingMetrics(1)
+			if err := bcR.SwitchToBlockSync(state); err != nil {
+				n.Logger.Error("failed to switch to block sync", "err", err)
+				return
+			}
+
+			s := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight}
+			if err := n.eventBus.PublishEventBlockSyncStatus(s); err != nil {
+				n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err)
 			}
 
 		}()
 	}
@@ -830,11 +824,10 @@ func (n *nodeImpl) OnStop() {
 
 	if n.config.Mode != config.ModeSeed {
 		// now stop the reactors
-		if n.config.BlockSync.Version == config.BlockSyncV0 {
-			// Stop the real blockchain reactor separately since the switch uses the shim.
-			if err := n.bcReactor.Stop(); err != nil {
-				n.Logger.Error("failed to stop the blockchain reactor", "err", err)
-			}
+		// Stop the real blockchain reactor separately since the switch uses the shim.
+		if err := n.bcReactor.Stop(); err != nil {
+			n.Logger.Error("failed to stop the blockchain reactor", "err", err)
 		}
 
 		// Stop the real consensus reactor separately since the switch uses the shim.
@@ -1246,7 +1239,9 @@ func createAndStartPrivValidatorGRPCClient(
 
 func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions {
 	opts := p2p.RouterOptions{
-		QueueType: conf.P2P.QueueType,
+		QueueType:        conf.P2P.QueueType,
+		HandshakeTimeout: conf.P2P.HandshakeTimeout,
+		DialTimeout:      conf.P2P.DialTimeout,
 	}
 
 	if conf.P2P.MaxNumInboundPeers > 0 {
@@ -17,7 +17,6 @@ import (
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
 	bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0"
-	bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2"
 	"github.com/tendermint/tendermint/internal/consensus"
 	"github.com/tendermint/tendermint/internal/evidence"
 	"github.com/tendermint/tendermint/internal/mempool"
@@ -339,6 +338,10 @@ func createBlockchainReactor(
 	metrics *consensus.Metrics,
 ) (*p2p.ReactorShim, service.Service, error) {
 
+	if !cfg.BlockSync.Enable {
+		logger.Error("blocksync.enable = false, but Tendermint no longer allows blocksync to be disabled. This setting is now ignored and will be removed in the next version.")
+	}
+
 	logger = logger.With("module", "blockchain")
 
 	switch cfg.BlockSync.Version {
@@ -442,12 +445,23 @@ func createConsensusReactor(
 }
 
 func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport {
+	var maxAccepted uint32
+	switch {
+	case cfg.P2P.MaxConnections > 0 && !cfg.P2P.UseLegacy:
+		maxAccepted = uint32(cfg.P2P.MaxConnections) +
+			uint32(len(tmstrings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")))
+
+	case cfg.P2P.MaxNumInboundPeers > 0:
+		maxAccepted = uint32(cfg.P2P.MaxNumInboundPeers) +
+			uint32(len(tmstrings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")))
+	default:
+		maxAccepted = 0
+	}
+
 	return p2p.NewMConnTransport(
 		logger, p2p.MConnConfig(cfg.P2P), []*p2p.ChannelDescriptor{},
 		p2p.MConnTransportOptions{
-			MaxAcceptedConnections: uint32(cfg.P2P.MaxNumInboundPeers +
-				len(tmstrings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")),
-			),
+			MaxAcceptedConnections: maxAccepted,
 		},
 	)
 }
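The accept limit for the transport is now derived from MaxConnections when the new p2p stack is in use, falling back to the legacy MaxNumInboundPeers knob, with unconditional peers always given extra headroom. The selection logic reduced to arithmetic (parameter types are simplified here, and reading `maxAccepted == 0` as "no explicit cap" is my interpretation, not stated in the hunk):

```go
package p2p

// maxAccepted mirrors the switch added to createTransport above.
func maxAccepted(maxConnections, maxInbound uint16, useLegacy bool, unconditionalPeers int) uint32 {
	switch {
	case maxConnections > 0 && !useLegacy:
		// new stack: budget from MaxConnections
		return uint32(maxConnections) + uint32(unconditionalPeers)
	case maxInbound > 0:
		// legacy stack: budget from MaxNumInboundPeers
		return uint32(maxInbound) + uint32(unconditionalPeers)
	default:
		return 0 // presumably "no explicit cap"
	}
}

// e.g. maxAccepted(64, 0, false, 2) == 66; maxAccepted(0, 40, true, 2) == 42
```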
@@ -457,6 +471,7 @@ func createPeerManager(
 	dbProvider config.DBProvider,
 	p2pLogger log.Logger,
 	nodeID types.NodeID,
+	metrics *p2p.Metrics,
 ) (*p2p.PeerManager, closer, error) {
 
 	selfAddr, err := p2p.ParseNodeAddress(nodeID.AddressString(cfg.P2P.ExternalAddress))
@@ -487,21 +502,34 @@ func createPeerManager(
 		maxConns = 64
 	}
 
+	var maxOutgoingConns uint16
+	switch {
+	case cfg.P2P.MaxOutgoingConnections > 0:
+		maxOutgoingConns = cfg.P2P.MaxOutgoingConnections
+	default:
+		maxOutgoingConns = maxConns / 2
+	}
+
 	privatePeerIDs := make(map[types.NodeID]struct{})
 	for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") {
 		privatePeerIDs[types.NodeID(id)] = struct{}{}
 	}
 
+	const maxUpgradeConns = 4
+
 	options := p2p.PeerManagerOptions{
-		SelfAddress:            selfAddr,
-		MaxConnected:           maxConns,
-		MaxConnectedUpgrade:    4,
-		MaxPeers:               1000,
-		MinRetryTime:           250 * time.Millisecond,
-		MaxRetryTime:           30 * time.Minute,
-		MaxRetryTimePersistent: 5 * time.Minute,
-		RetryTimeJitter:        5 * time.Second,
-		PrivatePeers:           privatePeerIDs,
+		SelfAddress:              selfAddr,
+		MaxConnected:             maxConns,
+		MaxOutgoingConnections:   maxOutgoingConns,
+		MaxConnectedUpgrade:      maxUpgradeConns,
+		DisconnectCooldownPeriod: 2 * time.Second,
+		MaxPeers:                 maxUpgradeConns + 4*maxConns,
+		MinRetryTime:             250 * time.Millisecond,
+		MaxRetryTime:             30 * time.Minute,
+		MaxRetryTimePersistent:   5 * time.Minute,
+		RetryTimeJitter:          5 * time.Second,
+		PrivatePeers:             privatePeerIDs,
+		Metrics:                  metrics,
 	}
 
 	peers := []p2p.NodeAddress{}
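This hunk makes the peer store scale with the configured connection limit instead of using a flat MaxPeers of 1000, and reserves half the connection budget for outgoing dials unless MaxOutgoingConnections is set. Plugging in the fallback of maxConns = 64 visible above:

```go
package main

import "fmt"

func main() {
	const maxUpgradeConns = 4
	maxConns := uint16(64) // fallback when no connection limits are configured

	fmt.Println(maxUpgradeConns + 4*maxConns) // 260 peer addresses kept (was a flat 1000)
	fmt.Println(maxConns / 2)                 // 32 connection slots reserved for dialing out
}
```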
@@ -732,10 +760,8 @@ func makeNodeInfo(
 	switch cfg.BlockSync.Version {
 	case config.BlockSyncV0:
 		bcChannel = byte(bcv0.BlockSyncChannel)
 
 	case config.BlockSyncV2:
-		bcChannel = bcv2.BlockchainChannel
-
+		return types.NodeInfo{}, fmt.Errorf("unsupported blocksync version %s", cfg.BlockSync.Version)
 	default:
 		return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", cfg.BlockSync.Version)
 	}
@@ -243,6 +243,7 @@ type PeerInfo struct {
 	ID            string             `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
 	AddressInfo   []*PeerAddressInfo `protobuf:"bytes,2,rep,name=address_info,json=addressInfo,proto3" json:"address_info,omitempty"`
 	LastConnected *time.Time         `protobuf:"bytes,3,opt,name=last_connected,json=lastConnected,proto3,stdtime" json:"last_connected,omitempty"`
+	Inactive      bool               `protobuf:"varint,4,opt,name=inactive,proto3" json:"inactive,omitempty"`
 }
 
 func (m *PeerInfo) Reset() { *m = PeerInfo{} }
@@ -299,6 +300,13 @@ func (m *PeerInfo) GetLastConnected() *time.Time {
 	return nil
 }
 
+func (m *PeerInfo) GetInactive() bool {
+	if m != nil {
+		return m.Inactive
+	}
+	return false
+}
+
 type PeerAddressInfo struct {
 	Address         string     `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
 	LastDialSuccess *time.Time `protobuf:"bytes,2,opt,name=last_dial_success,json=lastDialSuccess,proto3,stdtime" json:"last_dial_success,omitempty"`
@@ -378,46 +386,46 @@ func init() {
 func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) }
 
 var fileDescriptor_c8a29e659aeca578 = []byte{
-	// 610 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x4e, 0x1b, 0x3d,
-	[38 further lines of the old gzipped descriptor omitted]
+	// 621 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xdb, 0x30,
+	[38 further lines of the regenerated gzipped descriptor omitted]
 }
 
 func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) {
@@ -600,6 +608,16 @@ func (m *PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.Inactive {
+		i--
+		if m.Inactive {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
 	if m.LastConnected != nil {
 		n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastConnected, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected):])
 		if err3 != nil {
@@ -792,6 +810,9 @@ func (m *PeerInfo) Size() (n int) {
 		l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected)
 		n += 1 + l + sovTypes(uint64(l))
 	}
+	if m.Inactive {
+		n += 2
+	}
 	return n
 }
 
@@ -1487,6 +1508,26 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Inactive", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Inactive = bool(v != 0)
 		default:
 			iNdEx = preIndex
 			skippy, err := skipTypes(dAtA[iNdEx:])
@@ -32,6 +32,7 @@ message PeerInfo {
   string                    id             = 1 [(gogoproto.customname) = "ID"];
   repeated PeerAddressInfo  address_info   = 2;
   google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true];
+  bool                      inactive       = 4;
 }
 
 message PeerAddressInfo {
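The generated marshal, size, and unmarshal hunks above all follow from this single proto line. Field number 4 with the varint wire type (0) gives a key byte of (4 << 3) | 0 = 0x20, which is exactly what MarshalToSizedBuffer writes, and a set bool costs two bytes on the wire, matching the `n += 2` added to Size. A quick check:

```go
package main

import "fmt"

func main() {
	const fieldNumber = 4    // bool inactive = 4;
	const wireTypeVarint = 0 // bools are varints on the wire

	fmt.Printf("%#x\n", fieldNumber<<3|wireTypeVarint) // 0x20

	// A set bool therefore encodes as the two bytes 0x20 0x01,
	// which is why (*PeerInfo).Size adds 2 when m.Inactive is true.
}
```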
@@ -800,3 +800,18 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP
 
 	return r0, r1
 }
+
+type mockConstructorTestingTNewClient interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewClient(t mockConstructorTestingTNewClient) *Client {
+	mock := &Client{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -147,9 +147,15 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
 	}
 }
 
+func ensureBodyClose(next http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+		next(w, r)
+	}
+}
+
 func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// Since the pattern "/" matches all paths not matched by other registered patterns,
 		// we check whether the path is indeed "/", otherwise return a 404 error
 		if r.URL.Path != "/" {
 			http.NotFound(w, r)
@@ -217,7 +217,7 @@ func TestRPCNotificationInBatch(t *testing.T) {
 
 func TestUnknownRPCPath(t *testing.T) {
 	mux := testMux()
-	req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil)
+	req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", strings.NewReader(""))
 	rec := httptest.NewRecorder()
 	mux.ServeHTTP(rec, req)
 	res := rec.Result()
@@ -15,11 +15,11 @@ import (
 func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) {
 	// HTTP endpoints
 	for funcName, rpcFunc := range funcMap {
-		mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, logger))
+		mux.HandleFunc("/"+funcName, ensureBodyClose(makeHTTPHandler(rpcFunc, logger)))
 	}
 
 	// JSONRPC endpoints
-	mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)))
+	mux.HandleFunc("/", ensureBodyClose(handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))))
 }
 
 // Function introspection
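ensureBodyClose is an ordinary middleware: every handler registered through RegisterRPCFuncs now closes the request body when it returns, whether or not the handler read it. A small test sketch of that contract (the recorder type is mine, and the test assumes it sits in the same package so the unexported middleware is reachable):

```go
package rpcserver

import (
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

// closeRecorder is a hypothetical helper that records whether Close was called.
type closeRecorder struct {
	io.Reader
	closed bool
}

func (c *closeRecorder) Close() error { c.closed = true; return nil }

func TestEnsureBodyClose(t *testing.T) {
	body := &closeRecorder{Reader: strings.NewReader(`{}`)}

	h := ensureBodyClose(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK) // handler deliberately ignores the body
	})

	h(httptest.NewRecorder(), httptest.NewRequest("POST", "http://localhost/", body))

	if !body.closed {
		t.Fatal("expected the middleware to close the request body")
	}
}
```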
@@ -1,3 +1,15 @@
 #!/bin/sh
+#
+# Invoke Mockery v2 to update generated mocks for the given type.
+#
+# This script runs a locally-installed "mockery" if available, otherwise it
+# runs the published Docker container. This legerdemain is so that the CI build
+# and a local build can work off the same script.
+#
+if ! which mockery ; then
+  mockery() {
+    docker run --rm -v "$PWD":/w --workdir=/w vektra/mockery:v2.12.3
+  }
+fi
 
-go run github.com/vektra/mockery/v2 --disable-version-string --case underscore --name $*
+mockery --disable-version-string --case underscore --name "$@"
@@ -14,17 +14,19 @@ var (
 	// testnetCombinations defines global testnet options, where we generate a
 	// separate testnet for each combination (Cartesian product) of options.
 	testnetCombinations = map[string][]interface{}{
-		"topology":      {"single", "quad", "large"},
-		"p2p":           {NewP2PMode, LegacyP2PMode, HybridP2PMode},
-		"queueType":     {"priority"}, // "fifo", "wdrr"
-		"initialHeight": {0, 1000},
+		"topology":  {"single", "quad", "large"},
+		"p2p":       {NewP2PMode, LegacyP2PMode, HybridP2PMode},
+		"queueType": {"priority"}, // "fifo", "wdrr"
 		"initialState": {
 			map[string]string{},
 			map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"},
 		},
 		"validators": {"genesis", "initchain"},
+		"txSize":     {1024, 2048, 4096, 8192},
 	}
 
+	initalHeights = uniformChoice{0, 1000}
+
 	// The following specify randomly chosen values for testnet nodes.
 	nodeDatabases = weightedChoice{
 		"goleveldb": 35,
@@ -45,8 +47,6 @@ var (
 		"tcp":  20,
 		"unix": 10,
 	}
-	// FIXME: v2 disabled due to flake
-	nodeBlockSyncs = uniformChoice{"v0"} // "v2"
 	nodeMempools   = uniformChoice{"v0", "v1"}
 	nodeStateSyncs = weightedChoice{
 		e2e.StateSyncDisabled: 10,
@@ -63,7 +63,6 @@ var (
 		"restart": 0.1,
 	}
 	evidence = uniformChoice{0, 1, 10}
-	txSize   = uniformChoice{1024, 4096} // either 1kb or 4kb
 	ipv6     = uniformChoice{false, true}
 	keyType  = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1}
 )
@@ -99,12 +98,21 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) {
 			if opt["p2p"] == HybridP2PMode {
 				continue
 			}
+			if opt["txSize"].(int) != 2048 {
+				continue
+			}
 		}
 
 		if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize {
 			continue
 		}
 
+		if len(manifest.Nodes) > 4 {
+			if opt["txSize"].(int) == 8192 {
+				continue
+			}
+		}
+
 		if opt["p2p"] == HybridP2PMode {
 			numLegacy := 0
 			for _, n := range manifest.Nodes {
@@ -150,7 +158,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
 	manifest := e2e.Manifest{
 		IPv6:             ipv6.Choose(r).(bool),
 		ABCIProtocol:     nodeABCIProtocols.Choose(r),
-		InitialHeight:    int64(opt["initialHeight"].(int)),
+		InitialHeight:    int64(initalHeights.Choose(r).(int)),
 		InitialState:     opt["initialState"].(map[string]string),
 		Validators:       &map[string]int64{},
 		ValidatorUpdates: map[string]map[string]int64{},
@@ -158,7 +166,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
 		KeyType:   keyType.Choose(r).(string),
 		Evidence:  evidence.Choose(r).(int),
 		QueueType: opt["queueType"].(string),
-		TxSize:    txSize.Choose(r).(int),
+		TxSize:    opt["txSize"].(int),
 	}
 
 	p2pMode := opt["p2p"].(P2PMode)
@@ -387,7 +395,7 @@ func generateNode(
 		StartAt:          startAt,
 		Database:         nodeDatabases.Choose(r),
 		PrivvalProtocol:  nodePrivvalProtocols.Choose(r),
-		BlockSync:        nodeBlockSyncs.Choose(r).(string),
+		BlockSync:        "v0",
 		Mempool:          nodeMempools.Choose(r).(string),
 		StateSync:        e2e.StateSyncDisabled,
 		PersistInterval:  ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))),
@@ -43,7 +43,6 @@ persist_interval = 0
 perturb = ["restart"]
 privval_protocol = "tcp"
 seeds = ["seed01"]
-block_sync = "v0"
 
 [node.validator03]
 database = "badgerdb"
@@ -52,7 +51,6 @@ abci_protocol = "grpc"
 persist_interval = 3
 perturb = ["kill"]
 privval_protocol = "grpc"
-block_sync = "v0"
 retain_blocks = 10
 
 [node.validator04]
@@ -61,11 +59,9 @@ snapshot_interval = 5
 database = "rocksdb"
 persistent_peers = ["validator01"]
 perturb = ["pause"]
-block_sync = "v0"
 
 [node.validator05]
 database = "cleveldb"
-block_sync = "v0"
 state_sync = "p2p"
 seeds = ["seed01"]
 start_at = 1005 # Becomes part of the validator set at 1010
@@ -76,7 +72,6 @@ privval_protocol = "tcp"
 [node.full01]
 mode = "full"
 start_at = 1010
-block_sync = "v0"
 persistent_peers = ["validator01", "validator02", "validator03", "validator04"]
 perturb = ["restart"]
 retain_blocks = 10
@@ -178,7 +178,7 @@ func LoadTestnet(file string) (*Testnet, error) {
 			ABCIProtocol:     Protocol(testnet.ABCIProtocol),
 			PrivvalProtocol:  ProtocolFile,
 			StartAt:          nodeManifest.StartAt,
-			BlockSync:        nodeManifest.BlockSync,
+			BlockSync:        "v0",
 			Mempool:          nodeManifest.Mempool,
 			StateSync:        nodeManifest.StateSync,
 			PersistInterval:  1,
@@ -22,7 +22,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty
 		clients         = map[string]*rpchttp.HTTP{}
 		lastHeight      int64
 		lastIncrease    = time.Now()
-		nodesAtHeight   = map[string]struct{}{}
+		nodesAtHeight   = map[string]int64{}
 		numRunningNodes int
 	)
 	if height == 0 {
@@ -86,7 +86,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty
 
 				// add this node to the set of target
 				// height nodes
-				nodesAtHeight[node.Name] = struct{}{}
+				nodesAtHeight[node.Name] = result.SyncInfo.LatestBlockHeight
 
 				// if not all of the nodes that we
 				// have clients for have reached the
@@ -111,7 +111,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty
 		if len(clients) == 0 {
 			return nil, nil, errors.New("unable to connect to any network nodes")
 		}
-		if time.Since(lastIncrease) >= time.Minute {
+		if time.Since(lastIncrease) >= 90*time.Second {
 			if lastHeight == 0 {
 				return nil, nil, errors.New("chain stalled at unknown height (most likely upon starting)")
 			}
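Two behavioral tweaks here: nodesAtHeight now records each node's latest height rather than a bare set entry, so a stalled run can report who is lagging, and the stall window grows from one minute to 90 seconds. The stall check reduced to its essentials (an illustrative sketch, not the repo's API):

```go
package main

import (
	"fmt"
	"time"
)

// stallDetector errors out when the observed height stops increasing
// for longer than the window (90s after this change).
type stallDetector struct {
	window       time.Duration
	lastHeight   int64
	lastIncrease time.Time
}

func (s *stallDetector) observe(height int64, now time.Time) error {
	if height > s.lastHeight {
		s.lastHeight, s.lastIncrease = height, now
		return nil
	}
	if now.Sub(s.lastIncrease) >= s.window {
		return fmt.Errorf("chain stalled at height %d", s.lastHeight)
	}
	return nil
}

func main() {
	d := &stallDetector{window: 90 * time.Second, lastIncrease: time.Now()}
	fmt.Println(d.observe(1, time.Now())) // <nil>
}
```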
@@ -15,5 +15,5 @@ func Test(testnet *e2e.Testnet) error {
 		return err
 	}
 
-	return execVerbose("./build/tests", "-test.count=1", "-test.v")
+	return execVerbose("./build/tests", "-test.count=1")
 }
@@ -6,37 +6,48 @@ import (
 	"github.com/stretchr/testify/require"
 
 	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
+	"github.com/tendermint/tendermint/types"
 )
 
 // Tests that all nodes have peered with each other, regardless of discovery method.
 func TestNet_Peers(t *testing.T) {
-	// FIXME Skip test since nodes aren't always able to fully mesh
-	t.SkipNow()
-
 	testNode(t, func(t *testing.T, node e2e.Node) {
 		client, err := node.Client()
 		require.NoError(t, err)
 		netInfo, err := client.NetInfo(ctx)
 		require.NoError(t, err)
 
-		require.Equal(t, len(node.Testnet.Nodes)-1, netInfo.NPeers,
-			"node is not fully meshed with peers")
-
+		// FIXME: https://github.com/tendermint/tendermint/issues/8848
+		// We should be able to assert that we can discover all peers in a network
+		expectedPeers := len(node.Testnet.Nodes)
+		peers := make(map[string]*e2e.Node, 0)
 		seen := map[string]bool{}
 		for _, n := range node.Testnet.Nodes {
-			seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself
-		}
-		for _, peerInfo := range netInfo.Peers {
-			id := peerInfo.ID
-			peer := node.Testnet.LookupNode(string(id))
-			require.NotNil(t, peer, "unknown node %v", id)
-			require.Contains(t, peerInfo.URL, peer.IP.String(),
-				"unexpected IP address for peer %v", id)
-			seen[string(id)] = true
+			// we never save light client addresses as they use RPC or ourselves
+			if n.Mode == e2e.ModeLight || n.Name == node.Name {
+				expectedPeers--
+				continue
+			}
+			peers[string(types.NodeIDFromPubKey(n.NodeKey.PubKey()))] = n
+			seen[n.Name] = false
 		}
 
-		for name := range seen {
-			require.True(t, seen[name], "node %v not peered with %v", node.Name, name)
+		require.GreaterOrEqual(t, netInfo.NPeers, expectedPeers-1,
+			"node is not fully meshed with peers")
+
+		for _, peerInfo := range netInfo.Peers {
+			id := string(peerInfo.ID)
+			peer, ok := peers[id]
+			require.True(t, ok, "unknown node %v", id)
+			require.Contains(t, peerInfo.URL, peer.IP.String(),
+				"unexpected IP address for peer %v", id)
+			seen[peer.Name] = true
 		}
+
+		// FIXME: https://github.com/tendermint/tendermint/issues/8848
+		// We should be able to assert that we can discover all peers in a network
+		// for name := range seen {
+		// 	require.True(t, seen[name], "node %v not peered with %v", node.Name, name)
+		// }
 	})
 }
@@ -7,7 +7,8 @@ import (
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/internal/mempool"
-	mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0"
+	mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 var mp mempool.Mempool
@@ -24,7 +25,7 @@ func init() {
 	cfg := config.DefaultMempoolConfig()
 	cfg.Broadcast = false
 
-	mp = mempoolv1.NewCListMempool(cfg, appConnMem, 0)
+	mp = mempoolv1.NewTxMempool(log.NewNopLogger(), cfg, appConnMem, 0)
 }
 
 func Fuzz(data []byte) int {
@@ -10,7 +10,7 @@ const (
 
 	// TMVersionDefault is the used as the fallback version of Tendermint Core
 	// when not using git describe. It is formatted with semantic versioning.
-	TMVersionDefault = "0.35.6"
+	TMVersionDefault = "0.35.7"
 
 	// ABCISemVer is the semantic version of the ABCI library
 	ABCISemVer = "0.17.0"