Mirror of https://github.com/tendermint/tendermint.git
Synced 2026-01-14 00:32:52 +00:00

Compare commits: cal/node-c ... wb/issue-9 (18 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 06050e4324 |  |
|  | 4fbfea79ad |  |
|  | 241be64da5 |  |
|  | c755d03611 |  |
|  | 7a2e44184b |  |
|  | 77e7318613 |  |
|  | caa75ae791 |  |
|  | f9307cac51 |  |
|  | 38d1b2f873 |  |
|  | e4fb662c8d |  |
|  | 810b9c613b |  |
|  | 341cabec0e |  |
|  | 5b98095ac3 |  |
|  | 59b28e71a0 |  |
|  | 071d787a45 |  |
|  | a54a424478 |  |
|  | c961fb58eb |  |
|  | c7140bf817 |  |
.github/dependabot.yml (vendored): 12 changed lines

@@ -53,10 +53,10 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: daily
+      interval: weekly
     target-branch: "v0.37.x"
-    # Only allow automated security-related dependency updates on release
-    # branches.
+    # Only allow automated security-related dependency updates until we cut the
+    # final v0.37.0 release.
     open-pull-requests-limit: 0
     labels:
       - T:dependencies

@@ -65,11 +65,9 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: daily
+      interval: weekly
     target-branch: "v0.34.x"
-    # Only allow automated security-related dependency updates on release
-    # branches.
-    open-pull-requests-limit: 0
+    open-pull-requests-limit: 10
     labels:
       - T:dependencies
       - S:automerge
.github/workflows/docker.yml (vendored): 2 changed lines

@@ -41,7 +41,7 @@ jobs:
         platforms: all

       - name: Set up Docker Build
-        uses: docker/setup-buildx-action@v2.2.1
+        uses: docker/setup-buildx-action@v2.1.0

       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
.github/workflows/gosec.yml (vendored, new file): 41 lines

@@ -0,0 +1,41 @@
+name: Run Gosec
+on:
+  pull_request:
+    paths:
+      - '**/*.go'
+      - 'go.mod'
+      - 'go.sum'
+  push:
+    branches:
+      - main
+      - 'feature/*'
+      - 'v0.37.x'
+      - 'v0.34.x'
+    paths:
+      - '**/*.go'
+      - 'go.mod'
+      - 'go.sum'
+
+jobs:
+  Gosec:
+    permissions:
+      security-events: write
+
+    runs-on: ubuntu-latest
+    env:
+      GO111MODULE: on
+    steps:
+      - name: Checkout Source
+        uses: actions/checkout@v3
+
+      - name: Run Gosec Security Scanner
+        uses: cosmos/gosec@master
+        with:
+          # Let the report trigger a failure with the Github Security scanner features.
+          args: "-no-fail -fmt sarif -out results.sarif ./..."
+
+      - name: Upload SARIF file
+        uses: github/codeql-action/upload-sarif@v2
+        with:
+          # Path to SARIF file relative to the root of the repository
+          sarif_file: results.sarif
.github/workflows/lint.yml (vendored): 5 changed lines

@@ -31,7 +31,10 @@ jobs:
             go.sum
       - uses: golangci/golangci-lint-action@v3
         with:
-          version: v1.50.1
+          # Required: the version of golangci-lint is required and
+          # must be specified without patch version: we always use the
+          # latest patch version.
+          version: v1.47.3
           args: --timeout 10m
           github-token: ${{ secrets.github_token }}
         if: env.GIT_DIFF
.github/workflows/markdown-links.yml (vendored): 19 changed lines

@@ -1,20 +1,23 @@
 name: Check Markdown links

 on:
-  schedule:
-    # 2am UTC daily
-    - cron: '0 2 * * *'
+  push:
+    branches:
+      - main
+  pull_request:
+    branches: [main]

 jobs:
   markdown-link-check:
-    strategy:
-      matrix:
-        branch: ['main', 'v0.37.x', 'v0.34.x']
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-        with:
-          ref: ${{ matrix.branch }}
-      - uses: informalsystems/github-action-markdown-link-check@main
+      - uses: technote-space/get-diff-action@v6
+        with:
+          PATTERNS: |
+            **/**.md
+      - uses: creachadair/github-action-markdown-link-check@master
         with:
           check-modified-files-only: 'yes'
           config-file: '.md-link-check.json'
+        if: env.GIT_DIFF
.github/workflows/pre-release.yml (vendored): 27 changed lines

@@ -8,7 +8,7 @@ on:
       - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10

 jobs:
-  prerelease:
+  goreleaser:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout

@@ -38,28 +38,3 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-  prerelease-success:
-    needs: prerelease
-    if: ${{ success() }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Notify Slack upon pre-release
-        uses: slackapi/slack-github-action@v1.22.0
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
-          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
-        with:
-          payload: |
-            {
-              "blocks": [
-                {
-                  "type": "section",
-                  "text": {
-                    "type": "mrkdwn",
-                    "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
-                  }
-                }
-              ]
-            }
.github/workflows/proto-lint.yml (vendored): 2 changed lines

@@ -15,7 +15,7 @@ jobs:
     timeout-minutes: 5
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.9.0
+      - uses: bufbuild/buf-setup-action@v1.8.0
       - uses: bufbuild/buf-lint-action@v1
         with:
          input: 'proto'
.github/workflows/release.yml (vendored): 27 changed lines

@@ -6,7 +6,7 @@ on:
       - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

 jobs:
-  release:
+  goreleaser:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout

@@ -35,28 +35,3 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-  release-success:
-    needs: release
-    if: ${{ success() }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Notify Slack upon release
-        uses: slackapi/slack-github-action@v1.22.0
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
-          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
-        with:
-          payload: |
-            {
-              "blocks": [
-                {
-                  "type": "section",
-                  "text": {
-                    "type": "mrkdwn",
-                    "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
-                  }
-                }
-              ]
-            }
@@ -2,6 +2,7 @@ linters:
 enable:
   - asciicheck
   - bodyclose
+  - deadcode
   - depguard
   - dogsled
   - dupl

@@ -25,6 +26,7 @@ linters:
   - typecheck
   - unconvert
   - unused
+  - varcheck

 issues:
   exclude-rules:
@@ -2,16 +2,5 @@
   "retryOn429": true,
   "retryCount": 5,
   "fallbackRetryDelay": "30s",
-  "aliveStatusCodes": [200, 206, 503],
-  "httpHeaders": [
-    {
-      "urls": [
-        "https://docs.github.com/",
-        "https://help.github.com/"
-      ],
-      "headers": {
-        "Accept-Encoding": "zstd, br, gzip, deflate"
-      }
-    }
-  ]
+  "aliveStatusCodes": [200, 206, 503]
 }
@@ -11,7 +11,6 @@
 - P2P Protocol

 - Go API
-  - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)

 - Blockchain Protocol

@@ -1,30 +0,0 @@
-// Code generated by metricsgen. DO NOT EDIT.
-
-package blocksync
-
-import (
-    "github.com/go-kit/kit/metrics/discard"
-    prometheus "github.com/go-kit/kit/metrics/prometheus"
-    stdprometheus "github.com/prometheus/client_golang/prometheus"
-)
-
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
-    labels := []string{}
-    for i := 0; i < len(labelsAndValues); i += 2 {
-        labels = append(labels, labelsAndValues[i])
-    }
-    return &Metrics{
-        Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
-            Namespace: namespace,
-            Subsystem: MetricsSubsystem,
-            Name:      "syncing",
-            Help:      "Whether or not a node is block syncing. 1 if yes, 0 if no.",
-        }, labels).With(labelsAndValues...),
-    }
-}
-
-func NopMetrics() *Metrics {
-    return &Metrics{
-        Syncing: discard.NewGauge(),
-    }
-}
@@ -1,19 +0,0 @@
-package blocksync
-
-import (
-    "github.com/go-kit/kit/metrics"
-)
-
-const (
-    // MetricsSubsystem is a subsystem shared by all metrics exposed by this
-    // package.
-    MetricsSubsystem = "blocksync"
-)
-
-//go:generate go run ../scripts/metricsgen -struct=Metrics
-
-// Metrics contains metrics exposed by this package.
-type Metrics struct {
-    // Whether or not a node is block syncing. 1 if yes, 0 if no.
-    Syncing metrics.Gauge
-}
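Both deleted files follow metricsgen's convention of passing label names and values as one flat variadic list. A self-contained sketch of just that pairing logic; the label pair used in main() is an invented example, not taken from this diff:

```go
package main

import "fmt"

// pairLabels mirrors the loop in the removed PrometheusMetrics constructor:
// even-indexed entries of labelsAndValues are label names, odd-indexed
// entries are the corresponding values.
func pairLabels(labelsAndValues ...string) []string {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return labels
}

func main() {
	// Prints [chain_id version]: only the names survive as label keys.
	fmt.Println(pairLabels("chain_id", "test-chain", "version", "v0.37"))
}
```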
@@ -19,6 +19,58 @@ const (
     BlockResponseMessageFieldKeySize
 )

+// EncodeMsg encodes a Protobuf message
+func EncodeMsg(pb proto.Message) ([]byte, error) {
+    msg := bcproto.Message{}
+
+    switch pb := pb.(type) {
+    case *bcproto.BlockRequest:
+        msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
+    case *bcproto.BlockResponse:
+        msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
+    case *bcproto.NoBlockResponse:
+        msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
+    case *bcproto.StatusRequest:
+        msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
+    case *bcproto.StatusResponse:
+        msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
+    default:
+        return nil, fmt.Errorf("unknown message type %T", pb)
+    }
+
+    bz, err := proto.Marshal(&msg)
+    if err != nil {
+        return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
+    }
+
+    return bz, nil
+}
+
+// DecodeMsg decodes a Protobuf message.
+func DecodeMsg(bz []byte) (proto.Message, error) {
+    pb := &bcproto.Message{}
+
+    err := proto.Unmarshal(bz, pb)
+    if err != nil {
+        return nil, err
+    }
+
+    switch msg := pb.Sum.(type) {
+    case *bcproto.Message_BlockRequest:
+        return msg.BlockRequest, nil
+    case *bcproto.Message_BlockResponse:
+        return msg.BlockResponse, nil
+    case *bcproto.Message_NoBlockResponse:
+        return msg.NoBlockResponse, nil
+    case *bcproto.Message_StatusRequest:
+        return msg.StatusRequest, nil
+    case *bcproto.Message_StatusResponse:
+        return msg.StatusResponse, nil
+    default:
+        return nil, fmt.Errorf("unknown message type %T", msg)
+    }
+}
+
 // ValidateMsg validates a message.
 func ValidateMsg(pb proto.Message) error {
     if pb == nil {
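The added helpers wrap and unwrap the bcproto.Message oneof. A minimal round trip; the import paths assume this branch's package layout, and the Height value is illustrative:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/blocksync"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
)

func main() {
	// EncodeMsg wraps the concrete request in the bcproto.Message oneof
	// and marshals it to wire bytes.
	bz, err := blocksync.EncodeMsg(&bcproto.BlockRequest{Height: 42})
	if err != nil {
		panic(err)
	}

	// DecodeMsg unmarshals and unwraps back to the concrete proto type.
	msg, err := blocksync.DecodeMsg(bz)
	if err != nil {
		panic(err)
	}
	req := msg.(*bcproto.BlockRequest)
	fmt.Println(req.Height) // 42
}
```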
@@ -99,9 +99,6 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
 // OnStart implements service.Service by spawning requesters routine and recording
 // pool's start time.
 func (pool *BlockPool) OnStart() error {
-    if pool.height == 0 {
-        return errors.New("height not set")
-    }
     go pool.makeRequestersRoutine()
     pool.startTime = time.Now()
     return nil

@@ -114,19 +111,22 @@ func (pool *BlockPool) makeRequestersRoutine() {
             break
         }

-        height, maxPeerHeight, numPending, lenRequesters := pool.GetStatus()
-        if height >= maxPeerHeight ||
-            numPending >= maxPendingRequests ||
-            lenRequesters >= maxTotalRequesters {
+        _, numPending, lenRequesters := pool.GetStatus()
+        switch {
+        case numPending >= maxPendingRequests:
             // sleep for a bit.
             time.Sleep(requestIntervalMS * time.Millisecond)
             // check for timed out peers
             pool.removeTimedoutPeers()
-            continue
-        }
-
-        // request for more blocks.
-        pool.makeNextRequester()
+        case lenRequesters >= maxTotalRequesters:
+            // sleep for a bit.
+            time.Sleep(requestIntervalMS * time.Millisecond)
+            // check for timed out peers
+            pool.removeTimedoutPeers()
+        default:
+            // request for more blocks.
+            pool.makeNextRequester()
+        }
     }
 }

@@ -156,11 +156,11 @@ func (pool *BlockPool) removeTimedoutPeers() {

 // GetStatus returns pool's height, numPending requests and the number of
 // requesters.
-func (pool *BlockPool) GetStatus() (height, maxPeerHeight int64, numPending int32, lenRequesters int) {
+func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
     pool.mtx.Lock()
     defer pool.mtx.Unlock()

-    return pool.height, pool.maxPeerHeight, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
+    return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
 }

 // IsCaughtUp returns true if this node is caught up, false - otherwise.

@@ -302,7 +302,6 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
     }

     if height > pool.maxPeerHeight {
-        pool.Logger.Info("new max peer height", "height", height)
         pool.maxPeerHeight = height
     }
 }

@@ -389,7 +388,7 @@ func (pool *BlockPool) makeNextRequester() {

     err := request.Start()
     if err != nil {
-        pool.Logger.Error("Error starting request", "err", err)
+        request.Logger.Error("Error starting request", "err", err)
     }
 }

@@ -32,7 +32,7 @@ const (
 type consensusReactor interface {
     // for when we switch from blocksync reactor and block sync to
     // the consensus machine
-    SwitchToConsensus(state sm.State, skipWAL bool) error
+    SwitchToConsensus(state sm.State, skipWAL bool)
 }

 type peerError struct {

@@ -54,15 +54,16 @@ type Reactor struct {
     blockExec *sm.BlockExecutor
     store     *store.BlockStore
     pool      *BlockPool
+    blockSync bool

     requestsCh <-chan BlockRequest
     errorsCh   <-chan peerError
-
-    metrics *Metrics
 }

 // NewReactor returns new reactor instance.
-func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, metrics *Metrics) *Reactor {
+func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
+    blockSync bool) *Reactor {
+
     if state.LastBlockHeight != store.Height() {
         panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
             store.Height()))

@@ -84,9 +85,9 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
         blockExec:  blockExec,
         store:      store,
         pool:       pool,
+        blockSync:  blockSync,
         requestsCh: requestsCh,
         errorsCh:   errorsCh,
-        metrics:    metrics,
     }
     bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR)
     return bcR

@@ -100,22 +101,22 @@ func (bcR *Reactor) SetLogger(l log.Logger) {

 // OnStart implements service.Service.
 func (bcR *Reactor) OnStart() error {
-    err := bcR.pool.Start()
-    if err != nil {
-        return err
-    }
-    go bcR.poolRoutine(false)
+    if bcR.blockSync {
+        err := bcR.pool.Start()
+        if err != nil {
+            return err
+        }
+        go bcR.poolRoutine(false)
+    }
     return nil
 }

-// IsSyncing returns whether the node is using blocksync to advance heights
-func (bcR *Reactor) IsSyncing() bool {
-    return bcR.pool.IsRunning()
-}
-
 // SwitchToBlockSync is called by the state sync reactor when switching to block sync.
 func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {
+    bcR.blockSync = true
     bcR.initialState = state
-    if state.LastBlockHeight == 0 {
-        bcR.pool.height = state.InitialHeight
-    } else {
-        bcR.pool.height = state.LastBlockHeight + 1
-    }
+
+    bcR.pool.height = state.LastBlockHeight + 1
     err := bcR.pool.Start()
     if err != nil {
         return err

@@ -126,7 +127,7 @@ func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {

 // OnStop implements service.Service.
 func (bcR *Reactor) OnStop() {
-    if bcR.pool.IsRunning() {
+    if bcR.blockSync {
         if err := bcR.pool.Stop(); err != nil {
             bcR.Logger.Error("Error stopping pool", "err", err)
         }

@@ -142,20 +143,21 @@ func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
             SendQueueCapacity:   1000,
             RecvBufferCapacity:  50 * 4096,
             RecvMessageCapacity: MaxMsgSize,
-            MessageType:         &bcproto.Message{},
         },
     }
 }

 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *Reactor) AddPeer(peer p2p.Peer) {
-    peer.Send(p2p.Envelope{
-        ChannelID: BlocksyncChannel,
-        Message: &bcproto.StatusResponse{
-            Base:   bcR.store.Base(),
-            Height: bcR.store.Height(),
-        },
-    })
+    msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
+        Base:   bcR.store.Base(),
+        Height: bcR.store.Height()})
+    if err != nil {
+        bcR.Logger.Error("could not convert msg to protobuf", "err", err)
+        return
+    }
+
+    peer.Send(BlocksyncChannel, msgBytes)
     // it's OK if send fails. will try later in poolRoutine

     // peer is added to the pool once we receive the first

@@ -180,53 +182,69 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest,
             return false
         }

-        return src.TrySend(p2p.Envelope{
-            ChannelID: BlocksyncChannel,
-            Message:   &bcproto.BlockResponse{Block: bl},
-        })
+        msgBytes, err := EncodeMsg(&bcproto.BlockResponse{Block: bl})
+        if err != nil {
+            bcR.Logger.Error("could not marshal msg", "err", err)
+            return false
+        }
+
+        return src.TrySend(BlocksyncChannel, msgBytes)
     }

     bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
-    return src.TrySend(p2p.Envelope{
-        ChannelID: BlocksyncChannel,
-        Message:   &bcproto.NoBlockResponse{Height: msg.Height},
-    })
+
+    msgBytes, err := EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
+    if err != nil {
+        bcR.Logger.Error("could not convert msg to protobuf", "err", err)
+        return false
+    }
+
+    return src.TrySend(BlocksyncChannel, msgBytes)
 }

 // Receive implements Reactor by handling 4 types of messages (look below).
-func (bcR *Reactor) Receive(e p2p.Envelope) {
-    if err := ValidateMsg(e.Message); err != nil {
-        bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
-        bcR.Switch.StopPeerForError(e.Src, err)
+func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+    msg, err := DecodeMsg(msgBytes)
+    if err != nil {
+        bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
+        bcR.Switch.StopPeerForError(src, err)
         return
     }

-    bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
+    if err = ValidateMsg(msg); err != nil {
+        bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
+        bcR.Switch.StopPeerForError(src, err)
+        return
+    }

-    switch msg := e.Message.(type) {
+    bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
+
+    switch msg := msg.(type) {
     case *bcproto.BlockRequest:
-        bcR.respondToPeer(msg, e.Src)
+        bcR.respondToPeer(msg, src)
     case *bcproto.BlockResponse:
         bi, err := types.BlockFromProto(msg.Block)
         if err != nil {
             bcR.Logger.Error("Block content is invalid", "err", err)
             return
         }
-        bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
+        bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
     case *bcproto.StatusRequest:
         // Send peer our state.
-        e.Src.TrySend(p2p.Envelope{
-            ChannelID: BlocksyncChannel,
-            Message: &bcproto.StatusResponse{
-                Height: bcR.store.Height(),
-                Base:   bcR.store.Base(),
-            },
+        msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
+            Height: bcR.store.Height(),
+            Base:   bcR.store.Base(),
         })
+        if err != nil {
+            bcR.Logger.Error("could not convert msg to protobut", "err", err)
+            return
+        }
+        src.TrySend(BlocksyncChannel, msgBytes)
     case *bcproto.StatusResponse:
         // Got a peer status. Unverified.
-        bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
+        bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
     case *bcproto.NoBlockResponse:
-        bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
+        bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
     default:
         bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
     }

@@ -235,8 +253,6 @@ func (bcR *Reactor) Receive(e p2p.Envelope) {
 // Handle messages from the poolReactor telling the reactor what to do.
 // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
 func (bcR *Reactor) poolRoutine(stateSynced bool) {
-    bcR.metrics.Syncing.Set(1)
-    defer bcR.metrics.Syncing.Set(0)
-
     trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
     defer trySyncTicker.Stop()

@@ -269,10 +285,13 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {
                 if peer == nil {
                     continue
                 }
-                queued := peer.TrySend(p2p.Envelope{
-                    ChannelID: BlocksyncChannel,
-                    Message:   &bcproto.BlockRequest{Height: request.Height},
-                })
+                msgBytes, err := EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
+                if err != nil {
+                    bcR.Logger.Error("could not convert msg to proto", "err", err)
+                    continue
+                }
+
+                queued := peer.TrySend(BlocksyncChannel, msgBytes)
                 if !queued {
                     bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
                 }

@@ -284,7 +303,7 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {

             case <-statusUpdateTicker.C:
                 // ask for status updates
-                go bcR.BroadcastStatusRequest()
+                go bcR.BroadcastStatusRequest() //nolint: errcheck

             }
         }

@@ -294,28 +313,24 @@ FOR_LOOP:
     for {
         select {
         case <-switchToConsensusTicker.C:
-            height, peerHeight, numPending, lenRequesters := bcR.pool.GetStatus()
+            height, numPending, lenRequesters := bcR.pool.GetStatus()
             outbound, inbound, _ := bcR.Switch.NumPeers()
             bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
-                "outbound", outbound, "inbound", inbound, "peerHeight", peerHeight)
+                "outbound", outbound, "inbound", inbound)
             if bcR.pool.IsCaughtUp() {
                 bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
                 if err := bcR.pool.Stop(); err != nil {
                     bcR.Logger.Error("Error stopping pool", "err", err)
                 }
-
-                // TODO: node struct should be responsible for switching from block sync to
-                // consensus. It's messy to have to grab the consensus reactor from the switch
                 conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
                 if ok {
-                    err := conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
-                    bcR.Logger.Error("failed to switch to consensus", "err", err)
+                    conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
                 }
+                // else {
+                // should only happen during testing
+                // }

-                return
+                break FOR_LOOP
             }

         case <-trySyncTicker.C: // chan time

@@ -414,9 +429,14 @@ FOR_LOOP:
 }

 // BroadcastStatusRequest broadcasts `BlockStore` base and height.
-func (bcR *Reactor) BroadcastStatusRequest() {
-    bcR.Switch.Broadcast(p2p.Envelope{
-        ChannelID: BlocksyncChannel,
-        Message:   &bcproto.StatusRequest{},
-    })
+func (bcR *Reactor) BroadcastStatusRequest() error {
+    bm, err := EncodeMsg(&bcproto.StatusRequest{})
+    if err != nil {
+        bcR.Logger.Error("could not convert msg to proto", "err", err)
+        return fmt.Errorf("could not convert msg to proto: %w", err)
+    }
+
+    bcR.Switch.Broadcast(BlocksyncChannel, bm)
+
+    return nil
 }

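Every handler above follows the same decode, validate, dispatch shape. A self-contained sketch of that control flow with stand-in types; the type names echo the real bcproto messages, but these definitions are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the generated bcproto message types used in the diff.
type BlockRequest struct{ Height int64 }
type StatusRequest struct{}

// dispatch mirrors the shape of Reactor.Receive after this change:
// validate the decoded message, then switch on its concrete type.
func dispatch(msg interface{}) error {
	if msg == nil {
		return errors.New("nil message")
	}
	switch m := msg.(type) {
	case *BlockRequest:
		fmt.Println("respond with block at height", m.Height)
	case *StatusRequest:
		fmt.Println("respond with store base and height")
	default:
		return fmt.Errorf("unknown message type %T", m)
	}
	return nil
}

func main() {
	_ = dispatch(&BlockRequest{Height: 42})
	_ = dispatch(&StatusRequest{})
}
```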
@@ -95,6 +95,14 @@ func newReactor(
         mock.Anything,
         mock.Anything).Return(nil)

+    // Make the Reactor itself.
+    // NOTE we have to create and commit the blocks first because
+    // pool.height is determined from the store.
+    fastSync := true
+    db := dbm.NewMemDB()
+    stateStore = sm.NewStore(db, sm.StoreOptions{
+        DiscardABCIResponses: false,
+    })
     blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
         mp, sm.EmptyEvidencePool{}, blockStore)
     if err = stateStore.Save(state); err != nil {

@@ -137,7 +145,7 @@ func newReactor(
         blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
     }

-    bcReactor := NewReactor(state.Copy(), blockExec, blockStore, NopMetrics())
+    bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync)
     bcReactor.SetLogger(logger.With("module", "blocksync"))

     return ReactorPair{bcReactor, proxyApp}

@@ -148,9 +156,6 @@ func TestNoBlockResponse(t *testing.T) {
     defer os.RemoveAll(config.RootDir)
     genDoc, privVals := randGenesisDoc(1, false, 30)

-    state, err := sm.MakeGenesisState(genDoc)
-    require.NoError(t, err)
-
     maxBlockHeight := int64(65)

     reactorPairs := make([]ReactorPair, 2)

@@ -164,12 +169,6 @@ func TestNoBlockResponse(t *testing.T) {

     }, p2p.Connect2Switches)

-    for _, reactor := range reactorPairs {
-        // turn on the syncing algorithm
-        err := reactor.reactor.SwitchToBlockSync(state)
-        require.NoError(t, err)
-    }
-
     defer func() {
         for _, r := range reactorPairs {
             err := r.reactor.Stop()

@@ -219,9 +218,6 @@ func TestBadBlockStopsPeer(t *testing.T) {
     defer os.RemoveAll(config.RootDir)
     genDoc, privVals := randGenesisDoc(1, false, 30)

-    state, err := sm.MakeGenesisState(genDoc)
-    require.NoError(t, err)
-
     maxBlockHeight := int64(148)

     // Other chain needs a different validator set

@@ -248,12 +244,6 @@ func TestBadBlockStopsPeer(t *testing.T) {

     }, p2p.Connect2Switches)

-    for _, reactor := range reactorPairs {
-        // turn on the syncing algorithm
-        err := reactor.reactor.SwitchToBlockSync(state)
-        require.NoError(t, err)
-    }
-
     defer func() {
         for _, r := range reactorPairs {
             err := r.reactor.Stop()

@@ -297,11 +287,6 @@ func TestBadBlockStopsPeer(t *testing.T) {
         p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
     }

-    otherState, err := sm.MakeGenesisState(otherGenDoc)
-    require.NoError(t, err)
-    err = lastReactorPair.reactor.SwitchToBlockSync(otherState)
-    require.NoError(t, err)
-
     for {
         if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
             break

@@ -67,8 +67,7 @@ func copyConfig(home, dir string) error {

 func dumpProfile(dir, addr, profile string, debug int) error {
     endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

-    //nolint:all
-    resp, err := http.Get(endpoint)
+    resp, err := http.Get(endpoint) //nolint: gosec
     if err != nil {
         return fmt.Errorf("failed to query for %s profile: %w", profile, err)
     }

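dumpProfile is a plain HTTP GET against the node's pprof listener (enabled via pprof_laddr in the RPC config, per the config.go hunks below). A standalone sketch of the same fetch; the address and output filename are assumptions:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Assumes a node started with pprof_laddr = "localhost:6060".
	endpoint := "http://localhost:6060/debug/pprof/goroutine?debug=2"

	resp, err := http.Get(endpoint) //nolint:gosec // operator-supplied address
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to query profile:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	out, err := os.Create("goroutine.out")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer out.Close()

	// Stream the profile straight to disk.
	if _, err := io.Copy(out, resp.Body); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```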
@@ -14,7 +14,7 @@ import (
     "github.com/tendermint/tendermint/store"
 )

-var removeBlock = false
+var removeBlock bool = false

 func init() {
     RollbackStateCmd.Flags().BoolVar(&removeBlock, "hard", false, "remove last block as well as state")

@@ -423,7 +423,6 @@ type RPCConfig struct {
     TLSKeyFile string `mapstructure:"tls_key_file"`

     // pprof listen address (https://golang.org/pkg/net/http/pprof)
-    // FIXME: This should be moved under the instrumentation section
     PprofListenAddress string `mapstructure:"pprof_laddr"`
 }

@@ -507,10 +506,6 @@ func (cfg *RPCConfig) IsCorsEnabled() bool {
     return len(cfg.CORSAllowedOrigins) != 0
 }

-func (cfg *RPCConfig) IsPprofEnabled() bool {
-    return len(cfg.PprofListenAddress) != 0
-}
-
 func (cfg RPCConfig) KeyFile() string {
     path := cfg.TLSKeyFile
     if filepath.IsAbs(path) {

@@ -1206,10 +1201,6 @@ func (cfg *InstrumentationConfig) ValidateBasic() error {
     return nil
 }

-func (cfg *InstrumentationConfig) IsPrometheusEnabled() bool {
-    return cfg.Prometheus && cfg.PrometheusListenAddr != ""
-}
-
 //-----------------------------------------------------------------------------
 // Utils

@@ -26,7 +26,6 @@ import (
     mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
     mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
     "github.com/tendermint/tendermint/p2p"
-    tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
     tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
     sm "github.com/tendermint/tendermint/state"
     "github.com/tendermint/tendermint/store"

@@ -46,8 +45,6 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
     appFunc := newKVStore

     genDoc, privVals := randGenesisDoc(nValidators, false, 30)
-    state, err := sm.MakeGenesisState(genDoc)
-    require.NoError(t, err)
     css := make([]*State, nValidators)

     for i := 0; i < nValidators; i++ {

@@ -56,6 +53,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
         stateStore := sm.NewStore(stateDB, sm.StoreOptions{
             DiscardABCIResponses: false,
         })
+        state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
         thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
         defer os.RemoveAll(thisConfig.RootDir)
         ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal

@@ -103,7 +101,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

         // Make State
         blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
-        cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
+        cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
         cs.SetLogger(cs.Logger)
         // set private validator
         pv := privVals[i]

@@ -126,8 +124,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
     blocksSubs := make([]types.Subscription, 0)
     eventBuses := make([]*types.EventBus, nValidators)
     for i := 0; i < nValidators; i++ {
-        // Note, we dont start the consensus states
-        reactors[i] = NewReactor(css[i])
+        reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
         reactors[i].SetLogger(css[i].Logger)

         // eventBus is already started with the cs

@@ -168,16 +165,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
             for i, peer := range peerList {
                 if i < len(peerList)/2 {
                     bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
-                    peer.Send(p2p.Envelope{
-                        Message:   &tmcons.Vote{Vote: prevote1.ToProto()},
-                        ChannelID: VoteChannel,
-                    })
+                    peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
                 } else {
                     bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
-                    peer.Send(p2p.Envelope{
-                        Message:   &tmcons.Vote{Vote: prevote2.ToProto()},
-                        ChannelID: VoteChannel,
-                    })
+                    peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
                 }
             }
         } else {

@@ -256,7 +247,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

     // start the consensus reactors
     for i := 0; i < nValidators; i++ {
-        require.NoError(t, reactors[i].SwitchToConsensus(state.Copy(), false))
+        s := reactors[i].conS.GetState()
+        reactors[i].SwitchToConsensus(s, false)
     }
     defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

@@ -315,7 +307,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
     N := 4
     logger := consensusLogger().With("test", "byzantine")
     app := newKVStore
-    css, cleanup := randConsensusNet(t, N, "consensus_byzantine_test", newMockTickerFunc(false), app)
+    css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
     defer cleanup()

     // give the byzantine validator a normal ticker

@@ -364,8 +356,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
         blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
         require.NoError(t, err)

-        // Note, we don't start the consensus states
-        conR := NewReactor(css[i])
+        conR := NewReactor(css[i], true) // so we don't start the consensus states
         conR.SetLogger(logger.With("validator", i))
         conR.SetEventBus(eventBus)

@@ -409,13 +400,13 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
     // note these must be started before the byz
     for i := 1; i < N; i++ {
         cr := reactors[i].(*Reactor)
-        require.NoError(t, cr.SwitchToConsensus(cr.conS.GetState(), false))
+        cr.SwitchToConsensus(cr.conS.GetState(), false)
     }

     // start the byzantine state machine
     byzR := reactors[0].(*ByzantineReactor)
     s := byzR.reactor.conS.GetState()
-    require.NoError(t, byzR.reactor.SwitchToConsensus(s, false))
+    byzR.reactor.SwitchToConsensus(s, false)

     // byz proposer sends one block to peers[0]
     // and the other block to peers[1] and peers[2].

@@ -529,26 +520,18 @@ func sendProposalAndParts(
     parts *types.PartSet,
 ) {
     // proposal
-    peer.Send(p2p.Envelope{
-        ChannelID: DataChannel,
-        Message:   &tmcons.Proposal{Proposal: *proposal.ToProto()},
-    })
+    msg := &ProposalMessage{Proposal: proposal}
+    peer.Send(DataChannel, MustEncode(msg))

     // parts
     for i := 0; i < int(parts.Total()); i++ {
         part := parts.GetPart(i)
-        pp, err := part.ToProto()
-        if err != nil {
-            panic(err) // TODO: wbanfield better error handling
+        msg := &BlockPartMessage{
+            Height: height, // This tells peer that this part applies to us.
+            Round:  round,  // This tells peer that this part applies to us.
+            Part:   part,
         }
-        peer.Send(p2p.Envelope{
-            ChannelID: DataChannel,
-            Message: &tmcons.BlockPart{
-                Height: height, // This tells peer that this part applies to us.
-                Round:  round,  // This tells peer that this part applies to us.
-                Part:   *pp,
-            },
-        })
+        peer.Send(DataChannel, MustEncode(msg))
     }

     // votes

@@ -556,14 +539,9 @@ func sendProposalAndParts(
     prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
     precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
     cs.mtx.Unlock()
-    peer.Send(p2p.Envelope{
-        ChannelID: VoteChannel,
-        Message:   &tmcons.Vote{Vote: prevote.ToProto()},
-    })
-    peer.Send(p2p.Envelope{
-        ChannelID: VoteChannel,
-        Message:   &tmcons.Vote{Vote: precommit.ToProto()},
-    })
+
+    peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
+    peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
 }

 //----------------------------------------

@@ -594,14 +572,14 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {

     // Send our state to peer.
     // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
-    if br.reactor.conS.IsRunning() {
+    if !br.reactor.waitSync {
         br.reactor.sendNewRoundStepMessage(peer)
     }
 }
 func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
     br.reactor.RemovePeer(peer, reason)
 }
-func (br *ByzantineReactor) Receive(e p2p.Envelope) {
-    br.reactor.Receive(e)
+func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
+    br.reactor.Receive(chID, peer, msgBytes)
 }
 func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

@@ -440,7 +440,7 @@ func newStateWithConfigAndBlockStore(
     }

     blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
-    cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
+    cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
     cs.SetLogger(log.TestingLogger().With("module", "consensus"))
     cs.SetPrivValidator(pv)

@@ -747,17 +747,18 @@ func consensusLogger() log.Logger {
     }).With("module", "consensus")
 }

-func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc func() TimeoutTicker,
+func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
     appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
-    t.Helper()
     genDoc, privVals := randGenesisDoc(nValidators, false, 30)
-    state, err := sm.MakeGenesisState(genDoc)
-    require.NoError(t, err)
     css := make([]*State, nValidators)
     logger := consensusLogger()
     configRootDirs := make([]string, 0, nValidators)
     for i := 0; i < nValidators; i++ {
         stateDB := dbm.NewMemDB() // each state needs its own db
         stateStore := sm.NewStore(stateDB, sm.StoreOptions{
             DiscardABCIResponses: false,
         })
+        state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
         thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
         configRootDirs = append(configRootDirs, thisConfig.RootDir)
         for _, opt := range configOpts {

@@ -771,7 +772,6 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
         css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
         css[i].SetTimeoutTicker(tickerFunc())
         css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
-        css[i].updateToState(state.Copy())
     }
     return css, func() {
         for _, dir := range configRootDirs {

@@ -7,7 +7,6 @@ import (
     "github.com/tendermint/tendermint/libs/log"
     tmrand "github.com/tendermint/tendermint/libs/rand"
     "github.com/tendermint/tendermint/p2p"
-    tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
     tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
     "github.com/tendermint/tendermint/types"
 )

@@ -19,7 +18,7 @@ import (
 // Ensure a testnet makes blocks
 func TestReactorInvalidPrecommit(t *testing.T) {
     N := 4
-    css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
+    css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
     defer cleanup()

     for i := 0; i < 4; i++ {

@@ -95,10 +94,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
         peers := sw.Peers().List()
         for _, peer := range peers {
             cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
-            peer.Send(p2p.Envelope{
-                Message:   &tmcons.Vote{Vote: precommit.ToProto()},
-                ChannelID: VoteChannel,
-            })
+            peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
         }
     }()
 }

@@ -118,6 +118,18 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
             Name:      "latest_block_height",
             Help:      "The latest block height.",
         }, labels).With(labelsAndValues...),
+        BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+            Namespace: namespace,
+            Subsystem: MetricsSubsystem,
+            Name:      "block_syncing",
+            Help:      "Whether or not a node is block syncing. 1 if yes, 0 if no.",
+        }, labels).With(labelsAndValues...),
+        StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+            Namespace: namespace,
+            Subsystem: MetricsSubsystem,
+            Name:      "state_syncing",
+            Help:      "Whether or not a node is state syncing. 1 if yes, 0 if no.",
+        }, labels).With(labelsAndValues...),
         BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
             Namespace: namespace,
             Subsystem: MetricsSubsystem,

@@ -196,6 +208,8 @@ func NopMetrics() *Metrics {
         BlockSizeBytes:  discard.NewGauge(),
         TotalTxs:        discard.NewGauge(),
         CommittedHeight: discard.NewGauge(),
+        BlockSyncing:    discard.NewGauge(),
+        StateSyncing:    discard.NewGauge(),
         BlockParts:      discard.NewCounter(),
         StepDurationSeconds: discard.NewHistogram(),
         BlockGossipPartsReceived: discard.NewCounter(),

@@ -61,6 +61,10 @@ type Metrics struct {
     TotalTxs metrics.Gauge
     // The latest block height.
    CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"`
+    // Whether or not a node is block syncing. 1 if yes, 0 if no.
+    BlockSyncing metrics.Gauge
+    // Whether or not a node is state syncing. 1 if yes, 0 if no.
+    StateSyncing metrics.Gauge

     // Number of block parts transmitted by each peer.
     BlockParts metrics.Counter `metrics_labels:"peer_id"`

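The new gauges are set by the reactors, but the same metrics can be constructed directly. A hedged usage sketch; the chain_id label pair is an invented example, and PrometheusMetrics is the constructor shown above:

```go
package main

import (
	"github.com/tendermint/tendermint/consensus"
)

func main() {
	// Even-indexed variadic entries are label names, odd-indexed are values.
	m := consensus.PrometheusMetrics("tendermint", "chain_id", "my-chain")

	m.BlockSyncing.Set(1) // node is catching up via blocksync
	// ... after switching to the consensus reactor:
	m.BlockSyncing.Set(0)
	m.StateSyncing.Set(0)
}
```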
@@ -5,6 +5,7 @@ import (
     "fmt"

     "github.com/cosmos/gogoproto/proto"
+
     cstypes "github.com/tendermint/tendermint/consensus/types"
     "github.com/tendermint/tendermint/libs/bits"
     tmmath "github.com/tendermint/tendermint/libs/math"

@@ -14,147 +15,173 @@ import (
     "github.com/tendermint/tendermint/types"
 )

-// MsgToProto takes a consensus message type and returns the proto defined consensus message.
-//
-// TODO: This needs to be removed, but WALToProto depends on this.
-func MsgToProto(msg Message) (proto.Message, error) {
+// MsgToProto takes a consensus message type and returns the proto defined consensus message
+func MsgToProto(msg Message) (*tmcons.Message, error) {
     if msg == nil {
         return nil, errors.New("consensus: message is nil")
     }
-    var pb proto.Message
+    var pb tmcons.Message

     switch msg := msg.(type) {
     case *NewRoundStepMessage:
-        pb = &tmcons.NewRoundStep{
-            Height:                msg.Height,
-            Round:                 msg.Round,
-            Step:                  uint32(msg.Step),
-            SecondsSinceStartTime: msg.SecondsSinceStartTime,
-            LastCommitRound:       msg.LastCommitRound,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_NewRoundStep{
+                NewRoundStep: &tmcons.NewRoundStep{
+                    Height:                msg.Height,
+                    Round:                 msg.Round,
+                    Step:                  uint32(msg.Step),
+                    SecondsSinceStartTime: msg.SecondsSinceStartTime,
+                    LastCommitRound:       msg.LastCommitRound,
+                },
+            },
         }

     case *NewValidBlockMessage:
         pbPartSetHeader := msg.BlockPartSetHeader.ToProto()
         pbBits := msg.BlockParts.ToProto()
-        pb = &tmcons.NewValidBlock{
-            Height:             msg.Height,
-            Round:              msg.Round,
-            BlockPartSetHeader: pbPartSetHeader,
-            BlockParts:         pbBits,
-            IsCommit:           msg.IsCommit,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_NewValidBlock{
+                NewValidBlock: &tmcons.NewValidBlock{
+                    Height:             msg.Height,
+                    Round:              msg.Round,
+                    BlockPartSetHeader: pbPartSetHeader,
+                    BlockParts:         pbBits,
+                    IsCommit:           msg.IsCommit,
+                },
+            },
         }

     case *ProposalMessage:
         pbP := msg.Proposal.ToProto()
-        pb = &tmcons.Proposal{
-            Proposal: *pbP,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_Proposal{
+                Proposal: &tmcons.Proposal{
+                    Proposal: *pbP,
+                },
+            },
         }

     case *ProposalPOLMessage:
         pbBits := msg.ProposalPOL.ToProto()
-        pb = &tmcons.ProposalPOL{
-            Height:           msg.Height,
-            ProposalPolRound: msg.ProposalPOLRound,
-            ProposalPol:      *pbBits,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_ProposalPol{
+                ProposalPol: &tmcons.ProposalPOL{
+                    Height:           msg.Height,
+                    ProposalPolRound: msg.ProposalPOLRound,
+                    ProposalPol:      *pbBits,
+                },
+            },
         }

     case *BlockPartMessage:
         parts, err := msg.Part.ToProto()
         if err != nil {
             return nil, fmt.Errorf("msg to proto error: %w", err)
         }
-        pb = &tmcons.BlockPart{
-            Height: msg.Height,
-            Round:  msg.Round,
-            Part:   *parts,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_BlockPart{
+                BlockPart: &tmcons.BlockPart{
+                    Height: msg.Height,
+                    Round:  msg.Round,
+                    Part:   *parts,
+                },
+            },
         }

     case *VoteMessage:
         vote := msg.Vote.ToProto()
-        pb = &tmcons.Vote{
-            Vote: vote,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_Vote{
+                Vote: &tmcons.Vote{
+                    Vote: vote,
+                },
+            },
         }

     case *HasVoteMessage:
-        pb = &tmcons.HasVote{
-            Height: msg.Height,
-            Round:  msg.Round,
-            Type:   msg.Type,
-            Index:  msg.Index,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_HasVote{
+                HasVote: &tmcons.HasVote{
+                    Height: msg.Height,
+                    Round:  msg.Round,
+                    Type:   msg.Type,
+                    Index:  msg.Index,
+                },
+            },
         }

     case *VoteSetMaj23Message:
         bi := msg.BlockID.ToProto()
-        pb = &tmcons.VoteSetMaj23{
-            Height:  msg.Height,
-            Round:   msg.Round,
-            Type:    msg.Type,
-            BlockID: bi,
+        pb = tmcons.Message{
+            Sum: &tmcons.Message_VoteSetMaj23{
+                VoteSetMaj23: &tmcons.VoteSetMaj23{
+                    Height:  msg.Height,
+                    Round:   msg.Round,
+                    Type:    msg.Type,
+                    BlockID: bi,
+                },
+            },
         }

     case *VoteSetBitsMessage:
         bi := msg.BlockID.ToProto()
         bits := msg.Votes.ToProto()

-        vsb := &tmcons.VoteSetBits{
-            Height:  msg.Height,
-            Round:   msg.Round,
-            Type:    msg.Type,
-            BlockID: bi,
+        vsb := &tmcons.Message_VoteSetBits{
+            VoteSetBits: &tmcons.VoteSetBits{
+                Height:  msg.Height,
+                Round:   msg.Round,
+                Type:    msg.Type,
+                BlockID: bi,
+            },
         }

         if bits != nil {
-            vsb.Votes = *bits
+            vsb.VoteSetBits.Votes = *bits
         }

-        pb = vsb
+        pb = tmcons.Message{
+            Sum: vsb,
+        }

     default:
         return nil, fmt.Errorf("consensus: message not recognized: %T", msg)
     }

-    return pb, nil
+    return &pb, nil
 }

 // MsgFromProto takes a consensus proto message and returns the native go type
-func MsgFromProto(p proto.Message) (Message, error) {
-    if p == nil {
+func MsgFromProto(msg *tmcons.Message) (Message, error) {
+    if msg == nil {
         return nil, errors.New("consensus: nil message")
     }
     var pb Message

-    switch msg := p.(type) {
-    case *tmcons.NewRoundStep:
-        rs, err := tmmath.SafeConvertUint8(int64(msg.Step))
+    switch msg := msg.Sum.(type) {
+    case *tmcons.Message_NewRoundStep:
+        rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step))
         // deny message based on possible overflow
         if err != nil {
             return nil, fmt.Errorf("denying message due to possible overflow: %w", err)
         }
         pb = &NewRoundStepMessage{
-            Height:                msg.Height,
-            Round:                 msg.Round,
+            Height:                msg.NewRoundStep.Height,
+            Round:                 msg.NewRoundStep.Round,
             Step:                  cstypes.RoundStepType(rs),
-            SecondsSinceStartTime: msg.SecondsSinceStartTime,
-            LastCommitRound:       msg.LastCommitRound,
+            SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime,
+            LastCommitRound:       msg.NewRoundStep.LastCommitRound,
         }
-    case *tmcons.NewValidBlock:
-        pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader)
+    case *tmcons.Message_NewValidBlock:
+        pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
         if err != nil {
             return nil, fmt.Errorf("parts to proto error: %w", err)
         }

         pbBits := new(bits.BitArray)
-        pbBits.FromProto(msg.BlockParts)
+        pbBits.FromProto(msg.NewValidBlock.BlockParts)

         pb = &NewValidBlockMessage{
-            Height:             msg.Height,
-            Round:              msg.Round,
+            Height:             msg.NewValidBlock.Height,
+            Round:              msg.NewValidBlock.Round,
             BlockPartSetHeader: *pbPartSetHeader,
             BlockParts:         pbBits,
-            IsCommit:           msg.IsCommit,
+            IsCommit:           msg.NewValidBlock.IsCommit,
         }
-    case *tmcons.Proposal:
-        pbP, err := types.ProposalFromProto(&msg.Proposal)
+    case *tmcons.Message_Proposal:
+        pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal)
         if err != nil {
             return nil, fmt.Errorf("proposal msg to proto error: %w", err)
         }

@@ -162,26 +189,26 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
         pb = &ProposalMessage{
             Proposal: pbP,
         }
-    case *tmcons.ProposalPOL:
+    case *tmcons.Message_ProposalPol:
         pbBits := new(bits.BitArray)
-        pbBits.FromProto(&msg.ProposalPol)
+        pbBits.FromProto(&msg.ProposalPol.ProposalPol)
         pb = &ProposalPOLMessage{
-            Height:           msg.Height,
-            ProposalPOLRound: msg.ProposalPolRound,
+            Height:           msg.ProposalPol.Height,
+            ProposalPOLRound: msg.ProposalPol.ProposalPolRound,
             ProposalPOL:      pbBits,
         }
-    case *tmcons.BlockPart:
-        parts, err := types.PartFromProto(&msg.Part)
+    case *tmcons.Message_BlockPart:
+        parts, err := types.PartFromProto(&msg.BlockPart.Part)
         if err != nil {
             return nil, fmt.Errorf("blockpart msg to proto error: %w", err)
         }
         pb = &BlockPartMessage{
-            Height: msg.Height,
-            Round:  msg.Round,
+            Height: msg.BlockPart.Height,
+            Round:  msg.BlockPart.Round,
             Part:   parts,
         }
-    case *tmcons.Vote:
-        vote, err := types.VoteFromProto(msg.Vote)
+    case *tmcons.Message_Vote:
+        vote, err := types.VoteFromProto(msg.Vote.Vote)
         if err != nil {
             return nil, fmt.Errorf("vote msg to proto error: %w", err)
         }

@@ -189,36 +216,36 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
         pb = &VoteMessage{
             Vote: vote,
         }
-    case *tmcons.HasVote:
+    case *tmcons.Message_HasVote:
         pb = &HasVoteMessage{
-            Height: msg.Height,
-            Round:  msg.Round,
-            Type:   msg.Type,
-            Index:  msg.Index,
+            Height: msg.HasVote.Height,
+            Round:  msg.HasVote.Round,
+            Type:   msg.HasVote.Type,
+            Index:  msg.HasVote.Index,
         }
-    case *tmcons.VoteSetMaj23:
-        bi, err := types.BlockIDFromProto(&msg.BlockID)
+    case *tmcons.Message_VoteSetMaj23:
+        bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
         if err != nil {
             return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err)
         }
         pb = &VoteSetMaj23Message{
-            Height:  msg.Height,
-            Round:   msg.Round,
-            Type:    msg.Type,
+            Height:  msg.VoteSetMaj23.Height,
+            Round:   msg.VoteSetMaj23.Round,
+            Type:    msg.VoteSetMaj23.Type,
             BlockID: *bi,
         }
-    case *tmcons.VoteSetBits:
-        bi, err := types.BlockIDFromProto(&msg.BlockID)
+    case *tmcons.Message_VoteSetBits:
+        bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID)
         if err != nil {
             return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err)
         }
         bits := new(bits.BitArray)
-        bits.FromProto(&msg.Votes)
+        bits.FromProto(&msg.VoteSetBits.Votes)

         pb = &VoteSetBitsMessage{
-            Height:  msg.Height,
-            Round:   msg.Round,
-            Type:    msg.Type,
+            Height:  msg.VoteSetBits.Height,
+            Round:   msg.VoteSetBits.Round,
+            Type:    msg.VoteSetBits.Type,
             BlockID: *bi,
             Votes:   bits,
         }

@@ -233,6 +260,20 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
     return pb, nil
 }

+// MustEncode takes the reactors msg, makes it proto and marshals it
+// this mimics `MustMarshalBinaryBare` in that is panics on error
+func MustEncode(msg Message) []byte {
+    pb, err := MsgToProto(msg)
+    if err != nil {
+        panic(err)
+    }
+    enc, err := proto.Marshal(pb)
+    if err != nil {
+        panic(err)
+    }
+    return enc
+}
+
 // WALToProto takes a WAL message and return a proto walMessage and error
 func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
     var pb tmcons.WALMessage

@@ -253,14 +294,10 @@ func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
         if err != nil {
             return nil, err
         }
-        if w, ok := consMsg.(p2p.Wrapper); ok {
-            consMsg = w.Wrap()
-        }
-        cm := consMsg.(*tmcons.Message)
         pb = tmcons.WALMessage{
             Sum: &tmcons.WALMessage_MsgInfo{
                 MsgInfo: &tmcons.MsgInfo{
-                    Msg:    *cm,
+                    Msg:    *consMsg,
                     PeerID: string(msg.PeerID),
                 },
             },

@@ -306,11 +343,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
             Step:   msg.EventDataRoundState.Step,
         }
     case *tmcons.WALMessage_MsgInfo:
-        um, err := msg.MsgInfo.Msg.Unwrap()
-        if err != nil {
-            return nil, fmt.Errorf("unwrap message: %w", err)
-        }
-        walMsg, err := MsgFromProto(um)
+        walMsg, err := MsgFromProto(&msg.MsgInfo.Msg)
         if err != nil {
             return nil, fmt.Errorf("msgInfo from proto error: %w", err)
         }

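MustEncode pairs with proto.Unmarshal plus MsgFromProto for the reverse direction. A sketch of the round trip under this branch's API; the HasVoteMessage field values are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/consensus"
	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
)

func main() {
	// Encode: native message -> tmcons.Message oneof -> wire bytes.
	// MustEncode panics on failure, like MustMarshalBinaryBare.
	bz := consensus.MustEncode(&consensus.HasVoteMessage{Height: 1, Round: 0, Index: 2})

	// Decode: wire bytes -> tmcons.Message -> native message.
	var pb tmcons.Message
	if err := proto.Unmarshal(bz, &pb); err != nil {
		panic(err)
	}
	msg, err := consensus.MsgFromProto(&pb)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", msg) // *consensus.HasVoteMessage
}
```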
@@ -71,7 +71,7 @@ func TestMsgToProto(t *testing.T) {
     testsCases := []struct {
         testName string
         msg      Message
-        want     proto.Message
+        want     *tmcons.Message
         wantErr  bool
     }{
         {"successful NewRoundStepMessage", &NewRoundStepMessage{

@@ -80,15 +80,17 @@ func TestMsgToProto(t *testing.T) {
             Step:                  1,
             SecondsSinceStartTime: 1,
             LastCommitRound:       2,
-        }, &tmcons.NewRoundStep{
-            Height:                2,
-            Round:                 1,
-            Step:                  1,
-            SecondsSinceStartTime: 1,
-            LastCommitRound:       2,
-        },
-
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_NewRoundStep{
+                NewRoundStep: &tmcons.NewRoundStep{
+                    Height:                2,
+                    Round:                 1,
+                    Step:                  1,
+                    SecondsSinceStartTime: 1,
+                    LastCommitRound:       2,
+                },
+            },
+        }, false},

         {"successful NewValidBlockMessage", &NewValidBlockMessage{
             Height: 1,

@@ -96,78 +98,92 @@ func TestMsgToProto(t *testing.T) {
             BlockPartSetHeader: psh,
             BlockParts:         bits,
             IsCommit:           false,
-        }, &tmcons.NewValidBlock{
-            Height:             1,
-            Round:              1,
-            BlockPartSetHeader: pbPsh,
-            BlockParts:         pbBits,
-            IsCommit:           false,
-        },
-
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_NewValidBlock{
+                NewValidBlock: &tmcons.NewValidBlock{
+                    Height:             1,
+                    Round:              1,
+                    BlockPartSetHeader: pbPsh,
+                    BlockParts:         pbBits,
+                    IsCommit:           false,
+                },
+            },
+        }, false},
         {"successful BlockPartMessage", &BlockPartMessage{
             Height: 100,
             Round:  1,
             Part:   &parts,
-        }, &tmcons.BlockPart{
-            Height: 100,
-            Round:  1,
-            Part:   *pbParts,
-        },
-
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_BlockPart{
+                BlockPart: &tmcons.BlockPart{
+                    Height: 100,
+                    Round:  1,
+                    Part:   *pbParts,
+                },
+            },
+        }, false},
         {"successful ProposalPOLMessage", &ProposalPOLMessage{
             Height:           1,
             ProposalPOLRound: 1,
             ProposalPOL:      bits,
-        }, &tmcons.ProposalPOL{
-            Height:           1,
-            ProposalPolRound: 1,
-            ProposalPol:      *pbBits,
-        },
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_ProposalPol{
+                ProposalPol: &tmcons.ProposalPOL{
+                    Height:           1,
+                    ProposalPolRound: 1,
+                    ProposalPol:      *pbBits,
+                },
+            }}, false},
         {"successful ProposalMessage", &ProposalMessage{
             Proposal: &proposal,
-        }, &tmcons.Proposal{
-            Proposal: *pbProposal,
-        },
-
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_Proposal{
+                Proposal: &tmcons.Proposal{
+                    Proposal: *pbProposal,
+                },
+            },
+        }, false},
         {"successful VoteMessage", &VoteMessage{
             Vote: vote,
-        }, &tmcons.Vote{
-            Vote: pbVote,
-        },
-
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_Vote{
+                Vote: &tmcons.Vote{
+                    Vote: pbVote,
+                },
+            },
+        }, false},
         {"successful VoteSetMaj23", &VoteSetMaj23Message{
             Height:  1,
             Round:   1,
             Type:    1,
             BlockID: bi,
-        }, &tmcons.VoteSetMaj23{
-            Height:  1,
-            Round:   1,
-            Type:    1,
-            BlockID: pbBi,
-        },
-
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_VoteSetMaj23{
+                VoteSetMaj23: &tmcons.VoteSetMaj23{
+                    Height:  1,
+                    Round:   1,
+                    Type:    1,
+                    BlockID: pbBi,
+                },
+            },
+        }, false},
        {"successful VoteSetBits", &VoteSetBitsMessage{
             Height:  1,
             Round:   1,
             Type:    1,
             BlockID: bi,
             Votes:   bits,
-        }, &tmcons.VoteSetBits{
-            Height:  1,
-            Round:   1,
-            Type:    1,
-            BlockID: pbBi,
-            Votes:   *pbBits,
-        },
-
-            false},
+        }, &tmcons.Message{
+            Sum: &tmcons.Message_VoteSetBits{
+                VoteSetBits: &tmcons.VoteSetBits{
+                    Height:  1,
+                    Round:   1,
+                    Type:    1,
+                    BlockID: pbBi,
+                    Votes:   *pbBits,
+                },
+            },
+        }, false},
         {"failure", nil, &tmcons.Message{}, true},
     }
     for _, tt := range testsCases {

@@ -7,6 +7,8 @@ import (
"sync"
"time"

"github.com/cosmos/gogoproto/proto"

cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmevents "github.com/tendermint/tendermint/libs/events"
@@ -42,6 +44,7 @@ type Reactor struct {
conS *State

mtx tmsync.RWMutex
waitSync bool
eventBus *types.EventBus
rs *cstypes.RoundState

@@ -52,11 +55,12 @@ type ReactorOption func(*Reactor)

// NewReactor returns a new Reactor with the given
// consensusState.
func NewReactor(consensusState *State, options ...ReactorOption) *Reactor {
func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor {
conR := &Reactor{
conS: consensusState,
rs: consensusState.GetRoundState(),
Metrics: NopMetrics(),
conS: consensusState,
waitSync: waitSync,
rs: consensusState.GetRoundState(),
Metrics: NopMetrics(),
}
conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)

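NewReactor now takes waitSync as a positional argument instead of deriving it later. A hedged sketch of the call sites this implies (the node wiring around it is assumed, not part of this diff):

// While block syncing, construct the reactor with waitSync=true so the
// consensus state machine is not started until SwitchToConsensus fires.
conR := NewReactor(consensusState, true)
// A node starting directly at the consensus phase would pass false,
// letting OnStart call conR.conS.Start() immediately:
// conR := NewReactor(consensusState, false)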
@@ -70,12 +74,21 @@ func NewReactor(consensusState *State, options ...ReactorOption) *Reactor {
// OnStart implements BaseService by subscribing to events, which later will be
// broadcasted to other peers and starting state if we're not in block sync.
func (conR *Reactor) OnStart() error {
conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())

// start routine that computes peer statistics for evaluating peer quality
go conR.peerStatsRoutine()

conR.subscribeToBroadcastEvents()
go conR.updateRoundStateRoutine()

if !conR.WaitSync() {
err := conR.conS.Start()
if err != nil {
return err
}
}

return nil
}

@@ -83,34 +96,47 @@ func (conR *Reactor) OnStart() error {
// state.
func (conR *Reactor) OnStop() {
conR.unsubscribeFromBroadcastEvents()
if conR.conS.IsRunning() {
if err := conR.conS.Stop(); err != nil {
conR.Logger.Error("Error stopping consensus state", "err", err)
}
if err := conR.conS.Stop(); err != nil {
conR.Logger.Error("Error stopping consensus state", "err", err)
}
if !conR.WaitSync() {
conR.conS.Wait()
}
}

func (conR *Reactor) IsConsensusRunning() bool {
return conR.conS.IsRunning()
}

// SwitchToConsensus switches from block_sync mode to consensus mode.
// It resets the state, turns off block_sync, and starts the consensus state-machine
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) error {
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
conR.Logger.Info("SwitchToConsensus")

// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > state.InitialHeight {
if state.LastBlockHeight > 0 {
conR.conS.reconstructLastCommit(state)
}

// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
// NewRoundStepMessage.
conR.conS.updateToState(state)

conR.mtx.Lock()
conR.waitSync = false
conR.mtx.Unlock()
conR.Metrics.BlockSyncing.Set(0)
conR.Metrics.StateSyncing.Set(0)

if skipWAL {
conR.conS.doWALCatchup = false
}
return conR.conS.Start()
err := conR.conS.Start()
if err != nil {
panic(fmt.Sprintf(`Failed to start consensus state: %v

conS:
%+v

conR:
%+v`, err, conR.conS, conR))
}
}

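With this change SwitchToConsensus no longer returns an error; a failed consensus-state start is fatal. A sketch of the hand-off as a caller such as the block-sync reactor might perform it (the caller shown here is an assumption, not part of this diff):

// Sketch: once block sync decides it is caught up, hand control to consensus.
if conR, ok := sw.Reactor("CONSENSUS").(interface {
SwitchToConsensus(state sm.State, skipWAL bool)
}); ok {
conR.SwitchToConsensus(state, false) // panics internally if the state machine cannot start
}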
// GetChannels implements Reactor
@@ -122,7 +148,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
Priority: 6,
SendQueueCapacity: 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
@@ -131,7 +156,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteChannel,
@@ -139,7 +163,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteSetBitsChannel,
@@ -147,7 +170,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
}
}
@@ -177,7 +199,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) {

// Send our state to peer.
// If we're block_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if conR.conS.IsRunning() {
if !conR.WaitSync() {
conR.sendNewRoundStepMessage(peer)
}
}
@@ -201,33 +223,34 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *Reactor) Receive(e p2p.Envelope) {
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
return
}
msg, err := MsgFromProto(e.Message)

msg, err := decodeMsg(msgBytes)
if err != nil {
conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}

if err = msg.ValidateBasic(); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}

conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

// Get peer states
ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
ps, ok := src.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", e.Src))
panic(fmt.Sprintf("Peer %v has no state", src))
}

switch e.ChannelID {
switch chID {
case StateChannel:
switch msg := msg.(type) {
case *NewRoundStepMessage:
@@ -235,8 +258,8 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
initialHeight := conR.conS.state.InitialHeight
conR.conS.mtx.Unlock()
if err = msg.ValidateHeight(initialHeight); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}
ps.ApplyNewRoundStepMessage(msg)
@@ -255,7 +278,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
// Peer claims to have a maj23 for some BlockID at H,R,S,
err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
if err != nil {
conR.Switch.StopPeerForError(e.Src, err)
conR.Switch.StopPeerForError(src, err)
return
}
// Respond with a VoteSetBitsMessage showing which votes we have.
@@ -269,44 +292,38 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
default:
panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
}
eMsg := &tmcons.VoteSetBits{
src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: msg.BlockID.ToProto(),
}
if votes := ourVotes.ToProto(); votes != nil {
eMsg.Votes = *votes
}
e.Src.TrySend(p2p.Envelope{
ChannelID: VoteSetBitsChannel,
Message: eMsg,
})
BlockID: msg.BlockID,
Votes: ourVotes,
}))
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}

case DataChannel:
if !conR.conS.IsRunning() {
if conR.WaitSync() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
switch msg := msg.(type) {
case *ProposalMessage:
ps.SetHasProposal(msg.Proposal)
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
case *ProposalPOLMessage:
ps.ApplyProposalPOLMessage(msg)
case *BlockPartMessage:
ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}

case VoteChannel:
if !conR.conS.IsRunning() {
if conR.WaitSync() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
@@ -320,7 +337,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote)

cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
cs.peerMsgQueue <- msgInfo{msg, src.ID()}

default:
// don't punish (leave room for soft upgrades)
@@ -328,7 +345,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}

case VoteSetBitsChannel:
if !conR.conS.IsRunning() {
if conR.WaitSync() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
@@ -359,7 +376,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}

default:
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
}
}

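Receive is back to the (chID, src, msgBytes) form, so the switch hands the reactor raw bytes and the reactor decodes them itself. A minimal sketch of driving it directly, mirroring the reactor tests further down in this diff (mock peer assumed):

peer := p2pmock.NewPeer(nil)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
// Deliver raw bytes on the state channel, exactly as the switch would.
reactor.Receive(StateChannel, peer, MustEncode(&HasVoteMessage{
Height: 1, Round: 0, Index: 0, Type: tmproto.PrevoteType,
}))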
@@ -369,6 +386,13 @@ func (conR *Reactor) SetEventBus(b *types.EventBus) {
conR.conS.SetEventBus(b)
}

// WaitSync returns whether the consensus reactor is waiting for state/block sync.
func (conR *Reactor) WaitSync() bool {
conR.mtx.RLock()
defer conR.mtx.RUnlock()
return conR.waitSync
}

//--------------------------------------

// subscribeToBroadcastEvents subscribes for new round steps and votes
@@ -406,39 +430,29 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() {

func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
nrsMsg := makeRoundStepMessage(rs)
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: nrsMsg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg))
}

func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
psh := rs.ProposalBlockParts.Header()
csMsg := &tmcons.NewValidBlock{
csMsg := &NewValidBlockMessage{
Height: rs.Height,
Round: rs.Round,
BlockPartSetHeader: psh.ToProto(),
BlockParts: rs.ProposalBlockParts.BitArray().ToProto(),
BlockPartSetHeader: rs.ProposalBlockParts.Header(),
BlockParts: rs.ProposalBlockParts.BitArray(),
IsCommit: rs.Step == cstypes.RoundStepCommit,
}
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: csMsg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(csMsg))
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
msg := &tmcons.HasVote{
msg := &HasVoteMessage{
Height: vote.Height,
Round: vote.Round,
Type: vote.Type,
Index: vote.ValidatorIndex,
}
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: msg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(msg))
/*
// TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() {
@@ -449,11 +463,7 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
prs := ps.GetRoundState()
if prs.Height == vote.Height {
// TODO: Also filter on round?
e := p2p.Envelope{
ChannelID: StateChannel,
Message: p,
}
peer.TrySend(e)
peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
} else {
// Height doesn't match
// TODO: check a field, maybe CatchupCommitRound?
@@ -463,11 +473,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
*/
}

func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) {
nrsMsg = &tmcons.NewRoundStep{
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
nrsMsg = &NewRoundStepMessage{
Height: rs.Height,
Round: rs.Round,
Step: uint32(rs.Step),
Step: rs.Step,
SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.GetRound(),
}
@@ -477,10 +487,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep)
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
rs := conR.getRoundState()
nrsMsg := makeRoundStepMessage(rs)
peer.Send(p2p.Envelope{
ChannelID: StateChannel,
Message: nrsMsg,
})
peer.Send(StateChannel, MustEncode(nrsMsg))
}

func (conR *Reactor) updateRoundStateRoutine() {
@@ -512,11 +519,6 @@ OUTER_LOOP:
if !peer.IsRunning() || !conR.IsRunning() {
return
}
if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}

rs := conR.getRoundState()
prs := ps.GetRoundState()

@@ -524,19 +526,13 @@ OUTER_LOOP:
if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
part := rs.ProposalBlockParts.GetPart(index)
parts, err := part.ToProto()
if err != nil {
panic(err)
msg := &BlockPartMessage{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: part,
}
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: *parts,
},
}) {
if peer.Send(DataChannel, MustEncode(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
@@ -582,11 +578,9 @@ OUTER_LOOP:
if rs.Proposal != nil && !prs.Proposal {
// Proposal: share the proposal metadata with peer.
{
msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()},
}) {
if peer.Send(DataChannel, MustEncode(msg)) {
// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
ps.SetHasProposal(rs.Proposal)
}
@@ -596,15 +590,13 @@ OUTER_LOOP:
// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
if 0 <= rs.Proposal.POLRound {
msg := &ProposalPOLMessage{
Height: rs.Height,
ProposalPOLRound: rs.Proposal.POLRound,
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
}
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.ProposalPOL{
Height: rs.Height,
ProposalPolRound: rs.Proposal.POLRound,
ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
},
})
peer.Send(DataChannel, MustEncode(msg))
}
continue OUTER_LOOP
}
@@ -641,27 +633,20 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
return
}
// Send the part
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
pp, err := part.ToProto()
if err != nil {
logger.Error("Could not convert part to proto", "index", index, "error", err)
return
msg := &BlockPartMessage{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: part,
}
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: *pp,
},
}) {
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
if peer.Send(DataChannel, MustEncode(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else {
logger.Debug("Sending block part for catchup failed")
}
return
}

// logger.Info("No parts to send in catch-up, sleeping")
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}

@@ -671,16 +656,12 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
// Simple hack to throttle logs upon sleep.
var sleeping = 0

OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
return
}
if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue
}

rs := conR.getRoundState()
prs := ps.GetRoundState()

@@ -691,11 +672,14 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
sleeping = 0
}

// logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
// "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)

// If height matches, then send LastCommit, Prevotes, Precommits.
if rs.Height == prs.Height {
heightLogger := logger.With("height", prs.Height)
if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
continue
continue OUTER_LOOP
}
}

@@ -704,7 +688,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
if prs.Height != 0 && rs.Height == prs.Height+1 {
if ps.PickSendVote(rs.LastCommit) {
logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
continue
continue OUTER_LOOP
}
}

@@ -717,7 +701,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
if ps.PickSendVote(commit) {
logger.Debug("Picked Catchup commit to send", "height", prs.Height)
continue
continue OUTER_LOOP
}
}
}
@@ -734,7 +718,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
}

time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue
continue OUTER_LOOP
}
}

@@ -808,27 +792,18 @@ OUTER_LOOP:
return
}

if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
continue OUTER_LOOP
}

// Maybe send Height/Round/Prevotes
{
rs := conR.getRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {

peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -840,15 +815,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -860,16 +832,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {

peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -884,20 +852,19 @@ OUTER_LOOP:
if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
prs.Height >= conR.conS.blockStore.Base() {
if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
}

time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)

continue OUTER_LOOP
}
}
@@ -1104,13 +1071,9 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
if ps.peer.Send(p2p.Envelope{
ChannelID: VoteChannel,
Message: &tmcons.Vote{
Vote: vote.ToProto(),
},
}) {
if ps.peer.Send(VoteChannel, MustEncode(msg)) {
ps.SetHasVote(vote)
return true
}
@@ -1476,6 +1439,15 @@ func init() {
tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
}

func decodeMsg(bz []byte) (msg Message, err error) {
pb := &tmcons.Message{}
if err = proto.Unmarshal(bz, pb); err != nil {
return msg, err
}

return MsgFromProto(pb)
}

//-------------------------------------

// NewRoundStepMessage is sent for every step taken in the ConsensusState.

@@ -33,7 +33,6 @@ import (
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
p2pmock "github.com/tendermint/tendermint/p2p/mock"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -55,8 +54,9 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
blocksSubs := make([]types.Subscription, 0)
eventBuses := make([]*types.EventBus, n)
for i := 0; i < n; i++ {
// Note, we dont start the consensus states
reactors[i] = NewReactor(css[i])
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
if err != nil { t.Fatal(err)}*/
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
reactors[i].SetLogger(css[i].Logger)

// eventBus is already started with the cs
@@ -87,8 +87,7 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
// TODO: is this still true with new pubsub?
for i := 0; i < n; i++ {
s := reactors[i].conS.GetState()
err := reactors[i].SwitchToConsensus(s, false)
require.NoError(t, err)
reactors[i].SwitchToConsensus(s, false)
}
return reactors, blocksSubs, eventBuses
}
@@ -113,7 +112,7 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type
// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -135,8 +134,6 @@ func TestReactorWithEvidence(t *testing.T) {
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)

genDoc, privVals := randGenesisDoc(nValidators, false, 30)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
css := make([]*State, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
@@ -144,6 +141,7 @@ func TestReactorWithEvidence(t *testing.T) {
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
@@ -204,7 +202,7 @@ func TestReactorWithEvidence(t *testing.T) {

// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool2, WithState(state.Copy()))
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)

@@ -238,7 +236,7 @@ func TestReactorWithEvidence(t *testing.T) {
// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
})
@@ -259,7 +257,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -267,25 +265,22 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)

reactor.InitPeer(peer)

// simulate switch calling Receive before AddPeer
assert.NotPanics(t, func() {
reactor.Receive(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
reactor.Receive(StateChannel, peer, msg)
reactor.AddPeer(peer)
})
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -293,25 +288,22 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)

// we should call InitPeer here

// simulate switch calling Receive before AddPeer
assert.Panics(t, func() {
reactor.Receive(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
reactor.Receive(StateChannel, peer, msg)
})
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -337,7 +329,6 @@ func TestReactorVotingPowerChange(t *testing.T) {
nVals := 4
logger := log.TestingLogger()
css, cleanup := randConsensusNet(
t,
nVals,
"consensus_voting_power_changes_test",
newMockTickerFunc(true),
@@ -533,7 +524,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(t, N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
defer cleanup()
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {

@@ -129,8 +129,8 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error
}
pb.cs.Wait()

newCS := NewState(pb.cs.config, pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, WithState(pb.genesisState.Copy()))
newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
newCS.SetEventBus(pb.cs.eventBus)
newCS.startForReplay()

@@ -332,8 +332,8 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)

consensusState := NewState(csConfig, blockExec,
blockStore, mempool, evpool, WithState(state.Copy()))
consensusState := NewState(csConfig, state.Copy(), blockExec,
blockStore, mempool, evpool)

consensusState.SetEventBus(eventBus)
return consensusState

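NewState now receives the state up front and reconstructs LastCommit itself, replacing the WithState option that the state.go hunks below remove. A hedged before/after sketch of a call site:

// Before (option-based):
// cs := NewState(csConfig, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
// After this diff (state passed positionally, as in newConsensusStateForReplay above):
cs := NewState(csConfig, state.Copy(), blockExec, blockStore, mempool, evpool)
cs.SetEventBus(eventBus)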
@@ -67,8 +67,7 @@ func TestMain(m *testing.M) {
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
logger := log.TestingLogger()
state, err := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
@@ -82,7 +81,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
bytes, _ := os.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)

err = cs.Start()
err := cs.Start()
require.NoError(t, err)
defer func() {
if err := cs.Stop(); err != nil {
@@ -556,40 +555,40 @@ func TestSimulateValidatorsChange(t *testing.T) {
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 0, m, false)
testHandshakeReplay(t, config, 0, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, 0, m, true)
testHandshakeReplay(t, config, 0, m, true)
}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 2, m, false)
testHandshakeReplay(t, config, 2, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, 2, m, true)
testHandshakeReplay(t, config, 2, m, true)
}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, numBlocks-1, m, false)
testHandshakeReplay(t, config, numBlocks-1, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, numBlocks-1, m, true)
testHandshakeReplay(t, config, numBlocks-1, m, true)
}
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, numBlocks, m, false)
testHandshakeReplay(t, config, numBlocks, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, numBlocks, m, true)
testHandshakeReplay(t, config, numBlocks, m, true)
}
}

@@ -661,27 +660,25 @@ func tempWALWithData(data []byte) string {

// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint, testValidatorsChange bool) {
var (
chain []*types.Block
commits []*types.Commit
store *mockBlockStore
stateDB dbm.DB
genesisState sm.State
config *cfg.Config
)
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
var chain []*types.Block
var commits []*types.Commit
var store *mockBlockStore
var stateDB dbm.DB
var genesisState sm.State
if testValidatorsChange {
config = ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
defer os.RemoveAll(config.RootDir)
testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
defer os.RemoveAll(testConfig.RootDir)
stateDB = dbm.NewMemDB()

genesisState = sim.GenesisState
config = sim.Config
chain = append([]*types.Block{}, sim.Chain...) // copy chain
commits = sim.Commits
store = newMockBlockStore(t, config, genesisState.ConsensusParams)
} else { // test single node
config = ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
defer os.RemoveAll(config.RootDir)
testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
defer os.RemoveAll(testConfig.RootDir)
walBody, err := WALWithNBlocks(t, numBlocks)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
@@ -814,11 +811,14 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm

state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
_, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
}); err != nil {
panic(err)
}
if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
panic(err)
}
switch mode {
case 0:
for i := 0; i < nBlocks; i++ {

@@ -148,6 +148,7 @@ type StateOption func(*State)
// NewState returns a new State.
func NewState(
config *cfg.ConsensusConfig,
state sm.State,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
txNotifier txNotifier,
@@ -176,6 +177,13 @@ func NewState(
cs.doPrevote = cs.defaultDoPrevote
cs.setProposal = cs.defaultSetProposal

// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
}

cs.updateToState(state)

// NOTE: we do not call scheduleRound0 yet, we do that upon Start()

cs.BaseService = *service.NewBaseService(nil, "State", cs)
@@ -199,19 +207,10 @@ func (cs *State) SetEventBus(b *types.EventBus) {
}

// StateMetrics sets the metrics.
func WithMetrics(metrics *Metrics) StateOption {
func StateMetrics(metrics *Metrics) StateOption {
return func(cs *State) { cs.metrics = metrics }
}

func WithState(state sm.State) StateOption {
return func(cs *State) {
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
}
cs.updateToState(state)
}
}

// String returns a string.
func (cs *State) String() string {
// better not to access shared variables
@@ -298,10 +297,6 @@ func (cs *State) LoadCommit(height int64) *types.Commit {
// OnStart loads the latest state via the WAL, and starts the timeout and
// receive routines.
func (cs *State) OnStart() error {
if cs.state.IsEmpty() {
return errors.New("no state to commence consensus on")
}

// We may set the WAL in testing before calling Start, so only OpenWAL if its
// still the nilWAL.
if _, ok := cs.wal.(nilWAL); ok {
@@ -617,7 +612,7 @@ func (cs *State) updateToState(state sm.State) {
// signal the new round step, because other services (eg. txNotifier)
// depend on having an up-to-date peer state!
if state.LastBlockHeight <= cs.state.LastBlockHeight {
cs.Logger.Info(
cs.Logger.Debug(
"ignoring updateToState()",
"new_height", state.LastBlockHeight+1,
"old_height", cs.state.LastBlockHeight+1,
@@ -2280,10 +2275,11 @@ func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header
return nil
}

// TODO: pass pubKey to signVote
vote, err := cs.signVote(msgType, hash, header)
if err == nil {
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
cs.Logger.Info("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
cs.Logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
return vote
}


@@ -121,7 +121,7 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
return
}
voteSet := hvs.getVoteSet(vote.Round, vote.Type)
if voteSet.IsEmpty() {
if voteSet == nil {
if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 {
hvs.addRound(vote.Round)
voteSet = hvs.getVoteSet(vote.Round, vote.Type)
@@ -166,7 +166,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) {
func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *types.VoteSet {
rvs, ok := hvs.roundVoteSets[round]
if !ok {
return &types.VoteSet{}
return nil
}
switch voteType {
case tmproto.PrevoteType:

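getVoteSet once again returns nil for an untracked round instead of an empty VoteSet, so callers must nil-check before use, exactly as AddVote above does. A minimal sketch of the reinstated guard:

voteSet := hvs.getVoteSet(round, tmproto.PrevoteType)
if voteSet == nil {
return // nothing recorded for this round yet
}
maj23, ok := voteSet.TwoThirdsMajority() // safe to query only after the nil check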
@@ -5,7 +5,6 @@ import (
"os"
"testing"

"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/test"
@@ -31,23 +30,30 @@ func TestPeerCatchupRounds(t *testing.T) {

vote999_0 := makeVoteHR(t, 1, 0, 999, privVals)
added, err := hvs.AddVote(vote999_0, "peer1")
require.NoError(t, err)
require.True(t, added)
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}

vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals)
added, err = hvs.AddVote(vote1000_0, "peer1")
require.NoError(t, err)
require.True(t, added)
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}

vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals)
added, err = hvs.AddVote(vote1001_0, "peer1")
require.Error(t, err)
require.Equal(t, ErrGotVoteFromUnwantedRound, err)
require.False(t, added)
if err != ErrGotVoteFromUnwantedRound {
t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err)
}
if added {
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
}

added, err = hvs.AddVote(vote1001_0, "peer2")
require.NoError(t, err)
require.True(t, added)
if !added || err != nil {
t.Error("Expected to successfully add vote from another peer")
}

}

func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote {

@@ -86,7 +86,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
mempool := emptyMempool{}
evpool := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
consensusState := NewState(config.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState.SetLogger(logger)
consensusState.SetEventBus(eventBus)
if privValidator != nil {

@@ -4,7 +4,6 @@ import (
"fmt"
"time"

"github.com/cosmos/gogoproto/proto"
clist "github.com/tendermint/tendermint/libs/clist"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
@@ -56,7 +55,6 @@ func (evR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
ID: EvidenceChannel,
Priority: 6,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmproto.EvidenceList{},
},
}
}
@@ -68,11 +66,11 @@ func (evR *Reactor) AddPeer(peer p2p.Peer) {

// Receive implements Reactor.
// It adds any received evidence to the evpool.
func (evR *Reactor) Receive(e p2p.Envelope) {
evis, err := evidenceListFromProto(e.Message)
func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
evis, err := decodeMsg(msgBytes)
if err != nil {
evR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
evR.Switch.StopPeerForError(e.Src, err)
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
evR.Switch.StopPeerForError(src, err)
return
}

@@ -82,7 +80,7 @@ func (evR *Reactor) Receive(e p2p.Envelope) {
case *types.ErrInvalidEvidence:
evR.Logger.Error(err.Error())
// punish peer
evR.Switch.StopPeerForError(e.Src, err)
evR.Switch.StopPeerForError(src, err)
return
case nil:
default:
@@ -128,15 +126,11 @@ func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) {
evis := evR.prepareEvidenceMessage(peer, ev)
if len(evis) > 0 {
evR.Logger.Debug("Gossiping evidence to peer", "ev", ev, "peer", peer)
evp, err := evidenceListToProto(evis)
msgBytes, err := encodeMsg(evis)
if err != nil {
panic(err)
}

success := peer.Send(p2p.Envelope{
ChannelID: EvidenceChannel,
Message: evp,
})
success := peer.Send(EvidenceChannel, msgBytes)
if !success {
time.Sleep(peerRetryMessageIntervalMS * time.Millisecond)
continue
@@ -216,7 +210,7 @@ type PeerState interface {

// encodemsg takes an array of evidence
// returns the byte encoding of the List Message
func evidenceListToProto(evis []types.Evidence) (*tmproto.EvidenceList, error) {
func encodeMsg(evis []types.Evidence) ([]byte, error) {
evi := make([]tmproto.Evidence, len(evis))
for i := 0; i < len(evis); i++ {
ev, err := types.EvidenceToProto(evis[i])
@@ -228,13 +222,19 @@ func evidenceListToProto(evis []types.Evidence) (*tmproto.EvidenceList, error) {
epl := tmproto.EvidenceList{
Evidence: evi,
}
return &epl, nil

return epl.Marshal()
}

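encodeMsg and decodeMsg reinstate a plain-bytes framing for the evidence channel. A hedged round-trip sketch (the evidence value ev is assumed to come from a test helper, e.g. a DuplicateVoteEvidence):

bz, err := encodeMsg([]types.Evidence{ev})
if err != nil {
panic(err)
}
evis, err := decodeMsg(bz)
if err != nil {
panic(err)
}
_ = evis[0] // the same evidence, back from the wire format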
func evidenceListFromProto(m proto.Message) ([]types.Evidence, error) {
lm := m.(*tmproto.EvidenceList)
// decodemsg takes an array of bytes
// returns an array of evidence
func decodeMsg(bz []byte) (evis []types.Evidence, err error) {
lm := tmproto.EvidenceList{}
if err := lm.Unmarshal(bz); err != nil {
return nil, err
}

evis := make([]types.Evidence, len(lm.Evidence))
evis = make([]types.Evidence, len(lm.Evidence))
for i := 0; i < len(lm.Evidence); i++ {
ev, err := types.EvidenceFromProto(&lm.Evidence[i])
if err != nil {

@@ -208,10 +208,7 @@ func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) {
// i.e. broadcastEvidenceRoutine finishes when peer is stopped
defer leaktest.CheckTimeout(t, 10*time.Second)()

p.On("Send", mock.MatchedBy(func(i interface{}) bool {
e, ok := i.(p2p.Envelope)
return ok && e.ChannelID == evidence.EvidenceChannel
})).Return(false)
p.On("Send", evidence.EvidenceChannel, mock.AnythingOfType("[]uint8")).Return(false)
quitChan := make(<-chan struct{})
p.On("Quit").Return(quitChan)
ps := peerState{2}

104
go.mod
@@ -11,7 +11,7 @@ require (
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.5.1
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.50.1
github.com/golangci/golangci-lint v1.49.0
github.com/google/orderedcode v0.0.1
github.com/gorilla/websocket v1.5.0
github.com/informalsystems/tm-load-test v1.0.0
@@ -22,42 +22,41 @@ require (
github.com/pkg/errors v0.9.1
github.com/pointlander/peg v1.0.1
github.com/prometheus/client_golang v1.13.0
github.com/prometheus/client_model v0.3.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.37.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/rs/cors v1.8.2
github.com/sasha-s/go-deadlock v0.3.1
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.6.1
github.com/spf13/cobra v1.6.0
github.com/spf13/viper v1.13.0
github.com/stretchr/testify v1.8.1
github.com/stretchr/testify v1.8.0
github.com/tendermint/tm-db v0.6.6
golang.org/x/crypto v0.1.0
golang.org/x/net v0.1.0
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
golang.org/x/net v0.0.0-20220812174116-3211cb980234
google.golang.org/grpc v1.50.1
)

require (
github.com/bufbuild/buf v1.9.0
github.com/bufbuild/buf v1.8.0
github.com/creachadair/taskgroup v0.3.2
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
)

require (
github.com/btcsuite/btcd/btcec/v2 v2.3.0
github.com/btcsuite/btcd/btcec/v2 v2.2.1
github.com/btcsuite/btcd/btcutil v1.1.2
github.com/cosmos/gogoproto v1.4.2
github.com/gofrs/uuid v4.3.0+incompatible
github.com/google/uuid v1.3.0
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae
github.com/vektra/mockery/v2 v2.14.1
github.com/vektra/mockery/v2 v2.14.0
gonum.org/v1/gonum v0.12.0
google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8
google.golang.org/protobuf v1.28.1
)

require (
4d63.com/gochecknoglobals v0.1.0 // indirect
github.com/Abirdcfly/dupword v0.0.7 // indirect
github.com/Antonboom/errname v0.1.7 // indirect
github.com/Antonboom/nilnil v0.1.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
@@ -65,9 +64,9 @@ require (
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/OpenPeeDeeP/depguard v1.1.1 // indirect
github.com/OpenPeeDeeP/depguard v1.1.0 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
github.com/ashanbrown/forbidigo v1.3.0 // indirect
@@ -78,8 +77,7 @@ require (
github.com/bombsimon/wsl/v3 v3.3.0 // indirect
github.com/breml/bidichk v0.2.3 // indirect
github.com/breml/errchkjson v0.3.0 // indirect
github.com/bufbuild/connect-go v1.0.0 // indirect
github.com/bufbuild/protocompile v0.1.0 // indirect
github.com/bufbuild/connect-go v0.4.0 // indirect
github.com/butuzov/ireturn v0.1.1 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
@@ -89,8 +87,8 @@ require (
github.com/containerd/continuity v0.3.0 // indirect
github.com/containerd/typeurl v1.0.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/curioswitch/go-reassign v0.2.0 // indirect
github.com/daixiang0/gci v0.8.1 // indirect
github.com/curioswitch/go-reassign v0.1.2 // indirect
github.com/daixiang0/gci v0.6.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/denis-tingaikin/go-header v0.4.3 // indirect
@@ -98,9 +96,9 @@ require (
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.19+incompatible // indirect
github.com/docker/docker v20.10.17+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/esimonov/ifshort v1.0.4 // indirect
github.com/ettle/strcase v0.1.1 // indirect
@@ -110,12 +108,12 @@ require (
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
github.com/go-chi/chi/v5 v5.0.7 // indirect
github.com/go-critic/go-critic v0.6.5 // indirect
github.com/go-critic/go-critic v0.6.4 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-toolsmith/astcast v1.0.0 // indirect
github.com/go-toolsmith/astcopy v1.0.2 // indirect
github.com/go-toolsmith/astequal v1.0.3 // indirect
github.com/go-toolsmith/astcopy v1.0.1 // indirect
github.com/go-toolsmith/astequal v1.0.2 // indirect
github.com/go-toolsmith/astfmt v1.0.0 // indirect
github.com/go-toolsmith/astp v1.0.0 // indirect
github.com/go-toolsmith/strparse v1.0.0 // indirect
@@ -129,14 +127,14 @@ require (
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
github.com/golangci/misspell v0.3.5 // indirect
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.4.2 // indirect
@@ -151,14 +149,15 @@ require (
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect
github.com/jgautheron/goconst v1.5.1 // indirect
github.com/jhump/protocompile v0.0.0-20220812162104-d108583e055d // indirect
github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
github.com/julz/importas v0.1.0 // indirect
github.com/kisielk/errcheck v1.6.2 // indirect
github.com/kisielk/gotool v1.0.0 // indirect
github.com/kkHAIKE/contextcheck v1.1.3 // indirect
github.com/klauspost/compress v1.15.11 // indirect
github.com/klauspost/compress v1.15.9 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kulti/thelper v0.6.3 // indirect
github.com/kunwardeep/paralleltest v1.0.6 // indirect
@@ -168,7 +167,6 @@ require (
github.com/leonklingele/grouper v1.1.0 // indirect
github.com/lufeee/execinquery v1.2.1 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/maratori/testableexamples v1.0.0 // indirect
github.com/maratori/testpackage v1.1.0 // indirect
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
@@ -176,20 +174,20 @@ require (
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mbilski/exhaustivestruct v1.2.0 // indirect
github.com/mgechev/revive v1.2.4 // indirect
github.com/mgechev/revive v1.2.3 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/buildkit v0.10.4 // indirect
github.com/moby/buildkit v0.10.3 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moricho/tparallel v0.2.1 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nakabonne/nestif v0.3.1 // indirect
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
github.com/nishanths/exhaustive v0.8.3 // indirect
github.com/nishanths/exhaustive v0.8.1 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
@@ -200,10 +198,10 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect
github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect
github.com/polyfloyd/go-errorlint v1.0.5 // indirect
github.com/polyfloyd/go-errorlint v1.0.2 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/quasilyte/go-ruleguard v0.3.18 // indirect
github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect
github.com/quasilyte/go-ruleguard v0.3.17 // indirect
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
github.com/rs/zerolog v1.27.0 // indirect
@@ -212,7 +210,7 @@ require (
github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect
github.com/sashamelentyev/usestdlibvars v1.13.0 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/securego/gosec/v2 v2.13.1 // indirect
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
@@ -228,15 +226,16 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/stretchr/objx v0.4.0 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
github.com/sylvia7788/contextcheck v1.0.6 // indirect
github.com/tdakkota/asciicheck v0.1.1 // indirect
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
github.com/tetafro/godot v1.4.11 // indirect
github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect
github.com/timonwong/loggercheck v0.9.3 // indirect
github.com/tomarrell/wrapcheck/v2 v2.7.0 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
github.com/timonwong/logrlint v0.1.0 // indirect
github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect
github.com/ultraware/funlen v0.0.3 // indirect
github.com/ultraware/whitespace v0.0.5 // indirect
github.com/uudashr/gocognit v1.0.6 // indirect
@@ -245,27 +244,26 @@ require (
gitlab.com/bosi/decorder v0.2.3 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect
go.opentelemetry.io/otel v1.11.0 // indirect
go.opentelemetry.io/otel/metric v0.32.3 // indirect
go.opentelemetry.io/otel/trace v1.11.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.34.0 // indirect
go.opentelemetry.io/otel v1.9.0 // indirect
go.opentelemetry.io/otel/trace v1.9.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.23.0 // indirect
go.uber.org/zap v1.22.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/term v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/tools v0.2.0 // indirect
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 // indirect
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.12 // indirect
google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
honnef.co/go/tools v0.3.3 // indirect
mvdan.cc/gofumpt v0.4.0 // indirect
mvdan.cc/gofumpt v0.3.1 // indirect
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect
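Each adjacent pair of require lines above records one version change between the two branches being compared (the rendered view drops the usual +/- markers). As a hedged sketch of how one such bump, testify v1.8.0 to v1.8.1, can be applied mechanically, golang.org/x/mod/modfile can rewrite the require block; in practice `go get github.com/stretchr/testify@v1.8.1` followed by `go mod tidy` produces the same edit:

package main

import (
	"fmt"
	"os"

	"golang.org/x/mod/modfile"
)

func main() {
	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}
	// AddRequire rewrites the existing requirement's version in place.
	if err := f.AddRequire("github.com/stretchr/testify", "v1.8.1"); err != nil {
		panic(err)
	}
	f.Cleanup()
	out, err := f.Format()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the edited go.mod, matching the pair shown above
}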
211
go.sum
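A note on the hunks that follow: go.sum pins each module twice, with an h1: hash over the full module tree and a second /go.mod h1: hash over just its go.mod file, which is why every version bump below touches go.sum lines in pairs, e.g. for one of the modules in this diff:

github.com/Abirdcfly/dupword v0.0.7 h1:z14n0yytA3wNO2gpCD/jVtp/acEXPGmYu0esewpBt6Q=
github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk=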
@@ -45,8 +45,6 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Abirdcfly/dupword v0.0.7 h1:z14n0yytA3wNO2gpCD/jVtp/acEXPGmYu0esewpBt6Q=
github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk=
github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako=
github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU=
github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=
@@ -74,14 +72,14 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA=
github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o=
github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
@@ -151,8 +149,8 @@ github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA=
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.0 h1:S/6K1GEwlEsFzZP4cOOl5mg6PEd/pr0zz7hvXcaxhJ4=
github.com/btcsuite/btcd/btcec/v2 v2.3.0/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E=
github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ=
@@ -171,12 +169,10 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bufbuild/buf v1.9.0 h1:8a60qapVuRj6crerWR0rny4UUV/MhZSL5gagJuBxmx8=
github.com/bufbuild/buf v1.9.0/go.mod h1:1Q+rMHiMVcfgScEF/GOldxmu4o9TrQ2sQQh58K6MscE=
github.com/bufbuild/connect-go v1.0.0 h1:htSflKUT8y1jxhoPhPYTZMrsY3ipUXjjrbcZR5O2cVo=
github.com/bufbuild/connect-go v1.0.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I=
github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s=
github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4=
github.com/bufbuild/buf v1.8.0 h1:53qJ3QY/KOHwSjWgCQYkQaR3jGWst7aOfTXnFe8e+VQ=
github.com/bufbuild/buf v1.8.0/go.mod h1:tBzKkd1fzCcBV6KKSO7zo3rlhk3o1YQ0F2tQKSC2aNU=
github.com/bufbuild/connect-go v0.4.0 h1:fIMyUYG8mXSTH+nnlOx9KmRUf3mBF0R2uKK+BQBoOHE=
github.com/bufbuild/connect-go v0.4.0/go.mod h1:ZEtBnQ7J/m7bvWOW+H8T/+hKQCzPVfhhhICuvtcnjlI=
github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
@@ -253,13 +249,13 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
github.com/cristalhq/acmd v0.7.0/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ=
github.com/curioswitch/go-reassign v0.1.2 h1:ekM07+z+VFT560Exz4mTv0/s1yU9gem6CJc/tlYpkmI=
github.com/curioswitch/go-reassign v0.1.2/go.mod h1:bFJIHgtTM3hRm2sKXSPkbwNjSFyGURQXyn4IXD2qwfQ=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/daixiang0/gci v0.8.1 h1:T4xpSC+hmsi4CSyuYfIJdMZAr9o7xZmHpQVygMghGZ4=
github.com/daixiang0/gci v0.8.1/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
github.com/daixiang0/gci v0.6.3 h1:wUAqXChk8HbwXn8AfxD9DYSCp9Bpz1L3e6Q4Roe+q9E=
github.com/daixiang0/gci v0.6.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -283,13 +279,12 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64=
github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -343,8 +338,8 @@ github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlya
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8=
github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo=
github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY=
github.com/go-critic/go-critic v0.6.4 h1:tucuG1pvOyYgpBIrVxw0R6gwO42lNa92Aq3VaDoIs+E=
github.com/go-critic/go-critic v0.6.4/go.mod h1:qL5SOlk7NtY6sJPoVCTKDIgzNOxHkkkOCVDyi9wJe1U=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -373,12 +368,13 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
github.com/go-toolsmith/astcopy v1.0.2 h1:YnWf5Rnh1hUudj11kei53kI57quN/VH6Hp1n+erozn0=
github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
github.com/go-toolsmith/astcopy v1.0.1 h1:l09oBhAPyV74kLJ3ZO31iBU8htZGTwr9LTjuMCyL8go=
github.com/go-toolsmith/astcopy v1.0.1/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw=
github.com/go-toolsmith/astequal v1.0.2 h1:+XvaV8zNxua+9+Oa4AHmgmpo4RYAbwr/qjNppLfX2yM=
github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
github.com/go-toolsmith/astequal v1.0.3 h1:+LVdyRatFS+XO78SGV4I3TCEA0AC7fKEGma+fH+674o=
github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=
github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=
@@ -455,10 +451,10 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
github.com/golangci/golangci-lint v1.50.1 h1:C829clMcZXEORakZlwpk7M4iDw2XiwxxKaG504SZ9zY=
github.com/golangci/golangci-lint v1.50.1/go.mod h1:AQjHBopYS//oB8xs0y0M/dtxdKHkdhl0RvmjUct0/4w=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
github.com/golangci/golangci-lint v1.49.0 h1:I8WHOavragDttlLHtSraHn/h39C+R60bEQ5NoGcHQr8=
github.com/golangci/golangci-lint v1.49.0/go.mod h1:+V/7lLv449R6w9mQ3WdV0EKh7Je/jTylMeSwBZcLeWE=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
@@ -484,9 +480,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -619,7 +614,14 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM=
github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
github.com/jhump/protocompile v0.0.0-20220812162104-d108583e055d h1:1BLWxsvcb5w9/vGjtyEo//r3dwEPNg7z73nbQ/XV4/s=
github.com/jhump/protocompile v0.0.0-20220812162104-d108583e055d/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q=
github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b h1:izTof8BKh/nE1wrKOrloNA5q4odOarjf+Xpe+4qow98=
github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
@@ -654,13 +656,11 @@ github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7
github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR0uvmw=
github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -704,8 +704,6 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q=
github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc=
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
@@ -739,8 +737,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI=
github.com/mgechev/revive v1.2.4/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q=
github.com/mgechev/revive v1.2.3 h1:NzIEEa9+WimQ6q2Ov7OcNeySS/IOcwtkQ8RAh0R5UJ4=
github.com/mgechev/revive v1.2.3/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
@@ -763,8 +761,8 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.10.4 h1:FvC+buO8isGpUFZ1abdSLdGHZVqg9sqI4BbFL8tlzP4=
github.com/moby/buildkit v0.10.4/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug=
github.com/moby/buildkit v0.10.3 h1:/dGykD8FW+H4p++q5+KqKEo6gAkYKyBQHdawdjVwVAU=
github.com/moby/buildkit v0.10.3/go.mod h1:jxeOuly98l9gWHai0Ojrbnczrk/rf+o9/JqNhY+UCSo=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
@@ -800,8 +798,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6Fx
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM=
github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg=
github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo=
github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
@@ -833,8 +831,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQ
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
@@ -898,8 +896,8 @@ github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 h1:RHHRCZeaNy
github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4/go.mod h1:RdR1j20Aj5pB6+fw6Y9Ur7lMHpegTEjY1vc19hEZL40=
github.com/pointlander/peg v1.0.1 h1:mgA/GQE8TeS9MdkU6Xn6iEzBmQUQCNuWD7rHCK6Mjs0=
github.com/pointlander/peg v1.0.1/go.mod h1:5hsGDQR2oZI4QoWz0/Kdg3VSVEC31iJw/b7WjqCBGRI=
github.com/polyfloyd/go-errorlint v1.0.5 h1:AHB5JRCjlmelh9RrLxT9sgzpalIwwq4hqE8EkwIwKdY=
github.com/polyfloyd/go-errorlint v1.0.5/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI=
github.com/polyfloyd/go-errorlint v1.0.2 h1:kp1yvHflYhTmw5m3MmBy8SCyQkKPjwDthVuMH0ug6Yk=
github.com/polyfloyd/go-errorlint v1.0.2/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -919,9 +917,8 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -948,14 +945,14 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=
github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE=
github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs=
github.com/quasilyte/go-ruleguard v0.3.17 h1:cDdoaSbQg11LXPDQqiCK54QmQXsEQQCTIgdcpeULGSI=
github.com/quasilyte/go-ruleguard v0.3.17/go.mod h1:sST5PvaR7yb/Az5ksX8oc88usJ4EGjmJv7cK7y3jyig=
github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f h1:6Gtn2i04RD0gVyYf2/IUMTIs+qYleBt4zxDqkLTcu4U=
github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8=
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM=
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
@@ -967,7 +964,7 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
@@ -991,8 +988,8 @@ github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71e
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
github.com/sashamelentyev/usestdlibvars v1.20.0 h1:K6CXjqqtSYSsuyRDDC7Sjn6vTMLiSJa4ZmDkiokoqtw=
github.com/sashamelentyev/usestdlibvars v1.20.0/go.mod h1:0GaP+ecfZMXShS0A94CJn6aEuPRILv8h/VuWI9n1ygg=
github.com/sashamelentyev/usestdlibvars v1.13.0 h1:uObNudVEEHf6JbOJy5bgKJloA1bWjxR9fwgNFpPzKnI=
github.com/sashamelentyev/usestdlibvars v1.13.0/go.mod h1:D2Wb7niIYmTB+gB8z7kh8tyP5ccof1dQ+SFk+WW5NtY=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@@ -1045,8 +1042,8 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI=
github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -1072,9 +1069,8 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -1083,12 +1079,13 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/sylvia7788/contextcheck v1.0.6 h1:o2EZgVPyMKE/Mtoqym61DInKEjwEbsmyoxg3VrmjNO4=
github.com/sylvia7788/contextcheck v1.0.6/go.mod h1:9XDxwvxyuKD+8N+a7Gs7bfWLityh5t70g/GjdEt2N2M=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -1109,14 +1106,14 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=
github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/timonwong/loggercheck v0.9.3 h1:ecACo9fNiHxX4/Bc02rW2+kaJIAMAes7qJ7JKxt0EZI=
github.com/timonwong/loggercheck v0.9.3/go.mod h1:wUqnk9yAOIKtGA39l1KLE9Iz0QiTocu/YZoOf+OzFdw=
github.com/timonwong/logrlint v0.1.0 h1:phZCcypL/vtx6cGxObJgWZ5wexZF5SXFPLOM+ru0e/M=
github.com/timonwong/logrlint v0.1.0/go.mod h1:Zleg4Gw+kRxNej+Ra7o+tEaW5k1qthTaYKU7rSD39LU=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tomarrell/wrapcheck/v2 v2.7.0 h1:J/F8DbSKJC83bAvC6FoZaRjZiZ/iKoueSdrEkmGeacA=
github.com/tomarrell/wrapcheck/v2 v2.7.0/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M=
github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg=
github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=
github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -1128,8 +1125,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
github.com/vektra/mockery/v2 v2.14.1 h1:Xamr4zUkFBDGdZhJ6iCiJ1AwkGRmUgZd8zkwjRXt+TU=
github.com/vektra/mockery/v2 v2.14.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/yfqs=
github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
@@ -1170,14 +1167,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw=
go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk=
go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk=
go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c=
go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc=
go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI=
go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.34.0 h1:PNEMW4EvpNQ7SuoPFNkvbZqi1STkTPKq+8vfoMl/6AE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.34.0/go.mod h1:fk1+icoN47ytLSgkoWHLJrtVTSQ+HgmkNgPTKrk/Nsc=
go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw=
go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo=
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -1199,8 +1194,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0=
go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1224,8 +1219,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1242,8 +1237,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk=
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic=
golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1272,9 +1267,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1333,8 +1327,8 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=
golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1362,8 +1356,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde h1:ejfdSekXMDxDLbRrJMwUk6KnSLZ2McaUCVcIKM+N6jc=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1464,13 +1458,13 @@ golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 h1:Sx/u41w+OwrInGdEckYmEuU5gHoGSL4QbDz3S9s6j4U=
golang.org/x/sys v0.0.0-20220818161305-2296e01440c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 h1:Q5284mrmYTpACcm+eAKjKJH48BBwSyfJqmmGDTtT8Vc=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1479,9 +1473,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1581,9 +1574,8 @@ golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlz
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1673,8 +1665,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw=
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
|
||||
google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b h1:SfSkJugek6xm7lWywqth4r2iTrYLpD8lOj1nMIIhMNM=
|
||||
google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
@@ -1718,9 +1710,10 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk=
|
||||
google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -1768,8 +1761,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA=
|
||||
honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
|
||||
mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM=
|
||||
mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ=
|
||||
mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8=
|
||||
mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
|
||||
|
||||
@@ -48,7 +48,7 @@ func (r *Rand) init() {
}

func (r *Rand) reset(seed int64) {
    r.rand = mrand.New(mrand.NewSource(seed))
    r.rand = mrand.New(mrand.NewSource(seed)) //nolint:gosec
}

//----------------------------------------
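The one-line change above appends //nolint:gosec to the math/rand call: gosec's G404 check flags non-cryptographic random sources, and the directive records that the seeded PRNG here is intentional. A minimal, self-contained sketch of the same pattern (a standalone example, not code from this repository):

package main

import (
    "fmt"
    mrand "math/rand"
)

func main() {
    // Deterministic PRNG for tests and simulation, not for key material;
    // gosec would report G404 here without the nolint directive.
    r := mrand.New(mrand.NewSource(42)) //nolint:gosec
    fmt.Println(r.Intn(100))
}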
@@ -134,7 +134,6 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
            ID:                  mempool.MempoolChannel,
            Priority:            5,
            RecvMessageCapacity: batchMsg.Size(),
            MessageType:         &protomem.Message{},
        },
    }
}
@@ -155,35 +154,28 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(e p2p.Envelope) {
    memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
    switch msg := e.Message.(type) {
    case *protomem.Txs:
        protoTxs := msg.GetTxs()
        if len(protoTxs) == 0 {
            memR.Logger.Error("received empty txs from peer", "src", e.Src)
            return
        }
        txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)}
        if e.Src != nil {
            txInfo.SenderP2PID = e.Src.ID()
        }

        var err error
        for _, tx := range protoTxs {
            ntx := types.Tx(tx)
            err = memR.mempool.CheckTx(ntx, nil, txInfo)
            if errors.Is(err, mempool.ErrTxInCache) {
                memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String())
            } else if err != nil {
                memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err)
            }
        }
    default:
        memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
        memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message))
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
    msg, err := memR.decodeMsg(msgBytes)
    if err != nil {
        memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
        memR.Switch.StopPeerForError(src, err)
        return
    }
    memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

    txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
    if src != nil {
        txInfo.SenderP2PID = src.ID()
    }

    for _, tx := range msg.Txs {
        err = memR.mempool.CheckTx(tx, nil, txInfo)
        if errors.Is(err, mempool.ErrTxInCache) {
            memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
        } else if err != nil {
            memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
        }
    }

    // broadcasting happens from go routines per peer
}
@@ -242,10 +234,18 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
        // https://github.com/tendermint/tendermint/issues/5796

        if _, ok := memTx.senders.Load(peerID); !ok {
            success := peer.Send(p2p.Envelope{
                ChannelID: mempool.MempoolChannel,
                Message:   &protomem.Txs{Txs: [][]byte{memTx.tx}},
            })
            msg := protomem.Message{
                Sum: &protomem.Message_Txs{
                    Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
                },
            }

            bz, err := msg.Marshal()
            if err != nil {
                panic(err)
            }

            success := peer.Send(mempool.MempoolChannel, bz)
            if !success {
                time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
                continue
@@ -264,6 +264,35 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
    }
}

func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
    msg := protomem.Message{}
    err := msg.Unmarshal(bz)
    if err != nil {
        return TxsMessage{}, err
    }

    var message TxsMessage

    if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
        txs := i.Txs.GetTxs()

        if len(txs) == 0 {
            return message, errors.New("empty TxsMessage")
        }

        decoded := make([]types.Tx, len(txs))
        for j, tx := range txs {
            decoded[j] = types.Tx(tx)
        }

        message = TxsMessage{
            Txs: decoded,
        }
        return message, nil
    }
    return message, fmt.Errorf("msg type: %T is not supported", msg)
}

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
    Txs []types.Tx
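Taken together, broadcastTxRoutine and decodeMsg define the wire format: a protomem.Message with the Txs sum set, marshaled to raw bytes. A short sketch of driving Receive by hand under those assumptions (memR and peer stand in for a real reactor and peer):

msg := protomem.Message{
    Sum: &protomem.Message_Txs{
        Txs: &protomem.Txs{Txs: [][]byte{[]byte("tx-bytes")}},
    },
}
bz, err := msg.Marshal()
if err != nil {
    panic(err)
}
// decodeMsg unmarshals bz back into a TxsMessage inside Receive.
memR.Receive(mempool.MempoolChannel, peer, bz)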
@@ -264,10 +264,6 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
    })
}

// TODO: This test tests that we don't panic and are able to generate new
// PeerIDs for each peer we add. It seems as though we should be able to test
// this in a much more direct way.
// https://github.com/tendermint/tendermint/issues/9639
func TestDontExhaustMaxActiveIDs(t *testing.T) {
    config := cfg.TestConfig()
    const N = 1
@@ -283,12 +279,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {

    for i := 0; i < mempool.MaxActiveIDs+1; i++ {
        peer := mock.NewPeer(nil)
        reactor.Receive(p2p.Envelope{
            ChannelID: mempool.MempoolChannel,
            Src:       peer,
            Message:   &memproto.Message{}, // This uses the wrong message type on purpose, to stop the peer as if it were in an error state in the reactor.
        },
        )
        reactor.Receive(mempool.MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
        reactor.AddPeer(peer)
    }
}
@@ -133,7 +133,6 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
            ID:                  mempool.MempoolChannel,
            Priority:            5,
            RecvMessageCapacity: batchMsg.Size(),
            MessageType:         &protomem.Message{},
        },
    }
}
@@ -154,36 +153,27 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(e p2p.Envelope) {
    memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
    switch msg := e.Message.(type) {
    case *protomem.Txs:
        protoTxs := msg.GetTxs()
        if len(protoTxs) == 0 {
            memR.Logger.Error("received empty txs from peer", "src", e.Src)
            return
        }
        txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)}
        if e.Src != nil {
            txInfo.SenderP2PID = e.Src.ID()
        }

        var err error
        for _, tx := range protoTxs {
            ntx := types.Tx(tx)
            err = memR.mempool.CheckTx(ntx, nil, txInfo)
            if errors.Is(err, mempool.ErrTxInCache) {
                memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String())
            } else if err != nil {
                memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err)
            }
        }
    default:
        memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
        memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message))
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
    msg, err := memR.decodeMsg(msgBytes)
    if err != nil {
        memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
        memR.Switch.StopPeerForError(src, err)
        return
    }
    memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

    txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
    if src != nil {
        txInfo.SenderP2PID = src.ID()
    }
    for _, tx := range msg.Txs {
        err = memR.mempool.CheckTx(tx, nil, txInfo)
        if err == mempool.ErrTxInCache {
            memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
        } else if err != nil {
            memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
        }
    }
    // broadcasting happens from go routines per peer
}

@@ -243,10 +233,18 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
        // NOTE: Transaction batching was disabled due to
        // https://github.com/tendermint/tendermint/issues/5796
        if !memTx.HasPeer(peerID) {
            success := peer.Send(p2p.Envelope{
                ChannelID: mempool.MempoolChannel,
                Message:   &protomem.Txs{Txs: [][]byte{memTx.tx}},
            })
            msg := protomem.Message{
                Sum: &protomem.Message_Txs{
                    Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
                },
            }

            bz, err := msg.Marshal()
            if err != nil {
                panic(err)
            }

            success := peer.Send(mempool.MempoolChannel, bz)
            if !success {
                time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
                continue
@@ -270,6 +268,37 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
//-----------------------------------------------------------------------------
// Messages

func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
    msg := protomem.Message{}
    err := msg.Unmarshal(bz)
    if err != nil {
        return TxsMessage{}, err
    }

    var message TxsMessage

    if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
        txs := i.Txs.GetTxs()

        if len(txs) == 0 {
            return message, errors.New("empty TxsMessage")
        }

        decoded := make([]types.Tx, len(txs))
        for j, tx := range txs {
            decoded[j] = types.Tx(tx)
        }

        message = TxsMessage{
            Txs: decoded,
        }
        return message, nil
    }
    return message, fmt.Errorf("msg type: %T is not supported", msg)
}

//-------------------------------------

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
    Txs []types.Tx
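One subtle difference from the v0 reactor above: this version compares the CheckTx error with == against mempool.ErrTxInCache, while v0 uses errors.Is. The two diverge as soon as the sentinel is ever wrapped, as a quick illustration:

err := fmt.Errorf("checking tx: %w", mempool.ErrTxInCache)
fmt.Println(err == mempool.ErrTxInCache)          // false: the sentinel is wrapped
fmt.Println(errors.Is(err, mempool.ErrTxInCache)) // true: errors.Is unwraps the chain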
35
node/id.go
Normal file
@@ -0,0 +1,35 @@
package node

import (
    "time"

    "github.com/tendermint/tendermint/crypto"
)

type ID struct {
    Name   string
    PubKey crypto.PubKey
}

type PrivNodeID struct {
    ID
    PrivKey crypto.PrivKey
}

type Greeting struct {
    ID
    Version string
    ChainID string
    Message string
    Time    time.Time
}

type SignedNodeGreeting struct {
    Greeting
    Signature []byte
}

func (pnid *PrivNodeID) SignGreeting() *SignedNodeGreeting {
    // greeting := NodeGreeting{}
    return nil
}
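SignGreeting is stubbed out above. One plausible shape for it, shown purely as a hedged sketch: the extra parameters, the tmjson encoding, and the error-returning signature are assumptions, not part of the file.

// Hypothetical sketch; Sign comes from the crypto.PrivKey interface and
// tmjson is github.com/tendermint/tendermint/libs/json.
func (pnid *PrivNodeID) SignGreeting(chainID, msg string) (*SignedNodeGreeting, error) {
    greeting := Greeting{
        ID:      pnid.ID,
        ChainID: chainID,
        Message: msg,
        Time:    time.Now(),
    }
    bz, err := tmjson.Marshal(greeting)
    if err != nil {
        return nil, err
    }
    sig, err := pnid.PrivKey.Sign(bz)
    if err != nil {
        return nil, err
    }
    return &SignedNodeGreeting{Greeting: greeting, Signature: sig}, nil
}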
1065
node/node.go
File diff suppressed because it is too large
@@ -4,7 +4,6 @@ import (
    "context"
    "fmt"
    "net"
    "net/http"
    "os"
    "syscall"
    "testing"
@@ -123,8 +122,6 @@ func TestNodeSetAppVersion(t *testing.T) {
    // create & start node
    n, err := DefaultNewNode(config, log.TestingLogger())
    require.NoError(t, err)
    require.NoError(t, n.Start())
    defer n.Stop() //nolint:errcheck

    // default config uses the kvstore app
    var appVersion = kvstore.ProtocolVersion
@@ -138,29 +135,6 @@ func TestNodeSetAppVersion(t *testing.T) {
    assert.Equal(t, n.nodeInfo.(p2p.DefaultNodeInfo).ProtocolVersion.App, appVersion)
}

func TestPprofServer(t *testing.T) {
    config := test.ResetTestRoot("node_pprof_test")
    defer os.RemoveAll(config.RootDir)
    config.RPC.PprofListenAddress = testFreeAddr(t)

    // should not work yet
    _, err := http.Get("http://" + config.RPC.PprofListenAddress) //nolint: bodyclose
    assert.Error(t, err)

    n, err := DefaultNewNode(config, log.TestingLogger())
    assert.NoError(t, err)
    assert.NoError(t, n.Start())
    defer func() {
        require.NoError(t, n.Stop())
    }()
    assert.NotNil(t, n.pprofSrv)

    resp, err := http.Get("http://" + config.RPC.PprofListenAddress + "/debug/pprof")
    assert.NoError(t, err)
    defer resp.Body.Close()
    assert.Equal(t, 200, resp.StatusCode)
}

func TestNodeSetPrivValTCP(t *testing.T) {
    addr := "tcp://" + testFreeAddr(t)

@@ -191,8 +165,6 @@ func TestNodeSetPrivValTCP(t *testing.T) {

    n, err := DefaultNewNode(config, log.TestingLogger())
    require.NoError(t, err)
    require.NoError(t, n.Start())
    defer n.Stop() //nolint:errcheck
    assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
}

@@ -205,7 +177,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
    config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix

    _, err := DefaultNewNode(config, log.TestingLogger())
    require.NoError(t, err)
    assert.Error(t, err)
}

func TestNodeSetPrivValIPC(t *testing.T) {
@@ -455,6 +427,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
            RecvMessageCapacity: 100,
        },
    }
    customBlocksyncReactor := p2pmock.NewReactor()

    nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
    require.NoError(t, err)
@@ -467,7 +440,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
        DefaultDBProvider,
        DefaultMetricsProvider(config.Instrumentation),
        log.TestingLogger(),
        CustomReactors(map[string]p2p.Reactor{"FOO": cr}),
        CustomReactors(map[string]p2p.Reactor{"FOO": cr, "BLOCKSYNC": customBlocksyncReactor}),
    )
    require.NoError(t, err)

@@ -478,6 +451,9 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
    assert.True(t, cr.IsRunning())
    assert.Equal(t, cr, n.Switch().Reactor("FOO"))

    assert.True(t, customBlocksyncReactor.IsRunning())
    assert.Equal(t, customBlocksyncReactor, n.Switch().Reactor("BLOCKSYNC"))

    channels := n.NodeInfo().(p2p.DefaultNodeInfo).Channels
    assert.Contains(t, channels, mempl.MempoolChannel)
    assert.Contains(t, channels, cr.Channels[0].ID)
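The removed TestPprofServer probed the node's pprof endpoint over HTTP. The same check can be made by hand against a running node, assuming config.RPC.PprofListenAddress is set (the address below is illustrative only):

resp, err := http.Get("http://localhost:6060/debug/pprof")
if err != nil {
    panic(err) // pprof server not reachable
}
defer resp.Body.Close()
fmt.Println(resp.StatusCode) // expect 200 while the node is running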
682
node/setup.go
@@ -1,682 +0,0 @@
package node

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "net"
    "strings"
    "time"

    dbm "github.com/tendermint/tm-db"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/blocksync"
    cfg "github.com/tendermint/tendermint/config"
    cs "github.com/tendermint/tendermint/consensus"
    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/evidence"

    tmjson "github.com/tendermint/tendermint/libs/json"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/light"
    mempl "github.com/tendermint/tendermint/mempool"
    mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
    mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/p2p/pex"
    "github.com/tendermint/tendermint/privval"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/state/indexer"
    blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
    blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null"
    "github.com/tendermint/tendermint/state/indexer/sink/psql"
    "github.com/tendermint/tendermint/state/txindex"
    "github.com/tendermint/tendermint/state/txindex/kv"
    "github.com/tendermint/tendermint/state/txindex/null"
    "github.com/tendermint/tendermint/statesync"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
    "github.com/tendermint/tendermint/version"

    _ "github.com/lib/pq" // provide the psql db driver
)

// DBContext specifies config information for loading a new DB.
type DBContext struct {
    ID     string
    Config *cfg.Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

const readHeaderTimeout = 10 * time.Second

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    dbType := dbm.BackendType(ctx.Config.DBBackend)
    return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
    return func() (*types.GenesisDoc, error) {
        return types.GenesisDocFromFile(config.GenesisFile())
    }
}

// Provider takes a config and a logger and returns a ready to go Node.
type Provider func(*cfg.Config, log.Logger) (*Node, error)

// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
    nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
    if err != nil {
        return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
    }

    return NewNode(config,
        privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
        nodeKey,
        proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
        DefaultGenesisDocProviderFunc(config),
        DefaultDBProvider,
        DefaultMetricsProvider(config.Instrumentation),
        logger,
    )
}

// MetricsProvider returns a consensus, p2p and mempool Metrics.
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics)

// DefaultMetricsProvider returns Metrics built using the Prometheus client library
// if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
    return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics) {
        if config.Prometheus {
            return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
                p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
                mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
                sm.PrometheusMetrics(config.Namespace, "chain_id", chainID),
                proxy.PrometheusMetrics(config.Namespace, "chain_id", chainID),
                blocksync.PrometheusMetrics(config.Namespace, "chain_id", chainID),
                statesync.PrometheusMetrics(config.Namespace, "chain_id", chainID)
        }
        return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), proxy.NopMetrics(), blocksync.NopMetrics(), statesync.NopMetrics()
    }
}

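// Usage sketch (illustrative, with assumed names; not part of setup.go): the
// provider is called once with the chain ID, and the seven values are wired
// into consensus, p2p, mempool, state, proxy, blocksync, and statesync:
//
//	csMetrics, p2pMetrics, memplMetrics, smMetrics, proxyMetrics, bsMetrics, ssMetrics :=
//		DefaultMetricsProvider(config.Instrumentation)(genDoc.ChainID)
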
type blockSyncReactor interface {
    SwitchToBlockSync(sm.State) error
}

//------------------------------------------------------------------------------

func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
    var blockStoreDB dbm.DB
    blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
    if err != nil {
        return
    }
    blockStore = store.NewBlockStore(blockStoreDB)

    stateDB, err = dbProvider(&DBContext{"state", config})
    if err != nil {
        return
    }

    return
}

func createProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
    proxyApp := proxy.NewAppConns(clientCreator, metrics)
    proxyApp.SetLogger(logger.With("module", "proxy"))
    if err := proxyApp.Start(); err != nil {
        return nil, fmt.Errorf("error starting proxy app connections: %v", err)
    }
    return proxyApp, nil
}

func createEventBus(logger log.Logger) *types.EventBus {
    eventBus := types.NewEventBus()
    eventBus.SetLogger(logger.With("module", "events"))
    return eventBus
}

func createIndexerService(
    config *cfg.Config,
    chainID string,
    dbProvider DBProvider,
    eventBus *types.EventBus,
    logger log.Logger,
) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
    var (
        txIndexer    txindex.TxIndexer
        blockIndexer indexer.BlockIndexer
    )

    switch config.TxIndex.Indexer {
    case "kv":
        store, err := dbProvider(&DBContext{"tx_index", config})
        if err != nil {
            return nil, nil, nil, err
        }

        txIndexer = kv.NewTxIndex(store)
        blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))

    case "psql":
        if config.TxIndex.PsqlConn == "" {
            return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`)
        }
        es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID)
        if err != nil {
            return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err)
        }
        txIndexer = es.TxIndexer()
        blockIndexer = es.BlockIndexer()

    default:
        txIndexer = &null.TxIndex{}
        blockIndexer = &blockidxnull.BlockerIndexer{}
    }

    indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
    indexerService.SetLogger(logger.With("module", "txindex"))

    return indexerService, txIndexer, blockIndexer, nil
}

func doHandshake(
    stateStore sm.Store,
    state sm.State,
    blockStore sm.BlockStore,
    genDoc *types.GenesisDoc,
    eventBus types.BlockEventPublisher,
    proxyApp proxy.AppConns,
    consensusLogger log.Logger,
) error {
    handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
    handshaker.SetLogger(consensusLogger)
    handshaker.SetEventBus(eventBus)
    if err := handshaker.Handshake(proxyApp); err != nil {
        return fmt.Errorf("error during handshake: %v", err)
    }
    return nil
}

func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger) {
    // Log the version info.
    logger.Info("Version info",
        "tendermint_version", version.TMCoreSemVer,
        "abci", version.ABCISemVer,
        "block", version.BlockProtocol,
        "p2p", version.P2PProtocol,
        "commit_hash", version.TMGitCommitHash,
    )

    // If the state and software differ in block version, at least log it.
    if state.Version.Consensus.Block != version.BlockProtocol {
        logger.Info("Software and state have different block protocols",
            "software", version.BlockProtocol,
            "state", state.Version.Consensus.Block,
        )
    }

    addr := pubKey.Address()
    // Log whether this node is a validator or an observer
    if state.Validators.HasAddress(addr) {
        logger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
    } else {
        logger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
    }
}

func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
    if state.Validators.Size() > 1 {
        return false
    }
    addr, _ := state.Validators.GetByIndex(0)
    return bytes.Equal(pubKey.Address(), addr)
}

func createMempoolAndMempoolReactor(
    config *cfg.Config,
    proxyApp proxy.AppConns,
    state sm.State,
    memplMetrics *mempl.Metrics,
    logger log.Logger,
) (mempl.Mempool, p2p.Reactor) {
    switch config.Mempool.Version {
    case cfg.MempoolV1:
        mp := mempoolv1.NewTxMempool(
            logger,
            config.Mempool,
            proxyApp.Mempool(),
            state.LastBlockHeight,
            mempoolv1.WithMetrics(memplMetrics),
            mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
            mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
        )

        reactor := mempoolv1.NewReactor(
            config.Mempool,
            mp,
        )
        if config.Consensus.WaitForTxs() {
            mp.EnableTxsAvailable()
        }

        return mp, reactor

    case cfg.MempoolV0:
        mp := mempoolv0.NewCListMempool(
            config.Mempool,
            proxyApp.Mempool(),
            state.LastBlockHeight,
            mempoolv0.WithMetrics(memplMetrics),
            mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
            mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
        )

        mp.SetLogger(logger)

        reactor := mempoolv0.NewReactor(
            config.Mempool,
            mp,
        )
        if config.Consensus.WaitForTxs() {
            mp.EnableTxsAvailable()
        }

        return mp, reactor

    default:
        return nil, nil
    }
}

func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
    stateStore sm.Store, blockStore *store.BlockStore, logger log.Logger,
) (*evidence.Reactor, *evidence.Pool, error) {
    evidenceDB, err := dbProvider(&DBContext{"evidence", config})
    if err != nil {
        return nil, nil, err
    }
    evidenceLogger := logger.With("module", "evidence")
    evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
    if err != nil {
        return nil, nil, err
    }
    evidenceReactor := evidence.NewReactor(evidencePool)
    evidenceReactor.SetLogger(evidenceLogger)
    return evidenceReactor, evidencePool, nil
}

func createBlocksyncReactor(config *cfg.Config,
    state sm.State,
    blockExec *sm.BlockExecutor,
    blockStore *store.BlockStore,
    metrics *blocksync.Metrics,
    logger log.Logger,
) (bcReactor *blocksync.Reactor, err error) {
    switch config.BlockSync.Version {
    case "v0":
        bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, metrics)
    case "v1", "v2":
        return nil, fmt.Errorf("block sync version %s has been deprecated. Please use v0", config.BlockSync.Version)
    default:
        return nil, fmt.Errorf("unknown fastsync version %s", config.BlockSync.Version)
    }

    bcReactor.SetLogger(logger.With("module", "blocksync"))
    return bcReactor, nil
}

func createConsensusReactor(config *cfg.Config,
    blockExec *sm.BlockExecutor,
    blockStore sm.BlockStore,
    mempool mempl.Mempool,
    evidencePool *evidence.Pool,
    privValidator types.PrivValidator,
    csMetrics *cs.Metrics,
    eventBus *types.EventBus,
    consensusLogger log.Logger,
) (*cs.Reactor, *cs.State) {
    consensusState := cs.NewState(
        config.Consensus,
        blockExec,
        blockStore,
        mempool,
        evidencePool,
        cs.WithMetrics(csMetrics),
    )
    consensusState.SetLogger(consensusLogger)
    if privValidator != nil {
        consensusState.SetPrivValidator(privValidator)
    }
    consensusReactor := cs.NewReactor(consensusState, cs.ReactorMetrics(csMetrics))
    consensusReactor.SetLogger(consensusLogger)
    // services which will be publishing and/or subscribing for messages (events)
    // consensusReactor will set it on consensusState and blockExecutor
    consensusReactor.SetEventBus(eventBus)
    return consensusReactor, consensusState
}

func createTransport(
    config *cfg.Config,
    nodeInfo p2p.NodeInfo,
    nodeKey *p2p.NodeKey,
    netAddr *p2p.NetAddress,
    proxyApp proxy.AppConns,
) (
    *p2p.MultiplexTransport,
    []p2p.PeerFilterFunc,
) {
    var (
        mConnConfig = p2p.MConnConfig(config.P2P)
        transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, *netAddr, mConnConfig)
        connFilters = []p2p.ConnFilterFunc{}
        peerFilters = []p2p.PeerFilterFunc{}
    )

    if !config.P2P.AllowDuplicateIP {
        connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
    }

    // Filter peers by addr or pubkey with an ABCI query.
    // If the query return code is OK, add peer.
    if config.FilterPeers {
        connFilters = append(
            connFilters,
            // ABCI query for address filtering.
            func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
                res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
                    Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
                })
                if err != nil {
                    return err
                }
                if res.IsErr() {
                    return fmt.Errorf("error querying abci app: %v", res)
                }

                return nil
            },
        )

        peerFilters = append(
            peerFilters,
            // ABCI query for ID filtering.
            func(_ p2p.IPeerSet, p p2p.Peer) error {
                res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
                    Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
                })
                if err != nil {
                    return err
                }
                if res.IsErr() {
                    return fmt.Errorf("error querying abci app: %v", res)
                }

                return nil
            },
        )
    }

    p2p.MultiplexTransportConnFilters(connFilters...)(transport)

    // Limit the number of incoming connections.
    max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
    p2p.MultiplexTransportMaxIncomingConnections(max)(transport)

    return transport, peerFilters
}

func createSwitch(config *cfg.Config,
    p2pMetrics *p2p.Metrics,
    mempoolReactor p2p.Reactor,
    bcReactor p2p.Reactor,
    stateSyncReactor *statesync.Reactor,
    consensusReactor *cs.Reactor,
    evidenceReactor *evidence.Reactor,
    p2pLogger log.Logger,
) *p2p.Switch {
    sw := p2p.NewSwitch(
        config.P2P,
        p2p.WithMetrics(p2pMetrics),
    )
    sw.SetLogger(p2pLogger)
    sw.AddReactor("MEMPOOL", mempoolReactor)
    sw.AddReactor("BLOCKSYNC", bcReactor)
    sw.AddReactor("CONSENSUS", consensusReactor)
    sw.AddReactor("EVIDENCE", evidenceReactor)
    sw.AddReactor("STATESYNC", stateSyncReactor)
    return sw
}

func createAddrBook(config *cfg.Config,
    p2pLogger log.Logger, nodeKey *p2p.NodeKey,
) (pex.AddrBook, error) {
    addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
    addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))

    // Add ourselves to addrbook to prevent dialing ourselves
    if config.P2P.ExternalAddress != "" {
        addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
        if err != nil {
            return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
        }
        addrBook.AddOurAddress(addr)
    }
    if config.P2P.ListenAddress != "" {
        addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
        if err != nil {
            return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
        }
        addrBook.AddOurAddress(addr)
    }
    return addrBook, nil
}

func createPEXReactor(addrBook pex.AddrBook, config *cfg.Config, logger log.Logger) *pex.Reactor {
    // TODO persistent peers ? so we can have their DNS addrs saved
    pexReactor := pex.NewReactor(addrBook,
        &pex.ReactorConfig{
            Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
            SeedMode: config.P2P.SeedMode,
            // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
            // blocks assuming 10s blocks ~ 28 hours.
            // TODO (melekes): make it dynamic based on the actual block latencies
            // from the live network.
            // https://github.com/tendermint/tendermint/issues/3523
            SeedDisconnectWaitPeriod:     28 * time.Hour,
            PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
        })
    pexReactor.SetLogger(logger.With("module", "pex"))
    return pexReactor
}

// startStateSync starts an asynchronous state sync process, then switches to block sync mode.
func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.Reactor,
    stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, blockSync bool,
    stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
) error {
    ssR.Logger.Info("Starting state sync")

    if stateProvider == nil {
        var err error
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()
        stateProvider, err = statesync.NewLightClientStateProvider(
            ctx,
            state.ChainID, state.Version, state.InitialHeight,
            config.RPCServers, light.TrustOptions{
                Period: config.TrustPeriod,
                Height: config.TrustHeight,
                Hash:   config.TrustHashBytes(),
            }, ssR.Logger.With("module", "light"))
        if err != nil {
            return fmt.Errorf("failed to set up light client state provider: %w", err)
        }
    }

    go func() {
        state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
        if err != nil {
            ssR.Logger.Error("State sync failed", "err", err)
            return
        }
        err = stateStore.Bootstrap(state)
        if err != nil {
            ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
            return
        }
        err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
        if err != nil {
            ssR.Logger.Error("Failed to store last seen commit", "err", err)
            return
        }

        if blockSync {
            err = bcR.SwitchToBlockSync(state)
            if err != nil {
                ssR.Logger.Error("Failed to switch to block sync", "err", err)
                return
            }
        } else {
            err := conR.SwitchToConsensus(state, true)
            if err != nil {
                ssR.Logger.Error("Failed to switch to consensus", "err", err)
                return
            }
        }
    }()
    return nil
}

//------------------------------------------------------------------------------

var genesisDocKey = []byte("genesisDoc")

// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
// database, or creates one using the given genesisDocProvider. On success this also
// returns the genesis doc loaded through the given provider.
func LoadStateFromDBOrGenesisDocProvider(
    stateDB dbm.DB,
    genesisDocProvider GenesisDocProvider,
) (sm.State, *types.GenesisDoc, error) {
    // Get genesis doc
    genDoc, err := loadGenesisDoc(stateDB)
    if err != nil {
        genDoc, err = genesisDocProvider()
        if err != nil {
            return sm.State{}, nil, err
        }

        err = genDoc.ValidateAndComplete()
        if err != nil {
            return sm.State{}, nil, fmt.Errorf("error in genesis doc: %w", err)
        }
        // save genesis doc to prevent a certain class of user errors (e.g. when it
        // was changed, accidentally or not). Also good for audit trail.
        if err := saveGenesisDoc(stateDB, genDoc); err != nil {
            return sm.State{}, nil, err
        }
    }
    stateStore := sm.NewStore(stateDB, sm.StoreOptions{
        DiscardABCIResponses: false,
    })
    state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    if err != nil {
        return sm.State{}, nil, err
    }
    return state, genDoc, nil
}

// panics if failed to unmarshal bytes
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
    b, err := db.Get(genesisDocKey)
    if err != nil {
        panic(err)
    }
    if len(b) == 0 {
        return nil, errors.New("genesis doc not found")
    }
    var genDoc *types.GenesisDoc
    err = tmjson.Unmarshal(b, &genDoc)
    if err != nil {
        panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
    }
    return genDoc, nil
}

// panics if failed to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
    b, err := tmjson.Marshal(genDoc)
    if err != nil {
        return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
    }
    if err := db.SetSync(genesisDocKey, b); err != nil {
        return err
    }

    return nil
}

func createPrivValidatorSocketClient(
    listenAddr,
    chainID string,
    logger log.Logger,
) (types.PrivValidator, error) {
    pve, err := privval.NewSignerListener(listenAddr, logger)
    if err != nil {
        return nil, fmt.Errorf("failed to start private validator: %w", err)
    }

    pvsc, err := privval.NewSignerClient(pve, chainID)
    if err != nil {
        return nil, fmt.Errorf("failed to start private validator: %w", err)
    }

    const (
        retries = 50 // 50 * 100ms = 5s total
        timeout = 100 * time.Millisecond
    )
    pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)

    return pvscWithRetries, nil
}

// splitAndTrimEmpty slices s into all subslices separated by sep and returns a
// slice of the string s with all leading and trailing Unicode code points
// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
// UTF-8 sequence. The first part is equivalent to strings.SplitN with a count of
// -1. It also filters out empty strings, returning only non-empty strings.
func splitAndTrimEmpty(s, sep, cutset string) []string {
    if s == "" {
        return []string{}
    }

    spl := strings.Split(s, sep)
    nonEmptyStrings := make([]string, 0, len(spl))
    for i := 0; i < len(spl); i++ {
        element := strings.Trim(spl[i], cutset)
        if element != "" {
            nonEmptyStrings = append(nonEmptyStrings, element)
        }
    }
    return nonEmptyStrings
}
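For reference, splitAndTrimEmpty behaves like this (a small worked example of the calls made from createTransport and createPEXReactor above):

seeds := splitAndTrimEmpty("id1@host1:26656, id2@host2:26656,, ", ",", " ")
// seeds == []string{"id1@host1:26656", "id2@host2:26656"}:
// empty elements are dropped and surrounding spaces are trimmed.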
@@ -38,9 +38,13 @@ type Reactor interface {
    // or other reason).
    RemovePeer(peer Peer, reason interface{})

    // Receive is called by the switch when an envelope is received from any connected
    // peer on any of the channels registered by the reactor
    Receive(Envelope)
    // Receive is called by the switch when msgBytes is received from the peer.
    //
    // NOTE reactor can not keep msgBytes around after Receive completes without
    // copying.
    //
    // CONTRACT: msgBytes are not nil.
    Receive(chID byte, peer Peer, msgBytes []byte)
}

//--------------------------------------
@@ -60,8 +64,8 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor {
func (br *BaseReactor) SetSwitch(sw *Switch) {
    br.Switch = sw
}
func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor   { return nil }
func (*BaseReactor) AddPeer(peer Peer)                        {}
func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (*BaseReactor) Receive(e Envelope)                       {}
func (*BaseReactor) InitPeer(peer Peer) Peer                  { return peer }
func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor        { return nil }
func (*BaseReactor) AddPeer(peer Peer)                             {}
func (*BaseReactor) RemovePeer(peer Peer, reason interface{})      {}
func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
func (*BaseReactor) InitPeer(peer Peer) Peer                       { return peer }
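The CONTRACT comment above has a practical consequence: a reactor that retains msgBytes past the end of Receive must copy them first, since the switch may reuse the underlying buffer. A hedged sketch of a compliant implementation (bufferingReactor and its queue field are hypothetical):

func (r *bufferingReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
    cp := make([]byte, len(msgBytes))
    copy(cp, msgBytes) // required: msgBytes may be invalidated after Receive returns
    r.queue <- cp
}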
@@ -724,7 +724,6 @@ type ChannelDescriptor struct {
    SendQueueCapacity   int
    RecvBufferCapacity  int
    RecvMessageCapacity int
    MessageType         proto.Message
}

func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
@@ -44,29 +44,15 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
            Name:      "num_txs",
            Help:      "Number of transactions submitted by each peer.",
        }, append(labels, "peer_id")).With(labelsAndValues...),
        MessageReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "message_receive_bytes_total",
            Help:      "Number of bytes of each message type received.",
        }, append(labels, "message_type")).With(labelsAndValues...),
        MessageSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "message_send_bytes_total",
            Help:      "Number of bytes of each message type sent.",
        }, append(labels, "message_type")).With(labelsAndValues...),
    }
}

func NopMetrics() *Metrics {
    return &Metrics{
        Peers:                    discard.NewGauge(),
        PeerReceiveBytesTotal:    discard.NewCounter(),
        PeerSendBytesTotal:       discard.NewCounter(),
        PeerPendingSendBytes:     discard.NewGauge(),
        NumTxs:                   discard.NewGauge(),
        MessageReceiveBytesTotal: discard.NewCounter(),
        MessageSendBytesTotal:    discard.NewCounter(),
        Peers:                 discard.NewGauge(),
        PeerReceiveBytesTotal: discard.NewCounter(),
        PeerSendBytesTotal:    discard.NewCounter(),
        PeerPendingSendBytes:  discard.NewGauge(),
        NumTxs:                discard.NewGauge(),
    }
}
@@ -1,11 +1,6 @@
package p2p

import (
    "fmt"
    "reflect"
    "regexp"
    "sync"

    "github.com/go-kit/kit/metrics"
)

@@ -15,13 +10,6 @@ const (
    MetricsSubsystem = "p2p"
)

var (
    // valueToLabelRegexp is used to find the golang package name and type name
    // so that the name can be turned into a prometheus label where the characters
    // in the label do not include prometheus special characters such as '*' and '.'.
    valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`)
)

//go:generate go run ../scripts/metricsgen -struct=Metrics

// Metrics contains metrics exposed by this package.
@@ -36,43 +24,4 @@ type Metrics struct {
    PeerPendingSendBytes metrics.Gauge `metrics_labels:"peer_id"`
    // Number of transactions submitted by each peer.
    NumTxs metrics.Gauge `metrics_labels:"peer_id"`
    // Number of bytes of each message type received.
    MessageReceiveBytesTotal metrics.Counter `metrics_labels:"message_type"`
    // Number of bytes of each message type sent.
    MessageSendBytesTotal metrics.Counter `metrics_labels:"message_type"`
}

type metricsLabelCache struct {
    mtx               *sync.RWMutex
    messageLabelNames map[reflect.Type]string
}

// ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang
// type that is passed in.
// This method uses a map on the Metrics struct so that each label name only needs
// to be produced once to prevent expensive string operations.
func (m *metricsLabelCache) ValueToMetricLabel(i interface{}) string {
    t := reflect.TypeOf(i)
    m.mtx.RLock()

    if s, ok := m.messageLabelNames[t]; ok {
        m.mtx.RUnlock()
        return s
    }
    m.mtx.RUnlock()

    s := t.String()
    ss := valueToLabelRegexp.FindStringSubmatch(s)
    l := fmt.Sprintf("%s_%s", ss[1], ss[2])
    m.mtx.Lock()
    defer m.mtx.Unlock()
    m.messageLabelNames[t] = l
    return l
}

func newMetricsLabelCache() *metricsLabelCache {
    return &metricsLabelCache{
        mtx:               &sync.RWMutex{},
        messageLabelNames: map[reflect.Type]string{},
    }
}
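For context, the removed ValueToMetricLabel turned a Go type name into a Prometheus-safe label via valueToLabelRegexp; a standalone illustration of the transformation:

re := regexp.MustCompile(`\*?(\w+)\.(.*)`)
ss := re.FindStringSubmatch("*mempool.Txs") // as returned by reflect.TypeOf(...).String()
fmt.Printf("%s_%s\n", ss[1], ss[2])         // prints "mempool_Txs"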
@@ -42,9 +42,9 @@ func NewPeer(ip net.IP) *Peer {
    return mp
}

func (mp *Peer) FlushStop()                  { mp.Stop() } //nolint:errcheck //ignore error
func (mp *Peer) TrySend(e p2p.Envelope) bool { return true }
func (mp *Peer) Send(e p2p.Envelope) bool    { return true }
func (mp *Peer) FlushStop()                              { mp.Stop() } //nolint:errcheck //ignore error
func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true }
func (mp *Peer) Send(chID byte, msgBytes []byte) bool    { return true }
func (mp *Peer) NodeInfo() p2p.NodeInfo {
    return p2p.DefaultNodeInfo{
        DefaultNodeID: mp.addr.ID,
@@ -19,7 +19,7 @@ func NewReactor() *Reactor {
    return r
}

func (r *Reactor) GetChannels() []*conn.ChannelDescriptor       { return r.Channels }
func (r *Reactor) AddPeer(peer p2p.Peer)                        {}
func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {}
func (r *Reactor) Receive(e p2p.Envelope)                       {}
func (r *Reactor) GetChannels() []*conn.ChannelDescriptor            { return r.Channels }
func (r *Reactor) AddPeer(peer p2p.Peer)                             {}
func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{})      {}
func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {}
@@ -234,13 +234,13 @@ func (_m *Peer) Reset() error {
    return r0
}

// Send provides a mock function with given fields: _a0
func (_m *Peer) Send(_a0 p2p.Envelope) bool {
    ret := _m.Called(_a0)
// Send provides a mock function with given fields: _a0, _a1
func (_m *Peer) Send(_a0 byte, _a1 []byte) bool {
    ret := _m.Called(_a0, _a1)

    var r0 bool
    if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok {
        r0 = rf(_a0)
    if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok {
        r0 = rf(_a0, _a1)
    } else {
        r0 = ret.Get(0).(bool)
    }
@@ -335,13 +335,13 @@ func (_m *Peer) String() string {
    return r0
}

// TrySend provides a mock function with given fields: _a0
func (_m *Peer) TrySend(_a0 p2p.Envelope) bool {
    ret := _m.Called(_a0)
// TrySend provides a mock function with given fields: _a0, _a1
func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool {
    ret := _m.Called(_a0, _a1)

    var r0 bool
    if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok {
        r0 = rf(_a0)
    if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok {
        r0 = rf(_a0, _a1)
    } else {
        r0 = ret.Get(0).(bool)
    }
p2p/peer.go (77 lines changed)
@@ -3,11 +3,8 @@ package p2p

import (
    "fmt"
    "net"
    "reflect"
    "time"

    "github.com/cosmos/gogoproto/proto"

    "github.com/tendermint/tendermint/libs/cmap"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"

@@ -37,8 +34,8 @@ type Peer interface {
    Status() tmconn.ConnectionStatus
    SocketAddr() *NetAddress // actual address of the socket

    Send(Envelope) bool
    TrySend(Envelope) bool
    Send(byte, []byte) bool
    TrySend(byte, []byte) bool

    Set(string, interface{})
    Get(string) interface{}

@@ -123,7 +120,6 @@ type peer struct {

    metrics       *Metrics
    metricsTicker *time.Ticker
    mlc           *metricsLabelCache

    // When removal of a peer fails, we set this flag
    removalAttemptFailed bool

@@ -136,10 +132,8 @@ func newPeer(
    mConfig tmconn.MConnConfig,
    nodeInfo NodeInfo,
    reactorsByCh map[byte]Reactor,
    msgTypeByChID map[byte]proto.Message,
    chDescs []*tmconn.ChannelDescriptor,
    onPeerError func(Peer, interface{}),
    mlc *metricsLabelCache,
    options ...PeerOption,
) *peer {
    p := &peer{

@@ -149,14 +143,12 @@ func newPeer(
        Data:          cmap.NewCMap(),
        metricsTicker: time.NewTicker(metricsTickerDuration),
        metrics:       NopMetrics(),
        mlc:           mlc,
    }

    p.mconn = createMConnection(
        pc.conn,
        p,
        reactorsByCh,
        msgTypeByChID,
        chDescs,
        onPeerError,
        mConfig,

@@ -257,39 +249,40 @@ func (p *peer) Status() tmconn.ConnectionStatus {

// Send msg bytes to the channel identified by chID byte. Returns false if the
// send queue is full after timeout, specified by MConnection.
func (p *peer) Send(e Envelope) bool {
    return p.send(e.ChannelID, e.Message, p.mconn.Send)
}

// TrySend msg bytes to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
func (p *peer) TrySend(e Envelope) bool {
    return p.send(e.ChannelID, e.Message, p.mconn.TrySend)
}

func (p *peer) send(chID byte, msg proto.Message, sendFunc func(byte, []byte) bool) bool {
func (p *peer) Send(chID byte, msgBytes []byte) bool {
    if !p.IsRunning() {
        // see Switch#Broadcast, where we fetch the list of peers and loop over
        // them - while we're looping, one peer may be removed and stopped.
        return false
    } else if !p.hasChannel(chID) {
        return false
    }
    metricLabelValue := p.mlc.ValueToMetricLabel(msg)
    if w, ok := msg.(Wrapper); ok {
        msg = w.Wrap()
    }
    msgBytes, err := proto.Marshal(msg)
    if err != nil {
        p.Logger.Error("marshaling message to send", "error", err)
        return false
    }
    res := sendFunc(chID, msgBytes)
    res := p.mconn.Send(chID, msgBytes)
    if res {
        labels := []string{
            "peer_id", string(p.ID()),
            "chID", fmt.Sprintf("%#x", chID),
        }
        p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
    }
    return res
}

// TrySend msg bytes to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
    if !p.IsRunning() {
        return false
    } else if !p.hasChannel(chID) {
        return false
    }
    res := p.mconn.TrySend(chID, msgBytes)
    if res {
        labels := []string{
            "peer_id", string(p.ID()),
            "chID", fmt.Sprintf("%#x", chID),
        }
        p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
        p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes)))
    }
    return res
}
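Worth noting in the removed code: Send and TrySend shared a single helper that differed only in the queueing function passed in (p.mconn.Send blocks until a timeout, p.mconn.TrySend returns immediately). A dependency-free sketch of that strategy-function shape, with hypothetical names that are not the tendermint ones:

package main

import "fmt"

// queueFunc mirrors the signature shared by MConnection.Send and
// MConnection.TrySend: a channel ID plus raw bytes, returning success.
type queueFunc func(chID byte, msgBytes []byte) bool

// fakePeer is a hypothetical stand-in for illustration only.
type fakePeer struct {
    running  bool
    channels map[byte]bool
}

// send centralizes the liveness and channel checks once; callers choose
// the queueing strategy, just as the removed helper took p.mconn.Send or
// p.mconn.TrySend as its sendFunc argument.
func (p *fakePeer) send(chID byte, msgBytes []byte, queue queueFunc) bool {
    if !p.running || !p.channels[chID] {
        return false
    }
    return queue(chID, msgBytes)
}

func main() {
    p := &fakePeer{running: true, channels: map[byte]bool{0x01: true}}
    blocking := func(chID byte, b []byte) bool {
        fmt.Printf("queued %d bytes on channel %#x\n", len(b), chID)
        return true
    }
    fmt.Println(p.send(0x01, []byte("ping"), blocking)) // true
    fmt.Println(p.send(0x02, []byte("ping"), blocking)) // false: unknown channel
}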
@@ -391,7 +384,6 @@ func createMConnection(
    conn net.Conn,
    p *peer,
    reactorsByCh map[byte]Reactor,
    msgTypeByChID map[byte]proto.Message,
    chDescs []*tmconn.ChannelDescriptor,
    onPeerError func(Peer, interface{}),
    config tmconn.MConnConfig,

@@ -404,29 +396,12 @@ func createMConnection(
            // which does onPeerError.
            panic(fmt.Sprintf("Unknown channel %X", chID))
        }
        mt := msgTypeByChID[chID]
        msg := proto.Clone(mt)
        err := proto.Unmarshal(msgBytes, msg)
        if err != nil {
            panic(fmt.Errorf("unmarshaling message: %s into type: %s", err, reflect.TypeOf(mt)))
        }
        labels := []string{
            "peer_id", string(p.ID()),
            "chID", fmt.Sprintf("%#x", chID),
        }
        if w, ok := msg.(Unwrapper); ok {
            msg, err = w.Unwrap()
            if err != nil {
                panic(fmt.Errorf("unwrapping message: %s", err))
            }
        }
        p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes)))
        p.metrics.MessageReceiveBytesTotal.With("message_type", p.mlc.ValueToMetricLabel(msg)).Add(float64(len(msgBytes)))
        reactor.Receive(Envelope{
            ChannelID: chID,
            Src:       p,
            Message:   msg,
        })
        reactor.Receive(chID, p, msgBytes)
    }

    onError := func(r interface{}) {
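The removed receive path decodes each payload into a fresh copy of the prototype message registered for its channel (proto.Clone on the msgTypeByChID entry, then proto.Unmarshal). Here is a self-contained analogue of that prototype-registry idea, using JSON and reflection instead of protobuf purely for illustration; all names are hypothetical:

package main

import (
    "encoding/json"
    "fmt"
    "reflect"
)

// pexRequest is a hypothetical message type for illustration.
type pexRequest struct {
    Sender string `json:"sender"`
}

// One prototype value per channel; incoming payloads are decoded into a
// fresh instance of the registered prototype's concrete type.
var msgTypeByChID = map[byte]interface{}{
    0x00: &pexRequest{},
}

func decodeForChannel(chID byte, payload []byte) (interface{}, error) {
    prototype, ok := msgTypeByChID[chID]
    if !ok {
        return nil, fmt.Errorf("unknown channel %X", chID)
    }
    // Allocate a fresh value of the prototype's concrete type, playing
    // the role proto.Clone plays for the registered protobuf message.
    msg := reflect.New(reflect.TypeOf(prototype).Elem()).Interface()
    if err := json.Unmarshal(payload, msg); err != nil {
        return nil, fmt.Errorf("unmarshaling message into type %s: %w", reflect.TypeOf(prototype), err)
    }
    return msg, nil
}

func main() {
    msg, err := decodeForChannel(0x00, []byte(`{"sender":"peer-1"}`))
    fmt.Printf("%#v %v\n", msg, err)
}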
@@ -18,22 +18,22 @@ type mockPeer struct {
    id ID
}

func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error
func (mp *mockPeer) TrySend(e Envelope) bool { return true }
func (mp *mockPeer) Send(e Envelope) bool { return true }
func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} }
func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} }
func (mp *mockPeer) ID() ID { return mp.id }
func (mp *mockPeer) IsOutbound() bool { return false }
func (mp *mockPeer) IsPersistent() bool { return true }
func (mp *mockPeer) Get(s string) interface{} { return s }
func (mp *mockPeer) Set(string, interface{}) {}
func (mp *mockPeer) RemoteIP() net.IP { return mp.ip }
func (mp *mockPeer) SocketAddr() *NetAddress { return nil }
func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
func (mp *mockPeer) CloseConn() error { return nil }
func (mp *mockPeer) SetRemovalFailed() {}
func (mp *mockPeer) GetRemovalFailed() bool { return false }
func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error
func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true }
func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} }
func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} }
func (mp *mockPeer) ID() ID { return mp.id }
func (mp *mockPeer) IsOutbound() bool { return false }
func (mp *mockPeer) IsPersistent() bool { return true }
func (mp *mockPeer) Get(s string) interface{} { return s }
func (mp *mockPeer) Set(string, interface{}) {}
func (mp *mockPeer) RemoteIP() net.IP { return mp.ip }
func (mp *mockPeer) SocketAddr() *NetAddress { return nil }
func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
func (mp *mockPeer) CloseConn() error { return nil }
func (mp *mockPeer) SetRemovalFailed() {}
func (mp *mockPeer) GetRemovalFailed() bool { return false }

// Returns a mock peer
func newMockPeer(ip net.IP) *mockPeer {

@@ -7,7 +7,6 @@ import (
    "testing"
    "time"

    "github.com/cosmos/gogoproto/proto"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

@@ -15,7 +14,6 @@ import (
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/libs/bytes"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/proto/tendermint/p2p"

    "github.com/tendermint/tendermint/config"
    tmconn "github.com/tendermint/tendermint/p2p/conn"

@@ -72,7 +70,7 @@ func TestPeerSend(t *testing.T) {
    })

    assert.True(p.CanSend(testCh))
    assert.True(p.Send(Envelope{ChannelID: testCh, Message: &p2p.Message{}}))
    assert.True(p.Send(testCh, []byte("Asylum")))
}

func createOutboundPeerAndPerformHandshake(

@@ -84,9 +82,6 @@ func createOutboundPeerAndPerformHandshake(
        {ID: testCh, Priority: 1},
    }
    reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)}
    msgTypeByChID := map[byte]proto.Message{
        testCh: &p2p.Message{},
    }
    pk := ed25519.GenPrivKey()
    pc, err := testOutboundPeerConn(addr, config, false, pk)
    if err != nil {

@@ -99,7 +94,7 @@ func createOutboundPeerAndPerformHandshake(
        return nil, err
    }

    p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, msgTypeByChID, chDescs, func(p Peer, r interface{}) {}, newMetricsLabelCache())
    p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {})
    p.SetLogger(log.TestingLogger().With("peer", addr))
    return p, nil
}

@@ -6,6 +6,8 @@ import (
    "sync"
    "time"

    "github.com/cosmos/gogoproto/proto"

    "github.com/tendermint/tendermint/libs/cmap"
    tmmath "github.com/tendermint/tendermint/libs/math"
    tmrand "github.com/tendermint/tendermint/libs/rand"

@@ -182,7 +184,6 @@ func (r *Reactor) GetChannels() []*conn.ChannelDescriptor {
            Priority:            1,
            SendQueueCapacity:   10,
            RecvMessageCapacity: maxMsgSize,
            MessageType:         &tmp2p.Message{},
        },
    }
}

@@ -235,10 +236,16 @@ func (r *Reactor) logErrAddrBook(err error) {
}

// Receive implements Reactor by handling incoming PEX messages.
func (r *Reactor) Receive(e p2p.Envelope) {
    r.Logger.Debug("Received message", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) {
    msg, err := decodeMsg(msgBytes)
    if err != nil {
        r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
        r.Switch.StopPeerForError(src, err)
        return
    }
    r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg)

    switch msg := e.Message.(type) {
    switch msg := msg.(type) {
    case *tmp2p.PexRequest:

        // NOTE: this is a prime candidate for amplification attacks,

@@ -248,8 +255,8 @@ func (r *Reactor) Receive(e p2p.Envelope) {

        // If we're a seed and this is an inbound peer,
        // respond once and disconnect.
        if r.config.SeedMode && !e.Src.IsOutbound() {
            id := string(e.Src.ID())
        if r.config.SeedMode && !src.IsOutbound() {
            id := string(src.ID())
            v := r.lastReceivedRequests.Get(id)
            if v != nil {
                // FlushStop/StopPeer are already

@@ -259,36 +266,36 @@ func (r *Reactor) Receive(e p2p.Envelope) {
            r.lastReceivedRequests.Set(id, time.Now())

            // Send addrs and disconnect
            r.SendAddrs(e.Src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
            r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
            go func() {
                // In a go-routine so it doesn't block .Receive.
                e.Src.FlushStop()
                r.Switch.StopPeerGracefully(e.Src)
                src.FlushStop()
                r.Switch.StopPeerGracefully(src)
            }()

        } else {
            // Check we're not receiving requests too frequently.
            if err := r.receiveRequest(e.Src); err != nil {
                r.Switch.StopPeerForError(e.Src, err)
                r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime)
            if err := r.receiveRequest(src); err != nil {
                r.Switch.StopPeerForError(src, err)
                r.book.MarkBad(src.SocketAddr(), defaultBanTime)
                return
            }
            r.SendAddrs(e.Src, r.book.GetSelection())
            r.SendAddrs(src, r.book.GetSelection())
        }

    case *tmp2p.PexAddrs:
        // If we asked for addresses, add them to the book
        addrs, err := p2p.NetAddressesFromProto(msg.Addrs)
        if err != nil {
            r.Switch.StopPeerForError(e.Src, err)
            r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime)
            r.Switch.StopPeerForError(src, err)
            r.book.MarkBad(src.SocketAddr(), defaultBanTime)
            return
        }
        err = r.ReceiveAddrs(addrs, e.Src)
        err = r.ReceiveAddrs(addrs, src)
        if err != nil {
            r.Switch.StopPeerForError(e.Src, err)
            r.Switch.StopPeerForError(src, err)
            if err == ErrUnsolicitedList {
                r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime)
                r.book.MarkBad(src.SocketAddr(), defaultBanTime)
            }
            return
        }

@@ -341,10 +348,7 @@ func (r *Reactor) RequestAddrs(p Peer) {
    }
    r.Logger.Debug("Request addrs", "from", p)
    r.requestsSent.Set(id, struct{}{})
    p.Send(p2p.Envelope{
        ChannelID: PexChannel,
        Message:   &tmp2p.PexRequest{},
    })
    p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{}))
}

// ReceiveAddrs adds the given addrs to the addrbook if there's an open

@@ -402,11 +406,7 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {

// SendAddrs sends addrs to the peer.
func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
    e := p2p.Envelope{
        ChannelID: PexChannel,
        Message:   &tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)},
    }
    p.Send(e)
    p.Send(PexChannel, mustEncode(&tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)}))
}

// SetEnsurePeersPeriod sets period to ensure peers connected.

@@ -763,3 +763,43 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) {
        book.MarkAttempt(addr)
    }
}

//-----------------------------------------------------------------------------
// Messages

// mustEncode proto encodes a tmp2p.Message
func mustEncode(pb proto.Message) []byte {
    msg := tmp2p.Message{}
    switch pb := pb.(type) {
    case *tmp2p.PexRequest:
        msg.Sum = &tmp2p.Message_PexRequest{PexRequest: pb}
    case *tmp2p.PexAddrs:
        msg.Sum = &tmp2p.Message_PexAddrs{PexAddrs: pb}
    default:
        panic(fmt.Sprintf("Unknown message type %T", pb))
    }

    bz, err := msg.Marshal()
    if err != nil {
        panic(fmt.Errorf("unable to marshal %T: %w", pb, err))
    }
    return bz
}

func decodeMsg(bz []byte) (proto.Message, error) {
    pb := &tmp2p.Message{}

    err := pb.Unmarshal(bz)
    if err != nil {
        return nil, err
    }

    switch msg := pb.Sum.(type) {
    case *tmp2p.Message_PexRequest:
        return msg.PexRequest, nil
    case *tmp2p.Message_PexAddrs:
        return msg.PexAddrs, nil
    default:
        return nil, fmt.Errorf("unknown message: %T", msg)
    }
}
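Since both helpers are shown in full above, their contract is easy to pin down in a test: mustEncode wraps a concrete PEX message into the tmp2p.Message oneof, and decodeMsg recovers the same concrete type. A sketch of such a round-trip check, assuming it lives in the pex package alongside the helpers:

package pex

import (
    "testing"

    "github.com/stretchr/testify/require"

    tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

// TestEncodeDecodeRoundTrip checks that mustEncode and decodeMsg invert
// each other: a message wrapped into the tmp2p.Message oneof comes back
// out as the same concrete type.
func TestEncodeDecodeRoundTrip(t *testing.T) {
    bz := mustEncode(&tmp2p.PexRequest{})

    msg, err := decodeMsg(bz)
    require.NoError(t, err)

    _, ok := msg.(*tmp2p.PexRequest)
    require.True(t, ok)
}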
@@ -131,11 +131,12 @@ func TestPEXReactorReceive(t *testing.T) {
    r.RequestAddrs(peer)

    size := book.Size()
    msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}
    r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg})
    msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}})
    r.Receive(PexChannel, peer, msg)
    assert.Equal(t, size+1, book.Size())

    r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}})
    msg = mustEncode(&tmp2p.PexRequest{})
    r.Receive(PexChannel, peer, msg) // should not panic.
}

func TestPEXReactorRequestMessageAbuse(t *testing.T) {

@@ -154,19 +155,20 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
    require.True(t, book.HasAddress(peerAddr))

    id := string(peer.ID())
    msg := mustEncode(&tmp2p.PexRequest{})

    // first time creates the entry
    r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}})
    r.Receive(PexChannel, peer, msg)
    assert.True(t, r.lastReceivedRequests.Has(id))
    assert.True(t, sw.Peers().Has(peer.ID()))

    // next time sets the last time value
    r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}})
    r.Receive(PexChannel, peer, msg)
    assert.True(t, r.lastReceivedRequests.Has(id))
    assert.True(t, sw.Peers().Has(peer.ID()))

    // third time is too many too soon - peer is removed
    r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}})
    r.Receive(PexChannel, peer, msg)
    assert.False(t, r.lastReceivedRequests.Has(id))
    assert.False(t, sw.Peers().Has(peer.ID()))
    assert.True(t, book.IsBanned(peerAddr))

@@ -190,15 +192,15 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
    assert.True(t, r.requestsSent.Has(id))
    assert.True(t, sw.Peers().Has(peer.ID()))

    msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}
    msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}})

    // receive some addrs. should clear the request
    r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg})
    r.Receive(PexChannel, peer, msg)
    assert.False(t, r.requestsSent.Has(id))
    assert.True(t, sw.Peers().Has(peer.ID()))

    // receiving more unsolicited addrs causes a disconnect and ban
    r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg})
    r.Receive(PexChannel, peer, msg)
    assert.False(t, sw.Peers().Has(peer.ID()))
    assert.True(t, book.IsBanned(peer.SocketAddr()))
}

@@ -484,12 +486,8 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
    pexR.RequestAddrs(peer)

    size := book.Size()
    msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}
    pexR.Receive(p2p.Envelope{
        ChannelID: PexChannel,
        Src:       peer,
        Message:   msg,
    })
    msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}})
    pexR.Receive(PexChannel, peer, msg)
    assert.Equal(t, size, book.Size())

    pexR.AddPeer(peer)

@@ -697,9 +695,7 @@ func TestPexVectors(t *testing.T) {
    for _, tc := range testCases {
        tc := tc

        w := tc.msg.(p2p.Wrapper).Wrap()
        bz, err := proto.Marshal(w)
        require.NoError(t, err)
        bz := mustEncode(tc.msg)

        require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
    }

@@ -1,15 +1,14 @@
package p2p

import (
    "errors"
    "fmt"
    "math"
    "sync"
    "time"

    "github.com/cosmos/gogoproto/proto"
    "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/cmap"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/p2p/conn"

@@ -70,17 +69,16 @@ type PeerFilterFunc func(IPeerSet, Peer) error
type Switch struct {
    service.BaseService

    config        *config.P2PConfig
    reactors      map[string]Reactor
    chDescs       []*conn.ChannelDescriptor
    reactorsByCh  map[byte]Reactor
    msgTypeByChID map[byte]proto.Message
    peers         *PeerSet
    dialing       *cmap.CMap
    reconnecting  *cmap.CMap
    nodeInfo      NodeInfo // our node info
    nodeKey       *NodeKey // our node privkey
    addrBook      AddrBook
    config       *config.P2PConfig
    reactors     map[string]Reactor
    chDescs      []*conn.ChannelDescriptor
    reactorsByCh map[byte]Reactor
    peers        *PeerSet
    dialing      *cmap.CMap
    reconnecting *cmap.CMap
    nodeInfo     NodeInfo // our node info
    nodeKey      *NodeKey // our node privkey
    addrBook     AddrBook
    // peers addresses with whom we'll maintain constant connection
    persistentPeersAddrs []*NetAddress
    unconditionalPeerIDs map[ID]struct{}

@@ -93,7 +91,6 @@ type Switch struct {
    rng *rand.Rand // seed for randomizing dial times and orders

    metrics *Metrics
    mlc     *metricsLabelCache
}

// NetAddress returns the address the switch is listening on.

@@ -108,23 +105,22 @@ type SwitchOption func(*Switch)
// NewSwitch creates a new Switch with the given config.
func NewSwitch(
    cfg *config.P2PConfig,
    transport Transport,
    options ...SwitchOption,
) *Switch {

    sw := &Switch{
        config:               cfg,
        reactors:             make(map[string]Reactor),
        chDescs:              make([]*conn.ChannelDescriptor, 0),
        reactorsByCh:         make(map[byte]Reactor),
        msgTypeByChID:        make(map[byte]proto.Message),
        peers:                NewPeerSet(),
        dialing:              cmap.NewCMap(),
        reconnecting:         cmap.NewCMap(),
        metrics:              NopMetrics(),
        transport:            transport,
        filterTimeout:        defaultFilterTimeout,
        persistentPeersAddrs: make([]*NetAddress, 0),
        unconditionalPeerIDs: make(map[ID]struct{}),
        mlc:                  newMetricsLabelCache(),
    }

    // Ensure we have a completely nondeterministic PRNG.
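NewSwitch keeps the functional-options style (options ...SwitchOption applied over the struct literal) even as the transport moves into the constructor's positional arguments. A minimal standalone rendering of that pattern, with illustrative names only:

package main

import "fmt"

// server stands in for the Switch; option mirrors SwitchOption.
type server struct{ name string }

type option func(*server)

func withName(n string) option {
    return func(s *server) { s.name = n }
}

// newServer applies each option over a default-initialized value, the
// same shape as NewSwitch looping over its options.
func newServer(opts ...option) *server {
    s := &server{name: "default"}
    for _, o := range opts {
        o(s)
    }
    return s
}

func main() {
    fmt.Println(newServer(withName("switch")).name) // switch
}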
@@ -168,7 +164,6 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
        }
        sw.chDescs = append(sw.chDescs, chDesc)
        sw.reactorsByCh[chID] = reactor
        sw.msgTypeByChID[chID] = chDesc.MessageType
    }
    sw.reactors[name] = reactor
    reactor.SetSwitch(sw)

@@ -187,7 +182,6 @@ func (sw *Switch) RemoveReactor(name string, reactor Reactor) {
            }
        }
        delete(sw.reactorsByCh, chDesc.ID)
        delete(sw.msgTypeByChID, chDesc.ID)
    }
    delete(sw.reactors, name)
    reactor.SetSwitch(nil)

@@ -223,30 +217,11 @@ func (sw *Switch) SetNodeKey(nodeKey *NodeKey) {
    sw.nodeKey = nodeKey
}

func (sw *Switch) SetPeerFilters(filters ...PeerFilterFunc) {
    sw.peerFilters = filters
}

func (sw *Switch) SetTransport(transport Transport) {
    if sw.IsRunning() {
        panic("cannot set transport while switch is running")
    }
    sw.transport = transport
}

//---------------------------------------------------------------------
// Service start/stop

// OnStart implements BaseService. It starts all the reactors and peers.
func (sw *Switch) OnStart() error {
    if sw.transport == nil {
        return errors.New("transport not set")
    }

    if err := sw.transport.Start(); err != nil {
        return err
    }

    // Start reactors
    for _, reactor := range sw.reactors {
        err := reactor.Start()

@@ -275,10 +250,6 @@ func (sw *Switch) OnStop() {
            sw.Logger.Error("error while stopped reactor", "reactor", reactor, "error", err)
        }
    }

    if err := sw.transport.Stop(); err != nil {
        sw.Logger.Error("closing transport", "err", err)
    }
}

//---------------------------------------------------------------------

@@ -290,8 +261,8 @@ func (sw *Switch) OnStop() {
// closed once msg bytes are sent to all peers (or time out).
//
// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
func (sw *Switch) Broadcast(e Envelope) chan bool {
    sw.Logger.Debug("Broadcast", "channel", e.ChannelID)
func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool {
    sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", log.NewLazySprintf("%X", msgBytes))

    peers := sw.peers.List()
    var wg sync.WaitGroup

@@ -301,7 +272,7 @@ func (sw *Switch) Broadcast(e Envelope) chan bool {
    for _, peer := range peers {
        go func(p Peer) {
            defer wg.Done()
            success := p.Send(e)
            success := p.Send(chID, msgBytes)
            successChan <- success
        }(peer)
    }
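Broadcast's doc comment above promises a channel that is closed once every peer send has completed; the loop shown here fans each send out into its own goroutine and funnels results through successChan. A compact, runnable stand-in for that fan-out shape (not the tendermint implementation itself):

package main

import (
    "fmt"
    "sync"
)

// broadcast fans each send out into its own goroutine and returns a
// channel that reports every result and is closed once all sends finish.
func broadcast(sends []func() bool) chan bool {
    successChan := make(chan bool, len(sends))
    var wg sync.WaitGroup
    wg.Add(len(sends))
    for _, send := range sends {
        go func(send func() bool) {
            defer wg.Done()
            successChan <- send()
        }(send)
    }
    go func() {
        wg.Wait()
        close(successChan) // lets callers range over the results
    }()
    return successChan
}

func main() {
    ok := func() bool { return true }
    for s := range broadcast([]func() bool{ok, ok, ok}) {
        fmt.Println("send succeeded:", s)
    }
}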
@@ -652,13 +623,11 @@ func (sw *Switch) IsPeerPersistent(na *NetAddress) bool {
func (sw *Switch) acceptRoutine() {
    for {
        p, err := sw.transport.Accept(peerConfig{
            chDescs:       sw.chDescs,
            onPeerError:   sw.StopPeerForError,
            reactorsByCh:  sw.reactorsByCh,
            msgTypeByChID: sw.msgTypeByChID,
            metrics:       sw.metrics,
            mlc:           sw.mlc,
            isPersistent:  sw.IsPeerPersistent,
            chDescs:      sw.chDescs,
            onPeerError:  sw.StopPeerForError,
            reactorsByCh: sw.reactorsByCh,
            metrics:      sw.metrics,
            isPersistent: sw.IsPeerPersistent,
        })
        if err != nil {
            switch err := err.(type) {

@@ -757,13 +726,11 @@ func (sw *Switch) addOutboundPeerWithConfig(
    }

    p, err := sw.transport.Dial(*addr, peerConfig{
        chDescs:       sw.chDescs,
        onPeerError:   sw.StopPeerForError,
        isPersistent:  sw.IsPeerPersistent,
        reactorsByCh:  sw.reactorsByCh,
        msgTypeByChID: sw.msgTypeByChID,
        metrics:       sw.metrics,
        mlc:           sw.mlc,
        chDescs:      sw.chDescs,
        onPeerError:  sw.StopPeerForError,
        isPersistent: sw.IsPeerPersistent,
        reactorsByCh: sw.reactorsByCh,
        metrics:      sw.metrics,
    })
    if err != nil {
        if e, ok := err.(ErrRejected); ok {

@@ -14,7 +14,6 @@ import (
    "testing"
    "time"

    "github.com/cosmos/gogoproto/proto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

@@ -22,10 +21,8 @@ import (
    "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    tmsync "github.com/tendermint/tendermint/libs/sync"
    "github.com/tendermint/tendermint/p2p/conn"
    p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

var (

@@ -39,8 +36,9 @@ func init() {
}

type PeerMessage struct {
    Contents proto.Message
    Counter  int
    PeerID  ID
    Bytes   []byte
    Counter int
}

type TestReactor struct {

@@ -72,12 +70,12 @@ func (tr *TestReactor) AddPeer(peer Peer) {}

func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {}

func (tr *TestReactor) Receive(e Envelope) {
func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
    if tr.logMessages {
        tr.mtx.Lock()
        defer tr.mtx.Unlock()
        fmt.Printf("Received: %X, %X\n", e.ChannelID, e.Message)
        tr.msgsReceived[e.ChannelID] = append(tr.msgsReceived[e.ChannelID], PeerMessage{Contents: e.Message, Counter: tr.msgsCounter})
        // fmt.Printf("Received: %X, %X\n", chID, msgBytes)
        tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter})
        tr.msgsCounter++
    }
}

@@ -105,12 +103,12 @@ func initSwitchFunc(i int, sw *Switch) *Switch {

    // Make two reactors of two channels each
    sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
        {ID: byte(0x00), Priority: 10, MessageType: &p2pproto.Message{}},
        {ID: byte(0x01), Priority: 10, MessageType: &p2pproto.Message{}},
        {ID: byte(0x00), Priority: 10},
        {ID: byte(0x01), Priority: 10},
    }, true))
    sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
        {ID: byte(0x02), Priority: 10, MessageType: &p2pproto.Message{}},
        {ID: byte(0x03), Priority: 10, MessageType: &p2pproto.Message{}},
        {ID: byte(0x02), Priority: 10},
        {ID: byte(0x03), Priority: 10},
    }, true))

    return sw

@@ -137,47 +135,31 @@ func TestSwitches(t *testing.T) {
    }

    // Let's send some messages
    ch0Msg := &p2pproto.PexAddrs{
        Addrs: []p2pproto.NetAddress{
            {
                ID: "1",
            },
        },
    }
    ch1Msg := &p2pproto.PexAddrs{
        Addrs: []p2pproto.NetAddress{
            {
                ID: "1",
            },
        },
    }
    ch2Msg := &p2pproto.PexAddrs{
        Addrs: []p2pproto.NetAddress{
            {
                ID: "2",
            },
        },
    }
    s1.Broadcast(Envelope{ChannelID: byte(0x00), Message: ch0Msg})
    s1.Broadcast(Envelope{ChannelID: byte(0x01), Message: ch1Msg})
    s1.Broadcast(Envelope{ChannelID: byte(0x02), Message: ch2Msg})
    ch0Msg := []byte("channel zero")
    ch1Msg := []byte("channel foo")
    ch2Msg := []byte("channel bar")

    s1.Broadcast(byte(0x00), ch0Msg)
    s1.Broadcast(byte(0x01), ch1Msg)
    s1.Broadcast(byte(0x02), ch2Msg)

    assertMsgReceivedWithTimeout(t,
        ch0Msg,
        byte(0x00),
        s2.Reactor("foo").(*TestReactor), 200*time.Millisecond, 5*time.Second)
        s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
    assertMsgReceivedWithTimeout(t,
        ch1Msg,
        byte(0x01),
        s2.Reactor("foo").(*TestReactor), 200*time.Millisecond, 5*time.Second)
        s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
    assertMsgReceivedWithTimeout(t,
        ch2Msg,
        byte(0x02),
        s2.Reactor("bar").(*TestReactor), 200*time.Millisecond, 5*time.Second)
        s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
}

func assertMsgReceivedWithTimeout(
    t *testing.T,
    msg proto.Message,
    msgBytes []byte,
    channel byte,
    reactor *TestReactor,
    checkPeriod,

@@ -188,13 +170,9 @@ func assertMsgReceivedWithTimeout(
    select {
    case <-ticker.C:
        msgs := reactor.getMsgs(channel)
        expectedBytes, err := proto.Marshal(msgs[0].Contents)
        require.NoError(t, err)
        gotBytes, err := proto.Marshal(msg)
        require.NoError(t, err)
        if len(msgs) > 0 {
            if !bytes.Equal(expectedBytes, gotBytes) {
                t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msg, msgs[0].Counter)
            if !bytes.Equal(msgs[0].Bytes, msgBytes) {
                t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
            }
            return
        }

@@ -451,10 +429,7 @@ func TestSwitchStopPeerForError(t *testing.T) {

    // send messages to the peer from sw1
    p := sw1.Peers().List()[0]
    p.Send(Envelope{
        ChannelID: 0x1,
        Message:   &p2pproto.Message{},
    })
    p.Send(0x1, []byte("here's a message to send"))

    // stop sw2. this should cause the p to fail,
    // which results in calling StopPeerForError internally

@@ -696,16 +671,9 @@ func TestSwitchAcceptRoutine(t *testing.T) {
}

type errorTransport struct {
    service.BaseService
    acceptErr error
}

func newErrTransport(acceptErr error) *errorTransport {
    t := &errorTransport{acceptErr: acceptErr}
    t.BaseService = *service.NewBaseService(nil, "Error Transport", t)
    return t
}

func (et errorTransport) NetAddress() NetAddress {
    panic("not implemented")
}

@@ -721,9 +689,7 @@ func (errorTransport) Cleanup(Peer) {
}

func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
    sw := NewSwitch(cfg)
    sw.SetTransport(newErrTransport(ErrFilterTimeout{}))

    sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}})
    assert.NotPanics(t, func() {
        err := sw.Start()
        require.NoError(t, err)

@@ -731,8 +697,7 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
        require.NoError(t, err)
    })

    sw = NewSwitch(cfg)
    sw.SetTransport(newErrTransport(ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}))
    sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}})
    assert.NotPanics(t, func() {
        err := sw.Start()
        require.NoError(t, err)

@@ -741,8 +706,7 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
    })
    // TODO(melekes) check we remove our address from addrBook

    sw = NewSwitch(cfg)
    sw.SetTransport(newErrTransport(ErrTransportClosed{}))
    sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}})
    assert.NotPanics(t, func() {
        err := sw.Start()
        require.NoError(t, err)

@@ -860,7 +824,7 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
    // Send random message from foo channel to another
    for i := 0; i < b.N; i++ {
        chID := byte(i % 4)
        successChan := s1.Broadcast(Envelope{ChannelID: chID})
        successChan := s1.Broadcast(chID, []byte("test data"))
        for s := range successChan {
            if s {
                numSuccess++

@@ -149,10 +149,8 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
        MConnConfig(sw.config),
        ni,
        sw.reactorsByCh,
        sw.msgTypeByChID,
        sw.chDescs,
        sw.StopPeerForError,
        sw.mlc,
    )

    if err = sw.addPeer(p); err != nil {

@@ -194,11 +192,14 @@ func MakeSwitch(
        panic(err)
    }

    t := NewMultiplexTransport(nodeInfo, nodeKey, *addr, MConnConfig(cfg))
    t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg))

    if err := t.Listen(*addr); err != nil {
        panic(err)
    }

    // TODO: let the config be passed in?
    sw := initSwitch(i, NewSwitch(cfg, opts...))
    sw.SetTransport(t)
    sw := initSwitch(i, NewSwitch(cfg, t, opts...))
    sw.SetLogger(log.TestingLogger().With("switch", i))
    sw.SetNodeKey(&nodeKey)

@@ -8,10 +8,8 @@ import (

    "golang.org/x/net/netutil"

    "github.com/cosmos/gogoproto/proto"
    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/libs/protoio"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/p2p/conn"
    tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

@@ -49,19 +47,15 @@ type peerConfig struct {
    // isPersistent allows you to set a function, which, given socket address
    // (for outbound peers) OR self-reported address (for inbound peers), tells
    // if the peer is persistent or not.
    isPersistent  func(*NetAddress) bool
    reactorsByCh  map[byte]Reactor
    msgTypeByChID map[byte]proto.Message
    metrics       *Metrics
    mlc           *metricsLabelCache
    isPersistent func(*NetAddress) bool
    reactorsByCh map[byte]Reactor
    metrics      *Metrics
}

// Transport emits and connects to Peers. The implementation of Peer is left to
// the transport. Each transport is also responsible to filter establishing
// peers specific to its domain.
type Transport interface {
    service.Service

    // Listening address.
    NetAddress() NetAddress

@@ -75,6 +69,13 @@ type Transport interface {
    Cleanup(Peer)
}

// transportLifecycle bundles the methods for callers to control start and stop
// behavior.
type transportLifecycle interface {
    Close() error
    Listen(NetAddress) error
}

// ConnFilterFunc to be implemented by filter hooks after a new connection has
// been established. The set of existing connections is passed along together
// with all resolved IPs for the new connection.

@@ -132,8 +133,6 @@ func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption {
// MultiplexTransport accepts and dials tcp connections and upgrades them to
// multiplexed peers.
type MultiplexTransport struct {
    service.BaseService

    netAddr                NetAddress
    listener               net.Listener
    maxIncomingConnections int // see MaxIncomingConnections

@@ -160,15 +159,15 @@ type MultiplexTransport struct {

// Test multiplexTransport for interface completeness.
var _ Transport = (*MultiplexTransport)(nil)
var _ transportLifecycle = (*MultiplexTransport)(nil)

// NewMultiplexTransport returns a tcp connected multiplexed peer.
func NewMultiplexTransport(
    nodeInfo NodeInfo,
    nodeKey NodeKey,
    netAddr NetAddress,
    mConfig conn.MConnConfig,
) *MultiplexTransport {
    t := &MultiplexTransport{
    return &MultiplexTransport{
        acceptc:     make(chan accept),
        closec:      make(chan struct{}),
        dialTimeout: defaultDialTimeout,

@@ -177,12 +176,9 @@ func NewMultiplexTransport(
        mConfig:  mConfig,
        nodeInfo: nodeInfo,
        nodeKey:  nodeKey,
        netAddr:  netAddr,
        conns:    NewConnSet(),
        resolver: net.DefaultResolver,
    }
    t.BaseService = *service.NewBaseService(nil, "P2P Transport", t)
    return t
}

// NetAddress implements Transport.

@@ -235,20 +231,20 @@ func (mt *MultiplexTransport) Dial(
    return p, nil
}

// OnStop implements Service.
func (mt *MultiplexTransport) OnStop() {
// Close implements transportLifecycle.
func (mt *MultiplexTransport) Close() error {
    close(mt.closec)

    if mt.listener != nil {
        if err := mt.listener.Close(); err != nil {
            mt.Logger.Error("closing listener", "err", err)
        }
        return mt.listener.Close()
    }

    return nil
}

// OnStart implements Service.
func (mt *MultiplexTransport) OnStart() error {
    ln, err := net.Listen("tcp", mt.netAddr.DialString())
// Listen implements transportLifecycle.
func (mt *MultiplexTransport) Listen(addr NetAddress) error {
    ln, err := net.Listen("tcp", addr.DialString())
    if err != nil {
        return err
    }

@@ -257,6 +253,7 @@ func (mt *MultiplexTransport) OnStart() error {
        ln = netutil.LimitListener(ln, mt.maxIncomingConnections)
    }

    mt.netAddr = addr
    mt.listener = ln

    go mt.acceptPeers()
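The revert splits construction from binding: NewMultiplexTransport no longer takes an address, Listen binds one later, and Close (rather than the service's OnStop) tears the listener down. An illustrative standalone analogue of that Listen/Close lifecycle (a hypothetical miniTransport, not the tendermint type):

package main

import (
    "fmt"
    "net"
)

// miniTransport mirrors the transportLifecycle split shown above: the
// constructor does not bind a socket; Listen does that later, and Close
// releases it.
type miniTransport struct {
    listener net.Listener
    closec   chan struct{}
}

func newMiniTransport() *miniTransport {
    return &miniTransport{closec: make(chan struct{})}
}

func (t *miniTransport) Listen(addr string) error {
    ln, err := net.Listen("tcp", addr)
    if err != nil {
        return err
    }
    t.listener = ln
    return nil
}

func (t *miniTransport) Close() error {
    close(t.closec) // signal any accept loops to stop
    if t.listener != nil {
        return t.listener.Close()
    }
    return nil
}

func main() {
    t := newMiniTransport()
    if err := t.Listen("127.0.0.1:0"); err != nil {
        panic(err)
    }
    fmt.Println("listening on", t.listener.Addr())
    _ = t.Close()
}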
@@ -522,10 +519,8 @@ func (mt *MultiplexTransport) wrapPeer(
        mt.mConfig,
        ni,
        cfg.reactorsByCh,
        cfg.msgTypeByChID,
        cfg.chDescs,
        cfg.onPeerError,
        cfg.mlc,
        PeerMetrics(cfg.metrics),
    )

@@ -10,7 +10,6 @@ import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/libs/protoio"
    "github.com/tendermint/tendermint/p2p/conn"

@@ -29,26 +28,20 @@ func emptyNodeInfo() NodeInfo {
func newMultiplexTransport(
    nodeInfo NodeInfo,
    nodeKey NodeKey,
    netAddr NetAddress,
) *MultiplexTransport {
    return NewMultiplexTransport(
        nodeInfo, nodeKey, netAddr, conn.DefaultMConnConfig(),
        nodeInfo, nodeKey, conn.DefaultMConnConfig(),
    )
}

func TestTransportMultiplexConnFilter(t *testing.T) {
    nodeKey := NodeKey{
        PrivKey: ed25519.GenPrivKey(),
    }
    id := nodeKey.ID()
    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    require.NoError(t, err)

    mt := newMultiplexTransport(
        emptyNodeInfo(),
        nodeKey,
        *addr,
        NodeKey{
            PrivKey: ed25519.GenPrivKey(),
        },
    )
    id := mt.nodeKey.ID()

    MultiplexTransportConnFilters(
        func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil },

@@ -58,8 +51,14 @@ func TestTransportMultiplexConnFilter(t *testing.T) {
        },
    )(mt)

    err = mt.Start()
    require.NoError(t, err)
    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    if err != nil {
        t.Fatal(err)
    }

    if err := mt.Listen(*addr); err != nil {
        t.Fatal(err)
    }

    errc := make(chan error)

@@ -90,18 +89,13 @@ func TestTransportMultiplexConnFilter(t *testing.T) {
}

func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
    nodeKey := NodeKey{
        PrivKey: ed25519.GenPrivKey(),
    }
    id := nodeKey.ID()
    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    require.NoError(t, err)

    mt := newMultiplexTransport(
        emptyNodeInfo(),
        nodeKey,
        *addr,
        NodeKey{
            PrivKey: ed25519.GenPrivKey(),
        },
    )
    id := mt.nodeKey.ID()

    MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt)
    MultiplexTransportConnFilters(

@@ -111,9 +105,14 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
        },
    )(mt)

    err = mt.Start()
    require.NoError(t, err)
    defer mt.Stop() //nolint:errcheck
    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    if err != nil {
        t.Fatal(err)
    }

    if err := mt.Listen(*addr); err != nil {
        t.Fatal(err)
    }

    errc := make(chan error)
    go func() {

@@ -141,10 +140,6 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
    pv := ed25519.GenPrivKey()
    id := PubKeyToID(pv.PubKey())
    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    if err != nil {
        t.Fatal(err)
    }
    mt := newMultiplexTransport(
        testNodeInfo(
            id, "transport",

@@ -152,22 +147,26 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
        NodeKey{
            PrivKey: pv,
        },
        *addr,
    )

    MultiplexTransportMaxIncomingConnections(0)(mt)

    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    if err != nil {
        t.Fatal(err)
    }
    const maxIncomingConns = 2
    MultiplexTransportMaxIncomingConnections(maxIncomingConns)(mt)
    err = mt.Start()
    require.NoError(t, err)
    if err := mt.Listen(*addr); err != nil {
        t.Fatal(err)
    }

    laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

    // Connect more peers than max
    for i := 0; i <= maxIncomingConns; i++ {
        errc := make(chan error)
        go testDialer(t, *laddr, errc)
        go testDialer(*laddr, errc)

        err = <-errc
        if i < maxIncomingConns {

@@ -199,7 +198,7 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {

    // Setup dialers.
    for i := 0; i < nDialers; i++ {
        go testDialer(t, *laddr, errc)
        go testDialer(*laddr, errc)
    }

    // Catch connection errors.

@@ -236,26 +235,23 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {
        }
    }

    if err := mt.Stop(); err != nil {
    if err := mt.Close(); err != nil {
        t.Errorf("close errored: %v", err)
    }
}

func testDialer(t *testing.T, dialAddr NetAddress, errc chan error) {
    pv := ed25519.GenPrivKey()
    id := PubKeyToID(pv.PubKey())
    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    require.NoError(t, err)

    dialer := newMultiplexTransport(
        testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
        NodeKey{
            PrivKey: pv,
        },
        *addr,
func testDialer(dialAddr NetAddress, errc chan error) {
    var (
        pv     = ed25519.GenPrivKey()
        dialer = newMultiplexTransport(
            testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
            NodeKey{
                PrivKey: pv,
            },
        )
    )

    _, err = dialer.Dial(dialAddr, peerConfig{})
    _, err := dialer.Dial(dialAddr, peerConfig{})
    if err != nil {
        errc <- err
        return

@@ -323,14 +319,15 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
    go func() {
        <-slowc

        addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
        dialer := newMultiplexTransport(
            fastNodeInfo,
            NodeKey{
                PrivKey: fastNodePV,
            },
            *addr,
        var (
            dialer = newMultiplexTransport(
                fastNodeInfo,
                NodeKey{
                    PrivKey: fastNodePV,
                },
            )
        )
        addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

        _, err := dialer.Dial(*addr, peerConfig{})
        if err != nil {

@@ -363,17 +360,18 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) {
    errc := make(chan error)

    go func() {
        pv := ed25519.GenPrivKey()
        addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
        dialer := newMultiplexTransport(
            testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
            NodeKey{
                PrivKey: pv,
            },
            *addr,
        var (
            pv     = ed25519.GenPrivKey()
            dialer = newMultiplexTransport(
                testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
                NodeKey{
                    PrivKey: pv,
                },
            )
        )

        addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

        _, err := dialer.Dial(*addr, peerConfig{})
        if err != nil {
            errc <- err

@@ -403,7 +401,6 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
    errc := make(chan error)

    go func() {
        addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
        dialer := newMultiplexTransport(
            testNodeInfo(
                PubKeyToID(ed25519.GenPrivKey().PubKey()), "dialer",

@@ -411,8 +408,8 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
            NodeKey{
                PrivKey: ed25519.GenPrivKey(),
            },
            *addr,
        )
        addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

        _, err := dialer.Dial(*addr, peerConfig{})
        if err != nil {

@@ -440,16 +437,18 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
func TestTransportMultiplexDialRejectWrongID(t *testing.T) {
    mt := testSetupMultiplexTransport(t)

    pv := ed25519.GenPrivKey()
    var (
        pv     = ed25519.GenPrivKey()
        dialer = newMultiplexTransport(
            testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
            NodeKey{
                PrivKey: pv,
            },
        )
    )

    wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey())
    addr := NewNetAddress(wrongID, mt.listener.Addr())
    dialer := newMultiplexTransport(
        testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
        NodeKey{
            PrivKey: pv,
        },
        *addr,
    )

    _, err := dialer.Dial(*addr, peerConfig{})
    if err != nil {

@@ -472,15 +471,14 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) {
    go func() {
        var (
            pv   = ed25519.GenPrivKey()
            addr = NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
            dialer = newMultiplexTransport(
                testNodeInfoWithNetwork(PubKeyToID(pv.PubKey()), "dialer", "incompatible-network"),
                NodeKey{
                    PrivKey: pv,
                },
                *addr,
            )
        )
        addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

        _, err := dialer.Dial(*addr, peerConfig{})
        if err != nil {

@@ -624,16 +622,11 @@ func TestTransportHandshake(t *testing.T) {
}

func TestTransportAddChannel(t *testing.T) {
    pv := ed25519.GenPrivKey()
    id := PubKeyToID(pv.PubKey())
    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    require.NoError(t, err)
    mt := newMultiplexTransport(
        emptyNodeInfo(),
        NodeKey{
            PrivKey: pv,
            PrivKey: ed25519.GenPrivKey(),
        },
        *addr,
    )
    testChannel := byte(0x01)

@@ -645,27 +638,27 @@ func TestTransportAddChannel(t *testing.T) {

// create listener
func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
    pv := ed25519.GenPrivKey()
    id := PubKeyToID(pv.PubKey())
    var (
        pv = ed25519.GenPrivKey()
        id = PubKeyToID(pv.PubKey())
        mt = newMultiplexTransport(
            testNodeInfo(
                id, "transport",
            ),
            NodeKey{
                PrivKey: pv,
            },
        )
    )

    addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
    if err != nil {
        t.Fatal(err)
    }
    mt := newMultiplexTransport(
        testNodeInfo(
            id, "transport",
        ),
        NodeKey{
            PrivKey: pv,
        },
        *addr,
    )

    err = mt.Start()
    require.NoError(t, err)
    t.Cleanup(func() {
        mt.Stop() //nolint:errcheck
    })
    if err := mt.Listen(*addr); err != nil {
        t.Fatal(err)
    }

    // give the listener some time to get ready
    time.Sleep(20 * time.Millisecond)
p2p/trust/config.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package trust

import "time"

// MetricConfig - Configures the weight functions and time intervals for the metric
type MetricConfig struct {
    // Determines the percentage given to current behavior
    ProportionalWeight float64

    // Determines the percentage given to prior behavior
    IntegralWeight float64

    // The window of time that the trust metric will track events across.
    // This can be set to cover many days without issue
    TrackingWindow time.Duration

    // Each interval should be short for adaptability.
    // Less than 30 seconds is too sensitive,
    // and greater than 5 minutes will make the metric numb
    IntervalLength time.Duration
}

// DefaultConfig returns a config with values that have been tested and produce desirable results
func DefaultConfig() MetricConfig {
    return MetricConfig{
        ProportionalWeight: 0.4,
        IntegralWeight:     0.6,
        TrackingWindow:     (time.Minute * 60 * 24) * 14, // 14 days.
        IntervalLength:     1 * time.Minute,
    }
}

// Ensures that all configuration elements have valid values
func customConfig(tmc MetricConfig) MetricConfig {
    config := DefaultConfig()

    // Check the config for set values, and setup appropriately
    if tmc.ProportionalWeight > 0 {
        config.ProportionalWeight = tmc.ProportionalWeight
    }

    if tmc.IntegralWeight > 0 {
        config.IntegralWeight = tmc.IntegralWeight
    }

    if tmc.IntervalLength > time.Duration(0) {
        config.IntervalLength = tmc.IntervalLength
    }

    if tmc.TrackingWindow > time.Duration(0) &&
        tmc.TrackingWindow >= config.IntervalLength {
        config.TrackingWindow = tmc.TrackingWindow
    }
    return config
}
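customConfig's merge semantics follow directly from the code above: zero-valued fields fall back to DefaultConfig, explicitly set fields win, and TrackingWindow is only accepted when it is at least one interval long. A sketch of a test that pins this down, assuming it sits in the trust package:

package trust

import (
    "testing"
    "time"
)

// TestCustomConfigOverrides checks the merge: zero-valued fields fall
// back to DefaultConfig, set fields win.
func TestCustomConfigOverrides(t *testing.T) {
    merged := customConfig(MetricConfig{ProportionalWeight: 0.5})

    if merged.ProportionalWeight != 0.5 {
        t.Fatalf("override lost: %v", merged.ProportionalWeight)
    }
    if merged.IntegralWeight != 0.6 {
        t.Fatalf("default not applied: %v", merged.IntegralWeight)
    }
    if merged.IntervalLength != 1*time.Minute {
        t.Fatalf("default not applied: %v", merged.IntervalLength)
    }
}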
p2p/trust/metric.go (new file, 412 lines)
@@ -0,0 +1,412 @@
// Copyright 2017 Tendermint. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package trust

import (
    "math"
    "time"

    "github.com/tendermint/tendermint/libs/service"
    tmsync "github.com/tendermint/tendermint/libs/sync"
)

//---------------------------------------------------------------------------------------

const (
    // The weight applied to the derivative when current behavior is >= previous behavior
    defaultDerivativeGamma1 = 0

    // The weight applied to the derivative when current behavior is less than previous behavior
    defaultDerivativeGamma2 = 1.0

    // The weight applied to history data values when calculating the history value
    defaultHistoryDataWeight = 0.8
)

// MetricHistoryJSON - history data necessary to save the trust metric
type MetricHistoryJSON struct {
    NumIntervals int       `json:"intervals"`
    History      []float64 `json:"history"`
}

// Metric - keeps track of peer reliability
// See tendermint/docs/architecture/adr-006-trust-metric.md for details
type Metric struct {
    service.BaseService

    // Mutex that protects the metric from concurrent access
    mtx tmsync.Mutex

    // Determines the percentage given to current behavior
    proportionalWeight float64

    // Determines the percentage given to prior behavior
    integralWeight float64

    // Count of how many time intervals this metric has been tracking
    numIntervals int

    // Size of the time interval window for this trust metric
    maxIntervals int

    // The time duration for a single time interval
    intervalLen time.Duration

    // Stores the trust history data for this metric
    history []float64

    // Weights applied to the history data when calculating the history value
    historyWeights []float64

    // The sum of the history weights used when calculating the history value
    historyWeightSum float64

    // The current number of history data elements
    historySize int

    // The maximum number of history data elements
    historyMaxSize int

    // The calculated history value for the current time interval
    historyValue float64

    // The number of recorded good and bad events for the current time interval
    bad, good float64

    // While true, history data is not modified
    paused bool

    // Used during testing in order to control the passing of time intervals
    testTicker MetricTicker
}

// NewMetric returns a trust metric with the default configuration.
// Use Start to begin tracking the quality of peer behavior over time
func NewMetric() *Metric {
    return NewMetricWithConfig(DefaultConfig())
}

// NewMetricWithConfig returns a trust metric with a custom configuration.
// Use Start to begin tracking the quality of peer behavior over time
func NewMetricWithConfig(tmc MetricConfig) *Metric {
    tm := new(Metric)
    config := customConfig(tmc)

    // Setup using the configuration values
    tm.proportionalWeight = config.ProportionalWeight
    tm.integralWeight = config.IntegralWeight
    tm.intervalLen = config.IntervalLength
    // The maximum number of time intervals is the tracking window / interval length
    tm.maxIntervals = int(config.TrackingWindow / tm.intervalLen)
    // The history size will be determined by the maximum number of time intervals
    tm.historyMaxSize = intervalToHistoryOffset(tm.maxIntervals) + 1
    // This metric has a perfect history so far
    tm.historyValue = 1.0

    tm.BaseService = *service.NewBaseService(nil, "Metric", tm)
    return tm
}

// OnStart implements Service
func (tm *Metric) OnStart() error {
    if err := tm.BaseService.OnStart(); err != nil {
        return err
    }
    go tm.processRequests()
    return nil
}

// OnStop implements Service
// Nothing to do since the goroutine shuts down by itself via BaseService.Quit()
func (tm *Metric) OnStop() {}

// Returns a snapshot of the trust metric history data
func (tm *Metric) HistoryJSON() MetricHistoryJSON {
    tm.mtx.Lock()
    defer tm.mtx.Unlock()

    return MetricHistoryJSON{
        NumIntervals: tm.numIntervals,
        History:      tm.history,
    }
}

// Instantiates a trust metric by loading the history data for a single peer.
// This is called only once and only right after creation, which is why the
// lock is not held while accessing the trust metric struct members
func (tm *Metric) Init(hist MetricHistoryJSON) {
    // Restore the number of time intervals we have previously tracked
    if hist.NumIntervals > tm.maxIntervals {
        hist.NumIntervals = tm.maxIntervals
    }
    tm.numIntervals = hist.NumIntervals
    // Restore the history and its current size
    if len(hist.History) > tm.historyMaxSize {
        // Keep the history no larger than historyMaxSize
        last := len(hist.History) - tm.historyMaxSize
        hist.History = hist.History[last:]
    }
    tm.history = hist.History
    tm.historySize = len(tm.history)
    // Create the history weight values and weight sum
    for i := 1; i <= tm.numIntervals; i++ {
        x := math.Pow(defaultHistoryDataWeight, float64(i)) // Optimistic weight
        tm.historyWeights = append(tm.historyWeights, x)
    }

    for _, v := range tm.historyWeights {
        tm.historyWeightSum += v
    }
    // Calculate the history value based on the loaded history data
    tm.historyValue = tm.calcHistoryValue()
}

// Pause tells the metric to pause recording data over time intervals.
// All method calls that indicate events will unpause the metric
func (tm *Metric) Pause() {
    tm.mtx.Lock()
    defer tm.mtx.Unlock()

    // Pause the metric for now
    tm.paused = true
}

// BadEvents indicates that an undesirable event(s) took place
func (tm *Metric) BadEvents(num int) {
    tm.mtx.Lock()
    defer tm.mtx.Unlock()

    tm.unpause()
    tm.bad += float64(num)
}

// GoodEvents indicates that a desirable event(s) took place
func (tm *Metric) GoodEvents(num int) {
    tm.mtx.Lock()
    defer tm.mtx.Unlock()

    tm.unpause()
    tm.good += float64(num)
}

// TrustValue gets the dependable trust value; always between 0 and 1
func (tm *Metric) TrustValue() float64 {
    tm.mtx.Lock()
    defer tm.mtx.Unlock()

    return tm.calcTrustValue()
}

// TrustScore gets a score based on the trust value always between 0 and 100
func (tm *Metric) TrustScore() int {
    score := tm.TrustValue() * 100

    return int(math.Floor(score))
}

// NextTimeInterval saves current time interval data and prepares for the following interval
func (tm *Metric) NextTimeInterval() {
    tm.mtx.Lock()
    defer tm.mtx.Unlock()

    if tm.paused {
        // Do not prepare for the next time interval while paused
        return
    }

    // Add the current trust value to the history data
    newHist := tm.calcTrustValue()
    tm.history = append(tm.history, newHist)

    // Update history and interval counters
|
||||
if tm.historySize < tm.historyMaxSize {
|
||||
tm.historySize++
|
||||
} else {
|
||||
// Keep the history no larger than historyMaxSize
|
||||
last := len(tm.history) - tm.historyMaxSize
|
||||
tm.history = tm.history[last:]
|
||||
}
|
||||
|
||||
if tm.numIntervals < tm.maxIntervals {
|
||||
tm.numIntervals++
|
||||
// Add the optimistic weight for the new time interval
|
||||
wk := math.Pow(defaultHistoryDataWeight, float64(tm.numIntervals))
|
||||
tm.historyWeights = append(tm.historyWeights, wk)
|
||||
tm.historyWeightSum += wk
|
||||
}
|
||||
|
||||
// Update the history data using Faded Memories
|
||||
tm.updateFadedMemory()
|
||||
// Calculate the history value for the upcoming time interval
|
||||
tm.historyValue = tm.calcHistoryValue()
|
||||
tm.good = 0
|
||||
tm.bad = 0
|
||||
}
|
||||
|
||||
// SetTicker allows a TestTicker to be provided that will manually control
|
||||
// the passing of time from the perspective of the Metric.
|
||||
// The ticker must be set before Start is called on the metric
|
||||
func (tm *Metric) SetTicker(ticker MetricTicker) {
|
||||
tm.mtx.Lock()
|
||||
defer tm.mtx.Unlock()
|
||||
|
||||
tm.testTicker = ticker
|
||||
}
|
||||
|
||||
// Copy returns a new trust metric with members containing the same values
|
||||
func (tm *Metric) Copy() *Metric {
|
||||
if tm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tm.mtx.Lock()
|
||||
defer tm.mtx.Unlock()
|
||||
|
||||
return &Metric{
|
||||
proportionalWeight: tm.proportionalWeight,
|
||||
integralWeight: tm.integralWeight,
|
||||
numIntervals: tm.numIntervals,
|
||||
maxIntervals: tm.maxIntervals,
|
||||
intervalLen: tm.intervalLen,
|
||||
history: tm.history,
|
||||
historyWeights: tm.historyWeights,
|
||||
historyWeightSum: tm.historyWeightSum,
|
||||
historySize: tm.historySize,
|
||||
historyMaxSize: tm.historyMaxSize,
|
||||
historyValue: tm.historyValue,
|
||||
good: tm.good,
|
||||
bad: tm.bad,
|
||||
paused: tm.paused,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Private methods */
|
||||
|
||||
// This method is for a goroutine that handles all requests on the metric
|
||||
func (tm *Metric) processRequests() {
|
||||
t := tm.testTicker
|
||||
if t == nil {
|
||||
// No test ticker was provided, so we create a normal ticker
|
||||
t = NewTicker(tm.intervalLen)
|
||||
}
|
||||
defer t.Stop()
|
||||
// Obtain the raw channel
|
||||
tick := t.GetChannel()
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
tm.NextTimeInterval()
|
||||
case <-tm.Quit():
|
||||
// Stop all further tracking for this metric
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wakes the trust metric up if it is currently paused
|
||||
// This method needs to be called with the mutex locked
|
||||
func (tm *Metric) unpause() {
|
||||
// Check if this is the first experience with
|
||||
// what we are tracking since being paused
|
||||
if tm.paused {
|
||||
tm.good = 0
|
||||
tm.bad = 0
|
||||
// New events cause us to unpause the metric
|
||||
tm.paused = false
|
||||
}
|
||||
}
|
||||
|
||||
// Calculates the trust value for the request processing
|
||||
func (tm *Metric) calcTrustValue() float64 {
|
||||
weightedP := tm.proportionalWeight * tm.proportionalValue()
|
||||
weightedI := tm.integralWeight * tm.historyValue
|
||||
weightedD := tm.weightedDerivative()
|
||||
|
||||
tv := weightedP + weightedI + weightedD
|
||||
// Do not return a negative value.
|
||||
if tv < 0 {
|
||||
tv = 0
|
||||
}
|
||||
return tv
|
||||
}
|
||||
|
||||
// Calculates the current score for good/bad experiences
|
||||
func (tm *Metric) proportionalValue() float64 {
|
||||
value := 1.0
|
||||
|
||||
total := tm.good + tm.bad
|
||||
if total > 0 {
|
||||
value = tm.good / total
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// Strengthens the derivative component when the change is negative
|
||||
func (tm *Metric) weightedDerivative() float64 {
|
||||
var weight float64 = defaultDerivativeGamma1
|
||||
|
||||
d := tm.derivativeValue()
|
||||
if d < 0 {
|
||||
weight = defaultDerivativeGamma2
|
||||
}
|
||||
return weight * d
|
||||
}
|
||||
|
||||
// Calculates the derivative component
|
||||
func (tm *Metric) derivativeValue() float64 {
|
||||
return tm.proportionalValue() - tm.historyValue
|
||||
}
|
||||
|
||||
// Calculates the integral (history) component of the trust value
|
||||
func (tm *Metric) calcHistoryValue() float64 {
|
||||
var hv float64
|
||||
|
||||
for i := 0; i < tm.numIntervals; i++ {
|
||||
hv += tm.fadedMemoryValue(i) * tm.historyWeights[i]
|
||||
}
|
||||
|
||||
return hv / tm.historyWeightSum
|
||||
}
|
||||
|
||||
// Retrieves the actual history data value that represents the requested time interval
|
||||
func (tm *Metric) fadedMemoryValue(interval int) float64 {
|
||||
first := tm.historySize - 1
|
||||
|
||||
if interval == 0 {
|
||||
// Base case
|
||||
return tm.history[first]
|
||||
}
|
||||
|
||||
offset := intervalToHistoryOffset(interval)
|
||||
return tm.history[first-offset]
|
||||
}
|
||||
|
||||
// Performs the update for our Faded Memories process, which allows the
|
||||
// trust metric tracking window to be large while maintaining a small
|
||||
// number of history data values
|
||||
func (tm *Metric) updateFadedMemory() {
|
||||
if tm.historySize < 2 {
|
||||
return
|
||||
}
|
||||
|
||||
end := tm.historySize - 1
|
||||
// Keep the most recent history element
|
||||
for count := 1; count < tm.historySize; count++ {
|
||||
i := end - count
|
||||
// The older the data is, the more we spread it out
|
||||
x := math.Pow(2, float64(count))
|
||||
// Two history data values are merged into a single value
|
||||
tm.history[i] = ((tm.history[i] * (x - 1)) + tm.history[i+1]) / x
|
||||
}
|
||||
}
|
||||
|
||||
// Map the interval value down to an offset from the beginning of history
|
||||
func intervalToHistoryOffset(interval int) int {
|
||||
// The system maintains 2^m interval values in the form of m history
|
||||
// data values. Therefore, we access the ith interval by obtaining
|
||||
// the history data index = the floor of log2(i)
|
||||
return int(math.Floor(math.Log2(float64(interval))))
|
||||
}
|
||||
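A minimal usage sketch (illustrative only, not part of this change set) of how a caller can drive the Metric API above; every identifier comes from the exported functions in this file:

	tm := NewMetric() // or NewMetricWithConfig(...) for custom weights
	if err := tm.Start(); err != nil {
		// handle the startup error
	}
	tm.GoodEvents(5)         // five desirable events in this interval
	tm.BadEvents(1)          // one undesirable event; also unpauses the metric
	score := tm.TrustScore() // an int in [0, 100]: floor(TrustValue() * 100)
	_ = score
	_ = tm.Stop()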
119 p2p/trust/metric_test.go (new file)
@@ -0,0 +1,119 @@
package trust

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestTrustMetricScores(t *testing.T) {
	tm := NewMetric()
	err := tm.Start()
	require.NoError(t, err)

	// Perfect score
	tm.GoodEvents(1)
	score := tm.TrustScore()
	assert.Equal(t, 100, score)

	// Less than perfect score
	tm.BadEvents(10)
	score = tm.TrustScore()
	assert.NotEqual(t, 100, score)
	err = tm.Stop()
	require.NoError(t, err)
}

func TestTrustMetricConfig(t *testing.T) {
	// 7 days
	window := time.Minute * 60 * 24 * 7
	config := MetricConfig{
		TrackingWindow: window,
		IntervalLength: 2 * time.Minute,
	}

	tm := NewMetricWithConfig(config)
	err := tm.Start()
	require.NoError(t, err)

	// The max time intervals should be the TrackingWindow / IntervalLen
	assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals)

	dc := DefaultConfig()
	// These weights should still be the default values
	assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight)
	assert.Equal(t, dc.IntegralWeight, tm.integralWeight)
	err = tm.Stop()
	require.NoError(t, err)
	tm.Wait()

	config.ProportionalWeight = 0.3
	config.IntegralWeight = 0.7
	tm = NewMetricWithConfig(config)
	err = tm.Start()
	require.NoError(t, err)

	// These weights should be equal to our custom values
	assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight)
	assert.Equal(t, config.IntegralWeight, tm.integralWeight)
	err = tm.Stop()
	require.NoError(t, err)
	tm.Wait()
}

func TestTrustMetricCopyNilPointer(t *testing.T) {
	var tm *Metric

	ctm := tm.Copy()

	assert.Nil(t, ctm)
}

// XXX: This test fails non-deterministically
//
//nolint:unused,deadcode
func _TestTrustMetricStopPause(t *testing.T) {
	// The TestTicker will provide manual control over
	// the passing of time within the metric
	tt := NewTestTicker()
	tm := NewMetric()
	tm.SetTicker(tt)
	err := tm.Start()
	require.NoError(t, err)
	// Allow some time intervals to pass and pause
	tt.NextTick()
	tt.NextTick()
	tm.Pause()

	// could be 1 or 2 because Pause and NextTick race
	first := tm.Copy().numIntervals

	// Allow more time to pass and check the intervals are unchanged
	tt.NextTick()
	tt.NextTick()
	assert.Equal(t, first, tm.Copy().numIntervals)

	// Get the trust metric activated again
	tm.GoodEvents(5)
	// Allow some time intervals to pass and stop
	tt.NextTick()
	tt.NextTick()
	err = tm.Stop()
	require.NoError(t, err)
	tm.Wait()

	second := tm.Copy().numIntervals
	// Allow more intervals to pass while the metric is stopped
	// and check that the number of intervals match
	tm.NextTimeInterval()
	tm.NextTimeInterval()
	// XXX: fails non-deterministically:
	// expected 5, got 6
	assert.Equal(t, second+2, tm.Copy().numIntervals)

	if first > second {
		t.Fatalf("numIntervals should always increase or stay the same over time")
	}
}
220 p2p/trust/store.go (new file)
@@ -0,0 +1,220 @@
// Copyright 2017 Tendermint. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package trust

import (
	"encoding/json"
	"fmt"
	"time"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

const defaultStorePeriodicSaveInterval = 1 * time.Minute

var trustMetricKey = []byte("trustMetricStore")

// MetricStore - Manages all trust metrics for peers
type MetricStore struct {
	service.BaseService

	// Maps a Peer.Key to that peer's TrustMetric
	peerMetrics map[string]*Metric

	// Mutex that protects the map and history data file
	mtx tmsync.Mutex

	// The db where peer trust metric history data will be stored
	db dbm.DB

	// This configuration will be used when creating new TrustMetrics
	config MetricConfig
}

// NewTrustMetricStore returns a store that saves data to the DB
// and uses the config when creating new trust metrics.
// Use Start to initialize the trust metric store
func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore {
	tms := &MetricStore{
		peerMetrics: make(map[string]*Metric),
		db:          db,
		config:      tmc,
	}

	tms.BaseService = *service.NewBaseService(nil, "MetricStore", tms)
	return tms
}

// OnStart implements Service
func (tms *MetricStore) OnStart() error {
	if err := tms.BaseService.OnStart(); err != nil {
		return err
	}

	tms.mtx.Lock()
	defer tms.mtx.Unlock()

	tms.loadFromDB()
	go tms.saveRoutine()
	return nil
}

// OnStop implements Service
func (tms *MetricStore) OnStop() {
	tms.BaseService.OnStop()

	tms.mtx.Lock()
	defer tms.mtx.Unlock()

	// Stop all trust metric go-routines
	for _, tm := range tms.peerMetrics {
		if err := tm.Stop(); err != nil {
			tms.Logger.Error("unable to stop metric store", "error", err)
		}
	}

	// Make the final trust history data save
	tms.saveToDB()
}

// Size returns the number of entries in the trust metric store
func (tms *MetricStore) Size() int {
	tms.mtx.Lock()
	defer tms.mtx.Unlock()

	return tms.size()
}

// AddPeerTrustMetric takes an existing trust metric and associates it with a peer key.
// The caller is expected to call Start on the TrustMetric being added
func (tms *MetricStore) AddPeerTrustMetric(key string, tm *Metric) {
	tms.mtx.Lock()
	defer tms.mtx.Unlock()

	if key == "" || tm == nil {
		return
	}
	tms.peerMetrics[key] = tm
}

// GetPeerTrustMetric returns a trust metric by peer key
func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric {
	tms.mtx.Lock()
	defer tms.mtx.Unlock()

	tm, ok := tms.peerMetrics[key]
	if !ok {
		// If the metric is not available, we will create it
		tm = NewMetricWithConfig(tms.config)
		if err := tm.Start(); err != nil {
			tms.Logger.Error("unable to start metric store", "error", err)
		}
		// The metric needs to be in the map
		tms.peerMetrics[key] = tm
	}
	return tm
}

// PeerDisconnected pauses the trust metric associated with the peer identified by the key
func (tms *MetricStore) PeerDisconnected(key string) {
	tms.mtx.Lock()
	defer tms.mtx.Unlock()

	// If the Peer that disconnected has a metric, pause it
	if tm, ok := tms.peerMetrics[key]; ok {
		tm.Pause()
	}
}

// SaveToDB saves the history data for all peers to the store DB.
// This public method acquires the trust metric store lock
func (tms *MetricStore) SaveToDB() {
	tms.mtx.Lock()
	defer tms.mtx.Unlock()

	tms.saveToDB()
}

/* Private methods */

// size returns the number of entries in the store without acquiring the mutex
func (tms *MetricStore) size() int {
	return len(tms.peerMetrics)
}

/* Loading & Saving */
/* Both loadFromDB and saveToDB assume the mutex has been acquired */

// Loads the history data for all peers from the store DB
// cmn.Panics if file is corrupt
func (tms *MetricStore) loadFromDB() bool {
	// Obtain the history data we have so far
	bytes, err := tms.db.Get(trustMetricKey)
	if err != nil {
		panic(err)
	}
	if bytes == nil {
		return false
	}

	peers := make(map[string]MetricHistoryJSON)
	err = json.Unmarshal(bytes, &peers)
	if err != nil {
		panic(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err))
	}

	// If history data exists in the file,
	// load it into trust metric
	for key, p := range peers {
		tm := NewMetricWithConfig(tms.config)

		if err := tm.Start(); err != nil {
			tms.Logger.Error("unable to start metric", "error", err)
		}
		tm.Init(p)
		// Load the peer trust metric into the store
		tms.peerMetrics[key] = tm
	}
	return true
}

// Saves the history data for all peers to the store DB
func (tms *MetricStore) saveToDB() {
	tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size())

	peers := make(map[string]MetricHistoryJSON)

	for key, tm := range tms.peerMetrics {
		// Add an entry for the peer identified by key
		peers[key] = tm.HistoryJSON()
	}

	// Write all the data back to the DB
	bytes, err := json.Marshal(peers)
	if err != nil {
		tms.Logger.Error("Failed to encode the TrustHistory", "err", err)
		return
	}
	if err := tms.db.SetSync(trustMetricKey, bytes); err != nil {
		tms.Logger.Error("failed to flush data to disk", "error", err)
	}
}

// Periodically saves the trust history data to the DB
func (tms *MetricStore) saveRoutine() {
	t := time.NewTicker(defaultStorePeriodicSaveInterval)
	defer t.Stop()
loop:
	for {
		select {
		case <-t.C:
			tms.SaveToDB()
		case <-tms.Quit():
			break loop
		}
	}
}
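A sketch of wiring the store together (illustrative only; the "memdb" backend name matches what the tests below use):

	db, err := dbm.NewDB("", "memdb", "")
	if err != nil {
		// handle the error
	}
	store := NewTrustMetricStore(db, DefaultConfig())
	if err := store.Start(); err != nil {
		// handle the error
	}
	tm := store.GetPeerTrustMetric("peer_0") // created and started on demand
	tm.BadEvents(1)
	store.PeerDisconnected("peer_0") // pauses the metric; history is kept
	_ = store.Stop()                 // performs the final saveToDB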
166 p2p/trust/store_test.go (new file)
@@ -0,0 +1,166 @@
// Copyright 2017 Tendermint. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package trust

import (
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/libs/log"
)

func TestTrustMetricStoreSaveLoad(t *testing.T) {
	dir, err := os.MkdirTemp("", "trust_test")
	require.NoError(t, err)
	defer os.Remove(dir)

	historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir)
	require.NoError(t, err)

	// 0 peers saved
	store := NewTrustMetricStore(historyDB, DefaultConfig())
	store.SetLogger(log.TestingLogger())
	store.saveToDB()
	// Load the data from the file
	store = NewTrustMetricStore(historyDB, DefaultConfig())
	store.SetLogger(log.TestingLogger())
	err = store.Start()
	require.NoError(t, err)
	// Make sure we still have 0 entries
	assert.Zero(t, store.Size())

	// 100 TestTickers
	var tt []*TestTicker
	for i := 0; i < 100; i++ {
		// The TestTicker will provide manual control over
		// the passing of time within the metric
		tt = append(tt, NewTestTicker())
	}
	// 100 peers
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("peer_%d", i)
		tm := NewMetric()

		tm.SetTicker(tt[i])
		err = tm.Start()
		require.NoError(t, err)
		store.AddPeerTrustMetric(key, tm)

		tm.BadEvents(10)
		tm.GoodEvents(1)
	}
	// Check that we have 100 entries and save
	assert.Equal(t, 100, store.Size())
	// Give the 100 metrics time to process the history data
	for i := 0; i < 100; i++ {
		tt[i].NextTick()
		tt[i].NextTick()
	}
	// Stop all the trust metrics and save
	err = store.Stop()
	require.NoError(t, err)

	// Load the data from the DB
	store = NewTrustMetricStore(historyDB, DefaultConfig())
	store.SetLogger(log.TestingLogger())
	err = store.Start()
	require.NoError(t, err)

	// Check that we still have 100 peers with imperfect trust values
	assert.Equal(t, 100, store.Size())
	for _, tm := range store.peerMetrics {
		assert.NotEqual(t, 1.0, tm.TrustValue())
	}

	err = store.Stop()
	require.NoError(t, err)
}

func TestTrustMetricStoreConfig(t *testing.T) {
	historyDB, err := dbm.NewDB("", "memdb", "")
	require.NoError(t, err)

	config := MetricConfig{
		ProportionalWeight: 0.5,
		IntegralWeight:     0.5,
	}

	// Create a store with custom config
	store := NewTrustMetricStore(historyDB, config)
	store.SetLogger(log.TestingLogger())
	err = store.Start()
	require.NoError(t, err)

	// Have the store make us a metric with the config
	tm := store.GetPeerTrustMetric("TestKey")

	// Check that the options made it to the metric
	assert.Equal(t, 0.5, tm.proportionalWeight)
	assert.Equal(t, 0.5, tm.integralWeight)
	err = store.Stop()
	require.NoError(t, err)
}

func TestTrustMetricStoreLookup(t *testing.T) {
	historyDB, err := dbm.NewDB("", "memdb", "")
	require.NoError(t, err)

	store := NewTrustMetricStore(historyDB, DefaultConfig())
	store.SetLogger(log.TestingLogger())
	err = store.Start()
	require.NoError(t, err)

	// Create 100 peers in the trust metric store
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("peer_%d", i)
		store.GetPeerTrustMetric(key)

		// Check that the trust metric was successfully entered
		ktm := store.peerMetrics[key]
		assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key)
	}

	err = store.Stop()
	require.NoError(t, err)
}

func TestTrustMetricStorePeerScore(t *testing.T) {
	historyDB, err := dbm.NewDB("", "memdb", "")
	require.NoError(t, err)

	store := NewTrustMetricStore(historyDB, DefaultConfig())
	store.SetLogger(log.TestingLogger())
	err = store.Start()
	require.NoError(t, err)

	key := "TestKey"
	tm := store.GetPeerTrustMetric(key)

	// This peer is innocent so far
	first := tm.TrustScore()
	assert.Equal(t, 100, first)

	// Add some undesirable events and disconnect
	tm.BadEvents(1)
	first = tm.TrustScore()
	assert.NotEqual(t, 100, first)
	tm.BadEvents(10)
	second := tm.TrustScore()

	if second > first {
		t.Errorf("a greater number of bad events should lower the trust score")
	}
	store.PeerDisconnected(key)

	// We will remember our experiences with this peer
	tm = store.GetPeerTrustMetric(key)
	assert.NotEqual(t, 100, tm.TrustScore())
	err = store.Stop()
	require.NoError(t, err)
}
62 p2p/trust/ticker.go (new file)
@@ -0,0 +1,62 @@
// Copyright 2017 Tendermint. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package trust

import (
	"time"
)

// MetricTicker provides a single ticker interface for the trust metric
type MetricTicker interface {
	// GetChannel returns the receive only channel that fires at each time interval
	GetChannel() <-chan time.Time

	// Stop will halt further activity on the ticker channel
	Stop()
}

// The ticker used during testing that provides manual control over time intervals
type TestTicker struct {
	C       chan time.Time
	stopped bool
}

// NewTestTicker returns our ticker used within test routines
func NewTestTicker() *TestTicker {
	c := make(chan time.Time)
	return &TestTicker{
		C: c,
	}
}

func (t *TestTicker) GetChannel() <-chan time.Time {
	return t.C
}

func (t *TestTicker) Stop() {
	t.stopped = true
}

// NextTick manually sends Time on the ticker channel
func (t *TestTicker) NextTick() {
	if t.stopped {
		return
	}
	t.C <- time.Now()
}

// Ticker is just a wrap around time.Ticker that allows it
// to meet the requirements of our interface
type Ticker struct {
	*time.Ticker
}

// NewTicker returns a normal time.Ticker wrapped to meet our interface
func NewTicker(d time.Duration) *Ticker {
	return &Ticker{time.NewTicker(d)}
}

func (t *Ticker) GetChannel() <-chan time.Time {
	return t.C
}
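A short sketch (illustrative only) of how TestTicker gives a test deterministic control over interval boundaries, in contrast to the wall-clock Ticker; every name here comes from this package:

	tt := NewTestTicker()
	tm := NewMetric()
	tm.SetTicker(tt) // must be called before Start
	_ = tm.Start()
	tt.NextTick() // drives exactly one NextTimeInterval in the metric's goroutine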
32 p2p/types.go
@@ -1,40 +1,8 @@
 package p2p
 
 import (
-	"github.com/cosmos/gogoproto/proto"
 	"github.com/tendermint/tendermint/p2p/conn"
-	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
 )
 
 type ChannelDescriptor = conn.ChannelDescriptor
 type ConnectionStatus = conn.ConnectionStatus
-
-// Envelope contains a message with sender routing info.
-type Envelope struct {
-	Src       Peer          // sender (empty if outbound)
-	Message   proto.Message // message payload
-	ChannelID byte
-}
-
-// Unwrapper is a Protobuf message that can contain a variety of inner messages
-// (e.g. via oneof fields). If a Channel's message type implements Unwrapper, the
-// p2p layer will automatically unwrap inbound messages so that reactors do not have to do this themselves.
-type Unwrapper interface {
-	proto.Message
-
-	// Unwrap will unwrap the inner message contained in this message.
-	Unwrap() (proto.Message, error)
-}
-
-// Wrapper is a companion type to Unwrapper. It is a Protobuf message that can contain a variety of inner messages. The p2p layer will automatically wrap outbound messages so that the reactors do not have to do it themselves.
-type Wrapper interface {
-	proto.Message
-
-	// Wrap will take the underlying message and wrap it in its wrapper type.
-	Wrap() proto.Message
-}
-
-var (
-	_ Wrapper = &tmp2p.PexRequest{}
-	_ Wrapper = &tmp2p.PexAddrs{}
-)
@@ -1,73 +0,0 @@
package blocksync

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"
	"github.com/tendermint/tendermint/p2p"
)

var _ p2p.Wrapper = &StatusRequest{}
var _ p2p.Wrapper = &StatusResponse{}
var _ p2p.Wrapper = &NoBlockResponse{}
var _ p2p.Wrapper = &BlockResponse{}
var _ p2p.Wrapper = &BlockRequest{}

const (
	BlockResponseMessagePrefixSize   = 4
	BlockResponseMessageFieldKeySize = 1
)

func (m *BlockRequest) Wrap() proto.Message {
	bm := &Message{}
	bm.Sum = &Message_BlockRequest{BlockRequest: m}
	return bm
}

func (m *BlockResponse) Wrap() proto.Message {
	bm := &Message{}
	bm.Sum = &Message_BlockResponse{BlockResponse: m}
	return bm
}

func (m *NoBlockResponse) Wrap() proto.Message {
	bm := &Message{}
	bm.Sum = &Message_NoBlockResponse{NoBlockResponse: m}
	return bm
}

func (m *StatusRequest) Wrap() proto.Message {
	bm := &Message{}
	bm.Sum = &Message_StatusRequest{StatusRequest: m}
	return bm
}

func (m *StatusResponse) Wrap() proto.Message {
	bm := &Message{}
	bm.Sum = &Message_StatusResponse{StatusResponse: m}
	return bm
}

// Unwrap implements the p2p Wrapper interface and unwraps a wrapped blockchain
// message.
func (m *Message) Unwrap() (proto.Message, error) {
	switch msg := m.Sum.(type) {
	case *Message_BlockRequest:
		return m.GetBlockRequest(), nil

	case *Message_BlockResponse:
		return m.GetBlockResponse(), nil

	case *Message_NoBlockResponse:
		return m.GetNoBlockResponse(), nil

	case *Message_StatusRequest:
		return m.GetStatusRequest(), nil

	case *Message_StatusResponse:
		return m.GetStatusResponse(), nil

	default:
		return nil, fmt.Errorf("unknown message: %T", msg)
	}
}
@@ -1,109 +0,0 @@
package consensus

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"
	"github.com/tendermint/tendermint/p2p"
)

var _ p2p.Wrapper = &VoteSetBits{}
var _ p2p.Wrapper = &VoteSetMaj23{}
var _ p2p.Wrapper = &Vote{}
var _ p2p.Wrapper = &ProposalPOL{}
var _ p2p.Wrapper = &Proposal{}
var _ p2p.Wrapper = &NewValidBlock{}
var _ p2p.Wrapper = &NewRoundStep{}
var _ p2p.Wrapper = &HasVote{}
var _ p2p.Wrapper = &BlockPart{}

func (m *VoteSetBits) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_VoteSetBits{VoteSetBits: m}
	return cm
}

func (m *VoteSetMaj23) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_VoteSetMaj23{VoteSetMaj23: m}
	return cm
}

func (m *HasVote) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_HasVote{HasVote: m}
	return cm
}

func (m *Vote) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_Vote{Vote: m}
	return cm
}

func (m *BlockPart) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_BlockPart{BlockPart: m}
	return cm
}

func (m *ProposalPOL) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_ProposalPol{ProposalPol: m}
	return cm
}

func (m *Proposal) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_Proposal{Proposal: m}
	return cm
}

func (m *NewValidBlock) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_NewValidBlock{NewValidBlock: m}
	return cm
}

func (m *NewRoundStep) Wrap() proto.Message {
	cm := &Message{}
	cm.Sum = &Message_NewRoundStep{NewRoundStep: m}
	return cm
}

// Unwrap implements the p2p Wrapper interface and unwraps a wrapped consensus
// proto message.
func (m *Message) Unwrap() (proto.Message, error) {
	switch msg := m.Sum.(type) {
	case *Message_NewRoundStep:
		return m.GetNewRoundStep(), nil

	case *Message_NewValidBlock:
		return m.GetNewValidBlock(), nil

	case *Message_Proposal:
		return m.GetProposal(), nil

	case *Message_ProposalPol:
		return m.GetProposalPol(), nil

	case *Message_BlockPart:
		return m.GetBlockPart(), nil

	case *Message_Vote:
		return m.GetVote(), nil

	case *Message_HasVote:
		return m.GetHasVote(), nil

	case *Message_VoteSetMaj23:
		return m.GetVoteSetMaj23(), nil

	case *Message_VoteSetBits:
		return m.GetVoteSetBits(), nil

	default:
		return nil, fmt.Errorf("unknown message: %T", msg)
	}
}
@@ -1,30 +0,0 @@
package mempool

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"
	"github.com/tendermint/tendermint/p2p"
)

var _ p2p.Wrapper = &Txs{}
var _ p2p.Unwrapper = &Message{}

// Wrap implements the p2p Wrapper interface and wraps a mempool message.
func (m *Txs) Wrap() proto.Message {
	mm := &Message{}
	mm.Sum = &Message_Txs{Txs: m}
	return mm
}

// Unwrap implements the p2p Wrapper interface and unwraps a wrapped mempool
// message.
func (m *Message) Unwrap() (proto.Message, error) {
	switch msg := m.Sum.(type) {
	case *Message_Txs:
		return m.GetTxs(), nil

	default:
		return nil, fmt.Errorf("unknown message: %T", msg)
	}
}
@@ -1,32 +0,0 @@
package p2p

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"
)

func (m *PexAddrs) Wrap() proto.Message {
	pm := &Message{}
	pm.Sum = &Message_PexAddrs{PexAddrs: m}
	return pm
}

func (m *PexRequest) Wrap() proto.Message {
	pm := &Message{}
	pm.Sum = &Message_PexRequest{PexRequest: m}
	return pm
}

// Unwrap implements the p2p Wrapper interface and unwraps a wrapped PEX
// message.
func (m *Message) Unwrap() (proto.Message, error) {
	switch msg := m.Sum.(type) {
	case *Message_PexRequest:
		return msg.PexRequest, nil
	case *Message_PexAddrs:
		return msg.PexAddrs, nil
	default:
		return nil, fmt.Errorf("unknown pex message: %T", msg)
	}
}
@@ -1,58 +0,0 @@
package statesync

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"
	"github.com/tendermint/tendermint/p2p"
)

var _ p2p.Wrapper = &ChunkRequest{}
var _ p2p.Wrapper = &ChunkResponse{}
var _ p2p.Wrapper = &SnapshotsRequest{}
var _ p2p.Wrapper = &SnapshotsResponse{}

func (m *SnapshotsResponse) Wrap() proto.Message {
	sm := &Message{}
	sm.Sum = &Message_SnapshotsResponse{SnapshotsResponse: m}
	return sm
}

func (m *SnapshotsRequest) Wrap() proto.Message {
	sm := &Message{}
	sm.Sum = &Message_SnapshotsRequest{SnapshotsRequest: m}
	return sm
}

func (m *ChunkResponse) Wrap() proto.Message {
	sm := &Message{}
	sm.Sum = &Message_ChunkResponse{ChunkResponse: m}
	return sm
}

func (m *ChunkRequest) Wrap() proto.Message {
	sm := &Message{}
	sm.Sum = &Message_ChunkRequest{ChunkRequest: m}
	return sm
}

// Unwrap implements the p2p Wrapper interface and unwraps a wrapped state sync
// proto message.
func (m *Message) Unwrap() (proto.Message, error) {
	switch msg := m.Sum.(type) {
	case *Message_ChunkRequest:
		return m.GetChunkRequest(), nil

	case *Message_ChunkResponse:
		return m.GetChunkResponse(), nil

	case *Message_SnapshotsRequest:
		return m.GetSnapshotsRequest(), nil

	case *Message_SnapshotsResponse:
		return m.GetSnapshotsResponse(), nil

	default:
		return nil, fmt.Errorf("unknown message: %T", msg)
	}
}
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/tendermint/tendermint/blocksync"
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/consensus"
 	"github.com/tendermint/tendermint/crypto"
@@ -17,7 +16,6 @@ import (
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/state/indexer"
 	"github.com/tendermint/tendermint/state/txindex"
-	"github.com/tendermint/tendermint/statesync"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -93,8 +91,6 @@ type Environment struct {
 	TxIndexer        txindex.TxIndexer
 	BlockIndexer     indexer.BlockIndexer
 	ConsensusReactor *consensus.Reactor
-	BlocksyncReactor *blocksync.Reactor
-	StatesyncReactor *statesync.Reactor
 	EventBus         *types.EventBus // thread safe
 	Mempool          mempl.Mempool
 
@@ -203,5 +199,9 @@ func getHeight(latestHeight int64, heightPtr *int64) (int64, error) {
}

func latestUncommittedHeight() int64 {
	nodeIsSyncing := env.ConsensusReactor.WaitSync()
	if nodeIsSyncing {
		return env.BlockStore.Height()
	}
	return env.BlockStore.Height() + 1
}
@@ -51,16 +51,6 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
 		votingPower = val.VotingPower
 	}
 
-	phase := "initializing"
-	switch {
-	case env.StatesyncReactor.IsSyncing():
-		phase = "statesync"
-	case env.BlocksyncReactor.IsSyncing():
-		phase = "blocksync"
-	case env.ConsensusReactor.IsConsensusRunning():
-		phase = "consensus"
-	}
-
 	result := &ctypes.ResultStatus{
 		NodeInfo: env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo),
 		SyncInfo: ctypes.SyncInfo{
@@ -72,7 +62,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
 			EarliestAppHash:     earliestAppHash,
 			EarliestBlockHeight: earliestBlockHeight,
 			EarliestBlockTime:   time.Unix(0, earliestBlockTimeNano),
-			Phase:               phase,
 			CatchingUp:          env.ConsensusReactor.WaitSync(),
 		},
 		ValidatorInfo: ctypes.ValidatorInfo{
 			Address: env.PubKey.Address(),
@@ -86,9 +86,7 @@ type SyncInfo struct {
 	EarliestBlockHeight int64     `json:"earliest_block_height"`
 	EarliestBlockTime   time.Time `json:"earliest_block_time"`
 
-	// Phase indicates which processes are advancing state:
-	// Either statesync, blocksync or consensus
-	Phase      string `json:"phase"`
 	CatchingUp bool `json:"catching_up"`
 }
 
 // Info about the node's validator
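For reference, with the Phase field removed, the sync_info block of a /status response looks roughly like this (abridged, illustrative values; field names come from the json tags above):

	"sync_info": {
	  "earliest_block_height": "1",
	  "earliest_block_time": "2022-01-01T00:00:00Z",
	  "catching_up": false
	}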
@@ -81,9 +81,6 @@ type State struct {
 
 // Copy makes a copy of the State for mutating.
 func (state State) Copy() State {
-	if state.IsEmpty() {
-		return State{}
-	}
-
 	return State{
 		Version: state.Version,
@@ -16,6 +16,49 @@ const (
 	chunkMsgSize = int(16e6)
 )
 
+// mustEncodeMsg encodes a Protobuf message, panicking on error.
+func mustEncodeMsg(pb proto.Message) []byte {
+	msg := ssproto.Message{}
+	switch pb := pb.(type) {
+	case *ssproto.ChunkRequest:
+		msg.Sum = &ssproto.Message_ChunkRequest{ChunkRequest: pb}
+	case *ssproto.ChunkResponse:
+		msg.Sum = &ssproto.Message_ChunkResponse{ChunkResponse: pb}
+	case *ssproto.SnapshotsRequest:
+		msg.Sum = &ssproto.Message_SnapshotsRequest{SnapshotsRequest: pb}
+	case *ssproto.SnapshotsResponse:
+		msg.Sum = &ssproto.Message_SnapshotsResponse{SnapshotsResponse: pb}
+	default:
+		panic(fmt.Errorf("unknown message type %T", pb))
+	}
+	bz, err := msg.Marshal()
+	if err != nil {
+		panic(fmt.Errorf("unable to marshal %T: %w", pb, err))
+	}
+	return bz
+}
+
+// decodeMsg decodes a Protobuf message.
+func decodeMsg(bz []byte) (proto.Message, error) {
+	pb := &ssproto.Message{}
+	err := proto.Unmarshal(bz, pb)
+	if err != nil {
+		return nil, err
+	}
+	switch msg := pb.Sum.(type) {
+	case *ssproto.Message_ChunkRequest:
+		return msg.ChunkRequest, nil
+	case *ssproto.Message_ChunkResponse:
+		return msg.ChunkResponse, nil
+	case *ssproto.Message_SnapshotsRequest:
+		return msg.SnapshotsRequest, nil
+	case *ssproto.Message_SnapshotsResponse:
+		return msg.SnapshotsResponse, nil
+	default:
+		return nil, fmt.Errorf("unknown message type %T", msg)
+	}
+}
+
 // validateMsg validates a message.
 func validateMsg(pb proto.Message) error {
 	if pb == nil {
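A quick round-trip sketch (illustrative only) using the two helpers added above; the ChunkRequest fields are the same ones exercised elsewhere in this diff:

	req := &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 0}
	bz := mustEncodeMsg(req)  // panics only on an unknown or unmarshalable type
	msg, err := decodeMsg(bz) // returns the inner message
	if err != nil {
		// handle the error
	}
	_ = msg.(*ssproto.ChunkRequest) // the same concrete type comes back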
@@ -7,7 +7,6 @@ import (
 	"github.com/cosmos/gogoproto/proto"
 	"github.com/stretchr/testify/require"
 
-	"github.com/tendermint/tendermint/p2p"
 	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 )
@@ -100,9 +99,8 @@ func TestStateSyncVectors(t *testing.T) {
 
 	for _, tc := range testCases {
 		tc := tc
-		w := tc.msg.(p2p.Wrapper).Wrap()
-		bz, err := proto.Marshal(w)
-		require.NoError(t, err)
+
+		bz := mustEncodeMsg(tc.msg)
 
 		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
 	}
@@ -1,30 +0,0 @@
// Code generated by metricsgen. DO NOT EDIT.

package statesync

import (
	"github.com/go-kit/kit/metrics/discard"
	prometheus "github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return &Metrics{
		Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "syncing",
			Help:      "Whether or not a node is state syncing. 1 if yes, 0 if no.",
		}, labels).With(labelsAndValues...),
	}
}

func NopMetrics() *Metrics {
	return &Metrics{
		Syncing: discard.NewGauge(),
	}
}
@@ -1,19 +0,0 @@
package statesync

import (
	"github.com/go-kit/kit/metrics"
)

const (
	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
	// package.
	MetricsSubsystem = "statesync"
)

//go:generate go run ../scripts/metricsgen -struct=Metrics

// Metrics contains metrics exposed by this package.
type Metrics struct {
	// Whether or not a node is state syncing. 1 if yes, 0 if no.
	Syncing metrics.Gauge
}
@@ -33,7 +33,6 @@ type Reactor struct {
 	conn      proxy.AppConnSnapshot
 	connQuery proxy.AppConnQuery
 	tempDir   string
-	metrics   *Metrics
 
 	// This will only be set when a state sync is in progress. It is used to feed received
 	// snapshots and chunks into the sync.
@@ -47,14 +46,12 @@ func NewReactor(
 	conn proxy.AppConnSnapshot,
 	connQuery proxy.AppConnQuery,
 	tempDir string,
-	metrics *Metrics,
 ) *Reactor {
 
 	r := &Reactor{
 		cfg:       cfg,
 		conn:      conn,
 		connQuery: connQuery,
-		metrics:   metrics,
 	}
 	r.BaseReactor = *p2p.NewBaseReactor("StateSync", r)
 
@@ -69,14 +66,12 @@ func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor {
 			Priority:            5,
 			SendQueueCapacity:   10,
 			RecvMessageCapacity: snapshotMsgSize,
-			MessageType:         &ssproto.Message{},
 		},
 		{
 			ID:                  ChunkChannel,
 			Priority:            3,
 			SendQueueCapacity:   10,
 			RecvMessageCapacity: chunkMsgSize,
-			MessageType:         &ssproto.Message{},
 		},
 	}
 }
@@ -95,13 +90,6 @@ func (r *Reactor) AddPeer(peer p2p.Peer) {
 	}
 }
 
-// IsSyncing returns true if state sync is actively being used to restore a state
-func (r *Reactor) IsSyncing() bool {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	return r.syncer != nil
-}
-
 // RemovePeer implements p2p.Reactor.
 func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 	r.mtx.RLock()
@@ -112,21 +100,27 @@ func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 }
 
 // Receive implements p2p.Reactor.
-func (r *Reactor) Receive(e p2p.Envelope) {
+func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	if !r.IsRunning() {
 		return
 	}
 
-	err := validateMsg(e.Message)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
-		r.Logger.Error("Invalid message", "peer", e.Src, "msg", e.Message, "err", err)
-		r.Switch.StopPeerForError(e.Src, err)
+		r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
+		r.Switch.StopPeerForError(src, err)
+		return
+	}
+	err = validateMsg(msg)
+	if err != nil {
+		r.Logger.Error("Invalid message", "peer", src, "msg", msg, "err", err)
+		r.Switch.StopPeerForError(src, err)
 		return
 	}
 
-	switch e.ChannelID {
+	switch chID {
 	case SnapshotChannel:
-		switch msg := e.Message.(type) {
+		switch msg := msg.(type) {
 		case *ssproto.SnapshotsRequest:
 			snapshots, err := r.recentSnapshots(recentSnapshots)
 			if err != nil {
@@ -135,17 +129,14 @@ func (r *Reactor) Receive(e p2p.Envelope) {
 			}
 			for _, snapshot := range snapshots {
 				r.Logger.Debug("Advertising snapshot", "height", snapshot.Height,
-					"format", snapshot.Format, "peer", e.Src.ID())
-				e.Src.Send(p2p.Envelope{
-					ChannelID: e.ChannelID,
-					Message: &ssproto.SnapshotsResponse{
-						Height:   snapshot.Height,
-						Format:   snapshot.Format,
-						Chunks:   snapshot.Chunks,
-						Hash:     snapshot.Hash,
-						Metadata: snapshot.Metadata,
-					},
-				})
+					"format", snapshot.Format, "peer", src.ID())
+				src.Send(chID, mustEncodeMsg(&ssproto.SnapshotsResponse{
+					Height:   snapshot.Height,
+					Format:   snapshot.Format,
+					Chunks:   snapshot.Chunks,
+					Hash:     snapshot.Hash,
+					Metadata: snapshot.Metadata,
+				}))
 			}
 
 		case *ssproto.SnapshotsResponse:
@@ -155,8 +146,8 @@ func (r *Reactor) Receive(e p2p.Envelope) {
 				r.Logger.Debug("Received unexpected snapshot, no state sync in progress")
 				return
 			}
-			r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", e.Src.ID())
-			_, err := r.syncer.AddSnapshot(e.Src, &snapshot{
+			r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", src.ID())
+			_, err := r.syncer.AddSnapshot(src, &snapshot{
 				Height: msg.Height,
 				Format: msg.Format,
 				Chunks: msg.Chunks,
@@ -166,7 +157,7 @@ func (r *Reactor) Receive(e p2p.Envelope) {
 			// TODO: We may want to consider punishing the peer for certain errors
 			if err != nil {
 				r.Logger.Error("Failed to add snapshot", "height", msg.Height, "format", msg.Format,
-					"peer", e.Src.ID(), "err", err)
+					"peer", src.ID(), "err", err)
 				return
 			}
 
@@ -175,10 +166,10 @@ func (r *Reactor) Receive(e p2p.Envelope) {
 		}
 
 	case ChunkChannel:
-		switch msg := e.Message.(type) {
+		switch msg := msg.(type) {
 		case *ssproto.ChunkRequest:
 			r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format,
-				"chunk", msg.Index, "peer", e.Src.ID())
+				"chunk", msg.Index, "peer", src.ID())
 			resp, err := r.conn.LoadSnapshotChunkSync(abci.RequestLoadSnapshotChunk{
 				Height: msg.Height,
 				Format: msg.Format,
@@ -190,33 +181,30 @@ func (r *Reactor) Receive(e p2p.Envelope) {
 				return
 			}
 			r.Logger.Debug("Sending chunk", "height", msg.Height, "format", msg.Format,
-				"chunk", msg.Index, "peer", e.Src.ID())
-			e.Src.Send(p2p.Envelope{
-				ChannelID: ChunkChannel,
-				Message: &ssproto.ChunkResponse{
-					Height:  msg.Height,
-					Format:  msg.Format,
-					Index:   msg.Index,
-					Chunk:   resp.Chunk,
-					Missing: resp.Chunk == nil,
-				},
-			})
+				"chunk", msg.Index, "peer", src.ID())
+			src.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkResponse{
+				Height:  msg.Height,
+				Format:  msg.Format,
+				Index:   msg.Index,
+				Chunk:   resp.Chunk,
+				Missing: resp.Chunk == nil,
+			}))
 
 		case *ssproto.ChunkResponse:
 			r.mtx.RLock()
 			defer r.mtx.RUnlock()
 			if r.syncer == nil {
-				r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", e.Src.ID())
+				r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", src.ID())
 				return
 			}
 			r.Logger.Debug("Received chunk, adding to sync", "height", msg.Height, "format", msg.Format,
-				"chunk", msg.Index, "peer", e.Src.ID())
+				"chunk", msg.Index, "peer", src.ID())
 			_, err := r.syncer.AddChunk(&chunk{
 				Height: msg.Height,
 				Format: msg.Format,
 				Index:  msg.Index,
 				Chunk:  msg.Chunk,
-				Sender: e.Src.ID(),
+				Sender: src.ID(),
 			})
 			if err != nil {
 				r.Logger.Error("Failed to add chunk", "height", msg.Height, "format", msg.Format,
@@ -229,7 +217,7 @@ func (r *Reactor) Receive(e p2p.Envelope) {
 	}
 
 	default:
-		r.Logger.Error("Received message on invalid channel %x", e.ChannelID)
+		r.Logger.Error("Received message on invalid channel %x", chID)
 	}
 }
 
@@ -275,18 +263,13 @@ func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration)
 		r.mtx.Unlock()
 		return sm.State{}, nil, errors.New("a state sync is already in progress")
 	}
-	r.metrics.Syncing.Add(1)
 	r.syncer = newSyncer(r.cfg, r.Logger, r.conn, r.connQuery, stateProvider, r.tempDir)
 	r.mtx.Unlock()
 
 	hook := func() {
 		r.Logger.Debug("Requesting snapshots from known peers")
 		// Request snapshots from all currently connected peers
-		r.Switch.Broadcast(p2p.Envelope{
-			ChannelID: SnapshotChannel,
-			Message:   &ssproto.SnapshotsRequest{},
-		})
+		r.Switch.Broadcast(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{}))
 	}
 
 	hook()
@@ -295,7 +278,6 @@ func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration)
 
 	r.mtx.Lock()
 	r.syncer = nil
-	r.metrics.Syncing.Add(0)
 	r.mtx.Unlock()
 	return state, commit, err
 }
@@ -4,7 +4,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/cosmos/gogoproto/proto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
@@ -54,24 +53,16 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) {
 			peer.On("ID").Return(p2p.ID("id"))
 			var response *ssproto.ChunkResponse
 			if tc.expectResponse != nil {
-				peer.On("Send", mock.MatchedBy(func(i interface{}) bool {
-					e, ok := i.(p2p.Envelope)
-					return ok && e.ChannelID == ChunkChannel
-				})).Run(func(args mock.Arguments) {
-					e := args[0].(p2p.Envelope)
-
-					// Marshal to simulate a wire roundtrip.
-					bz, err := proto.Marshal(e.Message)
+				peer.On("Send", ChunkChannel, mock.Anything).Run(func(args mock.Arguments) {
+					msg, err := decodeMsg(args[1].([]byte))
 					require.NoError(t, err)
-					err = proto.Unmarshal(bz, e.Message)
-					require.NoError(t, err)
-					response = e.Message.(*ssproto.ChunkResponse)
+					response = msg.(*ssproto.ChunkResponse)
 				}).Return(true)
 			}
 
 			// Start a reactor and send a ssproto.ChunkRequest, then wait for and check response
 			cfg := config.DefaultStateSyncConfig()
-			r := NewReactor(*cfg, conn, nil, "", NopMetrics())
+			r := NewReactor(*cfg, conn, nil, "")
 			err := r.Start()
 			require.NoError(t, err)
 			t.Cleanup(func() {
@@ -80,11 +71,7 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) {
 				}
 			})
 
-			r.Receive(p2p.Envelope{
-				ChannelID: ChunkChannel,
-				Src:       peer,
-				Message:   tc.request,
-			})
+			r.Receive(ChunkChannel, peer, mustEncodeMsg(tc.request))
 			time.Sleep(100 * time.Millisecond)
 			assert.Equal(t, tc.expectResponse, response)
 
@@ -144,24 +131,16 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) {
 			peer := &p2pmocks.Peer{}
 			if len(tc.expectResponses) > 0 {
 				peer.On("ID").Return(p2p.ID("id"))
-				peer.On("Send", mock.MatchedBy(func(i interface{}) bool {
-					e, ok := i.(p2p.Envelope)
-					return ok && e.ChannelID == SnapshotChannel
-				})).Run(func(args mock.Arguments) {
-					e := args[0].(p2p.Envelope)
-
-					// Marshal to simulate a wire roundtrip.
-					bz, err := proto.Marshal(e.Message)
+				peer.On("Send", SnapshotChannel, mock.Anything).Run(func(args mock.Arguments) {
+					msg, err := decodeMsg(args[1].([]byte))
 					require.NoError(t, err)
-					err = proto.Unmarshal(bz, e.Message)
-					require.NoError(t, err)
-					responses = append(responses, e.Message.(*ssproto.SnapshotsResponse))
+					responses = append(responses, msg.(*ssproto.SnapshotsResponse))
 				}).Return(true)
 			}
 
 			// Start a reactor and send a SnapshotsRequestMessage, then wait for and check responses
 			cfg := config.DefaultStateSyncConfig()
-			r := NewReactor(*cfg, conn, nil, "", NopMetrics())
+			r := NewReactor(*cfg, conn, nil, "")
 			err := r.Start()
 			require.NoError(t, err)
 			t.Cleanup(func() {
@@ -170,11 +149,7 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) {
 				}
 			})
 
-			r.Receive(p2p.Envelope{
-				ChannelID: SnapshotChannel,
-				Src:       peer,
-				Message:   &ssproto.SnapshotsRequest{},
-			})
+			r.Receive(SnapshotChannel, peer, mustEncodeMsg(&ssproto.SnapshotsRequest{}))
 			time.Sleep(100 * time.Millisecond)
 			assert.Equal(t, tc.expectResponses, responses)
@@ -126,11 +126,7 @@ func (s *syncer) AddSnapshot(peer p2p.Peer, snapshot *snapshot) (bool, error) {
|
||||
// to discover snapshots, later we may want to do retries and stuff.
|
||||
func (s *syncer) AddPeer(peer p2p.Peer) {
|
||||
s.logger.Debug("Requesting snapshots from peer", "peer", peer.ID())
|
||||
e := p2p.Envelope{
|
||||
ChannelID: SnapshotChannel,
|
||||
Message: &ssproto.SnapshotsRequest{},
|
||||
}
|
||||
peer.Send(e)
|
||||
peer.Send(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{}))
|
||||
}

// RemovePeer removes a peer from the pool.
@@ -471,14 +467,11 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) {
}
s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height,
"format", snapshot.Format, "chunk", chunk, "peer", peer.ID())
peer.Send(p2p.Envelope{
ChannelID: ChunkChannel,
Message: &ssproto.ChunkRequest{
Height: snapshot.Height,
Format: snapshot.Format,
Index: chunk,
},
})
peer.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkRequest{
Height: snapshot.Height,
Format: snapshot.Format,
Index: chunk,
}))
}

// verifyApp verifies the sync, checking the app hash, last block height and app version
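Read together, the removed and added call sites imply two shapes for Peer.Send, roughly as follows (inferred from usage in this diff rather than quoted from the Peer interface; the bool result presumably reports whether the message was queued, which is what the mocks' .Return(true) stands in for):

// Envelope form, as removed above:
Send(e p2p.Envelope) bool

// Channel-plus-bytes form, as added above:
Send(chID byte, msgBytes []byte) bool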

@@ -98,27 +98,13 @@ func TestSyncer_SyncAny(t *testing.T) {
// Adding a couple of peers should trigger snapshot discovery messages
peerA := &p2pmocks.Peer{}
peerA.On("ID").Return(p2p.ID("a"))
peerA.On("Send", mock.MatchedBy(func(i interface{}) bool {
e, ok := i.(p2p.Envelope)
if !ok {
return false
}
req, ok := e.Message.(*ssproto.SnapshotsRequest)
return ok && e.ChannelID == SnapshotChannel && req != nil
})).Return(true)
peerA.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true)
syncer.AddPeer(peerA)
peerA.AssertExpectations(t)

peerB := &p2pmocks.Peer{}
peerB.On("ID").Return(p2p.ID("b"))
peerB.On("Send", mock.MatchedBy(func(i interface{}) bool {
e, ok := i.(p2p.Envelope)
if !ok {
return false
}
req, ok := e.Message.(*ssproto.SnapshotsRequest)
return ok && e.ChannelID == SnapshotChannel && req != nil
})).Return(true)
peerB.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true)
syncer.AddPeer(peerB)
peerB.AssertExpectations(t)

@@ -161,9 +147,9 @@ func TestSyncer_SyncAny(t *testing.T) {
chunkRequests := make(map[uint32]int)
chunkRequestsMtx := tmsync.Mutex{}
onChunkRequest := func(args mock.Arguments) {
e, ok := args[0].(p2p.Envelope)
require.True(t, ok)
msg := e.Message.(*ssproto.ChunkRequest)
pb, err := decodeMsg(args[1].([]byte))
require.NoError(t, err)
msg := pb.(*ssproto.ChunkRequest)
require.EqualValues(t, 1, msg.Height)
require.EqualValues(t, 1, msg.Format)
require.LessOrEqual(t, msg.Index, uint32(len(chunks)))
@@ -176,14 +162,8 @@ func TestSyncer_SyncAny(t *testing.T) {
chunkRequests[msg.Index]++
chunkRequestsMtx.Unlock()
}
peerA.On("Send", mock.MatchedBy(func(i interface{}) bool {
e, ok := i.(p2p.Envelope)
return ok && e.ChannelID == ChunkChannel
})).Maybe().Run(onChunkRequest).Return(true)
peerB.On("Send", mock.MatchedBy(func(i interface{}) bool {
e, ok := i.(p2p.Envelope)
return ok && e.ChannelID == ChunkChannel
})).Maybe().Run(onChunkRequest).Return(true)
peerA.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true)
peerB.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true)
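The chunk-request expectations on both peers are registered with .Maybe(), testify's way of marking an expectation as optional: the syncer spreads chunk requests across available peers, so any given peer may never be asked for a particular chunk, and AssertExpectations must not fail for the one that sat idle. The pattern in isolation (same calls as above, reformatted for emphasis):

peerA.On("Send", ChunkChannel, mock.Anything).
	Maybe().             // zero matching calls is acceptable
	Run(onChunkRequest). // inspect each request that does arrive
	Return(true)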

// The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1,
// which should cause it to keep the existing chunk 0 and 2, and restart restoration from

@@ -101,7 +101,7 @@ func (voteSet *VoteSet) ChainID() string {

// Implements VoteSetReader.
func (voteSet *VoteSet) GetHeight() int64 {
if voteSet.IsEmpty() {
if voteSet == nil {
return 0
}
return voteSet.height
@@ -109,7 +109,7 @@ func (voteSet *VoteSet) GetHeight() int64 {

// Implements VoteSetReader.
func (voteSet *VoteSet) GetRound() int32 {
if voteSet.IsEmpty() {
if voteSet == nil {
return -1
}
return voteSet.round
@@ -117,7 +117,7 @@ func (voteSet *VoteSet) GetRound() int32 {

// Implements VoteSetReader.
func (voteSet *VoteSet) Type() byte {
if voteSet.IsEmpty() {
if voteSet == nil {
return 0x00
}
return byte(voteSet.signedMsgType)
@@ -125,16 +125,12 @@ func (voteSet *VoteSet) Type() byte {

// Implements VoteSetReader.
func (voteSet *VoteSet) Size() int {
if voteSet.IsEmpty() {
if voteSet == nil {
return 0
}
return voteSet.valSet.Size()
}

func (voteSet *VoteSet) IsEmpty() bool {
return voteSet == nil || voteSet.height == 0
}

// Returns added=true if vote is valid and new.
// Otherwise returns err=ErrVote[
//
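The VoteSet hunks swap voteSet.IsEmpty() guards for plain voteSet == nil checks and drop IsEmpty itself. Two things are worth noting. First, this is the standard Go nil-receiver idiom: calling a method on a nil pointer is legal so long as the body checks the receiver before dereferencing it. Second, the guards are not quite equivalent: IsEmpty also treated a non-nil set with height 0 as empty, so, for example, GetRound on such a set would previously have returned -1 and now returns the stored round. A self-contained illustration of the idiom (hypothetical type, same pattern as the accessors above):

package main

import "fmt"

type set struct{ height int64 }

// GetHeight is safe to call on a nil *set: the receiver is just an
// ordinary parameter, checked before it is dereferenced.
func (s *set) GetHeight() int64 {
	if s == nil {
		return 0
	}
	return s.height
}

func main() {
	var s *set                 // nil pointer
	fmt.Println(s.GetHeight()) // prints 0 instead of panicking
}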