Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-19 11:12:50 +00:00

Compare commits: 138 commits, v0.35.3 ... v035-testi
Commit SHA1s:

cf3bcbaa4c 222a25284d cae81ce43d 3e8daaeb44 aa2d6ee64a 2b189852b0
3790968156 9e64c95e56 cb93d3b587 f98de20f7e b17f044a1c 451e697331
a8c419f126 20c1ffd03a e3292a48e3 6a354a1e8d 2750cb26a9 a04759c4f6
1daf7b939d 09c54a8d5c 156c305b08 bc49f66c35 9b02094827 bf1ab9c3d8
da83edc588 25f6557174 047d7c927b 49788adde5 91b32b93cd 3940d64ba6
babae90f8f 210e8a02f7 e414d0a878 e66d76f6e9 fbcb965c75 6a646f366e
dc0e77f41e 815e611c68 01984cb3b2 11456f9edf b5f92f5d2e 288cb31040
e2d2c04aac 204281fa66 486370ac68 978f754ad3 c4ef566071 f19e52e6f2
19b98c7005 826f224c2d 2df4c2b19d 6f4ef72964 3398f37979 8ef63fe3d9
9daea43375 df9363c67c 24701cd587 e9c87a3c49 034a9f8422 4322f7d0b9
83526cacbc 25d724b920 3945cec115 74c6d8100d e2d01cdcff bee6597b28
ce8284c027 d02f58e191 28c38522e0 0b63e293f1 af0590a819 46c27b45ab
3c29b6996b 138be1f7b0 98411962c6 3079eb8b30 0e3a3fe58b e17e6b1aaa
0421f8b25e 4faa8b72aa 336dc2f2c5 e8ac37223f a889f17e51 2b5a4de4b3
a85d9c5163 12a0559d67 a22f7bec39 3784371dd8 4ee91663da 87763a3d6a
ad9e875376 2f8483aa85 0e6b85efa9 13cc1931a7 f6b13f8c95 248cb26845
79d83cea15 643eaef146 552e1e78b8 fcf0579f0e 3df465c353 142b273c2f
74267a062e 12fed0ed53 bdd59c892c 23834b6b31 b40a7b63b7 923d14c439
5b634976dc 383408479d f383e8fa98 df66afab99 971bd1487e 512a0bf356
06d3d41623 5b14d27ccf ad7c501359 70d771ead2 5b3b3065ad 9195a005bd
2a91d21b61 14f0d60f24 21d68441a1 4d9ad115b0 e646bd77ca 8682489551
04c1f76569 226bc94c5f 641d290a6d 8579cc382e 1d8b1c7507 118ff02272
52bcd56d60 12e0ea6ea7 1c3921f5df a639323cf0 e4d83ba2ad 9edb87c5f8
.github/dependabot.yml (vendored): 27 lines changed

@@ -1,27 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
- package-ecosystem: npm
directory: "/docs"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- fadeev
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- melekes
- tessr
labels:
- T:dependencies
.github/workflows/build.yml (vendored): 18 lines changed

@@ -20,11 +20,11 @@ jobs:
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -41,11 +41,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -63,11 +63,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
.github/workflows/docker.yml (vendored): 8 lines changed

@@ -13,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |

@@ -39,17 +39,17 @@ jobs:
platforms: all

- name: Set up Docker Build
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v2

- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.10.0
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v3
with:
context: .
file: ./DOCKER/Dockerfile
.github/workflows/e2e-manual.yml (vendored): 4 lines changed

@@ -15,11 +15,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3

- name: Build
working-directory: test/e2e
.github/workflows/e2e-nightly-34x.yml (vendored): 4 lines changed

@@ -21,11 +21,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
with:
ref: 'v0.34.x'
.github/workflows/e2e-nightly-master.yml (vendored): 4 lines changed

@@ -21,11 +21,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3

- name: Build
working-directory: test/e2e
.github/workflows/e2e.yml (vendored): 6 lines changed

@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
.github/workflows/fuzz-nightly.yml (vendored): 8 lines changed

@@ -13,11 +13,11 @@ jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3

- name: Install go-fuzz
working-directory: test/fuzz

@@ -54,14 +54,14 @@ jobs:
continue-on-error: true

- name: Archive crashers
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3

- name: Archive suppressions
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: suppressions
path: test/fuzz/**/suppressions
.github/workflows/janitor.yml (vendored): 2 lines changed

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
- uses: styfle/cancel-workflow-action@0.10.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}
.github/workflows/jepsen.yml (vendored): 4 lines changed

@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
uses: actions/checkout@v3
with:
repository: 'tendermint/jepsen'

@@ -58,7 +58,7 @@ jobs:
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'

- name: Archive results
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: results
path: tendermint/store/latest
.github/workflows/linkchecker.yml (vendored): 4 lines changed

@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
- uses: actions/checkout@v3
- uses: creachadair/github-action-markdown-link-check@master
with:
folder-path: "docs"
.github/workflows/lint.yml (vendored): 26 lines changed

@@ -1,7 +1,11 @@
name: Lint
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository.
#
# This workflow is run on every pull request and push to master.
#
# The `golangci` job will pass without running if no *.{go, mod, sum}
# files have been modified.

on:
pull_request:
push:

@@ -13,19 +17,21 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/setup-go@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.16'
- uses: technote-space/get-diff-action@v5
go-version: '^1.17'
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v3.1.0
- uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
# Required: the version of golangci-lint is required and
# must be specified without patch version: we always use the
# latest patch version.
version: v1.45
args: --timeout 10m
github-token: ${{ secrets.github_token }}
.github/workflows/linter.yml (vendored): 2 lines changed

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
- name: Lint Code Base
uses: docker://github/super-linter:v4
env:
.github/workflows/proto-docker.yml (vendored): 8 lines changed

@@ -16,7 +16,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |

@@ -34,16 +34,16 @@ jobs:
echo ::set-output name=tags::${TAGS}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v2

- name: Login to DockerHub
uses: docker/login-action@v1.10.0
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v3
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile
.github/workflows/proto.yml (vendored): 4 lines changed

@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: check-breakage
run: make proto-check-breaking-ci
.github/workflows/release.yml (vendored): 12 lines changed

@@ -12,26 +12,28 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.3.4
uses: actions/checkout@v3
with:
fetch-depth: 0

- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- name: Build
uses: goreleaser/goreleaser-action@v2
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

- name: Release
uses: goreleaser/goreleaser-action@v2
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/stale.yml (vendored): 2 lines changed

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
.github/workflows/tests.yml (vendored): 22 lines changed

@@ -16,11 +16,11 @@ jobs:
matrix:
part: ["00", "01", "02", "03", "04", "05"]
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -32,7 +32,7 @@ jobs:
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out

@@ -41,8 +41,8 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -50,26 +50,26 @@ jobs:
go.mod
go.sum
Makefile
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
- uses: codecov/codecov-action@v3
with:
file: ./coverage.txt
if: env.GIT_DIFF
CHANGELOG.md: 63 lines changed

@@ -2,6 +2,69 @@

Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).

## v0.35.7

June 16, 2022

### BUG FIXES

- [p2p] [\#8692](https://github.com/tendermint/tendermint/pull/8692) scale the number of stored peers by the configured maximum connections (#8684)
- [rpc] [\#8715](https://github.com/tendermint/tendermint/pull/8715) always close http bodies (backport #8712)
- [p2p] [\#8760](https://github.com/tendermint/tendermint/pull/8760) accept should not abort on first error (backport #8759)

### BREAKING CHANGES

- P2P Protocol

  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)

## v0.35.6

June 3, 2022

### FEATURES

- [migrate] [\#8672](https://github.com/tendermint/tendermint/pull/8672) provide function for database production (backport #8614) (@tychoish)

### BUG FIXES

- [consensus] [\#8651](https://github.com/tendermint/tendermint/pull/8651) restructure peer catchup sleep (@tychoish)
- [pex] [\#8657](https://github.com/tendermint/tendermint/pull/8657) align max address thresholds (@cmwaters)
- [cmd] [\#8668](https://github.com/tendermint/tendermint/pull/8668) don't used global config for reset commands (@cmwaters)
- [p2p] [\#8681](https://github.com/tendermint/tendermint/pull/8681) shed peers from store from other networks (backport #8678) (@tychoish)

## v0.35.5

May 26, 2022

### BUG FIXES

- [p2p] [\#8371](https://github.com/tendermint/tendermint/pull/8371) fix setting in con-tracker (backport #8370) (@tychoish)
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
- [statesync] [\#8494](https://github.com/tendermint/tendermint/pull/8494) avoid potential race (@tychoish)
- [keymigrate] [\#8467](https://github.com/tendermint/tendermint/pull/8467) improve filtering for legacy transaction hashes (backport #8466) (@creachadair)
- [rpc] [\#8594](https://github.com/tendermint/tendermint/pull/8594) fix encoding of block_results responses (@creachadair)

## v0.35.4

April 18, 2022

Special thanks to external contributors on this release: @firelizzard18

### FEATURES

- [cli] [\#8300](https://github.com/tendermint/tendermint/pull/8300) Add a tool to update old config files to the latest version [backport [\#8281](https://github.com/tendermint/tendermint/pull/8281)]. (@creachadair)

### IMPROVEMENTS

### BUG FIXES

- [cli] [\#8294](https://github.com/tendermint/tendermint/pull/8294) keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] [\#8352](https://github.com/tendermint/tendermint/pull/8352) keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)

## v0.35.3

April 8, 2022

@@ -2,9 +2,9 @@

Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).

## v0.35.4
## v0.35.8

Month, DD, YYYY
Month DD, YYYY

Special thanks to external contributors on this release:

@@ -22,6 +22,10 @@ Special thanks to external contributors on this release:

### FEATURES

- [cli] [\#8675] Add command to force compact goleveldb databases (@cmwaters)

### IMPROVEMENTS

### BUG FIXES

- [mempool] \#8944 Fix unbounded heap growth in the priority mempool. (@creachadair)
Makefile: 4 lines changed

@@ -228,10 +228,8 @@ build-docs:
### Docker image ###
###############################################################################

build-docker: build-linux
cp $(BUILDDIR)/tendermint DOCKER/tendermint
build-docker:
docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
rm -rf DOCKER/tendermint
.PHONY: build-docker
UPGRADING.md: 51 lines changed

@@ -44,26 +44,47 @@ This guide provides instructions for upgrading to specific versions of Tendermin

* The fast sync process as well as the blockchain package and service has all
been renamed to block sync

* We have added a new, experimental tool to help operators migrate
configuration files created by previous versions of Tendermint.
To try this tool, run:

```shell
# Install the tool.
go install github.com/tendermint/tendermint/scripts/confix@v0.35.x

# Run the tool with the old configuration file as input.
# Replace the -config argument with your path.
confix -config ~/.tendermint/config/config.toml -out updated.toml
```

This tool should be able to update configurations from v0.34 to v0.35. We
plan to extend it to handle older configuration files in the future. For now,
it will report an error (without making any changes) if it does not recognize
the version that created the file.

### Database Key Format Changes

The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
provides the function `Migrate(context.Context, db.DB)` which you can
operationalize as makes sense for your deployment.
script provided in this release.

The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the
function `Migrate(context.Context, db.DB)` which you can operationalize as
makes sense for your deployment.

For ease of use the `tendermint` command includes a CLI version of the
migration script, which you can invoke, as in:

    tendermint key-migrate

This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.
This reads the configuration file as normal and allows the `--db-backend` and
`--db-dir` flags to override the database location as needed.

The migration operation is idempotent and can be run more than once,
if needed.
The migration operation is intended to be idempotent, and should be safe to
rerun on the same database multiple times. As a safety measure, however, we
recommend that operators test out the migration on a copy of the database
first, if it is practical to do so, before applying it to the production data.
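The following is a minimal sketch of how an operator might drive the same migration from their own tooling instead of the `tendermint key-migrate` CLI. It assumes a database opened with `github.com/tendermint/tm-db`; the backend and data directory shown are illustrative only.

```go
package main

import (
	"context"
	"log"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	ctx := context.Background()

	// Open one of the node's databases in place (goleveldb backend assumed;
	// the path is a placeholder for your node's data directory).
	db, err := dbm.NewDB("blockstore", dbm.GoLevelDBBackend, "/path/to/.tendermint/data")
	if err != nil {
		log.Fatalf("opening database: %v", err)
	}
	defer db.Close()

	// Migrate rewrites every key into the new 0.35 format. It is intended
	// to be idempotent, so rerunning it should be safe.
	if err := keymigrate.Migrate(ctx, db); err != nil {
		log.Fatalf("migration failed: %v", err)
	}
}
```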
### CLI Changes

@@ -113,11 +134,11 @@ To access any of the functionality previously available via the
`node.Node` type, use the `*local.Local` "RPC" client, that exposes
the full RPC interface provided as direct function calls. Import the
`github.com/tendermint/tendermint/rpc/client/local` package and pass
the node service as in the following:
the node service as in the following:

```go
node := node.NewDefault() //construct the node object
// start and set up the node service
// start and set up the node service

client := local.New(node.(local.NodeService))
// use client object to interact with the node

@@ -144,10 +165,10 @@ both stacks.
The P2P library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is still
included in this version of Tendermint as a backstop to work around unforeseen
production issues. The new and legacy version are interoperable. If necessary,
production issues. The new and legacy version are interoperable. If necessary,
you can enable the legacy implementation in the server configuration file.

To make use of the legacy P2P implemementation add or update the following field of
To make use of the legacy P2P implemementation add or update the following field of
your server's configuration file under the `[p2p]` section:

```toml

@@ -172,8 +193,8 @@ in the order in which they were received.

* `priority`: A priority queue of messages.

* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
weighted deficit round robin queue is created per peer. Each queue contains a
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
weighted deficit round robin queue is created per peer. Each queue contains a
separate 'flow' for each of the channels of communication that exist between any two
peers. Tendermint maintains a channel per message type between peers. Each WDRR
queue maintains a shared buffered with a fixed capacity through which messages on different
@@ -87,9 +87,15 @@ type ReqRes struct {
*sync.WaitGroup
*types.Response // Not set atomically, so be sure to use WaitGroup.

mtx tmsync.Mutex
done bool // Gets set to true once *after* WaitGroup.Done().
cb func(*types.Response) // A single callback that may be set.
mtx tmsync.Mutex

// callbackInvoked as a variable to track if the callback was already
// invoked during the regular execution of the request. This variable
// allows clients to set the callback simultaneously without potentially
// invoking the callback twice by accident, once when 'SetCallback' is
// called and once during the normal request.
callbackInvoked bool
cb func(*types.Response) // A single callback that may be set.
}

func NewReqRes(req *types.Request) *ReqRes {

@@ -98,8 +104,8 @@ func NewReqRes(req *types.Request) *ReqRes {
WaitGroup: waitGroup1(),
Response: nil,

done: false,
cb: nil,
callbackInvoked: false,
cb: nil,
}
}

@@ -109,7 +115,7 @@ func NewReqRes(req *types.Request) *ReqRes {
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
r.mtx.Lock()

if r.done {
if r.callbackInvoked {
r.mtx.Unlock()
cb(r.Response)
return

@@ -128,6 +134,7 @@ func (r *ReqRes) InvokeCallback() {
if r.cb != nil {
r.cb(r.Response)
}
r.callbackInvoked = true
}

// GetCallback returns the configured callback of the ReqRes object which may be

@@ -142,13 +149,6 @@ func (r *ReqRes) GetCallback() func(*types.Response) {
return r.cb
}

// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
r.mtx.Lock()
r.done = true
r.mtx.Unlock()
}

func waitGroup1() (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{}
wg.Add(1)
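To illustrate the behavior the new `callbackInvoked` field guards, here is a hedged sketch of how a caller of the asynchronous client typically attaches a callback. It assumes the v0.35 `abciclient` and `abci/types` packages; the logging inside the callback stands in for real application logic.

```go
package main

import (
	"context"
	"log"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// submitTx sends a CheckTx request asynchronously and registers a callback.
// With callbackInvoked, SetCallback either runs the callback immediately
// (if InvokeCallback already fired for this request) or arranges for it to
// run exactly once when the response arrives, so setting it "late" is safe.
func submitTx(ctx context.Context, client abciclient.Client, tx []byte) error {
	reqRes, err := client.CheckTxAsync(ctx, types.RequestCheckTx{Tx: tx})
	if err != nil {
		return err
	}
	reqRes.SetCallback(func(res *types.Response) {
		log.Printf("CheckTx result code: %d", res.GetCheckTx().Code)
	})
	return nil
}
```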
@@ -72,7 +72,6 @@ func (cli *grpcClient) OnStart() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()

reqres.SetDone()
reqres.Done()

// Notify client listener if set

@@ -81,9 +80,7 @@ func (cli *grpcClient) OnStart() error {
}

// Notify reqRes listener if set
if cb := reqres.GetCallback(); cb != nil {
cb(reqres.Response)
}
reqres.InvokeCallback()
}
for reqres := range cli.chReqRes {
if reqres != nil {
@@ -348,12 +348,13 @@ func (app *localClient) ApplySnapshotChunkSync(

func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
app.Callback(req, res)
return newLocalReqRes(req, res)
rr := newLocalReqRes(req, res)
rr.callbackInvoked = true
return rr
}

func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
reqRes := NewReqRes(req)
reqRes.Response = res
reqRes.SetDone()
return reqRes
}
@@ -801,3 +801,18 @@ func (_m *Client) String() string {
func (_m *Client) Wait() {
_m.Called()
}

type mockConstructorTestingTNewClient interface {
mock.TestingT
Cleanup(func())
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t mockConstructorTestingTNewClient) *Client {
mock := &Client{}
mock.Mock.Test(t)

t.Cleanup(func() { mock.AssertExpectations(t) })

return mock
}
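A hedged sketch of how the generated constructor above is typically used from a test. The import path for the mocks package and the expectation on `Wait` are assumptions for illustration; only `NewClient` and `Wait` come from the diff itself.

```go
package example_test

import (
	"testing"

	"github.com/tendermint/tendermint/abci/client/mocks" // assumed location of the generated mocks
)

func TestUsesMockClient(t *testing.T) {
	// NewClient ties the mock to t and registers a cleanup hook that calls
	// AssertExpectations when the test finishes.
	client := mocks.NewClient(t)

	// Illustrative expectation on one of the generated methods shown above.
	client.On("Wait").Return()

	client.Wait()
}
```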
@@ -3,6 +3,7 @@ package abciclient_test
import (
"context"
"fmt"
"sync"
"testing"
"time"

@@ -125,3 +126,73 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
time.Sleep(200 * time.Millisecond)
return types.ResponseBeginBlock{}
}

// TestCallbackInvokedWhenSetLaet ensures that the callback is invoked when
// set after the client completes the call into the app. Currently this
// test relies on the callback being allowed to be invoked twice if set multiple
// times, once when set early and once when set late.
func TestCallbackInvokedWhenSetLate(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
app := blockedABCIApplication{
wg: wg,
}
_, c := setupClientServer(t, app)
reqRes, err := c.CheckTxAsync(context.Background(), types.RequestCheckTx{})
require.NoError(t, err)

done := make(chan struct{})
cb := func(_ *types.Response) {
close(done)
}
reqRes.SetCallback(cb)
app.wg.Done()
<-done

var called bool
cb = func(_ *types.Response) {
called = true
}
reqRes.SetCallback(cb)
require.True(t, called)
}

type blockedABCIApplication struct {
wg *sync.WaitGroup
types.BaseApplication
}

func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
b.wg.Wait()
return b.BaseApplication.CheckTx(r)
}

// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
// set before the client completes the call into the app.
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
app := blockedABCIApplication{
wg: wg,
}
_, c := setupClientServer(t, app)
reqRes, err := c.CheckTxAsync(context.Background(), types.RequestCheckTx{})
require.NoError(t, err)

done := make(chan struct{})
cb := func(_ *types.Response) {
close(done)
}
reqRes.SetCallback(cb)
app.wg.Done()

called := func() bool {
select {
case <-done:
return true
default:
return false
}
}
require.Eventually(t, called, time.Second, time.Millisecond*25)
}
@@ -5,6 +5,9 @@ import (
"encoding/json"

"github.com/gogo/protobuf/jsonpb"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/encoding"
tmjson "github.com/tendermint/tendermint/libs/json"
)

const (

@@ -102,6 +105,48 @@ func (r *EventAttribute) UnmarshalJSON(b []byte) error {
return jsonpbUnmarshaller.Unmarshal(reader, r)
}

// validatorUpdateJSON is the JSON encoding of a validator update.
//
// It handles translation of public keys from the protobuf representation to
// the legacy Amino-compatible format expected by RPC clients.
type validatorUpdateJSON struct {
PubKey json.RawMessage `json:"pub_key,omitempty"`
Power int64 `json:"power,string"`
}

func (v *ValidatorUpdate) MarshalJSON() ([]byte, error) {
key, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
return nil, err
}
jkey, err := tmjson.Marshal(key)
if err != nil {
return nil, err
}
return json.Marshal(validatorUpdateJSON{
PubKey: jkey,
Power: v.GetPower(),
})
}

func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error {
var vu validatorUpdateJSON
if err := json.Unmarshal(data, &vu); err != nil {
return err
}
var key crypto.PubKey
if err := tmjson.Unmarshal(vu.PubKey, &key); err != nil {
return err
}
pkey, err := encoding.PubKeyToProto(key)
if err != nil {
return err
}
v.PubKey = pkey
v.Power = vu.Power
return nil
}

// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.
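A small hedged sketch of the round trip these methods enable: marshaling a `ValidatorUpdate` produces the legacy-style `pub_key`/`power` JSON, and unmarshaling restores the protobuf key. The ed25519 key and power value are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/encoding"
)

func main() {
	// Illustrative key and voting power.
	pk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey())
	if err != nil {
		panic(err)
	}
	vu := types.ValidatorUpdate{PubKey: pk, Power: 10}

	data, err := json.Marshal(&vu) // uses the MarshalJSON shown above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // pub_key in legacy format, power encoded as a string

	var decoded types.ValidatorUpdate
	if err := json.Unmarshal(data, &decoded); err != nil { // uses UnmarshalJSON above
		panic(err)
	}
}
```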
cmd/tendermint/commands/compact.go (new file): 69 lines

@@ -0,0 +1,69 @@
package commands

import (
"errors"
"path/filepath"
"sync"

"github.com/spf13/cobra"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/tendermint/tendermint/libs/log"
)

func MakeCompactDBCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "experimental-compact-goleveldb",
Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
Long: `
This is a temporary utility command that performs a force compaction on the state
and blockstores to reduce disk space for a pruning node. This should only be run
once the node has stopped. This command will likely be omitted in the future after
the planned refactor to the storage engine.

Currently, only GoLevelDB is supported.
`,
RunE: func(cmd *cobra.Command, args []string) error {
if config.DBBackend != "goleveldb" {
return errors.New("compaction is currently only supported with goleveldb")
}

compactGoLevelDBs(config.RootDir, logger)
return nil
},
}

return cmd
}

func compactGoLevelDBs(rootDir string, logger log.Logger) {
dbNames := []string{"state", "blockstore"}
o := &opt.Options{
DisableSeeksCompaction: true,
}
wg := sync.WaitGroup{}

for _, dbName := range dbNames {
dbName := dbName
wg.Add(1)
go func() {
defer wg.Done()
dbPath := filepath.Join(rootDir, "data", dbName+".db")
store, err := leveldb.OpenFile(dbPath, o)
if err != nil {
logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
return
}
defer store.Close()

logger.Info("starting compaction...", "db", dbPath)

err = store.CompactRange(util.Range{Start: nil, Limit: nil})
if err != nil {
logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
}
}()
}
wg.Wait()
}
@@ -5,7 +5,9 @@ import (
"fmt"

"github.com/spf13/cobra"

cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/scripts/keymigrate"
"github.com/tendermint/tendermint/scripts/scmigrate"
)

@@ -15,53 +17,7 @@ func MakeKeyMigrateCommand() *cobra.Command {
Use: "key-migrate",
Short: "Run Database key migration",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()

contexts := []string{
// this is ordered to put the
// (presumably) biggest/most important
// subsets first.
"blockstore",
"state",
"peerstore",
"tx_index",
"evidence",
"light",
}

for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)

db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: config,
})

if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}

if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}

if dbctx == "blockstore" {
if err := scmigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running seen commit migration: %w", err)

}
}
}

logger.Info("completed database migration successfully")

return nil
return RunDatabaseMigration(cmd.Context(), logger, config)
},
}

@@ -70,3 +26,51 @@ func MakeKeyMigrateCommand() *cobra.Command {

return cmd
}

func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *cfg.Config) error {
contexts := []string{
// this is ordered to put
// the more ephemeral tables first to
// reduce the possibility of the
// ephemeral data overwriting later data
"tx_index",
"peerstore",
"light",
"blockstore",
"state",
"evidence",
}

for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)

db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: conf,
})

if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}

if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}

if dbctx == "blockstore" {
if err := scmigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running seen commit migration: %w", err)

}
}
}

logger.Info("completed database migration successfully")

return nil
}
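Since `RunDatabaseMigration` is now exported, other tooling can drive the full multi-database migration directly. The sketch below assumes a parsed configuration and uses a no-op logger; the home directory is a placeholder.

```go
package main

import (
	"context"
	"log"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	tmcfg "github.com/tendermint/tendermint/config"
	tmlog "github.com/tendermint/tendermint/libs/log"
)

func main() {
	// Hypothetical setup: default config rooted at an example home directory.
	conf := tmcfg.DefaultConfig()
	conf.SetRoot("/path/to/.tendermint")

	// Runs keymigrate (and the seen-commit migration for the blockstore)
	// over all of the node's databases, as the key-migrate command does.
	if err := commands.RunDatabaseMigration(context.Background(), tmlog.NewNopLogger(), conf); err != nil {
		log.Fatalf("database migration: %v", err)
	}
}
```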
@@ -27,6 +27,11 @@ var ResetStateCmd = &cobra.Command{
Use: "reset-state",
Short: "Remove all the data and WAL",
RunE: func(cmd *cobra.Command, args []string) error {
config, err := ParseConfig()
if err != nil {
return err
}

return resetState(config.DBDir(), logger, keyType)
},
}

@@ -47,13 +52,27 @@ var ResetPrivValidatorCmd = &cobra.Command{
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAllCmd(cmd *cobra.Command, args []string) error {
return resetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(), logger)
config, err := ParseConfig()
if err != nil {
return err
}

return resetAll(
config.DBDir(),
config.P2P.AddrBookFile(),
config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(),
logger,
)
}

// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) error {
config, err := ParseConfig()
if err != nil {
return err
}
return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger, keyType)
}
@@ -34,9 +34,6 @@ func AddNodeFlags(cmd *cobra.Command) {
config.PrivValidator.ListenAddr,
"socket address to listen on for connections from external priv-validator process")

// node flags
cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")

// TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
// This check was added to give users an upgrade prompt to use the new flag for syncing.
//
@@ -32,6 +32,7 @@ func main() {
cmd.InspectCmd,
cmd.RollbackStateCmd,
cmd.MakeKeyMigrateCommand(),
cmd.MakeCompactDBCommand(),
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
)
@@ -712,6 +712,10 @@ type P2PConfig struct { //nolint: maligned
// outbound).
MaxConnections uint16 `mapstructure:"max-connections"`

// MaxOutgoingConnections defines the maximum number of connected peers (inbound and
// outbound).
MaxOutgoingConnections uint16 `mapstructure:"max-outgoing-connections"`

// MaxIncomingConnectionAttempts rate limits the number of incoming connection
// attempts per IP address.
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`

@@ -774,6 +778,7 @@ func DefaultP2PConfig() *P2PConfig {
MaxNumInboundPeers: 40,
MaxNumOutboundPeers: 10,
MaxConnections: 64,
MaxOutgoingConnections: 12,
MaxIncomingConnectionAttempts: 100,
PersistentPeersMaxDialPeriod: 0 * time.Second,
FlushThrottleTimeout: 100 * time.Millisecond,

@@ -833,6 +838,9 @@ func (cfg *P2PConfig) ValidateBasic() error {
if cfg.RecvRate < 0 {
return errors.New("recv-rate can't be negative")
}
if cfg.MaxOutgoingConnections > cfg.MaxConnections {
return errors.New("max-outgoing-connections cannot be larger than max-connections")
}
return nil
}
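A brief hedged sketch of the new constraint: `max-outgoing-connections` must not exceed `max-connections`, which `ValidateBasic` now enforces. The values below are deliberately invalid to show the error.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultP2PConfig()
	cfg.MaxOutgoingConnections = cfg.MaxConnections + 1 // deliberately invalid

	// ValidateBasic rejects configs where max-outgoing-connections
	// exceeds max-connections.
	if err := cfg.ValidateBasic(); err != nil {
		fmt.Println(err)
	}
}
```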
@@ -355,6 +355,10 @@ max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
# Maximum number of connections (inbound and outbound).
max-connections = {{ .P2P.MaxConnections }}

# Maximum number of connections reserved for outgoing
# connections. Must be less than max-connections
max-outgoing-connections = {{ .P2P.MaxOutgoingConnections }}

# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
@@ -44,10 +44,6 @@ module.exports = {
{
title: 'Resources',
children: [
{
title: 'Developer Sessions',
path: '/DEV_SESSIONS.html'
},
{
// TODO(creachadair): Figure out how to make this per-branch.
// See: https://github.com/tendermint/tendermint/issues/7908
@@ -18,39 +18,43 @@ Listen address can be changed in the config file (see

The following metrics are available:

| **Name** | **Type** | **Tags** | **Description** |
|---|---|---|---|
| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus_rounds | Gauge | | Number of rounds |
| consensus_num_txs | Gauge | | Number of transactions |
| consensus_total_txs | Gauge | | Total number of transactions committed |
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
| consensus_latest_block_height | gauge | | /status sync_info number |
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer |
| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id |
| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer |
| mempool_size | Gauge | | Number of uncommitted transactions |
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
| mempool_failed_txs | counter | | number of failed transactions |
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |
| **Name** | **Type** | **Tags** | **Description** |
|---|---|---|---|
| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus_rounds | Gauge | | Number of rounds |
| consensus_num_txs | Gauge | | Number of transactions |
| consensus_total_txs | Gauge | | Total number of transactions committed |
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
| consensus_latest_block_height | gauge | | /status sync_info number |
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | Counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | Counter | peer_id, chID | number of bytes per channel sent to a given peer |
| p2p_peer_pending_send_bytes | Gauge | peer_id | number of pending bytes to be sent to a given peer |
| p2p_router_peer_queue_recv | Histogram | | The time taken to read off of a peer's queue before sending on the connection |
| p2p_router_peer_queue_send | Histogram | | The time taken to send on a peer's queue which will later be sent on the connection |
| p2p_router_channel_queue_send | Histogram | | The time taken to send on a p2p channel's queue which will later be consumed by the corresponding service |
| p2p_router_channel_queue_dropped_msgs | Counter | ch_id | The number of messages dropped from a peer's queue for a specific p2p channel |
| p2p_peer_queue_msg_size | Gauge | ch_id | The size of messages sent over a peer's queue for a specific p2p channel |
| mempool_size | Gauge | | Number of uncommitted transactions |
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
| mempool_failed_txs | counter | | number of failed transactions |
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |

## Useful queries
go.mod: 37 lines changed

@@ -5,10 +5,13 @@ go 1.16
require (
github.com/BurntSushi/toml v1.1.0
github.com/Workiva/go-datastructures v1.0.53
github.com/adlio/schema v1.3.0
github.com/btcsuite/btcd v0.22.0-beta
github.com/adlio/schema v1.3.3
github.com/btcsuite/btcd v0.22.1
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/creachadair/atomicfile v0.2.6
github.com/creachadair/taskgroup v0.3.2
github.com/creachadair/tomledit v0.0.22
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect

@@ -16,33 +19,37 @@ require (
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.45.2
github.com/golangci/golangci-lint v1.46.0
github.com/google/go-cmp v0.5.8
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.5.0
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/lib/pq v1.10.5
github.com/libp2p/go-buffer-pool v0.0.2
github.com/lib/pq v1.10.6
github.com/libp2p/go-buffer-pool v0.1.0
github.com/minio/highwayhash v1.0.2
github.com/mroth/weightedrand v0.4.1
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
github.com/ory/dockertest v3.3.5+incompatible
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_golang v1.12.2
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.2
github.com/rs/zerolog v1.26.1
github.com/rs/zerolog v1.27.0
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.1
github.com/spf13/cobra v1.5.0
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.8.0
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
github.com/tendermint/tm-db v0.6.6
github.com/vektra/mockery/v2 v2.10.4
golang.org/x/crypto v0.0.0-20220214200702-86341886e292
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.45.0
github.com/vektra/mockery/v2 v2.14.0
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
google.golang.org/grpc v1.47.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
gotest.tools v2.2.0+incompatible // indirect
pgregory.net/rapid v0.4.7
)
@@ -544,8 +544,15 @@ FOR_LOOP:
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
|
||||
if err == nil {
|
||||
// validate the block before we persist it
|
||||
err = r.blockExec.ValidateBlock(state, first)
|
||||
}
|
||||
|
||||
// If either of the checks failed we log the error and request for a new block
|
||||
// at that height
|
||||
if err != nil {
|
||||
err = fmt.Errorf("invalid last commit: %w", err)
|
||||
r.Logger.Error(
|
||||
err.Error(),
|
||||
"last_commit", second.LastCommit,
|
||||
@@ -570,37 +577,34 @@ FOR_LOOP:
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
r.pool.PopRequest()
|
||||
}
|
||||
|
||||
// TODO: batch saves so we do not persist to disk every block
|
||||
r.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
r.pool.PopRequest()
|
||||
|
||||
var err error
|
||||
// TODO: batch saves so we do not persist to disk every block
|
||||
r.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: Same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
state, err = r.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO: This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
// TODO: Same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
state, err = r.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
|
||||
r.metrics.RecordConsMetrics(first)
|
||||
r.metrics.RecordConsMetrics(first)
|
||||
|
||||
blocksSynced++
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
r.Logger.Info(
|
||||
"block sync rate",
|
||||
"height", r.pool.height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"blocks/s", lastRate,
|
||||
)
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
r.Logger.Info(
|
||||
"block sync rate",
|
||||
"height", r.pool.height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"blocks/s", lastRate,
|
||||
)
|
||||
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
|
||||
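The rate logged in this loop is an exponential moving average: each time another 100 blocks have been synced, the window's blocks-per-second is folded in as lastRate = 0.9*lastRate + 0.1*(100/elapsedSeconds). A minimal, self-contained sketch of that update; the window durations below are invented for illustration only.

package main

import "fmt"

func main() {
	lastRate := 0.0
	// elapsed seconds per 100-block window (made-up values)
	for _, elapsed := range []float64{4.0, 2.0, 2.5} {
		lastRate = 0.9*lastRate + 0.1*(100/elapsed)
		fmt.Printf("blocks/s estimate: %.2f\n", lastRate)
	}
}
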
@@ -663,6 +663,39 @@ func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) {
ensureVote(voteCh, height, round, tmproto.PrevoteType)
}

func ensurePrevoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) {
t.Helper()
ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrevoteType)
}

func ensurePrecommitMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) {
t.Helper()
ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrecommitType)
}

func ensureVoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte, voteType tmproto.SignedMsgType) {
t.Helper()
select {
case <-time.After(ensureTimeout):
t.Fatal("Timeout expired while waiting for NewVote event")
case msg := <-voteCh:
voteEvent, ok := msg.Data().(types.EventDataVote)
require.True(t, ok, "expected a EventDataVote, got %T. Wrong subscription channel?",
msg.Data())

vote := voteEvent.Vote
require.Equal(t, height, vote.Height)
require.Equal(t, round, vote.Round)

require.Equal(t, voteType, vote.Type)
if hash == nil {
require.Nil(t, vote.BlockID.Hash, "Expected prevote to be for nil, got %X", vote.BlockID.Hash)
} else {
require.True(t, bytes.Equal(vote.BlockID.Hash, hash), "Expected prevote to be for %X, got %X", hash, vote.BlockID.Hash)
}
}
}

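The three helpers above all reduce to the same select-with-timeout assertion on a subscription channel. A generalized, self-contained sketch of that idiom follows; waitForEvent is an invented name (not part of this diff) and Go 1.18+ generics are assumed.

package example

import (
	"testing"
	"time"
)

// waitForEvent blocks until a value arrives on ch or the timeout expires.
// On timeout the test fails immediately; otherwise the supplied check
// function performs the assertions on the received value.
func waitForEvent[T any](t *testing.T, ch <-chan T, timeout time.Duration, check func(T)) {
	t.Helper()
	select {
	case <-time.After(timeout):
		t.Fatal("timeout expired while waiting for event")
	case ev := <-ch:
		check(ev)
	}
}
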
func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32,
|
||||
voteType tmproto.SignedMsgType) {
|
||||
select {
|
||||
|
||||
@@ -26,3 +26,18 @@ func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) {
func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) {
_m.Called(_a0, _a1)
}

type mockConstructorTestingTNewConsSyncReactor interface {
mock.TestingT
Cleanup(func())
}

// NewConsSyncReactor creates a new instance of ConsSyncReactor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewConsSyncReactor(t mockConstructorTestingTNewConsSyncReactor) *ConsSyncReactor {
mock := &ConsSyncReactor{}
mock.Mock.Test(t)

t.Cleanup(func() { mock.AssertExpectations(t) })

return mock
}

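A hypothetical usage sketch of the generated constructor, assuming it sits alongside the generated mocks file and testify's mock package is imported; the expectation below is invented. Because NewConsSyncReactor registers AssertExpectations via t.Cleanup, the test needs no explicit assertion call at the end.

func TestConsSyncReactor_Example(t *testing.T) {
	m := NewConsSyncReactor(t) // expectations asserted automatically on cleanup
	m.On("SetStateSyncingMetrics", float64(1)).Return()
	m.SetStateSyncingMetrics(1)
}
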
@@ -544,6 +544,8 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer

func (r *Reactor) gossipDataRoutine(ps *PeerState) {
logger := r.Logger.With("peer", ps.peerID)
timer := time.NewTimer(r.state.config.PeerGossipSleepDuration)
defer timer.Stop()

OUTER_LOOP:
for {
@@ -551,6 +553,8 @@ OUTER_LOOP:
return
}

timer.Reset(r.state.config.PeerGossipSleepDuration)

select {
case <-r.closeCh:
return
@@ -558,8 +562,7 @@ OUTER_LOOP:
// The peer is marked for removal via a PeerUpdate as the doneCh was
// explicitly closed to signal we should exit.
return

default:
case <-timer.C:
}

rs := r.getRoundState()
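The hunks above replace an unconditional sleep with one reusable timer that is reset at the top of every pass and raced against the close channel, so the goroutine notices shutdown immediately instead of finishing a sleep first. A self-contained sketch of the pattern; closeCh, interval and work are illustrative names, not the reactor's.

package main

import (
	"fmt"
	"time"
)

// loopWithTimer runs work once per interval until closeCh is closed.
// Resetting a single timer avoids allocating a new one each iteration,
// and the select lets a shutdown interrupt the wait.
func loopWithTimer(closeCh <-chan struct{}, interval time.Duration, work func()) {
	timer := time.NewTimer(interval)
	defer timer.Stop()

	for {
		timer.Reset(interval)

		select {
		case <-closeCh:
			return
		case <-timer.C:
		}

		work()
	}
}

func main() {
	closeCh := make(chan struct{})
	go loopWithTimer(closeCh, 10*time.Millisecond, func() { fmt.Println("tick") })
	time.Sleep(35 * time.Millisecond)
	close(closeCh)
	time.Sleep(5 * time.Millisecond)
}
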
@@ -605,7 +608,6 @@ OUTER_LOOP:
|
||||
"blockstoreBase", blockStoreBase,
|
||||
"blockstoreHeight", r.state.blockStore.Height(),
|
||||
)
|
||||
time.Sleep(r.state.config.PeerGossipSleepDuration)
|
||||
} else {
|
||||
ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader)
|
||||
}
|
||||
@@ -621,7 +623,6 @@ OUTER_LOOP:
|
||||
|
||||
// if height and round don't match, sleep
|
||||
if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
|
||||
time.Sleep(r.state.config.PeerGossipSleepDuration)
|
||||
continue OUTER_LOOP
|
||||
}
|
||||
|
||||
@@ -676,12 +677,8 @@ OUTER_LOOP:
|
||||
}:
|
||||
}
|
||||
}
|
||||
|
||||
continue OUTER_LOOP
|
||||
}
|
||||
|
||||
// nothing to do -- sleep
|
||||
time.Sleep(r.state.config.PeerGossipSleepDuration)
|
||||
continue OUTER_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
@@ -243,8 +243,7 @@ func TestStateBadProposal(t *testing.T) {
|
||||
ensureProposal(proposalCh, height, round, blockID)
|
||||
|
||||
// wait for prevote
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, nil)
|
||||
|
||||
// add bad prevote from vs2 and wait for it
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
|
||||
@@ -308,8 +307,7 @@ func TestStateOversizedBlock(t *testing.T) {
|
||||
|
||||
// and then should send nil prevote and precommit regardless of whether other validators prevote and
|
||||
// precommit on it
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, nil)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
|
||||
ensurePrevote(voteCh, height, round)
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
@@ -352,8 +350,7 @@ func TestStateFullRound1(t *testing.T) {
|
||||
ensureNewProposal(propCh, height, round)
|
||||
propBlockHash := cs.GetRoundState().ProposalBlock.Hash()
|
||||
|
||||
ensurePrevote(voteCh, height, round) // wait for prevote
|
||||
validatePrevote(t, cs, round, vss[0], propBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
|
||||
|
||||
ensurePrecommit(voteCh, height, round) // wait for precommit
|
||||
|
||||
@@ -376,8 +373,8 @@ func TestStateFullRoundNil(t *testing.T) {
|
||||
cs.enterPrevote(height, round)
|
||||
cs.startRoutines(4)
|
||||
|
||||
ensurePrevote(voteCh, height, round) // prevote
|
||||
ensurePrecommit(voteCh, height, round) // precommit
|
||||
ensurePrevoteMatch(t, voteCh, height, round, nil) // prevote
|
||||
ensurePrecommitMatch(t, voteCh, height, round, nil) // precommit
|
||||
|
||||
// should prevote and precommit nil
|
||||
validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil)
|
||||
@@ -502,10 +499,8 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
panic("Expected proposal block to be nil")
|
||||
}
|
||||
|
||||
// wait to finish prevote
|
||||
ensurePrevote(voteCh, height, round)
|
||||
// we should have prevoted our locked block
|
||||
validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash())
|
||||
// wait to finish prevote and ensure we have prevoted our locked block
|
||||
ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash())
|
||||
|
||||
// add a conflicting prevote from the other validator
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
|
||||
@@ -548,8 +543,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
rs.LockedBlock))
|
||||
}
|
||||
|
||||
ensurePrevote(voteCh, height, round) // prevote
|
||||
validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash())
|
||||
ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash())
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
|
||||
ensurePrevote(voteCh, height, round)
|
||||
@@ -594,9 +588,8 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
}
|
||||
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
ensurePrevote(voteCh, height, round) // prevote
|
||||
// prevote for locked block (not proposal)
|
||||
validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash())
|
||||
ensurePrevoteMatch(t, voteCh, height, round, cs1.LockedBlock.Hash())
|
||||
|
||||
// prevote for proposed block
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
|
||||
@@ -704,8 +697,7 @@ func TestStateLockPOLRelock(t *testing.T) {
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
|
||||
// go to prevote, node should prevote for locked block (not the new proposal) - this is relocking
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], theBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
|
||||
|
||||
// now lets add prevotes from everyone else for the new block
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
|
||||
@@ -757,8 +749,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
theBlockHash := rs.ProposalBlock.Hash()
|
||||
theBlockParts := rs.ProposalBlockParts.Header()
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], theBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
|
||||
|
||||
@@ -796,8 +787,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
|
||||
// go to prevote, prevote for locked block (not proposal)
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], lockedBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, lockedBlockHash)
|
||||
// now lets add prevotes from everyone else for nil (a polka!)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
@@ -888,8 +878,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
|
||||
// now we're on a new round but v1 misses the proposal
|
||||
|
||||
// go to prevote, node should prevote for locked block (not the new proposal) - this is relocking
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], firstBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, firstBlockHash)
|
||||
|
||||
// now lets add prevotes from everyone else for the new block
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), vs2, vs3, vs4)
|
||||
@@ -933,9 +922,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
// we are no longer locked to the first block so we should be able to prevote
|
||||
validatePrevote(t, cs1, round, vss[0], thirdPropBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, thirdPropBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, thirdPropBlockHash, thirdPropBlockParts.Header(), vs2, vs3, vs4)
|
||||
|
||||
@@ -975,8 +962,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
rs := cs1.GetRoundState()
|
||||
propBlock := rs.ProposalBlock
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlock.Hash())
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlock.Hash())
|
||||
|
||||
// the others sign a polka but we don't see it
|
||||
prevotes := signVotes(config, tmproto.PrevoteType,
|
||||
@@ -1022,8 +1008,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash))
|
||||
|
||||
// go to prevote, prevote for proposal block
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
|
||||
|
||||
// now we see the others prevote for it, so we should lock on it
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
|
||||
@@ -1049,10 +1034,8 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
// timeout of propose
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
// finish prevote
|
||||
ensurePrevote(voteCh, height, round)
|
||||
// we should prevote what we're locked on
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
// finish prevote and vote for the block we're locked on
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
|
||||
|
||||
newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)
|
||||
|
||||
@@ -1119,8 +1102,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
}
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash1)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash1)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)
|
||||
|
||||
@@ -1162,9 +1144,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
|
||||
ensureNoNewUnlock(unlockCh)
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash1)
|
||||
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash1)
|
||||
}
|
||||
|
||||
// 4 vals.
|
||||
@@ -1201,8 +1181,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
propBlock := rs.ProposalBlock
|
||||
propBlockHash := propBlock.Hash()
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
|
||||
|
||||
// the others sign a polka
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)
|
||||
@@ -1225,8 +1204,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
// timeout of propose
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
@@ -1294,8 +1272,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
|
||||
propBlockHash := propBlock.Hash()
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
|
||||
|
||||
// vs2 send prevote for propBlock
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2)
|
||||
@@ -1358,8 +1335,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, nil)
|
||||
|
||||
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
|
||||
propBlockHash := propBlock.Hash()
|
||||
@@ -1445,8 +1421,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, nil)
|
||||
}
|
||||
|
||||
// 4 vals, 3 Precommits for nil from the higher round.
|
||||
@@ -1515,8 +1490,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, nil)
|
||||
}
|
||||
|
||||
// What we want:
|
||||
@@ -1645,8 +1619,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
|
||||
theBlockHash := rs.ProposalBlock.Hash()
|
||||
theBlockParts := rs.ProposalBlockParts.Header()
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], theBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
|
||||
|
||||
@@ -1708,8 +1681,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
|
||||
theBlockHash := rs.ProposalBlock.Hash()
|
||||
theBlockParts := rs.ProposalBlockParts.Header()
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], theBlockHash)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
|
||||
|
||||
@@ -1881,8 +1853,7 @@ func TestStateHalt1(t *testing.T) {
|
||||
*/
|
||||
|
||||
// go to prevote, prevote for locked block
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash())
|
||||
ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash())
|
||||
|
||||
// now we receive the precommit from the previous round
|
||||
addVotes(cs1, precommit4)
|
||||
|
||||
@@ -119,9 +119,9 @@ func (t *timeoutTicker) timeoutRoutine() {
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
t.Logger.Debug("Internal state machine timeout scheduled", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
case <-t.timer.C:
t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
t.Logger.Debug("Internal state machine timeout elapsed ", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine

@@ -57,3 +57,18 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewBlockStore interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore {
|
||||
mock := &BlockStore{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -242,17 +242,13 @@ func (mem *CListMempool) CheckTx(
// so we only record the sender for txs still in the mempool.
if e, ok := mem.txsMap.Load(tx.Key()); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
_, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true)
memTx.senders.LoadOrStore(txInfo.SenderID, true)
// TODO: consider punishing peer for dups,
// its non-trivial since invalid txs can become valid,
// but they can spam the same tx with little cost to them atm.
if loaded {
return types.ErrTxInCache
}
}

mem.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash())
return nil
return types.ErrTxInCache
}

if ctx == nil {

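The rewritten branch keeps recording which peer sent a duplicate transaction (useful for gossip bookkeeping) but now always returns ErrTxInCache to the caller instead of silently succeeding. A minimal sketch of that behaviour with illustrative types, not the mempool's own:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errTxInCache = errors.New("tx already exists in cache")

type dedup struct {
	mu      sync.Mutex
	senders map[string]map[uint16]bool // tx key -> peer IDs that sent it
}

// add records a (tx, sender) pair. A repeated tx still records the new
// sender, but the caller is always told the tx was already known.
func (d *dedup) add(key string, sender uint16) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	if peers, ok := d.senders[key]; ok {
		peers[sender] = true
		return errTxInCache
	}
	d.senders[key] = map[uint16]bool{sender: true}
	return nil
}

func main() {
	d := &dedup{senders: make(map[string]map[uint16]bool)}
	fmt.Println(d.add("tx1", 1)) // <nil>
	fmt.Println(d.add("tx1", 2)) // tx already exists in cache
}
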
@@ -200,7 +200,7 @@ func TestMempoolUpdate(t *testing.T) {
|
||||
err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
require.NoError(t, err)
|
||||
err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// 2. Removes valid txs from the mempool
|
||||
@@ -248,13 +248,13 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
|
||||
for _, tx := range txs {
|
||||
reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
|
||||
reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})
|
||||
// SetDone allows the ReqRes to process its callback synchronously.
|
||||
// This simulates the Response being ready for the client immediately.
|
||||
reqRes.SetDone()
|
||||
|
||||
mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
|
||||
err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// ensure that the callback that the mempool sets on the ReqRes is run.
|
||||
reqRes.InvokeCallback()
|
||||
}
|
||||
|
||||
// Calling update to remove the first transaction from the mempool.
|
||||
@@ -305,11 +305,15 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
|
||||
|
||||
// a must be added to the cache
|
||||
err = mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, types.ErrTxInCache, err)
|
||||
}
|
||||
|
||||
// b must remain in the cache
|
||||
err = mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, types.ErrTxInCache, err)
|
||||
}
|
||||
}
|
||||
|
||||
// 2. An invalid transaction must remain in the cache
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/internal/libs/clist"
|
||||
@@ -24,13 +23,6 @@ var (
|
||||
_ p2p.Wrapper = (*protomem.Message)(nil)
|
||||
)
|
||||
|
||||
// PeerManager defines the interface contract required for getting necessary
|
||||
// peer information. This should eventually be replaced with a message-oriented
|
||||
// approach utilizing the p2p stack.
|
||||
type PeerManager interface {
|
||||
GetHeight(types.NodeID) int64
|
||||
}
|
||||
|
||||
// Reactor implements a service that contains mempool of txs that are broadcasted
|
||||
// amongst peers. It maintains a map from peer ID to counter, to prevent gossiping
|
||||
// txs to the peers you received it from.
|
||||
@@ -41,11 +33,6 @@ type Reactor struct {
|
||||
mempool *CListMempool
|
||||
ids *mempool.MempoolIDs
|
||||
|
||||
// XXX: Currently, this is the only way to get information about a peer. Ideally,
|
||||
// we rely on message-oriented communication to get necessary peer data.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
peerMgr PeerManager
|
||||
|
||||
mempoolCh *p2p.Channel
|
||||
peerUpdates *p2p.PeerUpdates
|
||||
closeCh chan struct{}
|
||||
@@ -62,7 +49,6 @@ type Reactor struct {
|
||||
func NewReactor(
|
||||
logger log.Logger,
|
||||
cfg *config.MempoolConfig,
|
||||
peerMgr PeerManager,
|
||||
mp *CListMempool,
|
||||
mempoolCh *p2p.Channel,
|
||||
peerUpdates *p2p.PeerUpdates,
|
||||
@@ -70,7 +56,6 @@ func NewReactor(
|
||||
|
||||
r := &Reactor{
|
||||
cfg: cfg,
|
||||
peerMgr: peerMgr,
|
||||
mempool: mp,
|
||||
ids: mempool.NewMempoolIDs(),
|
||||
mempoolCh: mempoolCh,
|
||||
@@ -171,6 +156,15 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {
|
||||
|
||||
for _, tx := range protoTxs {
|
||||
if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
|
||||
if errors.Is(err, types.ErrTxInCache) {
|
||||
// if the tx is in the cache,
|
||||
// then we've been gossiped a
|
||||
// Tx that we've already
|
||||
// got. Gossip should be
|
||||
// smarter, but it's not a
|
||||
// problem.
|
||||
continue
|
||||
}
|
||||
logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err)
|
||||
}
|
||||
}
|
||||
@@ -355,15 +349,6 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
|
||||
|
||||
memTx := next.Value.(*mempoolTx)
|
||||
|
||||
if r.peerMgr != nil {
|
||||
height := r.peerMgr.GetHeight(peerID)
|
||||
if height > 0 && height < memTx.Height()-1 {
|
||||
// allow for a lag of one block
|
||||
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: Transaction batching was disabled due to:
|
||||
// https://github.com/tendermint/tendermint/issues/5796
|
||||
|
||||
|
||||
@@ -70,7 +70,6 @@ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint)
|
||||
rts.reactors[nodeID] = NewReactor(
|
||||
rts.logger.With("nodeID", nodeID),
|
||||
config,
|
||||
rts.network.Nodes[nodeID].PeerManager,
|
||||
mempool,
|
||||
rts.mempoolChnnels[nodeID],
|
||||
rts.peerUpdates[nodeID],
|
||||
|
||||
File diff suppressed because it is too large
@@ -95,6 +95,18 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}

// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until
// its callback has finished executing. It fails t if CheckTx fails.
func mustCheckTx(t *testing.T, txmp *TxMempool, spec string) {
done := make(chan struct{})
if err := txmp.CheckTx(context.Background(), []byte(spec), func(*abci.Response) {
close(done)
}, mempool.TxInfo{}); err != nil {
t.Fatalf("CheckTx for %q failed: %v", spec, err)
}
<-done
}

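mustCheckTx turns the asynchronous CheckTx callback into a synchronous call by signalling a channel from inside the callback and blocking on it. A generic, self-contained sketch of the same idiom; submit and Result are invented stand-ins for CheckTx and *abci.Response.

package main

import "fmt"

type Result struct{ Code int }

// submit simulates an API that reports its outcome via a callback
// running on another goroutine.
func submit(value int, cb func(Result)) {
	go cb(Result{Code: value})
}

// submitAndWait returns only after the callback has executed.
func submitAndWait(value int) Result {
	done := make(chan Result, 1)
	submit(value, func(r Result) { done <- r })
	return <-done
}

func main() {
	fmt.Println(submitAndWait(7)) // {7}
}
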
func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
|
||||
txs := make([]testTx, numTxs)
|
||||
txInfo := mempool.TxInfo{SenderID: peerID}
|
||||
@@ -196,6 +208,76 @@ func TestTxMempool_Size(t *testing.T) {
|
||||
require.Equal(t, int64(2850), txmp.SizeBytes())
|
||||
}
|
||||
|
||||
func TestTxMempool_Eviction(t *testing.T) {
|
||||
txmp := setup(t, 0)
|
||||
txmp.config.Size = 5
|
||||
txmp.config.MaxTxsBytes = 60
|
||||
txExists := func(spec string) bool {
|
||||
txmp.Lock()
|
||||
defer txmp.Unlock()
|
||||
key := types.Tx(spec).Key()
|
||||
_, ok := txmp.txByKey[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
// A transaction bigger than the mempool should be rejected even when there
|
||||
// are slots available.
|
||||
mustCheckTx(t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1")
|
||||
require.Equal(t, 0, txmp.Size())
|
||||
|
||||
// Nearly-fill the mempool with a low-priority transaction, to show that it
|
||||
// is evicted even when slots are available for a higher-priority tx.
|
||||
const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"
|
||||
mustCheckTx(t, txmp, bigTx)
|
||||
require.Equal(t, 1, txmp.Size()) // bigTx is the only element
|
||||
require.True(t, txExists(bigTx))
|
||||
require.Equal(t, int64(len(bigTx)), txmp.SizeBytes())
|
||||
|
||||
// The next transaction should evict bigTx, because it is higher priority
|
||||
// but does not fit on size.
|
||||
mustCheckTx(t, txmp, "key1=0000=25")
|
||||
require.True(t, txExists("key1=0000=25"))
|
||||
require.False(t, txExists(bigTx))
|
||||
require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes())
|
||||
|
||||
// Now fill up the rest of the slots with other transactions.
|
||||
mustCheckTx(t, txmp, "key2=0001=5")
|
||||
mustCheckTx(t, txmp, "key3=0002=10")
|
||||
mustCheckTx(t, txmp, "key4=0003=3")
|
||||
mustCheckTx(t, txmp, "key5=0004=3")
|
||||
|
||||
// A new transaction with low priority should be discarded.
|
||||
mustCheckTx(t, txmp, "key6=0005=1")
|
||||
require.False(t, txExists("key6=0005=1"))
|
||||
|
||||
// A new transaction with higher priority should evict key5, which is the
|
||||
// newest of the two transactions with lowest priority.
|
||||
mustCheckTx(t, txmp, "key7=0006=7")
|
||||
require.True(t, txExists("key7=0006=7")) // new transaction added
|
||||
require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted
|
||||
require.True(t, txExists("key4=0003=3")) // older low-priority tx retained
|
||||
|
||||
// Another new transaction evicts the other low-priority element.
|
||||
mustCheckTx(t, txmp, "key8=0007=20")
|
||||
require.True(t, txExists("key8=0007=20"))
|
||||
require.False(t, txExists("key4=0003=3"))
|
||||
|
||||
// Now the lowest-priority tx is 5, so that should be the next to go.
|
||||
mustCheckTx(t, txmp, "key9=0008=9")
|
||||
require.True(t, txExists("key9=0008=9"))
|
||||
require.False(t, txExists("k3y2=0001=5"))
|
||||
|
||||
// Add a transaction that requires eviction of multiple lower-priority
|
||||
// entries, in order to fit the size of the element.
|
||||
mustCheckTx(t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11
|
||||
require.True(t, txExists("key1=0000=25"))
|
||||
require.True(t, txExists("key8=0007=20"))
|
||||
require.True(t, txExists("key10=0123456789abcdef=11"))
|
||||
require.False(t, txExists("key3=0002=10"))
|
||||
require.False(t, txExists("key9=0008=9"))
|
||||
require.False(t, txExists("key7=0006=7"))
|
||||
}
|
||||
|
||||
func TestTxMempool_Flush(t *testing.T) {
|
||||
txmp := setup(t, 0)
|
||||
txs := checkTxs(t, txmp, 100, 0)
|
||||
@@ -438,6 +520,51 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
|
||||
require.Zero(t, txmp.SizeBytes())
|
||||
}
|
||||
|
||||
func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) {
|
||||
txmp := setup(t, 50)
|
||||
txmp.config.TTLDuration = 5 * time.Millisecond
|
||||
|
||||
added1 := checkTxs(t, txmp, 25, 0)
|
||||
require.Equal(t, len(added1), txmp.Size())
|
||||
|
||||
// Wait a while, then add some more transactions that should not be expired
|
||||
// when the first batch TTLs out.
|
||||
//
|
||||
// ms: 0 1 2 3 4 5 6
|
||||
// ^ ^ ^ ^
|
||||
// | | | +-- Update (triggers pruning)
|
||||
// | | +------ first batch expires
|
||||
// | +-------------- second batch added
|
||||
// +-------------------------- first batch added
|
||||
//
|
||||
// The exact intervals are not important except that the delta should be
|
||||
// large relative to the cost of CheckTx (ms vs. ns is fine here).
|
||||
time.Sleep(3 * time.Millisecond)
|
||||
added2 := checkTxs(t, txmp, 25, 1)
|
||||
|
||||
// Wait a while longer, so that the first batch will expire.
|
||||
time.Sleep(3 * time.Millisecond)
|
||||
|
||||
// Trigger an update so that pruning will occur.
|
||||
txmp.Lock()
|
||||
defer txmp.Unlock()
|
||||
require.NoError(t, txmp.Update(txmp.height+1, nil, nil, nil, nil))
|
||||
|
||||
// All the transactions in the original set should have been purged.
|
||||
for _, tx := range added1 {
|
||||
if _, ok := txmp.txByKey[tx.tx.Key()]; ok {
|
||||
t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key())
|
||||
}
|
||||
}
|
||||
|
||||
// All the transactions added later should still be around.
|
||||
for _, tx := range added2 {
|
||||
if _, ok := txmp.txByKey[tx.tx.Key()]; !ok {
|
||||
t.Errorf("Transaction %X should still be in the mempool, but is not", tx.tx.Key())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
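The timeline above depends on prune-on-Update semantics: nothing is dropped at the moment it expires, only when the next Update pass runs and removes everything older than TTLDuration. A minimal sketch of that pruning rule with made-up types:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	key     string
	addedAt time.Time
}

// prune keeps only entries younger than ttl at the given time.
func prune(entries []entry, now time.Time, ttl time.Duration) []entry {
	kept := entries[:0]
	for _, e := range entries {
		if now.Sub(e.addedAt) <= ttl {
			kept = append(kept, e)
		}
	}
	return kept
}

func main() {
	now := time.Now()
	pool := []entry{
		{key: "first-batch", addedAt: now.Add(-6 * time.Millisecond)},
		{key: "second-batch", addedAt: now.Add(-2 * time.Millisecond)},
	}
	for _, e := range prune(pool, now, 5*time.Millisecond) {
		fmt.Println(e.key) // only "second-batch" survives
	}
}
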
func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
|
||||
txmp := setup(t, 500)
|
||||
txmp.height = 100
|
||||
@@ -445,7 +572,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
|
||||
|
||||
tTxs := checkTxs(t, txmp, 100, 0)
|
||||
require.Equal(t, len(tTxs), txmp.Size())
|
||||
require.Equal(t, 100, txmp.heightIndex.Size())
|
||||
|
||||
// reap 5 txs at the next height -- no txs should expire
|
||||
reapedTxs := txmp.ReapMaxTxs(5)
|
||||
@@ -459,12 +585,10 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
|
||||
txmp.Unlock()
|
||||
|
||||
require.Equal(t, 95, txmp.Size())
|
||||
require.Equal(t, 95, txmp.heightIndex.Size())
|
||||
|
||||
// check more txs at height 101
|
||||
_ = checkTxs(t, txmp, 50, 1)
|
||||
require.Equal(t, 145, txmp.Size())
|
||||
require.Equal(t, 145, txmp.heightIndex.Size())
|
||||
|
||||
// Reap 5 txs at a height that would expire all the transactions from before
|
||||
// the previous Update (height 100).
|
||||
@@ -485,7 +609,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
|
||||
txmp.Unlock()
|
||||
|
||||
require.GreaterOrEqual(t, txmp.Size(), 45)
|
||||
require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
|
||||
}
|
||||
|
||||
func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
|
||||
|
||||
@@ -1,159 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"sort"
|
||||
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
)
|
||||
|
||||
var _ heap.Interface = (*TxPriorityQueue)(nil)
|
||||
|
||||
// TxPriorityQueue defines a thread-safe priority queue for valid transactions.
|
||||
type TxPriorityQueue struct {
|
||||
mtx tmsync.RWMutex
|
||||
txs []*WrappedTx
|
||||
}
|
||||
|
||||
func NewTxPriorityQueue() *TxPriorityQueue {
|
||||
pq := &TxPriorityQueue{
|
||||
txs: make([]*WrappedTx, 0),
|
||||
}
|
||||
|
||||
heap.Init(pq)
|
||||
|
||||
return pq
|
||||
}
|
||||
|
||||
// GetEvictableTxs attempts to find and return a list of *WrappedTx that can be
|
||||
// evicted to make room for another *WrappedTx with higher priority. If no such
|
||||
// list of *WrappedTx exists, nil will be returned. The returned list of *WrappedTx
|
||||
// indicate that these transactions can be removed due to them being of lower
|
||||
// priority and that their total sum in size allows room for the incoming
|
||||
// transaction according to the mempool's configured limits.
|
||||
func (pq *TxPriorityQueue) GetEvictableTxs(priority, txSize, totalSize, cap int64) []*WrappedTx {
|
||||
pq.mtx.RLock()
|
||||
defer pq.mtx.RUnlock()
|
||||
|
||||
txs := make([]*WrappedTx, len(pq.txs))
|
||||
copy(txs, pq.txs)
|
||||
|
||||
sort.Slice(txs, func(i, j int) bool {
|
||||
return txs[i].priority < txs[j].priority
|
||||
})
|
||||
|
||||
var (
|
||||
toEvict []*WrappedTx
|
||||
i int
|
||||
)
|
||||
|
||||
currSize := totalSize
|
||||
|
||||
// Loop over all transactions in ascending priority order evaluating those
|
||||
// that are only of less priority than the provided argument. We continue
|
||||
// evaluating transactions until there is sufficient capacity for the new
|
||||
// transaction (size) as defined by txSize.
|
||||
for i < len(txs) && txs[i].priority < priority {
|
||||
toEvict = append(toEvict, txs[i])
|
||||
currSize -= int64(txs[i].Size())
|
||||
|
||||
if currSize+txSize <= cap {
|
||||
return toEvict
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NumTxs returns the number of transactions in the priority queue. It is
|
||||
// thread safe.
|
||||
func (pq *TxPriorityQueue) NumTxs() int {
|
||||
pq.mtx.RLock()
|
||||
defer pq.mtx.RUnlock()
|
||||
|
||||
return len(pq.txs)
|
||||
}
|
||||
|
||||
// RemoveTx removes a specific transaction from the priority queue.
|
||||
func (pq *TxPriorityQueue) RemoveTx(tx *WrappedTx) {
|
||||
pq.mtx.Lock()
|
||||
defer pq.mtx.Unlock()
|
||||
|
||||
if tx.heapIndex < len(pq.txs) {
|
||||
heap.Remove(pq, tx.heapIndex)
|
||||
}
|
||||
}
|
||||
|
||||
// PushTx adds a valid transaction to the priority queue. It is thread safe.
|
||||
func (pq *TxPriorityQueue) PushTx(tx *WrappedTx) {
|
||||
pq.mtx.Lock()
|
||||
defer pq.mtx.Unlock()
|
||||
|
||||
heap.Push(pq, tx)
|
||||
}
|
||||
|
||||
// PopTx removes the top priority transaction from the queue. It is thread safe.
|
||||
func (pq *TxPriorityQueue) PopTx() *WrappedTx {
|
||||
pq.mtx.Lock()
|
||||
defer pq.mtx.Unlock()
|
||||
|
||||
x := heap.Pop(pq)
|
||||
if x != nil {
|
||||
return x.(*WrappedTx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Push implements the Heap interface.
|
||||
//
|
||||
// NOTE: A caller should never call Push. Use PushTx instead.
|
||||
func (pq *TxPriorityQueue) Push(x interface{}) {
|
||||
n := len(pq.txs)
|
||||
item := x.(*WrappedTx)
|
||||
item.heapIndex = n
|
||||
pq.txs = append(pq.txs, item)
|
||||
}
|
||||
|
||||
// Pop implements the Heap interface.
|
||||
//
|
||||
// NOTE: A caller should never call Pop. Use PopTx instead.
|
||||
func (pq *TxPriorityQueue) Pop() interface{} {
|
||||
old := pq.txs
|
||||
n := len(old)
|
||||
item := old[n-1]
|
||||
old[n-1] = nil // avoid memory leak
|
||||
item.heapIndex = -1 // for safety
|
||||
pq.txs = old[0 : n-1]
|
||||
return item
|
||||
}
|
||||
|
||||
// Len implements the Heap interface.
|
||||
//
|
||||
// NOTE: A caller should never call Len. Use NumTxs instead.
|
||||
func (pq *TxPriorityQueue) Len() int {
|
||||
return len(pq.txs)
|
||||
}
|
||||
|
||||
// Less implements the Heap interface. It returns true if the transaction at
|
||||
// position i in the queue is of less priority than the transaction at position j.
|
||||
func (pq *TxPriorityQueue) Less(i, j int) bool {
|
||||
// If there exists two transactions with the same priority, consider the one
|
||||
// that we saw the earliest as the higher priority transaction.
|
||||
if pq.txs[i].priority == pq.txs[j].priority {
|
||||
return pq.txs[i].timestamp.Before(pq.txs[j].timestamp)
|
||||
}
|
||||
|
||||
// We want Pop to give us the highest, not lowest, priority so we use greater
|
||||
// than here.
|
||||
return pq.txs[i].priority > pq.txs[j].priority
|
||||
}
|
||||
|
||||
// Swap implements the Heap interface. It swaps two transactions in the queue.
|
||||
func (pq *TxPriorityQueue) Swap(i, j int) {
|
||||
pq.txs[i], pq.txs[j] = pq.txs[j], pq.txs[i]
|
||||
pq.txs[i].heapIndex = i
|
||||
pq.txs[j].heapIndex = j
|
||||
}
|
||||
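The heart of the deleted queue is the eviction rule in GetEvictableTxs: scan candidates in ascending priority, never touch anything of equal or higher priority, and stop as soon as enough bytes would be freed for the incoming transaction. A self-contained sketch of the same rule; the types and numbers are invented for illustration.

package main

import (
	"fmt"
	"sort"
)

type tx struct {
	name     string
	priority int64
	size     int64
}

// evictable returns the lowest-priority transactions whose removal makes
// room for an incoming tx of txSize bytes under capBytes, or nil if the
// strictly-lower-priority candidates cannot free enough space.
func evictable(pool []tx, priority, txSize, totalSize, capBytes int64) []tx {
	cand := append([]tx(nil), pool...)
	sort.Slice(cand, func(i, j int) bool { return cand[i].priority < cand[j].priority })

	var out []tx
	cur := totalSize
	for _, c := range cand {
		if c.priority >= priority {
			break // never evict equal or higher priority
		}
		out = append(out, c)
		cur -= c.size
		if cur+txSize <= capBytes {
			return out
		}
	}
	return nil
}

func main() {
	pool := []tx{{"a", 1, 40}, {"b", 3, 40}, {"c", 9, 40}}
	fmt.Println(evictable(pool, 5, 50, 120, 120)) // evicts a and b
}
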
@@ -1,176 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestTxPriorityQueue(t *testing.T) {
|
||||
pq := NewTxPriorityQueue()
|
||||
numTxs := 1000
|
||||
|
||||
priorities := make([]int, numTxs)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 1; i <= numTxs; i++ {
|
||||
priorities[i-1] = i
|
||||
wg.Add(1)
|
||||
|
||||
go func(i int) {
|
||||
pq.PushTx(&WrappedTx{
|
||||
priority: int64(i),
|
||||
timestamp: time.Now(),
|
||||
})
|
||||
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(sort.IntSlice(priorities)))
|
||||
|
||||
wg.Wait()
|
||||
require.Equal(t, numTxs, pq.NumTxs())
|
||||
|
||||
// Wait a second and push a tx with a duplicate priority
|
||||
time.Sleep(time.Second)
|
||||
now := time.Now()
|
||||
pq.PushTx(&WrappedTx{
|
||||
priority: 1000,
|
||||
timestamp: now,
|
||||
})
|
||||
require.Equal(t, 1001, pq.NumTxs())
|
||||
|
||||
tx := pq.PopTx()
|
||||
require.Equal(t, 1000, pq.NumTxs())
|
||||
require.Equal(t, int64(1000), tx.priority)
|
||||
require.NotEqual(t, now, tx.timestamp)
|
||||
|
||||
gotPriorities := make([]int, 0)
|
||||
for pq.NumTxs() > 0 {
|
||||
gotPriorities = append(gotPriorities, int(pq.PopTx().priority))
|
||||
}
|
||||
|
||||
require.Equal(t, priorities, gotPriorities)
|
||||
}
|
||||
|
||||
func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) {
|
||||
pq := NewTxPriorityQueue()
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
values := make([]int, 1000)
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
tx := make([]byte, 5) // each tx is 5 bytes
|
||||
_, err := rng.Read(tx)
|
||||
require.NoError(t, err)
|
||||
|
||||
x := rng.Intn(100000)
|
||||
pq.PushTx(&WrappedTx{
|
||||
tx: tx,
|
||||
priority: int64(x),
|
||||
})
|
||||
|
||||
values[i] = x
|
||||
}
|
||||
|
||||
sort.Ints(values)
|
||||
|
||||
max := values[len(values)-1]
|
||||
min := values[0]
|
||||
totalSize := int64(len(values) * 5)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
priority, txSize, totalSize, cap int64
|
||||
expectedLen int
|
||||
}{
|
||||
{
|
||||
name: "larest priority; single tx",
|
||||
priority: int64(max + 1),
|
||||
txSize: 5,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 1,
|
||||
},
|
||||
{
|
||||
name: "larest priority; multi tx",
|
||||
priority: int64(max + 1),
|
||||
txSize: 17,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 4,
|
||||
},
|
||||
{
|
||||
name: "larest priority; out of capacity",
|
||||
priority: int64(max + 1),
|
||||
txSize: totalSize + 1,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 0,
|
||||
},
|
||||
{
|
||||
name: "smallest priority; no tx",
|
||||
priority: int64(min - 1),
|
||||
txSize: 5,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 0,
|
||||
},
|
||||
{
|
||||
name: "small priority; no tx",
|
||||
priority: int64(min),
|
||||
txSize: 5,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
evictTxs := pq.GetEvictableTxs(tc.priority, tc.txSize, tc.totalSize, tc.cap)
|
||||
require.Len(t, evictTxs, tc.expectedLen)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTxPriorityQueue_RemoveTx(t *testing.T) {
|
||||
pq := NewTxPriorityQueue()
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
numTxs := 1000
|
||||
|
||||
values := make([]int, numTxs)
|
||||
|
||||
for i := 0; i < numTxs; i++ {
|
||||
x := rng.Intn(100000)
|
||||
pq.PushTx(&WrappedTx{
|
||||
priority: int64(x),
|
||||
})
|
||||
|
||||
values[i] = x
|
||||
}
|
||||
|
||||
require.Equal(t, numTxs, pq.NumTxs())
|
||||
|
||||
sort.Ints(values)
|
||||
max := values[len(values)-1]
|
||||
|
||||
wtx := pq.txs[pq.NumTxs()/2]
|
||||
pq.RemoveTx(wtx)
|
||||
require.Equal(t, numTxs-1, pq.NumTxs())
|
||||
require.Equal(t, int64(max), pq.PopTx().priority)
|
||||
require.Equal(t, numTxs-2, pq.NumTxs())
|
||||
|
||||
require.NotPanics(t, func() {
|
||||
pq.RemoveTx(&WrappedTx{heapIndex: numTxs})
|
||||
pq.RemoveTx(&WrappedTx{heapIndex: numTxs + 1})
|
||||
})
|
||||
require.Equal(t, numTxs-2, pq.NumTxs())
|
||||
}
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/internal/libs/clist"
|
||||
@@ -24,13 +23,6 @@ var (
|
||||
_ p2p.Wrapper = (*protomem.Message)(nil)
|
||||
)
|
||||
|
||||
// PeerManager defines the interface contract required for getting necessary
|
||||
// peer information. This should eventually be replaced with a message-oriented
|
||||
// approach utilizing the p2p stack.
|
||||
type PeerManager interface {
|
||||
GetHeight(types.NodeID) int64
|
||||
}
|
||||
|
||||
// Reactor implements a service that contains mempool of txs that are broadcasted
|
||||
// amongst peers. It maintains a map from peer ID to counter, to prevent gossiping
|
||||
// txs to the peers you received it from.
|
||||
@@ -41,11 +33,6 @@ type Reactor struct {
|
||||
mempool *TxMempool
|
||||
ids *mempool.MempoolIDs
|
||||
|
||||
// XXX: Currently, this is the only way to get information about a peer. Ideally,
|
||||
// we rely on message-oriented communication to get necessary peer data.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
peerMgr PeerManager
|
||||
|
||||
mempoolCh *p2p.Channel
|
||||
peerUpdates *p2p.PeerUpdates
|
||||
closeCh chan struct{}
|
||||
@@ -66,7 +53,6 @@ type Reactor struct {
|
||||
func NewReactor(
|
||||
logger log.Logger,
|
||||
cfg *config.MempoolConfig,
|
||||
peerMgr PeerManager,
|
||||
txmp *TxMempool,
|
||||
mempoolCh *p2p.Channel,
|
||||
peerUpdates *p2p.PeerUpdates,
|
||||
@@ -74,7 +60,6 @@ func NewReactor(
|
||||
|
||||
r := &Reactor{
|
||||
cfg: cfg,
|
||||
peerMgr: peerMgr,
|
||||
mempool: txmp,
|
||||
ids: mempool.NewMempoolIDs(),
|
||||
mempoolCh: mempoolCh,
|
||||
@@ -178,6 +163,15 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {
|
||||
|
||||
for _, tx := range protoTxs {
|
||||
if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
|
||||
if errors.Is(err, types.ErrTxInCache) {
|
||||
// if the tx is in the cache,
|
||||
// then we've been gossiped a
|
||||
// Tx that we've already
|
||||
// got. Gossip should be
|
||||
// smarter, but it's not a
|
||||
// problem.
|
||||
continue
|
||||
}
|
||||
logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err)
|
||||
}
|
||||
}
|
||||
@@ -314,9 +308,6 @@ func (r *Reactor) processPeerUpdates() {
|
||||
}
|
||||
|
||||
func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
|
||||
peerMempoolID := r.ids.GetForPeer(peerID)
|
||||
var nextGossipTx *clist.CElement
|
||||
|
||||
// remove the peer ID from the map of routines and mark the waitgroup as done
|
||||
defer func() {
|
||||
r.mtx.Lock()
|
||||
@@ -335,6 +326,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
|
||||
}
|
||||
}()
|
||||
|
||||
peerMempoolID := r.ids.GetForPeer(peerID)
|
||||
var nextGossipTx *clist.CElement
|
||||
for {
|
||||
if !r.IsRunning() {
|
||||
return
|
||||
@@ -345,8 +338,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
|
||||
// start from the beginning.
|
||||
if nextGossipTx == nil {
|
||||
select {
|
||||
case <-r.mempool.WaitForNextTx(): // wait until a tx is available
|
||||
if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil {
|
||||
case <-r.mempool.TxsWaitChan(): // wait until a tx is available
|
||||
if nextGossipTx = r.mempool.TxsFront(); nextGossipTx == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -364,18 +357,11 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
|
||||
|
||||
memTx := nextGossipTx.Value.(*WrappedTx)
|
||||
|
||||
if r.peerMgr != nil {
|
||||
height := r.peerMgr.GetHeight(peerID)
|
||||
if height > 0 && height < memTx.height-1 {
|
||||
// allow for a lag of one block
|
||||
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Send the transaction to a peer if we didn't receive it from that peer.
|
||||
//
|
||||
// NOTE: Transaction batching was disabled due to:
|
||||
// https://github.com/tendermint/tendermint/issues/5796
|
||||
if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok {
|
||||
if !memTx.HasPeer(peerMempoolID) {
|
||||
// Send the mempool tx to the corresponding peer. Note, the peer may be
|
||||
// behind and thus would not be able to process the mempool tx correctly.
|
||||
r.mempoolCh.Out <- p2p.Envelope{
|
||||
|
||||
@@ -67,7 +67,6 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite {
|
||||
rts.reactors[nodeID] = NewReactor(
|
||||
rts.logger.With("nodeID", nodeID),
|
||||
cfg.Mempool,
|
||||
rts.network.Nodes[nodeID].PeerManager,
|
||||
mempool,
|
||||
rts.mempoolChannels[nodeID],
|
||||
rts.peerUpdates[nodeID],
|
||||
@@ -135,7 +134,9 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
primaryMempool.Lock()
|
||||
primaryMempool.insertTx(next)
|
||||
primaryMempool.Unlock()
|
||||
}()
|
||||
}
|
||||
|
||||
|
||||
@@ -1,281 +1,87 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/internal/libs/clist"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// WrappedTx defines a wrapper around a raw transaction with additional metadata
|
||||
// that is used for indexing.
|
||||
type WrappedTx struct {
|
||||
// tx represents the raw binary transaction data
|
||||
tx types.Tx
|
||||
tx types.Tx // the original transaction data
|
||||
hash types.TxKey // the transaction hash
|
||||
height int64 // height when this transaction was initially checked (for expiry)
|
||||
timestamp time.Time // time when transaction was entered (for TTL)
|
||||
|
||||
// hash defines the transaction hash and the primary key used in the mempool
|
||||
hash types.TxKey
|
||||
|
||||
// height defines the height at which the transaction was validated at
|
||||
height int64
|
||||
|
||||
// gasWanted defines the amount of gas the transaction sender requires
|
||||
gasWanted int64
|
||||
|
||||
// priority defines the transaction's priority as specified by the application
|
||||
// in the ResponseCheckTx response.
|
||||
priority int64
|
||||
|
||||
// sender defines the transaction's sender as specified by the application in
|
||||
// the ResponseCheckTx response.
|
||||
sender string
|
||||
|
||||
// timestamp is the time at which the node first received the transaction from
|
||||
// a peer. It is used as a second dimension is prioritizing transactions when
|
||||
// two transactions have the same priority.
|
||||
timestamp time.Time
|
||||
|
||||
// peers records a mapping of all peers that sent a given transaction
|
||||
peers map[uint16]struct{}
|
||||
|
||||
// heapIndex defines the index of the item in the heap
|
||||
heapIndex int
|
||||
|
||||
// gossipEl references the linked-list element in the gossip index
|
||||
gossipEl *clist.CElement
|
||||
|
||||
// removed marks the transaction as removed from the mempool. This is set
|
||||
// during RemoveTx and is needed due to the fact that a given existing
|
||||
// transaction in the mempool can be evicted when it is simultaneously having
|
||||
// a reCheckTx callback executed.
|
||||
removed bool
|
||||
mtx sync.Mutex
|
||||
gasWanted int64 // app: gas required to execute this transaction
|
||||
priority int64 // app: priority value for this transaction
|
||||
sender string // app: assigned sender label
|
||||
peers map[uint16]bool // peer IDs who have sent us this transaction
|
||||
}
|
||||
|
||||
func (wtx *WrappedTx) Size() int {
|
||||
return len(wtx.tx)
|
||||
}
|
||||
// Size reports the size of the raw transaction in bytes.
|
||||
func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) }
|
||||
|
||||
// TxStore implements a thread-safe mapping of valid transaction(s).
|
||||
//
|
||||
// NOTE:
|
||||
// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative
|
||||
// access is not allowed. Regardless, it is not expected for the mempool to
|
||||
// need mutative access.
|
||||
type TxStore struct {
|
||||
mtx tmsync.RWMutex
|
||||
hashTxs map[types.TxKey]*WrappedTx // primary index
|
||||
senderTxs map[string]*WrappedTx // sender is defined by the ABCI application
|
||||
}
|
||||
|
||||
func NewTxStore() *TxStore {
|
||||
return &TxStore{
|
||||
senderTxs: make(map[string]*WrappedTx),
|
||||
hashTxs: make(map[types.TxKey]*WrappedTx),
|
||||
// SetPeer adds the specified peer ID as a sender of w.
|
||||
func (w *WrappedTx) SetPeer(id uint16) {
|
||||
w.mtx.Lock()
|
||||
defer w.mtx.Unlock()
|
||||
if w.peers == nil {
|
||||
w.peers = map[uint16]bool{id: true}
|
||||
} else {
|
||||
w.peers[id] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the total number of transactions in the store.
|
||||
func (txs *TxStore) Size() int {
|
||||
txs.mtx.RLock()
|
||||
defer txs.mtx.RUnlock()
|
||||
|
||||
	return len(txs.hashTxs)
}

// GetAllTxs returns all the transactions currently in the store.
func (txs *TxStore) GetAllTxs() []*WrappedTx {
	txs.mtx.RLock()
	defer txs.mtx.RUnlock()

	wTxs := make([]*WrappedTx, len(txs.hashTxs))
	i := 0
	for _, wtx := range txs.hashTxs {
		wTxs[i] = wtx
		i++
	}

	return wTxs
}

// GetTxBySender returns a *WrappedTx by the transaction's sender property
// defined by the ABCI application.
func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
	txs.mtx.RLock()
	defer txs.mtx.RUnlock()

	return txs.senderTxs[sender]
}

// GetTxByHash returns a *WrappedTx by the transaction's hash.
func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
	txs.mtx.RLock()
	defer txs.mtx.RUnlock()

	return txs.hashTxs[hash]
}

// IsTxRemoved returns true if a transaction by hash is marked as removed and
// false otherwise.
func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
	txs.mtx.RLock()
	defer txs.mtx.RUnlock()

	wtx, ok := txs.hashTxs[hash]
	if ok {
		return wtx.removed
	}

	return false
}

// SetTx stores a *WrappedTx by its hash. If the transaction also contains a
// non-empty sender, we additionally store the transaction by the sender as
// defined by the ABCI application.
func (txs *TxStore) SetTx(wtx *WrappedTx) {
	txs.mtx.Lock()
	defer txs.mtx.Unlock()

	if len(wtx.sender) > 0 {
		txs.senderTxs[wtx.sender] = wtx
	}

	txs.hashTxs[wtx.tx.Key()] = wtx
}

// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
// indexes of the transaction.
func (txs *TxStore) RemoveTx(wtx *WrappedTx) {
	txs.mtx.Lock()
	defer txs.mtx.Unlock()

	if len(wtx.sender) > 0 {
		delete(txs.senderTxs, wtx.sender)
	}

	delete(txs.hashTxs, wtx.tx.Key())
	wtx.removed = true
}

// TxHasPeer returns true if a transaction by hash has a given peer ID and false
// otherwise. If the transaction does not exist, false is returned.
func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool {
	txs.mtx.RLock()
	defer txs.mtx.RUnlock()

	wtx := txs.hashTxs[hash]
	if wtx == nil {
		return false
	}

	_, ok := wtx.peers[peerID]
	return ok
}

// HasPeer reports whether the specified peer ID is a sender of w.
func (w *WrappedTx) HasPeer(id uint16) bool {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	_, ok := w.peers[id]
	return ok
}

// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the
// given peerID to the WrappedTx's set of peers that sent us this transaction.
// We return true if we've already recorded the given peer for this transaction
// and false otherwise. If the transaction does not exist by hash, we return
// (nil, false).
func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) {
	txs.mtx.Lock()
	defer txs.mtx.Unlock()

	wtx := txs.hashTxs[hash]
	if wtx == nil {
		return nil, false
	}

	if wtx.peers == nil {
		wtx.peers = make(map[uint16]struct{})
	}

	if _, ok := wtx.peers[peerID]; ok {
		return wtx, true
	}

	wtx.peers[peerID] = struct{}{}
	return wtx, false
}

// SetGasWanted sets the application-assigned gas requirement of w.
func (w *WrappedTx) SetGasWanted(gas int64) {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	w.gasWanted = gas
}

// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
// used to build generic transaction indexes in the mempool. It accepts a
// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
// references which is used during Insert in order to determine sorted order. If
// less returns true, a <= b.
type WrappedTxList struct {
	mtx  tmsync.RWMutex
	txs  []*WrappedTx
	less func(*WrappedTx, *WrappedTx) bool
}

// GasWanted reports the application-assigned gas requirement of w.
func (w *WrappedTx) GasWanted() int64 {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	return w.gasWanted
}

func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
	return &WrappedTxList{
		txs:  make([]*WrappedTx, 0),
		less: less,
	}
}

// SetSender sets the application-assigned sender of w.
func (w *WrappedTx) SetSender(sender string) {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	w.sender = sender
}

// Size returns the number of WrappedTx objects in the list.
func (wtl *WrappedTxList) Size() int {
	wtl.mtx.RLock()
	defer wtl.mtx.RUnlock()

	return len(wtl.txs)
}

// Sender reports the application-assigned sender of w.
func (w *WrappedTx) Sender() string {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	return w.sender
}

// Reset resets the list of transactions to an empty list.
func (wtl *WrappedTxList) Reset() {
	wtl.mtx.Lock()
	defer wtl.mtx.Unlock()

	wtl.txs = make([]*WrappedTx, 0)
}

// SetPriority sets the application-assigned priority of w.
func (w *WrappedTx) SetPriority(p int64) {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	w.priority = p
}

// Insert inserts a WrappedTx reference into the sorted list based on the list's
// comparator function.
func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
	wtl.mtx.Lock()
	defer wtl.mtx.Unlock()

	i := sort.Search(len(wtl.txs), func(i int) bool {
		return wtl.less(wtl.txs[i], wtx)
	})

	if i == len(wtl.txs) {
		// insert at the end
		wtl.txs = append(wtl.txs, wtx)
		return
	}

	// Make space for the inserted element by shifting values at the insertion
	// index up one index.
	//
	// NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
	wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
	wtl.txs[i] = wtx
}

// Remove attempts to remove a WrappedTx from the sorted list.
func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
	wtl.mtx.Lock()
	defer wtl.mtx.Unlock()

	i := sort.Search(len(wtl.txs), func(i int) bool {
		return wtl.less(wtl.txs[i], wtx)
	})

	// Since the list is sorted, we evaluate all elements starting at i. Note, if
	// the element does not exist, we may potentially evaluate the entire remainder
	// of the list. However, a caller should not be expected to call Remove with a
	// non-existing element.
	for i < len(wtl.txs) {
		if wtl.txs[i] == wtx {
			wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
			return
		}

		i++
	}
}

// Priority reports the application-assigned priority of w.
func (w *WrappedTx) Priority() int64 {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	return w.priority
}
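Insert and Remove above both lean on sort.Search over the caller-supplied comparator. As a quick aside, here is a minimal, self-contained sketch of the same sorted-insert pattern; the item type, field name, and comparator are illustrative only and are not part of this change:

```go
package main

import (
	"fmt"
	"sort"
)

type item struct{ priority int64 }

type sortedList struct {
	items []*item
	less  func(a, b *item) bool
}

// insert keeps items ordered according to less, mirroring the Insert method above.
func (l *sortedList) insert(it *item) {
	i := sort.Search(len(l.items), func(i int) bool { return l.less(l.items[i], it) })
	if i == len(l.items) {
		l.items = append(l.items, it) // insert at the end
		return
	}
	// shift the tail up one slot, then drop the new item into place
	l.items = append(l.items[:i+1], l.items[i:]...)
	l.items[i] = it
}

func main() {
	l := &sortedList{less: func(a, b *item) bool { return a.priority >= b.priority }}
	for _, p := range []int64{5, 1, 9, 3} {
		l.insert(&item{priority: p})
	}
	for _, it := range l.items {
		fmt.Println(it.priority) // prints 1, 3, 5, 9
	}
}
```

sort.Search returns the first index whose element satisfies less relative to the new entry, which is exactly where the new entry is spliced in; the append/copy idiom avoids a fresh allocation whenever the slice has spare capacity.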
@@ -1,230 +0,0 @@
package v1

import (
	"fmt"
	"math/rand"
	"sort"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/types"
)

func TestTxStore_GetTxBySender(t *testing.T) {
	txs := NewTxStore()
	wtx := &WrappedTx{
		tx:        []byte("test_tx"),
		sender:    "foo",
		priority:  1,
		timestamp: time.Now(),
	}

	res := txs.GetTxBySender(wtx.sender)
	require.Nil(t, res)

	txs.SetTx(wtx)

	res = txs.GetTxBySender(wtx.sender)
	require.NotNil(t, res)
	require.Equal(t, wtx, res)
}

func TestTxStore_GetTxByHash(t *testing.T) {
	txs := NewTxStore()
	wtx := &WrappedTx{
		tx:        []byte("test_tx"),
		sender:    "foo",
		priority:  1,
		timestamp: time.Now(),
	}

	key := wtx.tx.Key()
	res := txs.GetTxByHash(key)
	require.Nil(t, res)

	txs.SetTx(wtx)

	res = txs.GetTxByHash(key)
	require.NotNil(t, res)
	require.Equal(t, wtx, res)
}

func TestTxStore_SetTx(t *testing.T) {
	txs := NewTxStore()
	wtx := &WrappedTx{
		tx:        []byte("test_tx"),
		priority:  1,
		timestamp: time.Now(),
	}

	key := wtx.tx.Key()
	txs.SetTx(wtx)

	res := txs.GetTxByHash(key)
	require.NotNil(t, res)
	require.Equal(t, wtx, res)

	wtx.sender = "foo"
	txs.SetTx(wtx)

	res = txs.GetTxByHash(key)
	require.NotNil(t, res)
	require.Equal(t, wtx, res)
}

func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) {
	txs := NewTxStore()
	wtx := &WrappedTx{
		tx:        []byte("test_tx"),
		priority:  1,
		timestamp: time.Now(),
	}

	key := wtx.tx.Key()
	txs.SetTx(wtx)

	res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15)
	require.Nil(t, res)
	require.False(t, ok)

	res, ok = txs.GetOrSetPeerByTxHash(key, 15)
	require.NotNil(t, res)
	require.False(t, ok)

	res, ok = txs.GetOrSetPeerByTxHash(key, 15)
	require.NotNil(t, res)
	require.True(t, ok)

	require.True(t, txs.TxHasPeer(key, 15))
	require.False(t, txs.TxHasPeer(key, 16))
}

func TestTxStore_RemoveTx(t *testing.T) {
	txs := NewTxStore()
	wtx := &WrappedTx{
		tx:        []byte("test_tx"),
		priority:  1,
		timestamp: time.Now(),
	}

	txs.SetTx(wtx)

	key := wtx.tx.Key()
	res := txs.GetTxByHash(key)
	require.NotNil(t, res)

	txs.RemoveTx(res)

	res = txs.GetTxByHash(key)
	require.Nil(t, res)
}

func TestTxStore_Size(t *testing.T) {
	txStore := NewTxStore()
	numTxs := 1000

	for i := 0; i < numTxs; i++ {
		txStore.SetTx(&WrappedTx{
			tx:        []byte(fmt.Sprintf("test_tx_%d", i)),
			priority:  int64(i),
			timestamp: time.Now(),
		})
	}

	require.Equal(t, numTxs, txStore.Size())
}

func TestWrappedTxList_Reset(t *testing.T) {
	list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
		return wtx1.height >= wtx2.height
	})

	require.Zero(t, list.Size())

	for i := 0; i < 100; i++ {
		list.Insert(&WrappedTx{height: int64(i)})
	}

	require.Equal(t, 100, list.Size())

	list.Reset()
	require.Zero(t, list.Size())
}

func TestWrappedTxList_Insert(t *testing.T) {
	list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
		return wtx1.height >= wtx2.height
	})

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	var expected []int
	for i := 0; i < 100; i++ {
		height := rng.Int63n(10000)
		expected = append(expected, int(height))
		list.Insert(&WrappedTx{height: height})

		if i%10 == 0 {
			list.Insert(&WrappedTx{height: height})
			expected = append(expected, int(height))
		}
	}

	got := make([]int, list.Size())
	for i, wtx := range list.txs {
		got[i] = int(wtx.height)
	}

	sort.Ints(expected)
	require.Equal(t, expected, got)
}

func TestWrappedTxList_Remove(t *testing.T) {
	list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
		return wtx1.height >= wtx2.height
	})

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	var txs []*WrappedTx
	for i := 0; i < 100; i++ {
		height := rng.Int63n(10000)
		tx := &WrappedTx{height: height}

		txs = append(txs, tx)
		list.Insert(tx)

		if i%10 == 0 {
			tx = &WrappedTx{height: height}
			list.Insert(tx)
			txs = append(txs, tx)
		}
	}

	// remove a tx that does not exist
	list.Remove(&WrappedTx{height: 20000})

	// remove a tx that exists (by height) but is not referenced
	list.Remove(&WrappedTx{height: txs[0].height})

	// remove a few existing txs
	for i := 0; i < 25; i++ {
		j := rng.Intn(len(txs))
		list.Remove(txs[j])
		txs = append(txs[:j], txs[j+1:]...)
	}

	expected := make([]int, len(txs))
	for i, tx := range txs {
		expected[i] = int(tx.height)
	}

	got := make([]int, list.Size())
	for i, wtx := range list.txs {
		got[i] = int(wtx.height)
	}

	sort.Ints(expected)
	require.Equal(t, expected, got)
}
@@ -807,6 +807,8 @@ func (ch *Channel) sendBytes(bytes []byte) bool {
|
||||
return true
|
||||
case <-time.After(defaultSendTimeout):
|
||||
return false
|
||||
case <-ch.conn.Quit():
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ func newConnTracker(max uint, window time.Duration) connectionTracker {
|
||||
cache: make(map[string]uint),
|
||||
lastConnect: make(map[string]time.Time),
|
||||
max: max,
|
||||
window: window,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,7 +44,7 @@ func (rat *connTrackerImpl) AddConn(addr net.IP) error {
|
||||
if num := rat.cache[address]; num >= rat.max {
|
||||
return fmt.Errorf("%q has %d connections [max=%d]", address, num, rat.max)
|
||||
} else if num == 0 {
|
||||
// if there is already at least connection, check to
|
||||
// if there is already at least one connection, check to
|
||||
// see if it was established before within the window,
|
||||
// and error if so.
|
||||
if last := rat.lastConnect[address]; time.Since(last) < rat.window {
|
||||
|
||||
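The connTracker change above combines a per-address connection cap with a reconnect window. A minimal, self-contained sketch of that rate-limiting idea follows; the type and values here are illustrative, not the package's actual implementation:

```go
package main

import (
	"fmt"
	"time"
)

type tracker struct {
	cache       map[string]uint      // open connections per address
	lastConnect map[string]time.Time // most recent connection per address
	max         uint
	window      time.Duration
}

func (t *tracker) addConn(addr string) error {
	if n := t.cache[addr]; n >= t.max {
		return fmt.Errorf("%q has %d connections [max=%d]", addr, n, t.max)
	} else if n == 0 {
		// no open connections from this address: reject it if the previous
		// connection was established within the window.
		if last := t.lastConnect[addr]; time.Since(last) < t.window {
			return fmt.Errorf("%q reconnected too quickly", addr)
		}
	}
	t.cache[addr]++
	t.lastConnect[addr] = time.Now()
	return nil
}

func (t *tracker) removeConn(addr string) {
	if t.cache[addr] > 0 {
		t.cache[addr]--
	}
}

func main() {
	t := &tracker{
		cache:       map[string]uint{},
		lastConnect: map[string]time.Time{},
		max:         2,
		window:      100 * time.Millisecond,
	}
	fmt.Println(t.addConn("10.0.0.1")) // <nil>
	t.removeConn("10.0.0.1")
	fmt.Println(t.addConn("10.0.0.1")) // rejected: reconnected within the window
	time.Sleep(100 * time.Millisecond)
	fmt.Println(t.addConn("10.0.0.1")) // <nil> again, as in the Window test below
}
```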
@@ -70,4 +70,15 @@ func TestConnTracker(t *testing.T) {
|
||||
}
|
||||
require.Equal(t, 10, ct.Len())
|
||||
})
|
||||
t.Run("Window", func(t *testing.T) {
|
||||
const window = 100 * time.Millisecond
|
||||
ct := newConnTracker(10, window)
|
||||
ip := randLocalIPv4()
|
||||
require.NoError(t, ct.AddConn(ip))
|
||||
ct.RemoveConn(ip)
|
||||
require.Error(t, ct.AddConn(ip))
|
||||
time.Sleep(window)
|
||||
require.NoError(t, ct.AddConn(ip))
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
@@ -27,8 +27,13 @@ var (
|
||||
|
||||
// Metrics contains metrics exposed by this package.
|
||||
type Metrics struct {
|
||||
// Number of peers.
|
||||
// Number of peers connected.
|
||||
Peers metrics.Gauge
|
||||
// Number of peers in the peer store database.
|
||||
PeersStored metrics.Gauge
|
||||
// Number of inactive peers stored.
|
||||
PeersInactivated metrics.Gauge
|
||||
|
||||
// Number of bytes received from a given peer.
|
||||
PeerReceiveBytesTotal metrics.Counter
|
||||
// Number of bytes sent to a given peer.
|
||||
@@ -36,6 +41,21 @@ type Metrics struct {
|
||||
// Pending bytes to be sent to a given peer.
|
||||
PeerPendingSendBytes metrics.Gauge
|
||||
|
||||
// Number of successful connection attempts
|
||||
PeersConnectedSuccess metrics.Counter
|
||||
// Number of failed connection attempts
|
||||
PeersConnectedFailure metrics.Counter
|
||||
|
||||
// Number of peers connected as a result of dialing the
|
||||
// peer.
|
||||
PeersConnectedOutgoing metrics.Gauge
|
||||
// Number of peers connected as a result of the peer dialing
|
||||
// this node.
|
||||
PeersConnectedIncoming metrics.Gauge
|
||||
|
||||
// Number of peers evicted by this node.
|
||||
PeersEvicted metrics.Counter
|
||||
|
||||
// RouterPeerQueueRecv defines the time taken to read off of a peer's queue
|
||||
// before sending on the connection.
|
||||
RouterPeerQueueRecv metrics.Histogram
|
||||
@@ -73,7 +93,49 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers",
|
||||
Help: "Number of peers.",
|
||||
Help: "Number of peers connected.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
PeersStored: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers_stored",
|
||||
Help: "Number of peers in the peer Store",
|
||||
}, labels).With(labelsAndValues...),
|
||||
PeersInactivated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers_inactivated",
|
||||
Help: "Number of peers inactivated",
|
||||
}, labels).With(labelsAndValues...),
|
||||
PeersConnectedSuccess: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers_connected_success",
|
||||
Help: "Number of successful peer connection attempts",
|
||||
}, labels).With(labelsAndValues...),
|
||||
PeersEvicted: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers_evicted",
|
||||
Help: "Number of connected peers evicted",
|
||||
}, labels).With(labelsAndValues...),
|
||||
PeersConnectedFailure: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers_connected_failure",
|
||||
Help: "Number of unsuccessful peer connection attempts",
|
||||
}, labels).With(labelsAndValues...),
|
||||
PeersConnectedIncoming: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers_connected_incoming",
|
||||
Help: "Number of peers connected by peer dialing this node",
|
||||
}, labels).With(labelsAndValues...),
|
||||
PeersConnectedOutgoing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "peers_connected_outgoing",
|
||||
Help: "Number of peers connected by this node dialing the peer",
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
@@ -141,6 +203,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
Peers: discard.NewGauge(),
|
||||
PeersStored: discard.NewGauge(),
|
||||
PeersConnectedSuccess: discard.NewCounter(),
|
||||
PeersConnectedFailure: discard.NewCounter(),
|
||||
PeersConnectedIncoming: discard.NewGauge(),
|
||||
PeersConnectedOutgoing: discard.NewGauge(),
|
||||
PeersInactivated: discard.NewGauge(),
|
||||
PeersEvicted: discard.NewCounter(),
|
||||
PeerReceiveBytesTotal: discard.NewCounter(),
|
||||
PeerSendBytesTotal: discard.NewCounter(),
|
||||
PeerPendingSendBytes: discard.NewGauge(),
|
||||
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
|
||||
p2p "github.com/tendermint/tendermint/internal/p2p"
|
||||
|
||||
time "time"
|
||||
|
||||
types "github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -49,20 +51,20 @@ func (_m *Connection) FlushClose() error {
|
||||
return r0
|
||||
}
|
||||
|
||||
// Handshake provides a mock function with given fields: _a0, _a1, _a2
|
||||
func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
|
||||
ret := _m.Called(_a0, _a1, _a2)
|
||||
// Handshake provides a mock function with given fields: _a0, _a1, _a2, _a3
|
||||
func (_m *Connection) Handshake(_a0 context.Context, _a1 time.Duration, _a2 types.NodeInfo, _a3 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
|
||||
ret := _m.Called(_a0, _a1, _a2, _a3)
|
||||
|
||||
var r0 types.NodeInfo
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok {
|
||||
r0 = rf(_a0, _a1, _a2)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok {
|
||||
r0 = rf(_a0, _a1, _a2, _a3)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.NodeInfo)
|
||||
}
|
||||
|
||||
var r1 crypto.PubKey
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
|
||||
r1 = rf(_a0, _a1, _a2)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
|
||||
r1 = rf(_a0, _a1, _a2, _a3)
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).(crypto.PubKey)
|
||||
@@ -70,8 +72,8 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 cry
|
||||
}
|
||||
|
||||
var r2 error
|
||||
if rf, ok := ret.Get(2).(func(context.Context, types.NodeInfo, crypto.PrivKey) error); ok {
|
||||
r2 = rf(_a0, _a1, _a2)
|
||||
if rf, ok := ret.Get(2).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) error); ok {
|
||||
r2 = rf(_a0, _a1, _a2, _a3)
|
||||
} else {
|
||||
r2 = ret.Error(2)
|
||||
}
|
||||
@@ -206,3 +208,18 @@ func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewConnection interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewConnection(t mockConstructorTestingTNewConnection) *Connection {
|
||||
mock := &Connection{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -332,3 +332,18 @@ func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool {
|
||||
func (_m *Peer) Wait() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewPeer interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewPeer creates a new instance of Peer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewPeer(t mockConstructorTestingTNewPeer) *Peer {
|
||||
mock := &Peer{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -119,3 +119,18 @@ func (_m *Transport) String() string {
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewTransport interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewTransport creates a new instance of Transport. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewTransport(t mockConstructorTestingTNewTransport) *Transport {
|
||||
mock := &Transport{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package p2ptest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -238,11 +237,13 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
|
||||
require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
MinRetryTime: 10 * time.Millisecond,
|
||||
MaxRetryTime: 100 * time.Millisecond,
|
||||
RetryTimeJitter: time.Millisecond,
|
||||
MaxPeers: opts.MaxPeers,
|
||||
MaxConnected: opts.MaxConnected,
|
||||
MinRetryTime: 10 * time.Millisecond,
|
||||
DisconnectCooldownPeriod: 10 * time.Millisecond,
|
||||
MaxRetryTime: 100 * time.Millisecond,
|
||||
RetryTimeJitter: time.Millisecond,
|
||||
MaxPeers: opts.MaxPeers,
|
||||
MaxConnected: opts.MaxConnected,
|
||||
Metrics: p2p.NopMetrics(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -253,7 +254,7 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
|
||||
privKey,
|
||||
peerManager,
|
||||
[]p2p.Transport{transport},
|
||||
p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
|
||||
p2p.RouterOptions{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, router.Start())
|
||||
|
||||
@@ -90,7 +90,7 @@ func createOutboundPeerAndPerformHandshake(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk)
|
||||
peerInfo, _, err := pc.conn.Handshake(context.Background(), 0, ourNodeInfo, pk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -187,7 +187,7 @@ func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
|
||||
_, _, err = pc.conn.Handshake(context.Background(), 0, rp.nodeInfo(), rp.PrivKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -213,7 +213,7 @@ func (rp *remotePeer) accept() {
|
||||
if err != nil {
|
||||
golog.Printf("Failed to create a peer: %+v", err)
|
||||
}
|
||||
_, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
|
||||
_, _, err = pc.conn.Handshake(context.Background(), 0, rp.nodeInfo(), rp.PrivKey)
|
||||
if err != nil {
|
||||
golog.Printf("Failed to handshake a peer: %+v", err)
|
||||
}
|
||||
|
||||
@@ -38,11 +38,19 @@ const (
|
||||
PeerStatusBad PeerStatus = "bad" // peer observed as bad
|
||||
)
|
||||
|
||||
// PeerScore is a numeric score assigned to a peer (higher is better).
|
||||
type PeerScore uint8
|
||||
type peerConnectionDirection int
|
||||
|
||||
const (
|
||||
PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers
|
||||
peerConnectionIncoming peerConnectionDirection = iota + 1
|
||||
peerConnectionOutgoing
|
||||
)
|
||||
|
||||
// PeerScore is a numeric score assigned to a peer (higher is better).
|
||||
type PeerScore int16
|
||||
|
||||
const (
|
||||
PeerScorePersistent PeerScore = math.MaxInt16 // persistent peers
|
||||
MaxPeerScoreNotPersistent PeerScore = PeerScorePersistent - 1
|
||||
)
|
||||
|
||||
// PeerUpdate is a peer update event sent via PeerUpdates.
|
||||
@@ -118,6 +126,13 @@ type PeerManagerOptions struct {
|
||||
// outbound). 0 means no limit.
|
||||
MaxConnected uint16
|
||||
|
||||
// MaxOutgoingConnections specifies the maximum number of outgoing
|
||||
// connections. It must be lower than MaxConnected. If it is
|
||||
// 0, then all connections can be outgoing. Once this limit is
|
||||
// reached, the node will not dial peers, allowing the
|
||||
// remaining peer connections to be used by incoming connections.
|
||||
MaxOutgoingConnections uint16
|
||||
|
||||
// MaxConnectedUpgrade is the maximum number of additional connections to
|
||||
// use for probing any better-scored peers to upgrade to when all connection
|
||||
// slots are full. 0 disables peer upgrading.
|
||||
@@ -147,6 +162,10 @@ type PeerManagerOptions struct {
|
||||
// retry times, to avoid thundering herds. 0 disables jitter.
|
||||
RetryTimeJitter time.Duration
|
||||
|
||||
// DisconnectCooldownPeriod is the amount of time after we
|
||||
// disconnect from a peer before we'll consider dialing a new peer
|
||||
DisconnectCooldownPeriod time.Duration
|
||||
|
||||
// PeerScores sets fixed scores for specific peers. It is mainly used
|
||||
// for testing. A score of 0 is ignored.
|
||||
PeerScores map[types.NodeID]PeerScore
|
||||
@@ -162,6 +181,9 @@ type PeerManagerOptions struct {
|
||||
// persistentPeers provides fast PersistentPeers lookups. It is built
|
||||
// by optimize().
|
||||
persistentPeers map[types.NodeID]bool
|
||||
|
||||
// Peer Metrics
|
||||
Metrics *Metrics
|
||||
}
|
||||
|
||||
// Validate validates the options.
|
||||
@@ -212,6 +234,10 @@ func (o *PeerManagerOptions) Validate() error {
|
||||
}
|
||||
}
|
||||
|
||||
if o.MaxOutgoingConnections > 0 && o.MaxConnected < o.MaxOutgoingConnections {
|
||||
return errors.New("cannot set MaxOutgoingConnections to a value larger than MaxConnected")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
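Validate now rejects a non-zero MaxOutgoingConnections that exceeds MaxConnected. A self-contained sketch of just that check (the options type below is a stand-in for PeerManagerOptions, not the real one):

```go
package main

import (
	"errors"
	"fmt"
)

// options mirrors only the two fields relevant to this check.
type options struct {
	MaxConnected           uint16
	MaxOutgoingConnections uint16
}

func (o options) validate() error {
	if o.MaxOutgoingConnections > 0 && o.MaxConnected < o.MaxOutgoingConnections {
		return errors.New("cannot set MaxOutgoingConnections to a value larger than MaxConnected")
	}
	return nil
}

func main() {
	fmt.Println(options{MaxConnected: 40, MaxOutgoingConnections: 10}.validate()) // <nil>
	fmt.Println(options{MaxConnected: 40, MaxOutgoingConnections: 50}.validate()) // error
}
```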
@@ -280,6 +306,7 @@ func (o *PeerManagerOptions) optimize() {
|
||||
type PeerManager struct {
|
||||
selfID types.NodeID
|
||||
options PeerManagerOptions
|
||||
metrics *Metrics
|
||||
rand *rand.Rand
|
||||
dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes
|
||||
evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes
|
||||
@@ -288,13 +315,13 @@ type PeerManager struct {
|
||||
|
||||
mtx sync.Mutex
|
||||
store *peerStore
|
||||
subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address)
|
||||
dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail)
|
||||
upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail)
|
||||
connected map[types.NodeID]bool // connected peers (Dialed/Accepted → Disconnected)
|
||||
ready map[types.NodeID]bool // ready peers (Ready → Disconnected)
|
||||
evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext)
|
||||
evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected)
|
||||
subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address)
|
||||
dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail)
|
||||
upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail)
|
||||
connected map[types.NodeID]peerConnectionDirection // connected peers (Dialed/Accepted → Disconnected)
|
||||
ready map[types.NodeID]bool // ready peers (Ready → Disconnected)
|
||||
evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext)
|
||||
evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected)
|
||||
}
|
||||
|
||||
// NewPeerManager creates a new peer manager.
|
||||
@@ -314,28 +341,34 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio
|
||||
}
|
||||
|
||||
peerManager := &PeerManager{
|
||||
selfID: selfID,
|
||||
options: options,
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec
|
||||
dialWaker: tmsync.NewWaker(),
|
||||
evictWaker: tmsync.NewWaker(),
|
||||
closeCh: make(chan struct{}),
|
||||
|
||||
selfID: selfID,
|
||||
options: options,
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec
|
||||
dialWaker: tmsync.NewWaker(),
|
||||
evictWaker: tmsync.NewWaker(),
|
||||
closeCh: make(chan struct{}),
|
||||
metrics: NopMetrics(),
|
||||
store: store,
|
||||
dialing: map[types.NodeID]bool{},
|
||||
upgrading: map[types.NodeID]types.NodeID{},
|
||||
connected: map[types.NodeID]bool{},
|
||||
connected: map[types.NodeID]peerConnectionDirection{},
|
||||
ready: map[types.NodeID]bool{},
|
||||
evict: map[types.NodeID]bool{},
|
||||
evicting: map[types.NodeID]bool{},
|
||||
subscriptions: map[*PeerUpdates]*PeerUpdates{},
|
||||
}
|
||||
|
||||
if options.Metrics != nil {
|
||||
peerManager.metrics = options.Metrics
|
||||
}
|
||||
|
||||
if err = peerManager.configurePeers(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = peerManager.prunePeers(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return peerManager, nil
|
||||
}
|
||||
|
||||
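NewPeerManager now defaults to NopMetrics() and only uses real metrics when PeerManagerOptions.Metrics is set. A hedged sketch of wiring them up (illustrative only: these are internal packages, and nodeID is assumed to be a valid types.NodeID created elsewhere):

```go
peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
	MaxConnected: 40,
	// omit Metrics to fall back to p2p.NopMetrics()
	Metrics: p2p.PrometheusMetrics("tendermint", "chain_id", "test-chain"),
})
if err != nil {
	panic(err)
}
```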
@@ -361,6 +394,7 @@ func (m *PeerManager) configurePeers() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
m.metrics.PeersStored.Add(float64(m.store.Size()))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -390,20 +424,45 @@ func (m *PeerManager) prunePeers() error {
|
||||
ranked := m.store.Ranked()
|
||||
for i := len(ranked) - 1; i >= 0; i-- {
|
||||
peerID := ranked[i].ID
|
||||
|
||||
switch {
|
||||
case m.store.Size() <= int(m.options.MaxPeers):
|
||||
return nil
|
||||
case m.dialing[peerID]:
|
||||
case m.connected[peerID]:
|
||||
case m.isConnected(peerID):
|
||||
default:
|
||||
if err := m.store.Delete(peerID); err != nil {
|
||||
return err
|
||||
}
|
||||
m.metrics.PeersStored.Add(-1)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PeerManager) isConnected(peerID types.NodeID) bool {
|
||||
_, ok := m.connected[peerID]
|
||||
return ok
|
||||
}
|
||||
|
||||
type connectionStats struct {
|
||||
incoming uint16
|
||||
outgoing uint16
|
||||
}
|
||||
|
||||
func (m *PeerManager) getConnectedInfo() connectionStats {
|
||||
out := connectionStats{}
|
||||
for _, direction := range m.connected {
|
||||
switch direction {
|
||||
case peerConnectionIncoming:
|
||||
out.incoming++
|
||||
case peerConnectionOutgoing:
|
||||
out.outgoing++
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Add adds a peer to the manager, given as an address. If the peer already
|
||||
// exists, the address is added to it if it isn't already present. This will push
|
||||
// low scoring peers out of the address book if it exceeds the maximum size.
|
||||
@@ -427,12 +486,17 @@ func (m *PeerManager) Add(address NodeAddress) (bool, error) {
|
||||
if ok {
|
||||
return false, nil
|
||||
}
|
||||
if peer.Inactive {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// else add the new address
|
||||
peer.AddressInfo[address] = &peerAddressInfo{Address: address}
|
||||
if err := m.store.Set(peer); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
m.metrics.PeersStored.Add(1)
|
||||
if err := m.prunePeers(); err != nil {
|
||||
return true, err
|
||||
}
|
||||
@@ -452,18 +516,35 @@ func (m *PeerManager) PeerRatio() float64 {
|
||||
return float64(m.store.Size()) / float64(m.options.MaxPeers)
|
||||
}
|
||||
|
||||
func (m *PeerManager) HasMaxPeerCapacity() bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
return len(m.connected) >= int(m.options.MaxConnected)
|
||||
}
|
||||
|
||||
func (m *PeerManager) HasDialedMaxPeers() bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
stats := m.getConnectedInfo()
|
||||
|
||||
return stats.outgoing >= m.options.MaxOutgoingConnections
|
||||
}
|
||||
|
||||
// DialNext finds an appropriate peer address to dial, and marks it as dialing.
|
||||
// If no peer is found, or all connection slots are full, it blocks until one
|
||||
// becomes available. The caller must call Dialed() or DialFailed() for the
|
||||
// returned peer.
|
||||
func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) {
|
||||
for {
|
||||
address, err := m.TryDialNext()
|
||||
if err != nil || (address != NodeAddress{}) {
|
||||
return address, err
|
||||
if address := m.TryDialNext(); (address != NodeAddress{}) {
|
||||
return address, nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-m.dialWaker.Sleep():
|
||||
continue
|
||||
case <-ctx.Done():
|
||||
return NodeAddress{}, ctx.Err()
|
||||
}
|
||||
@@ -472,20 +553,28 @@ func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) {
|
||||
|
||||
// TryDialNext is equivalent to DialNext(), but immediately returns an empty
|
||||
// address if no peers or connection slots are available.
|
||||
func (m *PeerManager) TryDialNext() (NodeAddress, error) {
|
||||
func (m *PeerManager) TryDialNext() NodeAddress {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
// We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including
|
||||
// MaxConnectedUpgrade allows us to probe additional peers that have a
|
||||
// higher score than any other peers, and if successful evict it.
|
||||
if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >=
|
||||
int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return NodeAddress{}, nil
|
||||
if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return NodeAddress{}
|
||||
}
|
||||
|
||||
cinfo := m.getConnectedInfo()
|
||||
if m.options.MaxOutgoingConnections > 0 && cinfo.outgoing >= m.options.MaxOutgoingConnections {
|
||||
return NodeAddress{}
|
||||
}
|
||||
|
||||
for _, peer := range m.store.Ranked() {
|
||||
if m.dialing[peer.ID] || m.connected[peer.ID] {
|
||||
if m.dialing[peer.ID] || m.isConnected(peer.ID) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !peer.LastDisconnected.IsZero() && time.Since(peer.LastDisconnected) < m.options.DisconnectCooldownPeriod {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -494,6 +583,10 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
if id, ok := m.store.Resolve(addressInfo.Address); ok && (m.isConnected(id) || m.dialing[id]) {
|
||||
continue
|
||||
}
|
||||
|
||||
// We now have an eligible address to dial. If we're full but have
|
||||
// upgrade capacity (as checked above), we find a lower-scored peer
|
||||
// we can replace and mark it as upgrading so no one else claims it.
|
||||
@@ -504,25 +597,24 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) {
|
||||
if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
|
||||
upgradeFromPeer := m.findUpgradeCandidate(peer.ID, peer.Score())
|
||||
if upgradeFromPeer == "" {
|
||||
return NodeAddress{}, nil
|
||||
return NodeAddress{}
|
||||
}
|
||||
m.upgrading[upgradeFromPeer] = peer.ID
|
||||
}
|
||||
|
||||
m.dialing[peer.ID] = true
|
||||
return addressInfo.Address, nil
|
||||
return addressInfo.Address
|
||||
}
|
||||
}
|
||||
return NodeAddress{}, nil
|
||||
return NodeAddress{}
|
||||
}
|
||||
|
||||
// DialFailed reports a failed dial attempt. This will make the peer available
|
||||
// for dialing again when appropriate (possibly after a retry timeout).
|
||||
//
|
||||
// FIXME: This should probably delete or mark bad addresses/peers after some time.
|
||||
func (m *PeerManager) DialFailed(address NodeAddress) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
m.metrics.PeersConnectedFailure.Add(1)
|
||||
|
||||
delete(m.dialing, address.NodeID)
|
||||
for from, to := range m.upgrading {
|
||||
@@ -542,6 +634,7 @@ func (m *PeerManager) DialFailed(address NodeAddress) error {
|
||||
|
||||
addressInfo.LastDialFailure = time.Now().UTC()
|
||||
addressInfo.DialFailures++
|
||||
|
||||
if err := m.store.Set(peer); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -575,6 +668,8 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
m.metrics.PeersConnectedSuccess.Add(1)
|
||||
|
||||
delete(m.dialing, address.NodeID)
|
||||
|
||||
var upgradeFromPeer types.NodeID
|
||||
@@ -589,12 +684,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
if address.NodeID == m.selfID {
|
||||
return fmt.Errorf("rejecting connection to self (%v)", address.NodeID)
|
||||
}
|
||||
if m.connected[address.NodeID] {
|
||||
if m.isConnected(address.NodeID) {
|
||||
return fmt.Errorf("peer %v is already connected", address.NodeID)
|
||||
}
|
||||
if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
|
||||
if upgradeFromPeer == "" || len(m.connected) >=
|
||||
int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
if upgradeFromPeer == "" || len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return fmt.Errorf("already connected to maximum number of peers")
|
||||
}
|
||||
}
|
||||
@@ -604,6 +698,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
return fmt.Errorf("peer %q was removed while dialing", address.NodeID)
|
||||
}
|
||||
now := time.Now().UTC()
|
||||
if peer.Inactive {
|
||||
m.metrics.PeersInactivated.Add(-1)
|
||||
}
|
||||
peer.Inactive = false
|
||||
|
||||
peer.LastConnected = now
|
||||
if addressInfo, ok := peer.AddressInfo[address]; ok {
|
||||
addressInfo.DialFailures = 0
|
||||
@@ -615,8 +714,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if upgradeFromPeer != "" && m.options.MaxConnected > 0 &&
|
||||
len(m.connected) >= int(m.options.MaxConnected) {
|
||||
if upgradeFromPeer != "" && m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
|
||||
// Look for an even lower-scored peer that may have appeared since we
|
||||
// started the upgrade.
|
||||
if p, ok := m.store.Get(upgradeFromPeer); ok {
|
||||
@@ -625,9 +723,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
|
||||
}
|
||||
}
|
||||
m.evict[upgradeFromPeer] = true
|
||||
m.evictWaker.Wake()
|
||||
}
|
||||
m.connected[peer.ID] = true
|
||||
m.evictWaker.Wake()
|
||||
|
||||
m.metrics.PeersConnectedOutgoing.Add(1)
|
||||
m.connected[peer.ID] = peerConnectionOutgoing
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -656,11 +756,10 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error {
|
||||
if peerID == m.selfID {
|
||||
return fmt.Errorf("rejecting connection from self (%v)", peerID)
|
||||
}
|
||||
if m.connected[peerID] {
|
||||
if m.isConnected(peerID) {
|
||||
return fmt.Errorf("peer %q is already connected", peerID)
|
||||
}
|
||||
if m.options.MaxConnected > 0 &&
|
||||
len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
|
||||
return fmt.Errorf("already connected to maximum number of peers")
|
||||
}
|
||||
|
||||
@@ -685,12 +784,17 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error {
|
||||
}
|
||||
}
|
||||
|
||||
if peer.Inactive {
|
||||
m.metrics.PeersInactivated.Add(-1)
|
||||
}
|
||||
peer.Inactive = false
|
||||
peer.LastConnected = time.Now().UTC()
|
||||
if err := m.store.Set(peer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.connected[peerID] = true
|
||||
m.metrics.PeersConnectedIncoming.Add(1)
|
||||
m.connected[peerID] = peerConnectionIncoming
|
||||
if upgradeFromPeer != "" {
|
||||
m.evict[upgradeFromPeer] = true
|
||||
}
|
||||
@@ -709,7 +813,7 @@ func (m *PeerManager) Ready(peerID types.NodeID, channels ChannelIDSet) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.connected[peerID] {
|
||||
if m.isConnected(peerID) {
|
||||
m.ready[peerID] = true
|
||||
m.broadcast(PeerUpdate{
|
||||
NodeID: peerID,
|
||||
@@ -745,7 +849,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) {
|
||||
// random one.
|
||||
for peerID := range m.evict {
|
||||
delete(m.evict, peerID)
|
||||
if m.connected[peerID] && !m.evicting[peerID] {
|
||||
if m.isConnected(peerID) && !m.evicting[peerID] {
|
||||
m.evicting[peerID] = true
|
||||
return peerID, nil
|
||||
}
|
||||
@@ -762,7 +866,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) {
|
||||
ranked := m.store.Ranked()
|
||||
for i := len(ranked) - 1; i >= 0; i-- {
|
||||
peer := ranked[i]
|
||||
if m.connected[peer.ID] && !m.evicting[peer.ID] {
|
||||
if m.isConnected(peer.ID) && !m.evicting[peer.ID] {
|
||||
m.evicting[peer.ID] = true
|
||||
return peer.ID, nil
|
||||
}
|
||||
@@ -777,6 +881,13 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
switch m.connected[peerID] {
|
||||
case peerConnectionIncoming:
|
||||
m.metrics.PeersConnectedIncoming.Add(-1)
|
||||
case peerConnectionOutgoing:
|
||||
m.metrics.PeersConnectedOutgoing.Add(-1)
|
||||
}
|
||||
|
||||
ready := m.ready[peerID]
|
||||
|
||||
delete(m.connected, peerID)
|
||||
@@ -785,6 +896,22 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) {
|
||||
delete(m.evicting, peerID)
|
||||
delete(m.ready, peerID)
|
||||
|
||||
if peer, ok := m.store.Get(peerID); ok {
|
||||
peer.LastDisconnected = time.Now()
|
||||
_ = m.store.Set(peer)
|
||||
// launch a goroutine to wake the dialWaker when the
// disconnected peer can be dialed again.
|
||||
go func() {
|
||||
timer := time.NewTimer(m.options.DisconnectCooldownPeriod)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
m.dialWaker.Wake()
|
||||
case <-m.closeCh:
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if ready {
|
||||
m.broadcast(PeerUpdate{
|
||||
NodeID: peerID,
|
||||
@@ -807,17 +934,34 @@ func (m *PeerManager) Errored(peerID types.NodeID, err error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.connected[peerID] {
|
||||
if m.isConnected(peerID) {
|
||||
m.evict[peerID] = true
|
||||
}
|
||||
|
||||
m.evictWaker.Wake()
|
||||
}
|
||||
|
||||
// Inactivate marks a peer as inactive which means we won't attempt to
|
||||
// dial this peer again. A peer can be reactivated by successfully
|
||||
// dialing and connecting to the node.
|
||||
func (m *PeerManager) Inactivate(peerID types.NodeID) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
peer, ok := m.store.peers[peerID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
peer.Inactive = true
|
||||
m.metrics.PeersInactivated.Add(1)
|
||||
return m.store.Set(*peer)
|
||||
}
|
||||
|
||||
// Advertise returns a list of peer addresses to advertise to a peer.
|
||||
//
|
||||
// FIXME: This is fairly naïve and only returns the addresses of the
|
||||
// highest-ranked peers.
|
||||
// It sorts all peers in the peer store, and assembles a list of peers
|
||||
// that is most likely to include the highest-priority peers.
|
||||
func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
@@ -830,19 +974,98 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress
|
||||
addresses = append(addresses, m.options.SelfAddress)
|
||||
}
|
||||
|
||||
for _, peer := range m.store.Ranked() {
|
||||
var numAddresses int
|
||||
var totalAbsScore int
|
||||
ranked := m.store.Ranked()
|
||||
seenAddresses := map[NodeAddress]struct{}{}
|
||||
scores := map[types.NodeID]int{}
|
||||
|
||||
// get the total number of possible addresses
|
||||
for _, peer := range ranked {
|
||||
if peer.ID == peerID {
|
||||
continue
|
||||
}
|
||||
score := int(peer.Score())
|
||||
if score < 0 {
|
||||
totalAbsScore += -score
|
||||
} else {
|
||||
totalAbsScore += score
|
||||
}
|
||||
|
||||
for nodeAddr, addressInfo := range peer.AddressInfo {
|
||||
if len(addresses) >= int(limit) {
|
||||
return addresses
|
||||
scores[peer.ID] = score
|
||||
for addr := range peer.AddressInfo {
|
||||
if _, ok := m.options.PrivatePeers[addr.NodeID]; !ok {
|
||||
numAddresses++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
meanAbsScore := (totalAbsScore + 1) / (len(scores) + 1)
|
||||
|
||||
var attempts uint16
|
||||
var addedLastIteration bool
|
||||
|
||||
// if the number of addresses is less than the number of peers
|
||||
// to advertise, adjust the limit downwards
|
||||
if numAddresses < int(limit) {
|
||||
limit = uint16(numAddresses)
|
||||
}
|
||||
|
||||
// collect addresses until we have the number requested
// (limit), we've added all known addresses, or we've tried
// at least 256 times and added no new candidates on the
// last pass over the remaining addresses.
|
||||
for len(addresses) < int(limit) && (attempts < (limit*2) || !addedLastIteration) {
|
||||
attempts++
|
||||
addedLastIteration = false
|
||||
|
||||
for idx, peer := range ranked {
|
||||
if peer.ID == peerID {
|
||||
continue
|
||||
}
|
||||
|
||||
// only add non-private NodeIDs
|
||||
if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
|
||||
addresses = append(addresses, addressInfo.Address)
|
||||
if len(addresses) >= int(limit) {
|
||||
break
|
||||
}
|
||||
|
||||
for nodeAddr, addressInfo := range peer.AddressInfo {
|
||||
if len(addresses) >= int(limit) {
|
||||
break
|
||||
}
|
||||
|
||||
// only look at each address once, by
|
||||
// tracking a set of addresses seen
|
||||
if _, ok := seenAddresses[addressInfo.Address]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// only add non-private NodeIDs
|
||||
if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
|
||||
// add the address if the total number of ranked addresses
// will fit within the limit; otherwise, add addresses
// based on the outcome of a coin flip.
|
||||
|
||||
// the coin flip is typically based on the score, but
// 10% of the time we'll randomly insert a "losing"
// peer.
|
||||
|
||||
// nolint:gosec // G404: Use of weak random number generator
|
||||
if numAddresses <= int(limit) || rand.Intn((meanAbsScore*2)+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 {
|
||||
addresses = append(addresses, addressInfo.Address)
|
||||
addedLastIteration = true
|
||||
seenAddresses[addressInfo.Address] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
seenAddresses[addressInfo.Address] = struct{}{}
|
||||
// if the number of addresses
|
||||
// is the same as the limit,
|
||||
// we should remove private
|
||||
// addresses from the limit so
|
||||
// we can still return early.
|
||||
if numAddresses == int(limit) {
|
||||
limit--
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
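The score-weighted part of the coin flip above, rand.Intn((meanAbsScore*2)+1) <= scores[peer.ID]+1, governs how often an address is advertised once there are more candidates than the limit. A small, self-contained sketch that estimates the resulting acceptance probabilities (the mean score and sample scores below are arbitrary):

```go
package main

import (
	"fmt"
	"math/rand"
)

// estimate approximates the probability that the score-based coin flip accepts
// a peer, given the mean absolute score across candidates.
func estimate(meanAbsScore, score, trials int) float64 {
	hits := 0
	for i := 0; i < trials; i++ {
		if rand.Intn((meanAbsScore*2)+1) <= score+1 {
			hits++
		}
	}
	return float64(hits) / float64(trials)
}

func main() {
	// with a mean absolute score of 10: a score-20 peer is always advertised,
	// a score-10 peer about 57% of the time, and a score-0 peer about 10%.
	for _, score := range []int{20, 10, 0, -5} {
		fmt.Printf("score=%3d p~%.2f\n", score, estimate(10, score, 100000))
	}
}
```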
@@ -912,8 +1135,14 @@ func (m *PeerManager) processPeerEvent(pu PeerUpdate) {
|
||||
|
||||
switch pu.Status {
|
||||
case PeerStatusBad:
|
||||
if m.store.peers[pu.NodeID].MutableScore == math.MinInt16 {
|
||||
return
|
||||
}
|
||||
m.store.peers[pu.NodeID].MutableScore--
|
||||
case PeerStatusGood:
|
||||
if m.store.peers[pu.NodeID].MutableScore == math.MaxInt16 {
|
||||
return
|
||||
}
|
||||
m.store.peers[pu.NodeID].MutableScore++
|
||||
}
|
||||
}
|
||||
@@ -1014,9 +1243,11 @@ func (m *PeerManager) findUpgradeCandidate(id types.NodeID, score PeerScore) typ
|
||||
for i := len(ranked) - 1; i >= 0; i-- {
|
||||
candidate := ranked[i]
|
||||
switch {
|
||||
case candidate.ID == id:
|
||||
continue
|
||||
case candidate.Score() >= score:
|
||||
return "" // no further peers can be scored lower, due to sorting
|
||||
case !m.connected[candidate.ID]:
|
||||
case !m.isConnected(candidate.ID):
|
||||
case m.evict[candidate.ID]:
|
||||
case m.evicting[candidate.ID]:
|
||||
case m.upgrading[candidate.ID] != "":
|
||||
@@ -1055,37 +1286,6 @@ func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration
|
||||
return delay
|
||||
}
|
||||
|
||||
// GetHeight returns a peer's height, as reported via SetHeight, or 0 if the
|
||||
// peer or height is unknown.
|
||||
//
|
||||
// FIXME: This is a temporary workaround to share state between the consensus
|
||||
// and mempool reactors, carried over from the legacy P2P stack. Reactors should
|
||||
// not have dependencies on each other, instead tracking this themselves.
|
||||
func (m *PeerManager) GetHeight(peerID types.NodeID) int64 {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
peer, _ := m.store.Get(peerID)
|
||||
return peer.Height
|
||||
}
|
||||
|
||||
// SetHeight stores a peer's height, making it available via GetHeight.
|
||||
//
|
||||
// FIXME: This is a temporary workaround to share state between the consensus
|
||||
// and mempool reactors, carried over from the legacy P2P stack. Reactors should
|
||||
// not have dependencies on each other, instead tracking this themselves.
|
||||
func (m *PeerManager) SetHeight(peerID types.NodeID, height int64) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
peer, ok := m.store.Get(peerID)
|
||||
if !ok {
|
||||
peer = m.newPeerInfo(peerID)
|
||||
}
|
||||
peer.Height = height
|
||||
return m.store.Set(peer)
|
||||
}
|
||||
|
||||
// peerStore stores information about peers. It is not thread-safe, assuming it
|
||||
// is only used by PeerManager which handles concurrency control. This allows
|
||||
// the manager to execute multiple operations atomically via its own mutex.
|
||||
@@ -1096,6 +1296,7 @@ func (m *PeerManager) SetHeight(peerID types.NodeID, height int64) error {
|
||||
type peerStore struct {
|
||||
db dbm.DB
|
||||
peers map[types.NodeID]*peerInfo
|
||||
index map[NodeAddress]types.NodeID
|
||||
ranked []*peerInfo // cache for Ranked(), nil invalidates cache
|
||||
}
|
||||
|
||||
@@ -1115,6 +1316,7 @@ func newPeerStore(db dbm.DB) (*peerStore, error) {
|
||||
// loadPeers loads all peers from the database into memory.
|
||||
func (s *peerStore) loadPeers() error {
|
||||
peers := map[types.NodeID]*peerInfo{}
|
||||
addrs := map[NodeAddress]types.NodeID{}
|
||||
|
||||
start, end := keyPeerInfoRange()
|
||||
iter, err := s.db.Iterator(start, end)
|
||||
@@ -1134,11 +1336,18 @@ func (s *peerStore) loadPeers() error {
|
||||
return fmt.Errorf("invalid peer data: %w", err)
|
||||
}
|
||||
peers[peer.ID] = peer
|
||||
for addr := range peer.AddressInfo {
|
||||
// TODO: maybe check to see if we've seen this
// addr before for a different peer; there
// could be duplicates.
|
||||
addrs[addr] = peer.ID
|
||||
}
|
||||
}
|
||||
if iter.Error() != nil {
|
||||
return iter.Error()
|
||||
}
|
||||
s.peers = peers
|
||||
s.index = addrs
|
||||
s.ranked = nil // invalidate cache if populated
|
||||
return nil
|
||||
}
|
||||
@@ -1150,6 +1359,12 @@ func (s *peerStore) Get(id types.NodeID) (peerInfo, bool) {
|
||||
return peer.Copy(), ok
|
||||
}
|
||||
|
||||
// Resolve returns the peer ID for a given node address if known.
|
||||
func (s *peerStore) Resolve(addr NodeAddress) (types.NodeID, bool) {
|
||||
id, ok := s.index[addr]
|
||||
return id, ok
|
||||
}
|
||||
|
||||
// Set stores peer data. The input data will be copied, and can safely be reused
|
||||
// by the caller.
|
||||
func (s *peerStore) Set(peer peerInfo) error {
|
||||
@@ -1178,20 +1393,29 @@ func (s *peerStore) Set(peer peerInfo) error {
|
||||
// update the existing pointer address.
|
||||
*current = peer
|
||||
}
|
||||
for addr := range peer.AddressInfo {
|
||||
s.index[addr] = peer.ID
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes a peer, or does nothing if it does not exist.
|
||||
func (s *peerStore) Delete(id types.NodeID) error {
|
||||
if _, ok := s.peers[id]; !ok {
|
||||
peer, ok := s.peers[id]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := s.db.Delete(keyPeerInfo(id)); err != nil {
|
||||
return err
|
||||
for _, addr := range peer.AddressInfo {
|
||||
delete(s.index, addr.Address)
|
||||
}
|
||||
delete(s.peers, id)
|
||||
s.ranked = nil
|
||||
|
||||
if err := s.db.Delete(keyPeerInfo(id)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1227,8 +1451,6 @@ func (s *peerStore) Ranked() []*peerInfo {
|
||||
s.ranked = append(s.ranked, peer)
|
||||
}
|
||||
sort.Slice(s.ranked, func(i, j int) bool {
|
||||
// FIXME: If necessary, consider precomputing scores before sorting,
|
||||
// to reduce the number of Score() calls.
|
||||
return s.ranked[i].Score() > s.ranked[j].Score()
|
||||
})
|
||||
return s.ranked
|
||||
@@ -1241,17 +1463,18 @@ func (s *peerStore) Size() int {
|
||||
|
||||
// peerInfo contains peer information stored in a peerStore.
|
||||
type peerInfo struct {
|
||||
ID types.NodeID
|
||||
AddressInfo map[NodeAddress]*peerAddressInfo
|
||||
LastConnected time.Time
|
||||
ID types.NodeID
|
||||
AddressInfo map[NodeAddress]*peerAddressInfo
|
||||
LastConnected time.Time
|
||||
LastDisconnected time.Time
|
||||
|
||||
// These fields are ephemeral, i.e. not persisted to the database.
|
||||
Persistent bool
|
||||
Seed bool
|
||||
Height int64
|
||||
FixedScore PeerScore // mainly for tests
|
||||
|
||||
MutableScore int64 // updated by router
|
||||
Inactive bool
|
||||
}
|
||||
|
||||
// peerInfoFromProto converts a Protobuf PeerInfo message to a peerInfo,
|
||||
@@ -1260,6 +1483,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
|
||||
p := &peerInfo{
|
||||
ID: types.NodeID(msg.ID),
|
||||
AddressInfo: map[NodeAddress]*peerAddressInfo{},
|
||||
Inactive: msg.Inactive,
|
||||
}
|
||||
if msg.LastConnected != nil {
|
||||
p.LastConnected = *msg.LastConnected
|
||||
@@ -1282,6 +1506,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
|
||||
func (p *peerInfo) ToProto() *p2pproto.PeerInfo {
|
||||
msg := &p2pproto.PeerInfo{
|
||||
ID: string(p.ID),
|
||||
Inactive: p.Inactive,
|
||||
LastConnected: &p.LastConnected,
|
||||
}
|
||||
for _, addressInfo := range p.AddressInfo {
|
||||
@@ -1290,6 +1515,7 @@ func (p *peerInfo) ToProto() *p2pproto.PeerInfo {
|
||||
if msg.LastConnected.IsZero() {
|
||||
msg.LastConnected = nil
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
@@ -1306,6 +1532,45 @@ func (p *peerInfo) Copy() peerInfo {
|
||||
return c
|
||||
}
|
||||
|
||||
// LastDialed returns when the peer was last dialed, and if that dial
|
||||
// attempt was successful. If the peer was never dialed, the returned
// timestamp is the zero time.
|
||||
func (p *peerInfo) LastDialed() (time.Time, bool) {
|
||||
var (
|
||||
last time.Time
|
||||
success bool
|
||||
)
|
||||
last = last.Add(-1) // so it's after the epoch
|
||||
|
||||
for _, addr := range p.AddressInfo {
|
||||
if addr.LastDialFailure.Equal(addr.LastDialSuccess) {
|
||||
if addr.LastDialFailure.IsZero() {
|
||||
continue
|
||||
}
|
||||
if last.After(addr.LastDialSuccess) {
|
||||
continue
|
||||
}
|
||||
success = true
|
||||
last = addr.LastDialSuccess
|
||||
}
|
||||
if addr.LastDialFailure.After(last) {
|
||||
success = false
|
||||
last = addr.LastDialFailure
|
||||
}
|
||||
if addr.LastDialSuccess.After(last) || last.Equal(addr.LastDialSuccess) {
|
||||
success = true
|
||||
last = addr.LastDialSuccess
|
||||
}
|
||||
}
|
||||
|
||||
// if we never modified last, then
|
||||
if last.Add(1).IsZero() {
|
||||
return time.Time{}, success
|
||||
}
|
||||
|
||||
return last, success
|
||||
}
|
||||
|
||||
// Score calculates a score for the peer. Higher-scored peers will be
|
||||
// preferred over lower scores.
|
||||
func (p *peerInfo) Score() PeerScore {
|
||||
@@ -1324,12 +1589,8 @@ func (p *peerInfo) Score() PeerScore {
|
||||
score -= int64(addr.DialFailures)
|
||||
}
|
||||
|
||||
if score <= 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
if score >= math.MaxUint8 {
|
||||
return PeerScore(math.MaxUint8)
|
||||
if score < math.MinInt16 {
|
||||
score = math.MinInt16
|
||||
}
|
||||
|
||||
return PeerScore(score)
|
||||
|
||||
@@ -31,7 +31,7 @@ func TestPeerScoring(t *testing.T) {
|
||||
|
||||
t.Run("Synchronous", func(t *testing.T) {
|
||||
// update the manager and make sure it's correct
|
||||
require.EqualValues(t, 0, peerManager.Scores()[id])
|
||||
require.Zero(t, peerManager.Scores()[id])
|
||||
|
||||
// add a bunch of good status updates and watch things increase.
|
||||
for i := 1; i < 10; i++ {
|
||||
@@ -80,3 +80,173 @@ func TestPeerScoring(t *testing.T) {
 			"startAt=%d score=%d", start, peerManager.Scores()[id])
 	})
 }
+
+func makeMockPeerStore(t *testing.T, peers ...peerInfo) *peerStore {
+	t.Helper()
+	s, err := newPeerStore(dbm.NewMemDB())
+	if err != nil {
+		t.Fatal(err)
+	}
+	for idx := range peers {
+		if err := s.Set(peers[idx]); err != nil {
+			t.Fatal(err)
+		}
+	}
+	return s
+}
+
+func TestPeerRanking(t *testing.T) {
+	t.Run("InactiveSecond", func(t *testing.T) {
+		t.Skip("inactive status is not currently factored into peer rank.")
+
+		store := makeMockPeerStore(t,
+			peerInfo{ID: "second", Inactive: true},
+			peerInfo{ID: "first", Inactive: false},
+		)
+
+		ranked := store.Ranked()
+		if len(ranked) != 2 {
+			t.Fatal("missing peer in ranked output")
+		}
+		if ranked[0].ID != "first" {
+			t.Error("inactive peer is first")
+		}
+		if ranked[1].ID != "second" {
+			t.Error("active peer is second")
+		}
+	})
+	t.Run("ScoreOrder", func(t *testing.T) {
+		for _, test := range []struct {
+			Name   string
+			First  int64
+			Second int64
+		}{
+			{
+				Name:   "Mirror",
+				First:  100,
+				Second: -100,
+			},
+			{
+				Name:   "VeryLow",
+				First:  0,
+				Second: -100,
+			},
+			{
+				Name:   "High",
+				First:  300,
+				Second: 256,
+			},
+		} {
+			t.Run(test.Name, func(t *testing.T) {
+				store := makeMockPeerStore(t,
+					peerInfo{
+						ID:           "second",
+						MutableScore: test.Second,
+					},
+					peerInfo{
+						ID:           "first",
+						MutableScore: test.First,
+					})
+
+				ranked := store.Ranked()
+				if len(ranked) != 2 {
+					t.Fatal("missing peer in ranked output")
+				}
+				if ranked[0].ID != "first" {
+					t.Error("higher peer is first")
+				}
+				if ranked[1].ID != "second" {
+					t.Error("higher peer is second")
+				}
+			})
+		}
+	})
+}
+
+func TestLastDialed(t *testing.T) {
+	t.Run("Zero", func(t *testing.T) {
+		p := &peerInfo{}
+		ts, ok := p.LastDialed()
+		if !ts.IsZero() {
+			t.Error("timestamp should be zero:", ts)
+		}
+		if ok {
+			t.Error("peer reported success, despite none")
+		}
+	})
+	t.Run("NeverDialed", func(t *testing.T) {
+		p := &peerInfo{
+			AddressInfo: map[NodeAddress]*peerAddressInfo{
+				{NodeID: "kip"}:    {},
+				{NodeID: "merlin"}: {},
+			},
+		}
+		ts, ok := p.LastDialed()
+		if !ts.IsZero() {
+			t.Error("timestamp should be zero:", ts)
+		}
+		if ok {
+			t.Error("peer reported success, despite none")
+		}
+	})
+	t.Run("Ordered", func(t *testing.T) {
+		base := time.Now()
+		for _, test := range []struct {
+			Name            string
+			SuccessTime     time.Time
+			FailTime        time.Time
+			ExpectedSuccess bool
+		}{
+			{
+				Name: "Zero",
+			},
+			{
+				Name:            "Success",
+				SuccessTime:     base.Add(time.Hour),
+				FailTime:        base,
+				ExpectedSuccess: true,
+			},
+			{
+				Name:            "Equal",
+				SuccessTime:     base,
+				FailTime:        base,
+				ExpectedSuccess: true,
+			},
+			{
+				Name:            "Failure",
+				SuccessTime:     base,
+				FailTime:        base.Add(time.Hour),
+				ExpectedSuccess: false,
+			},
+		} {
+			t.Run(test.Name, func(t *testing.T) {
+				p := &peerInfo{
+					AddressInfo: map[NodeAddress]*peerAddressInfo{
+						{NodeID: "kip"}:    {LastDialSuccess: test.SuccessTime},
+						{NodeID: "merlin"}: {LastDialFailure: test.FailTime},
+					},
+				}
+				ts, ok := p.LastDialed()
+				if test.ExpectedSuccess && !ts.Equal(test.SuccessTime) {
+					if !ts.Equal(test.FailTime) {
+						t.Fatal("got unexpected timestamp:", ts)
+					}
+
+					t.Error("last dialed time reported incorrect value:", ts)
+				}
+				if !test.ExpectedSuccess && !ts.Equal(test.FailTime) {
+					if !ts.Equal(test.SuccessTime) {
+						t.Fatal("got unexpected timestamp:", ts)
+					}
+
+					t.Error("last dialed time reported incorrect value:", ts)
+				}
+				if test.ExpectedSuccess != ok {
+					t.Error("test reported incorrect outcome for last dialed type")
+				}
+			})
+		}
+	})
+}
@@ -378,16 +378,14 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 
 	// Add b. We shouldn't be able to dial it, due to MaxConnected.
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 
 	// Spawn a goroutine to fail a's dial attempt.
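The recurring rewrite in these test hunks tracks an API change: TryDialNext no longer returns an error, and a zero-valued NodeAddress now means "nothing to dial", which the tests check with require.Zero. A small illustrative model of that calling convention (NodeAddress here is a stand-in for p2p.NodeAddress):

package main

import "fmt"

// NodeAddress is a stand-in for p2p.NodeAddress.
type NodeAddress struct {
	Protocol string
	NodeID   string
}

// tryDialNext models the error-free API: the zero value means
// "no candidate right now".
func tryDialNext(queue []NodeAddress) NodeAddress {
	if len(queue) == 0 {
		return NodeAddress{}
	}
	return queue[0]
}

func main() {
	if addr := tryDialNext(nil); addr == (NodeAddress{}) {
		fmt.Println("nothing to dial; wait for a wakeup")
	}
}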
@@ -415,8 +413,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 	require.NoError(t, peerManager.DialFailed(dial))
 	failed := time.Now()

@@ -443,8 +440,7 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) {
 	err = peerManager.Accepted(a.NodeID)
 	require.NoError(t, err)
 
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Zero(t, dial)
 
 	go func() {

@@ -473,8 +469,7 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 	require.NoError(t, peerManager.Dialed(a))
 
@@ -482,16 +477,14 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, b, dial)
 
 	// At this point, adding c will not allow dialing it.
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 }
@@ -504,11 +497,11 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
 
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		PeerScores: map[types.NodeID]p2p.PeerScore{
-			a.NodeID: 0,
-			b.NodeID: 1,
-			c.NodeID: 2,
-			d.NodeID: 3,
-			e.NodeID: 0,
+			a.NodeID: p2p.PeerScore(0),
+			b.NodeID: p2p.PeerScore(1),
+			c.NodeID: p2p.PeerScore(2),
+			d.NodeID: p2p.PeerScore(3),
+			e.NodeID: p2p.PeerScore(0),
 		},
 		PersistentPeers: []types.NodeID{c.NodeID, d.NodeID},
 		MaxConnected:    2,

@@ -520,7 +513,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
+	dial := peerManager.TryDialNext()
 	require.NoError(t, err)
 	require.Equal(t, a, dial)
 	require.NoError(t, peerManager.Dialed(a))

@@ -529,8 +522,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, b, dial)
 
 	// Even though we are at capacity, we should be allowed to dial c for an
@@ -538,8 +530,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, c, dial)
 
 	// However, since we're using all upgrade slots now, we can't add and dial
@@ -547,24 +538,20 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
 	added, err = peerManager.Add(d)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 
 	// We go through with c's upgrade.
 	require.NoError(t, peerManager.Dialed(c))
 
 	// Still can't dial d.
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 
 	// Now, if we disconnect a, we should be allowed to dial d because we have a
 	// free upgrade slot.
 	require.Error(t, peerManager.Dialed(d))
 	peerManager.Disconnected(a.NodeID)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, d, dial)
 	require.NoError(t, peerManager.Dialed(d))
 
 	// However, if we disconnect b (such that only c and d are connected), we
@@ -574,8 +561,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
 	added, err = peerManager.Add(e)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 }
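The walkthrough above exercises connection slots versus upgrade slots. A rough model of the accounting the assertions rely on, under the assumption that an upgrade dial requires both a free upgrade slot and a higher-scored candidate (illustrative, not the manager's actual bookkeeping):

package main

import "fmt"

// canDial sketches the slot accounting: dial freely below
// MaxConnected; past that, only as an upgrade of a lower-scored
// connected peer while upgrade slots remain.
func canDial(connected, maxConnected, upgrading, maxUpgrade int, outscoresExisting bool) bool {
	if connected < maxConnected {
		return true
	}
	return outscoresExisting && upgrading < maxUpgrade
}

func main() {
	// MaxConnected=2 and both slots used: c (higher score) may
	// still be dialed while an upgrade slot is free...
	fmt.Println(canDial(2, 2, 0, 1, true)) // true
	// ...but d cannot once the only upgrade slot is claimed.
	fmt.Println(canDial(2, 2, 1, 1, true)) // false
}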
@@ -585,7 +571,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
 	c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
 
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
-		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
+		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1},
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 2,
 	})

@@ -595,8 +581,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 	require.NoError(t, peerManager.Dialed(a))
 
@@ -604,8 +589,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, b, dial)
 
 	// Adding c and dialing it will fail, because a is the only connected
@@ -613,8 +597,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Empty(t, dial)
 }

@@ -635,22 +618,19 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 
 	// Adding a's TCP address will not dispense a, since it's already dialing.
 	added, err = peerManager.Add(aTCP)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 
 	// Marking a as dialed will still not dispense it.
 	require.NoError(t, peerManager.Dialed(a))
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 
 	// Adding b and accepting a connection from it will not dispense it either.
@@ -658,8 +638,7 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
 	require.NoError(t, err)
 	require.True(t, added)
 	require.NoError(t, peerManager.Accepted(bID))
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 }

@@ -685,16 +664,14 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) {
 	// All addresses should be dispensed as long as dialing them has failed.
 	dial := []p2p.NodeAddress{}
 	for range addresses {
-		address, err := peerManager.TryDialNext()
-		require.NoError(t, err)
+		address := peerManager.TryDialNext()
 		require.NotZero(t, address)
 		require.NoError(t, peerManager.DialFailed(address))
 		dial = append(dial, address)
 	}
 	require.ElementsMatch(t, dial, addresses)
 
-	address, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	address := peerManager.TryDialNext()
 	require.Zero(t, address)
 }
@@ -716,15 +693,14 @@ func TestPeerManager_DialFailed(t *testing.T) {
 	// Dialing and then calling DialFailed with a different address (same
 	// NodeID) should unmark as dialing and allow us to dial the other address
 	// again, but not register the failed address.
-	dial, err := peerManager.TryDialNext()
+	dial := peerManager.TryDialNext()
 	require.NoError(t, err)
 	require.Equal(t, a, dial)
 	require.NoError(t, peerManager.DialFailed(p2p.NodeAddress{
 		Protocol: "tcp", NodeID: aID, Hostname: "localhost"}))
 	require.Equal(t, []p2p.NodeAddress{a}, peerManager.Addresses(aID))
 
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 
 	// Calling DialFailed on same address twice should be fine.

@@ -742,7 +718,10 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
 	c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
 
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
-		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
+		PeerScores: map[types.NodeID]p2p.PeerScore{
+			b.NodeID: p2p.PeerScore(1),
+			c.NodeID: p2p.PeerScore(2),
+		},
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 2,
 	})

@@ -752,8 +731,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 	require.NoError(t, peerManager.Dialed(a))
 
@@ -761,8 +739,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, b, dial)
 
 	// Adding c and dialing it will fail, even though it could upgrade a and we
@@ -771,14 +748,12 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Empty(t, dial)
 
 	// Failing b's dial will now make c available for dialing.
 	require.NoError(t, peerManager.DialFailed(b))
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, c, dial)
 }
@@ -793,8 +768,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 
 	require.NoError(t, peerManager.Dialed(a))

@@ -804,8 +778,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, b, dial)
 
 	require.NoError(t, peerManager.Accepted(b.NodeID))

@@ -834,8 +807,7 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 
 	// Marking b as dialed in the meanwhile (even without TryDialNext)

@@ -858,7 +830,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		MaxConnected:        2,
 		MaxConnectedUpgrade: 1,
-		PeerScores:          map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1},
+		PeerScores:          map[types.NodeID]p2p.PeerScore{c.NodeID: p2p.PeerScore(1), d.NodeID: 1},
 	})
 	require.NoError(t, err)

@@ -877,8 +849,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, c, dial)
 	require.NoError(t, peerManager.Dialed(c))

@@ -908,7 +879,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 2,
-		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
+		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1},
 	})
 	require.NoError(t, err)

@@ -922,8 +893,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, b, dial)
 	require.NoError(t, peerManager.Dialed(b))

@@ -932,8 +902,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Empty(t, dial)
 
 	// a should now be evicted.

@@ -952,10 +921,10 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
 		MaxConnected:        2,
 		MaxConnectedUpgrade: 1,
 		PeerScores: map[types.NodeID]p2p.PeerScore{
-			a.NodeID: 3,
-			b.NodeID: 2,
-			c.NodeID: 10,
-			d.NodeID: 1,
+			a.NodeID: p2p.PeerScore(3),
+			b.NodeID: p2p.PeerScore(2),
+			c.NodeID: p2p.PeerScore(10),
+			d.NodeID: p2p.PeerScore(1),
 		},
 	})
 	require.NoError(t, err)

@@ -976,8 +945,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, c, dial)
 
 	// In the meanwhile, a disconnects and d connects. d is even lower-scored

@@ -1005,9 +973,9 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
 		MaxConnected:        2,
 		MaxConnectedUpgrade: 1,
 		PeerScores: map[types.NodeID]p2p.PeerScore{
-			a.NodeID: 1,
-			b.NodeID: 2,
-			c.NodeID: 3,
+			a.NodeID: p2p.PeerScore(1),
+			b.NodeID: p2p.PeerScore(2),
+			c.NodeID: p2p.PeerScore(3),
 		},
 	})
 	require.NoError(t, err)

@@ -1027,7 +995,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
+	dial := peerManager.TryDialNext()
 	require.NoError(t, err)
 	require.Equal(t, c, dial)

@@ -1073,8 +1041,7 @@ func TestPeerManager_Accepted(t *testing.T) {
 	added, err = peerManager.Add(c)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, c, dial)
 	require.NoError(t, peerManager.Accepted(c.NodeID))
 	require.Error(t, peerManager.Dialed(c))

@@ -1083,8 +1050,7 @@ func TestPeerManager_Accepted(t *testing.T) {
 	added, err = peerManager.Add(d)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, d, dial)
 	require.NoError(t, peerManager.Dialed(d))
 	require.Error(t, peerManager.Accepted(d.NodeID))

@@ -1126,8 +1092,8 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) {
 
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		PeerScores: map[types.NodeID]p2p.PeerScore{
-			c.NodeID: 1,
-			d.NodeID: 2,
+			c.NodeID: p2p.PeerScore(1),
+			d.NodeID: p2p.PeerScore(2),
 		},
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 1,

@@ -1171,8 +1137,8 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) {
 
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		PeerScores: map[types.NodeID]p2p.PeerScore{
-			b.NodeID: 1,
-			c.NodeID: 1,
+			b.NodeID: p2p.PeerScore(1),
+			c.NodeID: p2p.PeerScore(1),
 		},
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 2,

@@ -1214,8 +1180,8 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
 
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		PeerScores: map[types.NodeID]p2p.PeerScore{
-			b.NodeID: 1,
-			c.NodeID: 1,
+			b.NodeID: p2p.PeerScore(1),
+			c.NodeID: p2p.PeerScore(1),
 		},
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 2,

@@ -1232,8 +1198,7 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
 	added, err = peerManager.Add(b)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, b, dial)
 
 	// a has already been claimed as an upgrade of a, so accepting

@@ -1376,7 +1341,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 1,
-		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
+		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1)},
 	})
 	require.NoError(t, err)

@@ -1393,8 +1358,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
 		added, err := peerManager.Add(b)
 		require.NoError(t, err)
 		require.True(t, added)
-		dial, err := peerManager.TryDialNext()
-		require.NoError(t, err)
+		dial := peerManager.TryDialNext()
 		require.Equal(t, b, dial)
 		require.NoError(t, peerManager.Dialed(b))
 	}()

@@ -1414,7 +1378,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) {
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
 		MaxConnected:        1,
 		MaxConnectedUpgrade: 1,
-		PeerScores:          map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
+		PeerScores: map[types.NodeID]p2p.PeerScore{
+			b.NodeID: p2p.PeerScore(1),
+		},
 	})
 	require.NoError(t, err)

@@ -1518,13 +1484,11 @@ func TestPeerManager_Disconnected(t *testing.T) {
 
 	// Disconnecting a dialing peer does not unmark it as dialing, to avoid
 	// dialing it multiple times in parallel.
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 
 	peerManager.Disconnected(a.NodeID)
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Zero(t, dial)
 }

@@ -1592,8 +1556,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
 	require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates())
 
 	// Outbound connection with peer error and eviction.
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 	require.Empty(t, sub.Updates())

@@ -1616,8 +1579,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
 	require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates())
 
 	// Outbound connection with dial failure.
-	dial, err = peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial = peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 	require.Empty(t, sub.Updates())

@@ -1713,8 +1675,7 @@ func TestPeerManager_Close(t *testing.T) {
 	added, err := peerManager.Add(a)
 	require.NoError(t, err)
 	require.True(t, added)
-	dial, err := peerManager.TryDialNext()
-	require.NoError(t, err)
+	dial := peerManager.TryDialNext()
 	require.Equal(t, a, dial)
 	require.NoError(t, peerManager.DialFailed(a))

@@ -1763,6 +1724,7 @@ func TestPeerManager_Advertise(t *testing.T) {
 	require.NoError(t, err)
 	require.True(t, added)
 
+	require.Len(t, peerManager.Advertise(dID, 100), 6)
 	// d should get all addresses.
 	require.ElementsMatch(t, []p2p.NodeAddress{
 		aTCP, aMem, bTCP, bMem, cTCP, cMem,

@@ -1776,10 +1738,24 @@ func TestPeerManager_Advertise(t *testing.T) {
 	// Asking for 0 addresses should return, well, 0.
 	require.Empty(t, peerManager.Advertise(aID, 0))
 
-	// Asking for 2 addresses should get the highest-rated ones, i.e. a.
-	require.ElementsMatch(t, []p2p.NodeAddress{
-		aTCP, aMem,
-	}, peerManager.Advertise(dID, 2))
+	// Asking for 2 addresses should get two addresses
+	// and usually not the lowest ranked one
+	numLowestRanked := 0
+	for i := 0; i < 100; i++ {
+		addrs := peerManager.Advertise(dID, 2)
+		require.Len(t, addrs, 2)
+		for _, addr := range addrs {
+			if dID == addr.NodeID {
+				t.Fatal("never advertise self")
+			}
+			if cID == addr.NodeID {
+				numLowestRanked++
+			}
+		}
+	}
+	if numLowestRanked > 20 {
+		t.Errorf("lowest ranked peer returned in results too often: %d", numLowestRanked)
+	}
 }

@@ -1799,39 +1775,3 @@ func TestPeerManager_Advertise_Self(t *testing.T) {
 		self,
 	}, peerManager.Advertise(dID, 100))
 }
-
-func TestPeerManager_SetHeight_GetHeight(t *testing.T) {
-	a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
-	b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
-
-	db := dbm.NewMemDB()
-	peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{})
-	require.NoError(t, err)
-
-	// Getting a height should default to 0, for unknown peers and
-	// for known peers without height.
-	added, err := peerManager.Add(a)
-	require.NoError(t, err)
-	require.True(t, added)
-	require.EqualValues(t, 0, peerManager.GetHeight(a.NodeID))
-	require.EqualValues(t, 0, peerManager.GetHeight(b.NodeID))
-
-	// Setting a height should work for a known node.
-	require.NoError(t, peerManager.SetHeight(a.NodeID, 3))
-	require.EqualValues(t, 3, peerManager.GetHeight(a.NodeID))
-
-	// Setting a height should add an unknown node.
-	require.Equal(t, []types.NodeID{a.NodeID}, peerManager.Peers())
-	require.NoError(t, peerManager.SetHeight(b.NodeID, 7))
-	require.EqualValues(t, 7, peerManager.GetHeight(b.NodeID))
-	require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
-
-	// The heights should not be persisted.
-	peerManager.Close()
-	peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{})
-	require.NoError(t, err)
-
-	require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
-	require.Zero(t, peerManager.GetHeight(a.NodeID))
-	require.Zero(t, peerManager.GetHeight(b.NodeID))
-}
@@ -51,5 +51,5 @@ const (
 
 	// max addresses returned by GetSelection
 	// NOTE: this must match "maxMsgSize"
-	maxGetSelection = 250
+	maxGetSelection = 100
 )

@@ -102,12 +102,6 @@ type Reactor struct {
 	crawlPeerInfos map[types.NodeID]crawlPeerInfo
 }
 
-func (r *Reactor) minReceiveRequestInterval() time.Duration {
-	// NOTE: must be less than ensurePeersPeriod, otherwise we'll request
-	// peers too quickly from others and they'll think we're bad!
-	return r.ensurePeersPeriod / 3
-}
-
 // ReactorConfig holds reactor specific configuration data.
 type ReactorConfig struct {
 	// Seed/Crawler mode

@@ -331,7 +325,7 @@ func (r *Reactor) receiveRequest(src Peer) error {
 	}
 
 	now := time.Now()
-	minInterval := r.minReceiveRequestInterval()
+	minInterval := minReceiveRequestInterval
 	if now.Sub(lastReceived) < minInterval {
 		return fmt.Errorf(
 			"peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",

@@ -24,7 +24,7 @@ var (
 // See https://github.com/tendermint/tendermint/issues/6371
 const (
 	// the minimum time one peer can send another request to the same peer
-	minReceiveRequestInterval = 100 * time.Millisecond
+	minReceiveRequestInterval = 200 * time.Millisecond
 
 	// the maximum amount of addresses that can be included in a response
 	maxAddresses uint16 = 100
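The two changes above work together: the dynamic per-reactor interval is replaced by a fixed constant, now 200ms, and a peer making PEX requests faster than that is disconnected. A compact sketch of the guard receiveRequest applies:

package main

import (
	"fmt"
	"time"
)

const minReceiveRequestInterval = 200 * time.Millisecond // the new value

// tooSoon is the per-peer rate-limit check from receiveRequest.
func tooSoon(lastReceived, now time.Time) bool {
	return now.Sub(lastReceived) < minReceiveRequestInterval
}

func main() {
	now := time.Now()
	fmt.Println(tooSoon(now.Add(-50*time.Millisecond), now))  // true: reject
	fmt.Println(tooSoon(now.Add(-250*time.Millisecond), now)) // false: serve
}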
@@ -29,8 +29,16 @@ func (pq priorityQueue) get(i int) *pqEnvelope { return pq[i] }
 func (pq priorityQueue) Len() int { return len(pq) }
 
 func (pq priorityQueue) Less(i, j int) bool {
-	// if both elements have the same priority, prioritize based on most recent
+	// if both elements have the same priority, prioritize based
+	// on most recent and largest
 	if pq[i].priority == pq[j].priority {
+		diff := pq[i].timestamp.Sub(pq[j].timestamp)
+		if diff < 0 {
+			diff *= -1
+		}
+		if diff < 10*time.Millisecond {
+			return pq[i].size > pq[j].size
+		}
 		return pq[i].timestamp.After(pq[j].timestamp)
 	}
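To make the new tie-break concrete: when two envelopes share a priority and their timestamps land within 10ms of each other, the larger one now sorts first; otherwise recency still wins. A standalone check of that comparator (the unequal-priority branch is not shown in the hunk and is assumed here to prefer the higher priority):

package main

import (
	"fmt"
	"time"
)

type pqItem struct {
	priority  uint
	size      uint
	timestamp time.Time
}

// less mirrors the patched comparison above.
func less(a, b pqItem) bool {
	if a.priority == b.priority {
		diff := a.timestamp.Sub(b.timestamp)
		if diff < 0 {
			diff *= -1
		}
		if diff < 10*time.Millisecond {
			return a.size > b.size
		}
		return a.timestamp.After(b.timestamp)
	}
	return a.priority > b.priority // assumed unequal-priority branch
}

func main() {
	now := time.Now()
	big := pqItem{priority: 1, size: 512, timestamp: now}
	small := pqItem{priority: 1, size: 64, timestamp: now.Add(3 * time.Millisecond)}
	fmt.Println(less(big, small)) // true: near-simultaneous, so larger first
}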
@@ -272,12 +280,10 @@ func (s *pqScheduler) process() {
 }
 
 func (s *pqScheduler) push(pqEnv *pqEnvelope) {
-	chIDStr := strconv.Itoa(int(pqEnv.envelope.channelID))
-
 	// enqueue the incoming Envelope
 	heap.Push(s.pq, pqEnv)
 	s.size += pqEnv.size
-	s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Add(float64(pqEnv.size))
+	s.metrics.PeerQueueMsgSize.With("ch_id", strconv.Itoa(int(pqEnv.envelope.channelID))).Add(float64(pqEnv.size))
 
 	// Update the cumulative sizes by adding the Envelope's size to every
 	// priority less than or equal to it.

@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math/rand"
 	"net"
 	"runtime"
 	"sync"
@@ -41,6 +40,10 @@ type Envelope struct {
 	channelID ChannelID
 }
 
+func (e Envelope) IsZero() bool {
+	return e.From == "" && e.To == "" && e.Message == nil
+}
+
 // PeerError is a peer error reported via Channel.Error.
 //
 // FIXME: This currently just disconnects the peer, which is too simplistic.
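IsZero gives the router a cheap way to recognize envelopes read from a closed-then-drained channel; the routeChannel hunk further down skips them instead of forwarding them to peers. A stand-in illustration (Message is simplified to a pointer here):

package main

import "fmt"

// Envelope is a simplified stand-in for the p2p type.
type Envelope struct {
	From, To string
	Message  *struct{}
}

func (e Envelope) IsZero() bool {
	return e.From == "" && e.To == "" && e.Message == nil
}

func main() {
	fmt.Println(Envelope{}.IsZero())          // true: skipped by the router
	fmt.Println(Envelope{From: "a"}.IsZero()) // false: routed normally
}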
@@ -54,6 +57,7 @@ type Envelope struct {
 type PeerError struct {
 	NodeID types.NodeID
 	Err    error
+	Fatal  bool
 }
 
 // Channel is a bidirectional channel to exchange Protobuf messages with peers,

@@ -159,12 +163,6 @@ type RouterOptions struct {
 	// return an error to reject the peer.
 	FilterPeerByID func(context.Context, types.NodeID) error
 
-	// DialSleep controls the amount of time that the router
-	// sleeps between dialing peers. If not set, a default value
-	// is used that sleeps for a (random) amount of time up to 3
-	// seconds between submitting each peer to be dialed.
-	DialSleep func(context.Context)
-
 	// NumConcurrentDials controls how many parallel goroutines
 	// are used to dial peers. This defaults to the value of
 	// runtime.NumCPU.

@@ -172,9 +170,10 @@ type RouterOptions struct {
 }
 
 const (
-	queueTypeFifo     = "fifo"
-	queueTypePriority = "priority"
-	queueTypeWDRR     = "wdrr"
+	queueTypeFifo           = "fifo"
+	queueTypePriority       = "priority"
+	queueTypeWDRR           = "wdrr"
+	queueTypeSimplePriority = "simple-priority"
 )
 
 // Validate validates router options.
@@ -182,8 +181,8 @@ func (o *RouterOptions) Validate() error {
 	switch o.QueueType {
 	case "":
 		o.QueueType = queueTypeFifo
-	case queueTypeFifo, queueTypeWDRR, queueTypePriority:
-		// pass
+	case queueTypeFifo, queueTypeWDRR, queueTypePriority, queueTypeSimplePriority:
+		// pass
 	default:
 		return fmt.Errorf("queue type %q is not supported", o.QueueType)
 	}
@@ -290,7 +289,7 @@ func NewRouter(
 
 	router := &Router{
 		logger:  logger,
-		metrics: metrics,
+		metrics: NopMetrics(),
 		nodeInfo: nodeInfo,
 		privKey:  privKey,
 		connTracker: newConnTracker(

@@ -311,6 +310,10 @@ func NewRouter(
 
 	router.BaseService = service.NewBaseService(logger, "router", router)
 
+	if metrics != nil {
+		router.metrics = metrics
+	}
+
 	qf, err := router.createQueueFactory()
 	if err != nil {
 		return nil, err
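The two hunks above are one pattern: construct with the no-op metrics implementation, then overwrite only when the caller supplied a real one, so a nil metrics argument can no longer panic on use. The same idiom in isolation (Metrics here is a toy stand-in):

package main

import "fmt"

type Metrics struct{ name string }

func NopMetrics() *Metrics { return &Metrics{name: "nop"} }

// pickMetrics applies the default-then-override order used above.
func pickMetrics(m *Metrics) *Metrics {
	out := NopMetrics()
	if m != nil {
		out = m
	}
	return out
}

func main() {
	fmt.Println(pickMetrics(nil).name)              // nop
	fmt.Println(pickMetrics(&Metrics{"prom"}).name) // prom
}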
@@ -356,6 +359,9 @@ func (r *Router) createQueueFactory() (func(int) queue, error) {
 			return q
 		}, nil
 
+	case queueTypeSimplePriority:
+		return func(size int) queue { return newSimplePriorityQueue(r.stopCtx(), size, r.chDescs) }, nil
+
 	default:
 		return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType)
 	}

@@ -424,8 +430,9 @@ func (r *Router) routeChannel(
 		case envelope, ok := <-outCh:
 			if !ok {
 				return
-			}
+			} else if envelope.IsZero() {
+				continue
+			}
 
 			// Mark the envelope with the channel ID to allow sendPeer() to pass
 			// it on to Transport.SendMessage().
 			envelope.channelID = chID
@@ -506,20 +513,35 @@ func (r *Router) routeChannel(
 		case peerError, ok := <-errCh:
 			if !ok {
 				return
 			}
-
-			r.logger.Error("peer error, evicting", "peer", peerError.NodeID, "err", peerError.Err)
-
-			r.peerManager.Errored(peerError.NodeID, peerError.Err)
+			maxPeerCapacity := r.peerManager.HasMaxPeerCapacity()
+			r.logger.Error("peer error",
+				"peer", peerError.NodeID,
+				"err", peerError.Err,
+				"disconnecting", peerError.Fatal || maxPeerCapacity,
+			)
+
+			if peerError.Fatal || maxPeerCapacity {
+				// if the error is fatal or all peer
+				// slots are in use, we can error
+				// (disconnect) from the peer.
+				r.peerManager.Errored(peerError.NodeID, peerError.Err)
+			} else {
+				// this just decrements the peer
+				// score.
+				r.peerManager.processPeerEvent(PeerUpdate{
+					NodeID: peerError.NodeID,
+					Status: PeerStatusBad,
+				})
+			}
 		case <-r.stopCh:
 			return
 		}
 	}
 }
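The branch above changes the eviction policy: only fatal errors, or errors arriving while every peer slot is occupied, disconnect the peer; anything else just lowers its score. A small model of that decision (field names mirror the PeerError hunk earlier; atCapacity stands in for peerManager.HasMaxPeerCapacity()):

package main

import (
	"errors"
	"fmt"
)

// PeerError mirrors the struct extended with Fatal above.
type PeerError struct {
	NodeID string
	Err    error
	Fatal  bool
}

func main() {
	atCapacity := false // stand-in for peerManager.HasMaxPeerCapacity()
	for _, perr := range []PeerError{
		{NodeID: "aaaa", Err: errors.New("bad block"), Fatal: true},
		{NodeID: "bbbb", Err: errors.New("slow to respond")},
	} {
		if perr.Fatal || atCapacity {
			fmt.Println(perr.NodeID, "-> disconnect")
		} else {
			fmt.Println(perr.NodeID, "-> demote score only")
		}
	}
}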
-func (r *Router) numConccurentDials() int {
+func (r *Router) numConcurrentDials() int {
 	if r.options.NumConcurrentDials == nil {
-		return runtime.NumCPU()
+		return runtime.NumCPU() * 32
 	}
 
 	return r.options.NumConcurrentDials()
@@ -541,23 +563,6 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error {
 	return r.options.FilterPeerByID(ctx, id)
 }
 
-func (r *Router) dialSleep(ctx context.Context) {
-	if r.options.DialSleep == nil {
-		// nolint:gosec // G404: Use of weak random number generator
-		timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond)
-		defer timer.Stop()
-
-		select {
-		case <-ctx.Done():
-		case <-timer.C:
-		}
-
-		return
-	}
-
-	r.options.DialSleep(ctx)
-}
-
 // acceptPeers accepts inbound connections from peers on the given transport,
 // and spawns goroutines that route messages to/from them.
 func (r *Router) acceptPeers(transport Transport) {
@@ -565,14 +570,14 @@ func (r *Router) acceptPeers(transport Transport) {
 	ctx := r.stopCtx()
 	for {
 		conn, err := transport.Accept()
-		switch err {
-		case nil:
-		case io.EOF:
-			r.logger.Debug("stopping accept routine", "transport", transport)
+		switch {
+		case errors.Is(err, io.EOF):
+			r.logger.Debug("stopping accept routine", "transport", transport, "err", "EOF")
 			return
-		default:
+		case err != nil:
+			// in this case we got an error from the net.Listener.
 			r.logger.Error("failed to accept connection", "transport", transport, "err", err)
-			return
+			continue
 		}
 
 		incomingIP := conn.RemoteEndpoint().IP
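Two behavioral notes on the rewritten switch: errors.Is still matches an io.EOF that a transport has wrapped, and a non-EOF accept error now continues the loop instead of killing the accept routine. The matching half in isolation:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("accept: %w", io.EOF)
	fmt.Println(wrapped == io.EOF)          // false: direct comparison misses it
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: the new check catches it
}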
@@ -584,7 +589,7 @@ func (r *Router) acceptPeers(transport Transport) {
 				"close_err", closeErr,
 			)
 
-			return
+			continue
 		}
 
 		// Spawn a goroutine for the handshake, to avoid head-of-line blocking.

@@ -656,7 +661,7 @@ func (r *Router) dialPeers() {
 	// able to add peers at a reasonable pace, though the number
 	// is somewhat arbitrary. The action is further throttled by a
 	// sleep after sending to the addresses channel.
-	for i := 0; i < r.numConccurentDials(); i++ {
+	for i := 0; i < r.numConcurrentDials(); i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()

@@ -679,19 +684,13 @@ LOOP:
 			case errors.Is(err, context.Canceled):
 				r.logger.Debug("stopping dial routine")
 				break LOOP
-			case err != nil:
-				r.logger.Error("failed to find next peer to dial", "err", err)
-				break LOOP
+			case address == NodeAddress{}:
+				continue LOOP
 			}
 
 			select {
 			case addresses <- address:
-				// this jitters the frequency that we call
-				// DialNext and prevents us from attempting to
-				// create connections too quickly.
-
-				r.dialSleep(ctx)
-				continue
+				continue LOOP
 			case <-ctx.Done():
 				close(addresses)
 				break LOOP

@@ -707,7 +706,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
 	case errors.Is(err, context.Canceled):
 		return
 	case err != nil:
-		r.logger.Error("failed to dial peer", "peer", address, "err", err)
+		r.logger.Debug("failed to dial peer", "peer", address, "err", err)
 		if err = r.peerManager.DialFailed(address); err != nil {
 			r.logger.Error("failed to report dial failure", "peer", address, "err", err)
 		}

@@ -729,8 +728,8 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
 	}
 
 	if err := r.runWithPeerMutex(func() error { return r.peerManager.Dialed(address) }); err != nil {
-		r.logger.Error("failed to dial peer",
-			"op", "outgoing/dialing", "peer", address.NodeID, "err", err)
+		r.logger.Error("failed to dial peer", "op", "outgoing/dialing", "peer", address.NodeID, "err", err)
+		r.peerManager.dialWaker.Wake()
 		conn.Close()
 		return
 	}

@@ -794,12 +793,13 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection,
 		// Internet can't and needs a different public address.
 		conn, err := transport.Dial(dialCtx, endpoint)
 		if err != nil {
-			r.logger.Error("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err)
+			r.logger.Debug("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err)
 		} else {
 			r.logger.Debug("dialed peer", "peer", address.NodeID, "endpoint", endpoint)
 			return conn, nil
 		}
 	}
 
 	return nil, errors.New("all endpoints failed")
 }
@@ -811,19 +811,14 @@ func (r *Router) handshakePeer(
 	expectID types.NodeID,
 ) (types.NodeInfo, crypto.PubKey, error) {
 
-	if r.options.HandshakeTimeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.options.HandshakeTimeout)
-		defer cancel()
-	}
-
-	peerInfo, peerKey, err := conn.Handshake(ctx, r.nodeInfo, r.privKey)
+	peerInfo, peerKey, err := conn.Handshake(ctx, r.options.HandshakeTimeout, r.nodeInfo, r.privKey)
 	if err != nil {
 		return peerInfo, peerKey, err
 	}
 	if err = peerInfo.Validate(); err != nil {
 		return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err)
 	}
 
 	if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID {
 		return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)",
 			peerInfo.NodeID, types.NodeIDFromPubKey(peerKey))
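The removed block shows where the handshake timeout used to be applied; it now travels as an argument so each Connection implementation applies it itself. A sketch of how an implementation might honor the new parameter, following the convention in the tests below that a zero duration means no deadline (this mirrors the removed router code, not any particular transport):

package main

import (
	"context"
	"fmt"
	"time"
)

// withHandshakeTimeout applies the optional deadline the way the
// removed router code did, but from inside the connection.
func withHandshakeTimeout(ctx context.Context, timeout time.Duration, do func(context.Context) error) error {
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	return do(ctx)
}

func main() {
	err := withHandshakeTimeout(context.Background(), 5*time.Second, func(ctx context.Context) error {
		_, hasDeadline := ctx.Deadline()
		fmt.Println(hasDeadline) // true
		return nil
	})
	fmt.Println(err) // <nil>
}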
@@ -832,7 +827,12 @@ func (r *Router) handshakePeer(
 		return peerInfo, peerKey, fmt.Errorf("expected to connect with peer %q, got %q",
 			expectID, peerInfo.NodeID)
 	}
 
 	if err := r.nodeInfo.CompatibleWith(peerInfo); err != nil {
+		if err := r.peerManager.Inactivate(peerInfo.NodeID); err != nil {
+			return peerInfo, peerKey, fmt.Errorf("problem inactivating peer %q: %w", peerInfo.ID(), err)
+		}
+
 		return peerInfo, peerKey, ErrRejected{
 			err: err,
 			id:  peerInfo.ID(),

@@ -1011,6 +1011,8 @@ func (r *Router) evictPeers() {
 		queue, ok := r.peerQueues[peerID]
 		r.peerMtx.RUnlock()
 
+		r.metrics.PeersEvicted.Add(1)
+
 		if ok {
 			queue.close()
 		}
@@ -1,7 +1,6 @@
 package p2p_test
 
 import (
-	"context"
 	"errors"
 	"fmt"
 	"io"

@@ -133,13 +132,6 @@ func TestRouter_Channel_Basic(t *testing.T) {
 	require.NoError(t, err)
 	require.Contains(t, router.NodeInfo().Channels, chDesc2.ID)
 
-	// Closing the channel, then opening it again should be fine.
-	channel.Close()
-	time.Sleep(100 * time.Millisecond) // yes yes, but Close() is async...
-
-	channel, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0)
-	require.NoError(t, err)
-
 	// We should be able to send on the channel, even though there are no peers.
 	p2ptest.RequireSend(t, channel, p2p.Envelope{
 		To: types.NodeID(strings.Repeat("a", 40)),

@@ -352,7 +344,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
 			closer := tmsync.NewCloser()
 			mockConnection := &mocks.Connection{}
 			mockConnection.On("String").Maybe().Return("mock")
-			mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+			mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
 				Return(tc.peerInfo, tc.peerKey, nil)
 			mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil)
 			mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
@@ -413,72 +405,42 @@ func TestRouter_AcceptPeers(t *testing.T) {
 	}
 }
 
-func TestRouter_AcceptPeers_Error(t *testing.T) {
-	t.Cleanup(leaktest.Check(t))
+func TestRouter_AcceptPeers_Errors(t *testing.T) {
+	for _, err := range []error{io.EOF} {
+		t.Run(err.Error(), func(t *testing.T) {
+			t.Cleanup(leaktest.Check(t))
 
-	// Set up a mock transport that returns an error, which should prevent
-	// the router from calling Accept again.
-	mockTransport := &mocks.Transport{}
-	mockTransport.On("String").Maybe().Return("mock")
-	mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
-	mockTransport.On("Accept").Once().Return(nil, errors.New("boom"))
-	mockTransport.On("Close").Return(nil)
+			// Set up a mock transport that returns io.EOF once, which should prevent
+			// the router from calling Accept again.
+			mockTransport := &mocks.Transport{}
+			mockTransport.On("String").Maybe().Return("mock")
+			mockTransport.On("Accept", mock.Anything).Once().Return(nil, err)
+			mockTransport.On("Listen", mock.Anything).Return(nil).Maybe()
+			mockTransport.On("Close").Return(nil)
+			mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
 
-	// Set up and start the router.
-	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
-	require.NoError(t, err)
-	defer peerManager.Close()
+			// Set up and start the router.
+			peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+			require.NoError(t, err)
+			defer peerManager.Close()
 
-	router, err := p2p.NewRouter(
-		log.TestingLogger(),
-		p2p.NopMetrics(),
-		selfInfo,
-		selfKey,
-		peerManager,
-		[]p2p.Transport{mockTransport},
-		p2p.RouterOptions{},
-	)
-	require.NoError(t, err)
+			router, err := p2p.NewRouter(
+				log.TestingLogger(),
+				p2p.NopMetrics(),
+				selfInfo,
+				selfKey,
+				peerManager,
+				[]p2p.Transport{mockTransport},
+				p2p.RouterOptions{},
+			)
+			require.NoError(t, err)
 
-	require.NoError(t, router.Start())
-	time.Sleep(time.Second)
-	require.NoError(t, router.Stop())
+			require.NoError(t, router.Start())
+			time.Sleep(time.Second)
+			require.NoError(t, router.Stop())
 
-	mockTransport.AssertExpectations(t)
-}
-
-func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) {
-	t.Cleanup(leaktest.Check(t))
-
-	// Set up a mock transport that returns io.EOF once, which should prevent
-	// the router from calling Accept again.
-	mockTransport := &mocks.Transport{}
-	mockTransport.On("String").Maybe().Return("mock")
-	mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
-	mockTransport.On("Accept").Once().Return(nil, io.EOF)
-	mockTransport.On("Close").Return(nil)
-
-	// Set up and start the router.
-	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
-	require.NoError(t, err)
-	defer peerManager.Close()
-
-	router, err := p2p.NewRouter(
-		log.TestingLogger(),
-		p2p.NopMetrics(),
-		selfInfo,
-		selfKey,
-		peerManager,
-		[]p2p.Transport{mockTransport},
-		p2p.RouterOptions{},
-	)
-	require.NoError(t, err)
-
-	require.NoError(t, router.Start())
-	time.Sleep(time.Second)
-	require.NoError(t, router.Stop())
-
-	mockTransport.AssertExpectations(t)
+			mockTransport.AssertExpectations(t)
+		})
+	}
 }

@@ -492,7 +454,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
 
 	mockConnection := &mocks.Connection{}
 	mockConnection.On("String").Maybe().Return("mock")
-	mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+	mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
 		WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF)
 	mockConnection.On("Close").Return(nil)
 	mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})

@@ -573,7 +535,7 @@ func TestRouter_DialPeers(t *testing.T) {
 			mockConnection := &mocks.Connection{}
 			mockConnection.On("String").Maybe().Return("mock")
 			if tc.dialErr == nil {
-				mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+				mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
 					Return(tc.peerInfo, tc.peerKey, nil)
 				mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil)
 			}

@@ -660,7 +622,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
 
 	mockConnection := &mocks.Connection{}
 	mockConnection.On("String").Maybe().Return("mock")
-	mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+	mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
 		WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF)
 	mockConnection.On("Close").Return(nil)

@@ -701,7 +663,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
 		peerManager,
 		[]p2p.Transport{mockTransport},
 		p2p.RouterOptions{
-			DialSleep: func(_ context.Context) {},
 			NumConcurrentDials: func() int {
 				ncpu := runtime.NumCPU()
 				if ncpu <= 3 {

@@ -740,7 +701,7 @@ func TestRouter_EvictPeers(t *testing.T) {
 
 	mockConnection := &mocks.Connection{}
 	mockConnection.On("String").Maybe().Return("mock")
-	mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+	mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
 		Return(peerInfo, peerKey.PubKey(), nil)
 	mockConnection.On("ReceiveMessage").WaitUntil(closeCh).Return(chID, nil, io.EOF)
 	mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})

@@ -809,7 +770,7 @@ func TestRouter_ChannelCompatability(t *testing.T) {
 
 	mockConnection := &mocks.Connection{}
 	mockConnection.On("String").Maybe().Return("mock")
-	mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+	mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
 		Return(incompatiblePeer, peerKey.PubKey(), nil)
 	mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
 	mockConnection.On("Close").Return(nil)

@@ -858,7 +819,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
 
 	mockConnection := &mocks.Connection{}
 	mockConnection.On("String").Maybe().Return("mock")
-	mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey).
+	mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey).
 		Return(peer, peerKey.PubKey(), nil)
 	mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{})
 	mockConnection.On("Close").Return(nil)
internal/p2p/rqueue.go (new file, +112)
@@ -0,0 +1,112 @@
+package p2p
+
+import (
+	"container/heap"
+	"context"
+	"sort"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+type simpleQueue struct {
+	input   chan Envelope
+	output  chan Envelope
+	closeFn func()
+	closeCh <-chan struct{}
+
+	maxSize int
+	chDescs []ChannelDescriptor
+}
+
+func newSimplePriorityQueue(ctx context.Context, size int, chDescs []ChannelDescriptor) *simpleQueue {
+	if size%2 != 0 {
+		size++
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	q := &simpleQueue{
+		input:   make(chan Envelope, size*2),
+		output:  make(chan Envelope, size/2),
+		maxSize: size * size,
+		chDescs: chDescs, // note: dropped by the page extraction; needed so run() can read priorities
+		closeCh: ctx.Done(),
+		closeFn: cancel,
+	}
+
+	go q.run(ctx)
+	return q
+}
+
+func (q *simpleQueue) enqueue() chan<- Envelope { return q.input }
+func (q *simpleQueue) dequeue() <-chan Envelope { return q.output }
+func (q *simpleQueue) close()                   { q.closeFn() }
+func (q *simpleQueue) closed() <-chan struct{}  { return q.closeCh }
+
+func (q *simpleQueue) run(ctx context.Context) {
+	defer q.closeFn()
+
+	var chPriorities = make(map[ChannelID]uint, len(q.chDescs))
+	for _, chDesc := range q.chDescs {
+		chID := ChannelID(chDesc.ID)
+		chPriorities[chID] = uint(chDesc.Priority)
+	}
+
+	pq := make(priorityQueue, 0, q.maxSize)
+	heap.Init(&pq)
+	ticker := time.NewTicker(10 * time.Millisecond)
+	// must have a buffer of exactly one because both sides of
+	// this channel are used in this loop, and a send simply
+	// signals additions to the heap
+	signal := make(chan struct{}, 1)
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-q.closeCh:
+			return
+		case e := <-q.input:
+			// enqueue the incoming Envelope
+			heap.Push(&pq, &pqEnvelope{
+				envelope:  e,
+				size:      uint(proto.Size(e.Message)),
+				priority:  chPriorities[e.channelID],
+				timestamp: time.Now().UTC(),
+			})
+
+			select {
+			case signal <- struct{}{}:
+			default:
+				if len(pq) > q.maxSize {
+					sort.Sort(pq)
+					pq = pq[:q.maxSize]
+				}
+			}
+
+		case <-ticker.C:
+			if len(pq) > q.maxSize {
+				sort.Sort(pq)
+				pq = pq[:q.maxSize]
+			}
+			if len(pq) > 0 {
+				select {
+				case signal <- struct{}{}:
+				default:
+				}
+			}
+		case <-signal:
+		SEND:
+			for len(pq) > 0 {
+				select {
+				case <-ctx.Done():
+					return
+				case <-q.closeCh:
+					return
+				case q.output <- heap.Pop(&pq).(*pqEnvelope).envelope:
+					continue SEND
+				default:
+					break SEND
+				}
+			}
+		}
+	}
+}
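The sizing arithmetic in newSimplePriorityQueue determines how aggressively the queue sheds load, and the test below depends on it. Reproducing that arithmetic for the test's size of 1:

package main

import "fmt"

// queueSizes mirrors the constructor's arithmetic: odd sizes round
// up, the input buffers 2x, the output buffers half, and the heap
// retains at most size*size envelopes.
func queueSizes(size int) (input, output, maxSize int) {
	if size%2 != 0 {
		size++
	}
	return size * 2, size / 2, size * size
}

func main() {
	in, out, max := queueSizes(1) // the value TestSimpleQueue uses
	fmt.Println(in, out, max)     // 4 1 4
}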
internal/p2p/rqueue_test.go (new file, +47)
@@ -0,0 +1,47 @@
+package p2p
+
+import (
+	"context"
+	"testing"
+	"time"
+)
+
+func TestSimpleQueue(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// set up a small queue with very small buffers so we can
+	// watch it shed load, then send a bunch of messages to the
+	// queue, most of which we'll watch it drop.
+	sq := newSimplePriorityQueue(ctx, 1, nil)
+	for i := 0; i < 100; i++ {
+		sq.enqueue() <- Envelope{From: "merlin"}
+	}
+
+	seen := 0
+
+RETRY:
+	for seen <= 2 {
+		select {
+		case e := <-sq.dequeue():
+			if e.From != "merlin" {
+				continue
+			}
+			seen++
+		case <-time.After(10 * time.Millisecond):
+			break RETRY
+		}
+	}
+	// if we don't see any messages, then it's just broken.
+	if seen == 0 {
+		t.Errorf("seen %d messages, should have seen at least one", seen)
+	}
+	// ensure that load shedding happens: there can be at most 3
+	// messages that we get out of this, one that was buffered
+	// plus 2 that were under the cap, everything else gets
+	// dropped.
+	if seen > 3 {
+		t.Errorf("saw %d messages, should have seen 3 or fewer", seen)
+	}
+}
@@ -417,7 +417,7 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
 	// RemovePeer is finished.
 	// https://github.com/tendermint/tendermint/issues/3338
 	if sw.peers.Remove(peer) {
-		sw.metrics.Peers.Add(float64(-1))
+		sw.metrics.Peers.Add(-1)
 	}
 
 	sw.conns.RemoveAddr(peer.RemoteAddr())

@@ -865,11 +865,11 @@ func (sw *Switch) handshakePeer(
 	c Connection,
 	expectPeerID types.NodeID,
 ) (types.NodeInfo, crypto.PubKey, error) {
-	// Moved from transport and hardcoded until legacy P2P stack removal.
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	peerInfo, peerKey, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
+	// Moved timeout from transport and hardcoded until legacy P2P stack removal.
+	peerInfo, peerKey, err := c.Handshake(ctx, 5*time.Second, sw.nodeInfo, sw.nodeKey.PrivKey)
 	if err != nil {
 		return peerInfo, peerKey, ErrRejected{
 			conn: c.(*mConnConnection).conn,

@@ -1035,7 +1035,7 @@ func (sw *Switch) addPeer(p Peer) error {
 	if err := sw.peers.Add(p); err != nil {
 		return err
 	}
-	sw.metrics.Peers.Add(float64(1))
+	sw.metrics.Peers.Add(1)
 
 	// Start all the reactor protocols on the peer.
 	for _, reactor := range sw.reactors {
@@ -267,7 +267,7 @@ func TestSwitchPeerFilter(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
    peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }
@@ -324,7 +324,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
    peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }
@@ -360,7 +360,7 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
    peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }
@@ -415,7 +415,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
    peerInfo, _, err := c.Handshake(ctx, 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        t.Fatal(err)
    }
@@ -126,7 +126,7 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
        }
        return err
    }
    peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey)
    peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), 0, sw.nodeInfo, sw.nodeKey.PrivKey)
    if err != nil {
        if err := conn.Close(); err != nil {
            sw.Logger.Error("Error closing connection", "err", err)
@@ -5,6 +5,7 @@ import (
    "errors"
    "fmt"
    "net"
    "time"

    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/internal/p2p/conn"
@@ -84,7 +85,7 @@ type Connection interface {
    // FIXME: The handshake should really be the Router's responsibility, but
    // that requires the connection interface to be byte-oriented rather than
    // message-oriented (see comment above).
    Handshake(context.Context, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error)
    Handshake(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error)

    // ReceiveMessage returns the next message received on the connection,
    // blocking until one is available. Returns io.EOF if closed.
@@ -9,6 +9,7 @@ import (
    "net"
    "strconv"
    "sync"
    "time"

    "golang.org/x/net/netutil"

@@ -255,6 +256,7 @@ func newMConnConnection(
// Handshake implements Connection.
func (c *mConnConnection) Handshake(
    ctx context.Context,
    timeout time.Duration,
    nodeInfo types.NodeInfo,
    privKey crypto.PrivKey,
) (types.NodeInfo, crypto.PubKey, error) {
@@ -264,6 +266,12 @@ func (c *mConnConnection) Handshake(
        peerKey crypto.PubKey
        errCh   = make(chan error, 1)
    )
    handshakeCtx := ctx
    if timeout > 0 {
        var cancel context.CancelFunc
        handshakeCtx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }
    // To handle context cancellation, we need to do the handshake in a
    // goroutine and abort the blocking network calls by closing the connection
    // when the context is canceled.
@@ -276,14 +284,19 @@ func (c *mConnConnection) Handshake(
            }
        }()
        var err error
        mconn, peerInfo, peerKey, err = c.handshake(ctx, nodeInfo, privKey)
        errCh <- err
        mconn, peerInfo, peerKey, err = c.handshake(handshakeCtx, nodeInfo, privKey)

        select {
        case errCh <- err:
        case <-handshakeCtx.Done():
        }

    }()

    select {
    case <-ctx.Done():
    case <-handshakeCtx.Done():
        _ = c.Close()
        return types.NodeInfo{}, nil, ctx.Err()
        return types.NodeInfo{}, nil, handshakeCtx.Err()

    case err := <-errCh:
        if err != nil {
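An aside on the handshake change above: the pattern is to optionally wrap the caller's context in a timeout, run the blocking call in a goroutine, and hand its error back through a guarded channel send so the goroutine cannot leak if nobody is receiving. A standalone sketch of the same idea (doBlockingWork and runWithOptionalTimeout are invented names, not Tendermint APIs):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

func doBlockingWork(ctx context.Context) error {
    select {
    case <-time.After(200 * time.Millisecond): // pretend this is a network handshake
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func runWithOptionalTimeout(ctx context.Context, timeout time.Duration) error {
    workCtx := ctx
    if timeout > 0 {
        var cancel context.CancelFunc
        workCtx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }

    errCh := make(chan error, 1)
    go func() {
        err := doBlockingWork(workCtx)
        // Guard the send so the goroutine cannot block forever if the
        // caller has already given up and nobody is receiving.
        select {
        case errCh <- err:
        case <-workCtx.Done():
        }
    }()

    select {
    case <-workCtx.Done():
        return workCtx.Err()
    case err := <-errCh:
        return err
    }
}

func main() {
    // Completes: generous timeout.
    fmt.Println(runWithOptionalTimeout(context.Background(), time.Second))
    // Times out: deadline shorter than the work.
    err := runWithOptionalTimeout(context.Background(), 50*time.Millisecond)
    fmt.Println(errors.Is(err, context.DeadlineExceeded), err)
}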
@@ -7,6 +7,7 @@ import (
    "io"
    "net"
    "sync"
    "time"

    "github.com/tendermint/tendermint/crypto"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
@@ -270,9 +271,16 @@ func (c *MemoryConnection) Status() conn.ConnectionStatus {
// Handshake implements Connection.
func (c *MemoryConnection) Handshake(
    ctx context.Context,
    timeout time.Duration,
    nodeInfo types.NodeInfo,
    privKey crypto.PrivKey,
) (types.NodeInfo, crypto.PubKey, error) {
    if timeout > 0 {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }

    select {
    case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}:
        c.logger.Debug("sent handshake", "nodeInfo", nodeInfo)
@@ -265,7 +265,7 @@ func TestConnection_Handshake(t *testing.T) {
    errCh := make(chan error, 1)
    go func() {
        // Must use assert due to goroutine.
        peerInfo, peerKey, err := ba.Handshake(ctx, bInfo, bKey)
        peerInfo, peerKey, err := ba.Handshake(ctx, 0, bInfo, bKey)
        if err == nil {
            assert.Equal(t, aInfo, peerInfo)
            assert.Equal(t, aKey.PubKey(), peerKey)
@@ -273,7 +273,7 @@ func TestConnection_Handshake(t *testing.T) {
        errCh <- err
    }()

    peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey)
    peerInfo, peerKey, err := ab.Handshake(ctx, 0, aInfo, aKey)
    require.NoError(t, err)
    require.Equal(t, bInfo, peerInfo)
    require.Equal(t, bKey.PubKey(), peerKey)
@@ -291,7 +291,7 @@ func TestConnection_HandshakeCancel(t *testing.T) {
    ab, ba := dialAccept(t, a, b)
    timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
    cancel()
    _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey())
    _, _, err := ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey())
    require.Error(t, err)
    require.Equal(t, context.Canceled, err)
    _ = ab.Close()
@@ -301,7 +301,7 @@ func TestConnection_HandshakeCancel(t *testing.T) {
    ab, ba = dialAccept(t, a, b)
    timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond)
    defer cancel()
    _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey())
    _, _, err = ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey())
    require.Error(t, err)
    require.Equal(t, context.DeadlineExceeded, err)
    _ = ab.Close()
@@ -630,13 +630,13 @@ func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.
    go func() {
        privKey := ed25519.GenPrivKey()
        nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
        _, _, err := ba.Handshake(ctx, nodeInfo, privKey)
        _, _, err := ba.Handshake(ctx, 0, nodeInfo, privKey)
        errCh <- err
    }()

    privKey := ed25519.GenPrivKey()
    nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
    _, _, err := ab.Handshake(ctx, nodeInfo, privKey)
    _, _, err := ab.Handshake(ctx, 0, nodeInfo, privKey)
    require.NoError(t, err)

    timer := time.NewTimer(2 * time.Second)
@@ -150,3 +150,18 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request
func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) {
    _m.Called(_a0)
}

type mockConstructorTestingTNewAppConnConsensus interface {
    mock.TestingT
    Cleanup(func())
}

// NewAppConnConsensus creates a new instance of AppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewAppConnConsensus(t mockConstructorTestingTNewAppConnConsensus) *AppConnConsensus {
    mock := &AppConnConsensus{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -118,3 +118,18 @@ func (_m *AppConnMempool) FlushSync(_a0 context.Context) error {
func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) {
    _m.Called(_a0)
}

type mockConstructorTestingTNewAppConnMempool interface {
    mock.TestingT
    Cleanup(func())
}

// NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewAppConnMempool(t mockConstructorTestingTNewAppConnMempool) *AppConnMempool {
    mock := &AppConnMempool{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -97,3 +97,18 @@ func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (

    return r0, r1
}

type mockConstructorTestingTNewAppConnQuery interface {
    mock.TestingT
    Cleanup(func())
}

// NewAppConnQuery creates a new instance of AppConnQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewAppConnQuery(t mockConstructorTestingTNewAppConnQuery) *AppConnQuery {
    mock := &AppConnQuery{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -120,3 +120,18 @@ func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 types.Requ

    return r0, r1
}

type mockConstructorTestingTNewAppConnSnapshot interface {
    mock.TestingT
    Cleanup(func())
}

// NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewAppConnSnapshot(t mockConstructorTestingTNewAppConnSnapshot) *AppConnSnapshot {
    mock := &AppConnSnapshot{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
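An aside on the generated constructors above: they register AssertExpectations as a test cleanup, so an unmet stub fails the test automatically. A sketch of how such a constructor is typically used, assuming the mocks live under internal/proxy/mocks; the test name and the stubbed FlushSync expectation are made up for illustration:

package proxy_test

import (
    "context"
    "testing"

    "github.com/tendermint/tendermint/internal/proxy/mocks"
)

func TestMempoolProxyFlush(t *testing.T) {
    // The constructor wires the mock to t and asserts expectations on cleanup.
    appConn := mocks.NewAppConnMempool(t)
    appConn.On("FlushSync", context.Background()).Return(nil)

    // Exercise code that should call FlushSync exactly as stubbed.
    if err := appConn.FlushSync(context.Background()); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}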
@@ -37,24 +37,32 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*co
    err := env.Mempool.CheckTx(
        ctx.Context(),
        tx,
        func(res *abci.Response) { resCh <- res },
        func(res *abci.Response) {
            select {
            case <-ctx.Context().Done():
            case resCh <- res:
            }
        },
        mempool.TxInfo{},
    )
    if err != nil {
        return nil, err
    }

    res := <-resCh
    r := res.GetCheckTx()

    return &coretypes.ResultBroadcastTx{
        Code: r.Code,
        Data: r.Data,
        Log: r.Log,
        Codespace: r.Codespace,
        MempoolError: r.MempoolError,
        Hash: tx.Hash(),
    }, nil
    select {
    case <-ctx.Context().Done():
        return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Context().Err())
    case res := <-resCh:
        r := res.GetCheckTx()
        return &coretypes.ResultBroadcastTx{
            Code: r.Code,
            Data: r.Data,
            Log: r.Log,
            Codespace: r.Codespace,
            MempoolError: r.MempoolError,
            Hash: tx.Hash(),
        }, nil
    }
}

// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx.
@@ -64,61 +72,71 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
    err := env.Mempool.CheckTx(
        ctx.Context(),
        tx,
        func(res *abci.Response) { resCh <- res },
        func(res *abci.Response) {
            select {
            case <-ctx.Context().Done():
            case resCh <- res:
            }
        },
        mempool.TxInfo{},
    )
    if err != nil {
        return nil, err
    }

    r := (<-resCh).GetCheckTx()
    if r.Code != abci.CodeTypeOK {
        return &coretypes.ResultBroadcastTxCommit{
            CheckTx: *r,
            Hash: tx.Hash(),
        }, fmt.Errorf("transaction encountered error (%s)", r.MempoolError)
    }

    if !indexer.KVSinkEnabled(env.EventSinks) {
        return &coretypes.ResultBroadcastTxCommit{
    select {
    case <-ctx.Context().Done():
        return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Context().Err())
    case res := <-resCh:
        r := res.GetCheckTx()
        if r.Code != abci.CodeTypeOK {
            return &coretypes.ResultBroadcastTxCommit{
                CheckTx: *r,
                Hash: tx.Hash(),
            },
            errors.New("cannot wait for commit because kvEventSync is not enabled")
    }
            }, fmt.Errorf("transaction encountered error (%s)", r.MempoolError)
        }

    startAt := time.Now()
    timer := time.NewTimer(0)
    defer timer.Stop()

    count := 0
    for {
        count++
        select {
        case <-ctx.Context().Done():
            env.Logger.Error("Error on broadcastTxCommit",
                "duration", time.Since(startAt),
                "err", err)
        if !indexer.KVSinkEnabled(env.EventSinks) {
            return &coretypes.ResultBroadcastTxCommit{
                CheckTx: *r,
                Hash: tx.Hash(),
            }, fmt.Errorf("timeout waiting for commit of tx %s (%s)",
                tx.Hash(), time.Since(startAt))
        case <-timer.C:
            txres, err := env.Tx(ctx, tx.Hash(), false)
            if err != nil {
                jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec
                backoff := 100 * time.Duration(count) * time.Millisecond
                timer.Reset(jitter + backoff)
                continue
            }
            },
            errors.New("cannot confirm transaction because kvEventSink is not enabled")
        }

            return &coretypes.ResultBroadcastTxCommit{
                CheckTx: *r,
                DeliverTx: txres.TxResult,
                Hash: tx.Hash(),
                Height: txres.Height,
            }, nil
        startAt := time.Now()
        timer := time.NewTimer(0)
        defer timer.Stop()

        count := 0
        for {
            count++
            select {
            case <-ctx.Context().Done():
                env.Logger.Error("error on broadcastTxCommit",
                    "duration", time.Since(startAt),
                    "err", err)
                return &coretypes.ResultBroadcastTxCommit{
                    CheckTx: *r,
                    Hash: tx.Hash(),
                }, fmt.Errorf("timeout waiting for commit of tx %s (%s)",
                    tx.Hash(), time.Since(startAt))
            case <-timer.C:
                txres, err := env.Tx(ctx, tx.Hash(), false)
                if err != nil {
                    jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec
                    backoff := 100 * time.Duration(count) * time.Millisecond
                    timer.Reset(jitter + backoff)
                    continue
                }

                return &coretypes.ResultBroadcastTxCommit{
                    CheckTx: *r,
                    DeliverTx: txres.TxResult,
                    Hash: tx.Hash(),
                    Height: txres.Height,
                }, nil
            }
        }
    }
}
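An aside on the BroadcastTxCommit change above: the commit wait is a poll loop with linear backoff plus random jitter, bounded by the request context, so concurrent clients do not retry in lockstep. A standalone sketch of that retry shape (pollWithBackoff and errNotFound are invented for illustration):

package main

import (
    "context"
    "errors"
    "fmt"
    "math/rand"
    "time"
)

var errNotFound = errors.New("not found yet")

func pollWithBackoff(ctx context.Context, lookup func() error) error {
    timer := time.NewTimer(0) // fire immediately on the first attempt
    defer timer.Stop()

    count := 0
    for {
        count++
        select {
        case <-ctx.Done():
            return fmt.Errorf("gave up after %d attempts: %w", count, ctx.Err())
        case <-timer.C:
            if err := lookup(); err != nil {
                // linear backoff (100ms per attempt) plus up to 1s of jitter
                jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second)))
                backoff := 100 * time.Duration(count) * time.Millisecond
                timer.Reset(jitter + backoff)
                continue
            }
            return nil
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    attempts := 0
    err := pollWithBackoff(ctx, func() error {
        attempts++
        if attempts < 3 {
            return errNotFound // pretend the tx is not indexed yet
        }
        return nil
    })
    fmt.Println(attempts, err)
}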
@@ -165,3 +165,18 @@ func (_m *EventSink) Type() indexer.EventSinkType {

    return r0
}

type mockConstructorTestingTNewEventSink interface {
    mock.TestingT
    Cleanup(func())
}

// NewEventSink creates a new instance of EventSink. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewEventSink(t mockConstructorTestingTNewEventSink) *EventSink {
    mock := &EventSink{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
@@ -18,14 +18,16 @@ var _ indexer.EventSink = (*EventSink)(nil)
// The EventSink is an aggregator for redirecting the call path of the tx/block kvIndexer.
// For the implementation details please see the kv.go in the indexer/block and indexer/tx folder.
type EventSink struct {
    txi *kvt.TxIndex
    bi *kvb.BlockerIndexer
    txi *kvt.TxIndex
    bi *kvb.BlockerIndexer
    store dbm.DB
}

func NewEventSink(store dbm.DB) indexer.EventSink {
    return &EventSink{
        txi: kvt.NewTxIndex(store),
        bi: kvb.New(store),
        txi: kvt.NewTxIndex(store),
        bi: kvb.New(store),
        store: store,
    }
}

@@ -58,5 +60,5 @@ func (kves *EventSink) HasBlock(h int64) (bool, error) {
}

func (kves *EventSink) Stop() error {
    return nil
    return kves.store.Close()
}
@@ -208,3 +208,18 @@ func (_m *BlockStore) Size() int64 {

    return r0
}

type mockConstructorTestingTNewBlockStore interface {
    mock.TestingT
    Cleanup(func())
}

// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore {
    mock := &BlockStore{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -68,3 +68,18 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64
func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) {
    _m.Called(_a0, _a1)
}

type mockConstructorTestingTNewEvidencePool interface {
    mock.TestingT
    Cleanup(func())
}

// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewEvidencePool(t mockConstructorTestingTNewEvidencePool) *EvidencePool {
    mock := &EvidencePool{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -186,3 +186,18 @@ func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet

    return r0
}

type mockConstructorTestingTNewStore interface {
    mock.TestingT
    Cleanup(func())
}

// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewStore(t mockConstructorTestingTNewStore) *Store {
    mock := &Store{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@@ -82,3 +82,18 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State,

    return r0, r1
}

type mockConstructorTestingTNewStateProvider interface {
    mock.TestingT
    Cleanup(func())
}

// NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewStateProvider(t mockConstructorTestingTNewStateProvider) *StateProvider {
    mock := &StateProvider{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
@@ -388,7 +388,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t
    for {
        iterCount++
        select {
        case s.paramsSendCh <- p2p.Envelope{
        case requestCh <- p2p.Envelope{
            To: peer,
            Message: &ssproto.ParamsRequest{
                Height: uint64(height),
@@ -75,7 +75,7 @@ func MustNewDefaultLogger(format, level string, trace bool) Logger {
}

func (l defaultLogger) Info(msg string, keyVals ...interface{}) {
    l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg)
    l.Logger.Info().Fields(keyVals).Msg(msg)
}

func (l defaultLogger) Error(msg string, keyVals ...interface{}) {
@@ -84,29 +84,16 @@ func (l defaultLogger) Error(msg string, keyVals ...interface{}) {
        e = e.Stack()
    }

    e.Fields(getLogFields(keyVals...)).Msg(msg)
    e.Fields(keyVals).Msg(msg)
}

func (l defaultLogger) Debug(msg string, keyVals ...interface{}) {
    l.Logger.Debug().Fields(getLogFields(keyVals...)).Msg(msg)
    l.Logger.Debug().Fields(keyVals).Msg(msg)
}

func (l defaultLogger) With(keyVals ...interface{}) Logger {
    return defaultLogger{
        Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger(),
        Logger: l.Logger.With().Fields(keyVals).Logger(),
        trace: l.trace,
    }
}

func getLogFields(keyVals ...interface{}) map[string]interface{} {
    if len(keyVals)%2 != 0 {
        return nil
    }

    fields := make(map[string]interface{}, len(keyVals))
    for i := 0; i < len(keyVals); i += 2 {
        fields[fmt.Sprint(keyVals[i])] = keyVals[i+1]
    }

    return fields
}
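An aside on the logger change above: the variadic key/value convention pairs each stringified key with the value that follows it and rejects odd-length lists. A tiny standalone reimplementation, for illustration only (logFields is an invented name, not the library's API):

package main

import "fmt"

func logFields(keyVals ...interface{}) map[string]interface{} {
    if len(keyVals)%2 != 0 {
        return nil // mismatched key/value pairs are dropped rather than guessed at
    }
    fields := make(map[string]interface{}, len(keyVals)/2)
    for i := 0; i < len(keyVals); i += 2 {
        fields[fmt.Sprint(keyVals[i])] = keyVals[i+1]
    }
    return fields
}

func main() {
    fmt.Println(logFields("module", "p2p", "peers", 7)) // map[module:p2p peers:7]
    fmt.Println(logFields("orphan-key"))                // nil (printed as map[]): odd length
}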
@@ -1018,7 +1018,12 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)

    // process all the responses as they come in
    for i := 0; i < cap(witnessResponsesC); i++ {
        response := <-witnessResponsesC
        var response witnessResponse
        select {
        case response = <-witnessResponsesC:
        case <-ctx.Done():
            return nil, ctx.Err()
        }
        switch response.err {
        // success! We have found a new primary
        case nil:
@@ -1047,10 +1052,6 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)
            // return the light block that new primary responded with
            return response.lb, nil

        // catch canceled contexts or deadlines
        case context.Canceled, context.DeadlineExceeded:
            return nil, response.err

        // process benign errors by logging them only
        case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh:
            lastError = response.err
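An aside on the light client change above: replacing a bare channel receive with a select keeps the loop from blocking forever once the caller's context is canceled. A generic sketch of collecting a fixed number of responses that way (collectN is an invented helper):

package main

import (
    "context"
    "fmt"
    "time"
)

func collectN(ctx context.Context, results <-chan int, n int) ([]int, error) {
    out := make([]int, 0, n)
    for i := 0; i < n; i++ {
        select {
        case r := <-results:
            out = append(out, r)
        case <-ctx.Done():
            // stop waiting as soon as the caller gives up
            return out, ctx.Err()
        }
    }
    return out, nil
}

func main() {
    results := make(chan int, 3)
    results <- 1
    results <- 2
    // the third worker never responds; the context bounds how long we wait
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()

    got, err := collectN(ctx, results, 3)
    fmt.Println(got, err) // [1 2] context deadline exceeded
}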
@@ -51,3 +51,18 @@ func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) erro

    return r0
}

type mockConstructorTestingTNewProvider interface {
    mock.TestingT
    Cleanup(func())
}

// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewProvider(t mockConstructorTestingTNewProvider) *Provider {
    mock := &Provider{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
Some files were not shown because too many files have changed in this diff.