mirror of
https://github.com/tendermint/tendermint.git
synced 2026-01-16 09:42:50 +00:00
Compare commits
38 Commits
marko/brin
...
wb/interna
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c627a1d762 | ||
|
|
5dbb706e2c | ||
|
|
56b5a234bf | ||
|
|
c3f64df354 | ||
|
|
ec46cc4006 | ||
|
|
8935da8872 | ||
|
|
ab83d3307d | ||
|
|
636cd97712 | ||
|
|
a8b85c1999 | ||
|
|
eec438ac97 | ||
|
|
5e4575695d | ||
|
|
b4da26555b | ||
|
|
d9820182e6 | ||
|
|
667c53dcbc | ||
|
|
c35bcbe320 | ||
|
|
b075117d83 | ||
|
|
3e71e81938 | ||
|
|
5caea6e01e | ||
|
|
146c996ec7 | ||
|
|
101d357224 | ||
|
|
5aeee88443 | ||
|
|
b0fe38c245 | ||
|
|
06b1812094 | ||
|
|
735a1a537b | ||
|
|
c052181e32 | ||
|
|
a4c3b5cab4 | ||
|
|
9dae97d845 | ||
|
|
412a77915d | ||
|
|
4b36feaa2b | ||
|
|
b52b8f2740 | ||
|
|
083716b22a | ||
|
|
494c5cddbe | ||
|
|
cec0a97987 | ||
|
|
694ab2c6d1 | ||
|
|
97f2944db0 | ||
|
|
ef44460c41 | ||
|
|
ce40697ea6 | ||
|
|
e980e1468d |
6
.github/workflows/docker.yml
vendored
6
.github/workflows/docker.yml
vendored
@@ -39,17 +39,17 @@ jobs:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Build
|
||||
uses: docker/setup-buildx-action@v1.7.0
|
||||
uses: docker/setup-buildx-action@v2.0.0
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@v1.14.1
|
||||
uses: docker/login-action@v2.0.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v2.10.0
|
||||
uses: docker/build-push-action@v3.0.0
|
||||
with:
|
||||
context: .
|
||||
file: ./DOCKER/Dockerfile
|
||||
|
||||
62
.github/workflows/docs-deployment.yml
vendored
Normal file
62
.github/workflows/docs-deployment.yml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
# Build and deploy the docs.tendermint.com website content.
|
||||
# The static content is published to GitHub Pages.
|
||||
#
|
||||
# For documentation build info, see docs/DOCS_README.md.
|
||||
name: Build static documentation site
|
||||
on:
|
||||
workflow_dispatch: # allow manual updates
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- docs/**
|
||||
- spec/**
|
||||
|
||||
jobs:
|
||||
# This is split into two jobs so that the build, which runs npm, does not
|
||||
# have write access to anything. The deploy requires write access to publish
|
||||
# to the branch used by GitHub Pages, however, so we can't just make the
|
||||
# whole workflow read-only.
|
||||
build:
|
||||
name: VuePress build
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: alpine:latest
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Install generator dependencies
|
||||
run: |
|
||||
apk add --no-cache make bash git npm
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
# We need to fetch full history so the backport branches for previous
|
||||
# versions will be available for the build.
|
||||
fetch-depth: 0
|
||||
- name: Build documentation
|
||||
run: |
|
||||
git config --global --add safe.directory "$PWD"
|
||||
make build-docs
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: ~/output/
|
||||
|
||||
deploy:
|
||||
name: Deploy to GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: ~/output
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
branch: 'docs-tendermint-com'
|
||||
folder: ~/output
|
||||
single-commit: true
|
||||
32
.github/workflows/docs.yaml
vendored
32
.github/workflows/docs.yaml
vendored
@@ -1,32 +0,0 @@
|
||||
name: Documentation
|
||||
# This workflow builds the static documentation site, and publishes the results to GitHub Pages.
|
||||
# It runs on every push to the main branch, with changes in the docs and spec directories
|
||||
on:
|
||||
workflow_dispatch: # allow manual updates
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "docs/**"
|
||||
- "spec/**"
|
||||
|
||||
jobs:
|
||||
build-and-deploy:
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: tendermintdev/docker-website-deployment
|
||||
steps:
|
||||
- name: Checkout 🛎️
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install and Build 🔧
|
||||
run: |
|
||||
apk add rsync
|
||||
make build-docs
|
||||
- name: Deploy 🚀
|
||||
uses: JamesIves/github-pages-deploy-action@v4.3.0
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
BRANCH: gh-pages
|
||||
FOLDER: ~/output
|
||||
single-commit: true
|
||||
18
Makefile
18
Makefile
@@ -226,7 +226,8 @@ DESTINATION = ./index.html.md
|
||||
build-docs:
|
||||
@cd docs && \
|
||||
while read -r branch path_prefix; do \
|
||||
(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
( git checkout $${branch} && npm ci --quiet && \
|
||||
VUEPRESS_BASE="/$${path_prefix}/" npm run build --quiet ) ; \
|
||||
mkdir -p ~/output/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
|
||||
cp ~/output/$${path_prefix}/index.html ~/output ; \
|
||||
@@ -250,6 +251,21 @@ mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
|
||||
###############################################################################
|
||||
### Metrics ###
|
||||
###############################################################################
|
||||
|
||||
metrics: testdata-metrics
|
||||
go generate -run="scripts/metricsgen" ./...
|
||||
.PHONY: metrics
|
||||
|
||||
# By convention, the go tool ignores subdirectories of directories named
|
||||
# 'testdata'. This command invokes the generate command on the folder directly
|
||||
# to avoid this.
|
||||
testdata-metrics:
|
||||
ls ./scripts/metricsgen/testdata | xargs -I{} go generate -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
|
||||
.PHONY: testdata-metrics
|
||||
|
||||
###############################################################################
|
||||
### Local testnet using docker ###
|
||||
###############################################################################
|
||||
|
||||
@@ -18,40 +18,53 @@ Listen address can be changed in the config file (see
|
||||
|
||||
The following metrics are available:
|
||||
|
||||
| **Name** | **Type** | **Tags** | **Description** |
|
||||
| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- |
|
||||
| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
|
||||
| consensus_height | Gauge | | Height of the chain |
|
||||
| consensus_validators | Gauge | | Number of validators |
|
||||
| consensus_validators_power | Gauge | | Total voting power of all validators |
|
||||
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
|
||||
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
|
||||
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
|
||||
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
|
||||
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
|
||||
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
|
||||
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
|
||||
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
|
||||
| consensus_rounds | Gauge | | Number of rounds |
|
||||
| consensus_num_txs | Gauge | | Number of transactions |
|
||||
| consensus_total_txs | Gauge | | Total number of transactions committed |
|
||||
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
|
||||
| consensus_latest_block_height | gauge | | /status sync_info number |
|
||||
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
|
||||
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
|
||||
| consensus_block_size_bytes | Gauge | | Block size in bytes |
|
||||
| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool
|
||||
| p2p_peers | Gauge | | Number of peers node's connected to |
|
||||
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
|
||||
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
|
||||
| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer |
|
||||
| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id |
|
||||
| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer |
|
||||
| mempool_size | Gauge | | Number of uncommitted transactions |
|
||||
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
|
||||
| mempool_failed_txs | counter | | number of failed transactions |
|
||||
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
|
||||
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |
|
||||
| **Name** | **Type** | **Tags** | **Description** |
|
||||
|-----------------------------------------|-----------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
|
||||
| consensus_height | Gauge | | Height of the chain |
|
||||
| consensus_validators | Gauge | | Number of validators |
|
||||
| consensus_validators_power | Gauge | | Total voting power of all validators |
|
||||
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
|
||||
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
|
||||
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
|
||||
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
|
||||
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
|
||||
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
|
||||
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
|
||||
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
|
||||
| consensus_rounds | Gauge | | Number of rounds |
|
||||
| consensus_num_txs | Gauge | | Number of transactions |
|
||||
| consensus_total_txs | Gauge | | Total number of transactions committed |
|
||||
| consensus_block_parts | Counter | peer_id | number of blockparts transmitted by peer |
|
||||
| consensus_latest_block_height | gauge | | /status sync_info number |
|
||||
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
|
||||
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
|
||||
| consensus_block_size_bytes | Gauge | | Block size in bytes |
|
||||
| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
|
||||
| consensus_block_gossip_receive_latency | Histogram | | Histogram of time taken to receive a block in seconds, measure between when a new block is first discovered to when the block is completed |
|
||||
| consensus_block_gossip_parts_received | Counter | matches_current | Number of block parts received by the node |
|
||||
| consensus_quorum_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
|
||||
| consensus_full_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
|
||||
| consensus_proposal_timestamp_difference | Histogram | | Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message |
|
||||
| consensus_vote_extension_receive_count | Counter | status | Number of vote extensions received |
|
||||
| consensus_proposal_receive_count | Counter | status | Total number of proposals received by the node since process start |
|
||||
| consensus_proposal_create_count | Counter | | Total number of proposals created by the node since process start |
|
||||
| consensus_round_voting_power_percent | Gauge | vote_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
|
||||
| consensus_late_votes | Counter | vote_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. |
|
||||
| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool |
|
||||
| p2p_peers | Gauge | | Number of peers node's connected to |
|
||||
| p2p_peer_receive_bytes_total | Counter | peer_id, chID | number of bytes per channel received from a given peer |
|
||||
| p2p_peer_send_bytes_total | Counter | peer_id, chID | number of bytes per channel sent to a given peer |
|
||||
| p2p_peer_pending_send_bytes | Gauge | peer_id | number of pending bytes to be sent to a given peer |
|
||||
| p2p_num_txs | Gauge | peer_id | number of transactions submitted by each peer_id |
|
||||
| p2p_pending_send_bytes | Gauge | peer_id | amount of data pending to be sent to peer |
|
||||
| mempool_size | Gauge | | Number of uncommitted transactions |
|
||||
| mempool_tx_size_bytes | Histogram | | transaction sizes in bytes |
|
||||
| mempool_failed_txs | Counter | | number of failed transactions |
|
||||
| mempool_recheck_times | Counter | | number of transactions rechecked in the mempool |
|
||||
| state_block_processing_time | Histogram | | time between BeginBlock and EndBlock in ms |
|
||||
| state_consensus_param_updates | Counter | | number of consensus parameter updates returned by the application since process start |
|
||||
| state_validator_set_updates | Counter | | number of validator set updates returned by the application since process start |
|
||||
|
||||
## Useful queries
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
"serve": "trap 'exit 0' SIGINT; vuepress dev --no-cache",
|
||||
"postserve": "./post.sh",
|
||||
"prebuild": "./pre.sh",
|
||||
"build": "trap 'exit 0' SIGINT; vuepress build --no-cache",
|
||||
"build": "trap 'exit 0' SIGINT; vuepress build --no-cache --silent",
|
||||
"postbuild": "./post.sh"
|
||||
},
|
||||
"author": "",
|
||||
|
||||
@@ -53,6 +53,9 @@ sections.
|
||||
- [RFC-013: ABCI++](./rfc-013-abci++.md)
|
||||
- [RFC-014: Semantic Versioning](./rfc-014-semantic-versioning.md)
|
||||
- [RFC-015: ABCI++ Tx Mutation](./rfc-015-abci++-tx-mutation.md)
|
||||
- [RFC-016: Node Architecture](./rfc-016-node-architecture.md)
|
||||
- [RFC-017: ABCI++ Vote Extension Propagation](./rfc-017-abci++-vote-extension-propag.md)
|
||||
- [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md)
|
||||
- [RFC-019: Configuration File Versioning](./rfc-019-config-version.md)
|
||||
|
||||
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
|
||||
|
||||
3
docs/rfc/images/node-dependency-tree.svg
Normal file
3
docs/rfc/images/node-dependency-tree.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 24 KiB |
@@ -31,12 +31,12 @@ not reference the new parameters. Any nodes joining the network with the newer
|
||||
version of Tendermint will have the new consensus parameters. Tendermint will need
|
||||
to handle this case so that new versions of Tendermint with new consensus parameters
|
||||
can still validate old blocks correctly without having to do anything overly complex
|
||||
or hacky.
|
||||
or hacky.
|
||||
|
||||
### Allowing Developer-Defined Values and the `EndBlock` Problem
|
||||
|
||||
When new consensus parameters are added, application developers may wish to set
|
||||
values for them so that the developer-defined values may be used as soon as the
|
||||
values for them so that the developer-defined values may be used as soon as the
|
||||
software upgrades. We do not currently have a clean mechanism for handling this.
|
||||
|
||||
Consensus parameter updates are communicated from the application to Tendermint
|
||||
@@ -51,7 +51,7 @@ can take effect is height `H+1`. As of now, height `H` must run with the default
|
||||
|
||||
### Hash Compatibility
|
||||
|
||||
This section discusses possible solutions to the problem of maintaining backwards-compatibility
|
||||
This section discusses possible solutions to the problem of maintaining backwards-compatibility
|
||||
of hashed parameters while adding new parameters.
|
||||
|
||||
#### Never Hash Defaults
|
||||
|
||||
83
docs/rfc/rfc-016-node-architecture.md
Normal file
83
docs/rfc/rfc-016-node-architecture.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# RFC 016: Node Architecture
|
||||
|
||||
## Changelog
|
||||
|
||||
- April 8, 2022: Initial draft (@cmwaters)
|
||||
- April 15, 2022: Incorporation of feedback
|
||||
|
||||
## Abstract
|
||||
|
||||
The `node` package is the entry point into the Tendermint codebase, used both by the command line and programmatically to create the nodes that make up a network. The package has suffered the most from the evolution of the codebase, becoming bloated as developers clipped on their bits of code here and there to get whatever feature they wanted working.
|
||||
|
||||
The decisions made at the node level have the biggest impact to simplifying the protocols within them, unlocking better internal designs and making Tendermint more intuitive to use and easier to understand from the outside. Work, in minor increments, has already begun on this section of the codebase. This document exists to spark forth the necessary discourse in a few related areas that will help the team to converge on the long term makeup of the node.
|
||||
|
||||
## Discussion
|
||||
|
||||
The following is a list of points of discussion around the architecture of the node:
|
||||
|
||||
### Dependency Tree
|
||||
|
||||
The node object is currently stuffed with every component that possibly exists within Tendermint. In the constructor, all objects are built and interlaid with one another in some awkward dance. My guiding principle is that the node should only be made up of the components that it wants to have direct control of throughout its life. The node is a service which currently has the purpose of starting other services up in a particular order and stopping them all when commanded to do so. However, there are many services which are not direct dependents i.e. the mempool and evidence services should only be working when the consensus service is running. I propose to form more of a hierarchical structure of dependents which forces us to be clear about the relations that one component has to the other. More concretely, I propose the following dependency tree:
|
||||
|
||||

|
||||
|
||||
Many of the further discussion topics circle back to this representation of the node.
|
||||
|
||||
It's also important to distinguish two dimensions which may require different characteristics of the architecture. There is the starting and stopping of services and their general lifecycle management. What is the correct order of operations to starting a node for example. Then there is the question of the needs of the service during actual operation. Then there is the question of what resources each service needs access to during its operation. Some need to publish events, others need access to data stores, and so forth.
|
||||
|
||||
An alternative model and one that perhaps better suits the latter of these dimensions is the notion of an internal message passing system. Either the events bus or p2p layer could serve as a viable transport. This would essentially allow all services to communicate with any other service and could perhaps provide a solution to the coordination problem (presented below) without a centralized coordinator. The other main advantage is that such a system would be more robust to disruptions and changes to the code which may make a hierarchical structure quickly outdated and suboptimal. The addition of message routing is an added complexity to implement, will increase the degree of asynchronicity in the system and may make it harder to debug problems that are across multiple services.
|
||||
|
||||
### Coordination of State Advancing Mechanisms
|
||||
|
||||
Advancement of state in Tendermint is simply defined in heights: If the node is at height n, how does it get to height n + 1 and so on. Based on this definition we have three components that help a node to advance in height: consensus, statesync and blocksync. The way these components behave currently is very tightly coupled to one another with references passed back and forth. My guiding principle is that each of these should be able to operate completely independently of each other, e.g. a node should be able to run solely blocksync indefinitely. There have been several ideas suggested towards improving this flow. I've been leaning strongly towards a centralized system, whereby an orchestrator (in this case the node) decides what services to start and stop.
|
||||
In a decentralized message passing system, individual services make their decision based upon a "global" shared state i.e. if my height is less than 10 below the average peer height, I as consensus, should stop (knowing that blocksync has the same condition for starting). As the example illustrates, each mechanism will still need to be aware of the presence of other mechanisms.
|
||||
|
||||
Both centralized and decentralized systems rely on the communication of the node's current height and a judgement on the height of the head of the chain. The latter, working out the head of the chain, is quite a difficult challenge as there is nothing preventing the node from acting maliciously and providing a different height. Currently both blocksync, consensus (and to a certain degree statesync), have parallel systems where peers communicate their height. This could be streamlined with the consensus (or even the p2p layer), broadcasting peer heights and either the node or the other state advancing mechanisms acting accordingly.
|
||||
|
||||
Currently, when a node starts, it turns on every service that it is attached to. This means that while a node is syncing up by requesting blocks, it is also receiving transactions and votes, as well as snapshot and block requests. This is a needless use of bandwidth. An implementation of an orchestrator, regardless of whether the system is hierarchical or not, should look to be able to open and close channels dynamically and effectively broadcast which services it is running. Integrating this with service discovery may also lead to a better service to peers.
|
||||
|
||||
The orchestrator allows for some deal of variability in how a node is constructed. Does it just run blocksync, shadowing the head of the chain and be highly available for querying. Does it rely on state sync at all? An important question that arises from this dynamism is we ideally want to encourage nodes to provide as much of their resources as possible so that there is a healthy amount of providers to consumers. Do we make all services compulsory or allow for them to be disabled? Arguably it's possible that a user forks the codebase and rips out the blocksync code because they want to reduce bandwidth so this is more a question of how easy do we want to make this for users.
|
||||
|
||||
### Block Executor
|
||||
|
||||
The block executor is an important component that is currently used by both consensus and blocksync to execute transactions and update application state. Principally, I think it should be the only component that can write (and possibly even read) the block and state stores, and we should clean up other direct dependencies on the storage engine if we can. This would mean:
|
||||
|
||||
- The reactors Consensus, BlockSync and StateSync should all import the executor for advancing state ie. `ApplyBlock` and `BootstrapState`.
|
||||
- Pruning should also be a concern of the block executor as well as `FinalizeBlock` and `Commit`. This can simplify consensus to focus just on the consensus part.
|
||||
|
||||
### The Interprocess communication systems: RPC, P2P, ABCI, and Events
|
||||
|
||||
The schematic supplied above shows the relations between the different services, the node, the block executor, and the storage layer. Represented as colored dots are the components responsible for different roles of interprocess communication (IPC). These components permeate throughout the code base, seeping into most services. What can provide powerful functionality on one hand can also become a twisted vine, creating messy corner cases and convoluting the protocols themselves. A lot of the thinking around
|
||||
how we want our IPC systems to function has been summarised in this [RFC](./rfc-002-ipc-ecosystem.md). In this section, I'd like to focus the reader on the relation between the IPC and the node structure. An issue that has frequently arisen is that the RPC has control of the components where it strikes me as being more logical for the component to dictate the information that is emitted/available and the knobs it wishes to expose. The RPC is also inextricably tied to the node instance and has situations where it is passed pointers directly to the storage engine and other components.
|
||||
|
||||
I am currently convinced of the approach that the p2p layer takes and would like to see other IPC components follow suit. This would mean that the RPC and events system would be constructed in the node yet would pass the adequate methods to register endpoints and topics to the sub components. For example,
|
||||
|
||||
```go
|
||||
// Methods from the RPC and event bus that would be passed into the constructor of components like "consensus"
|
||||
// NOTE: This is a hypothetical construction to convey the idea. An actual implementation may differ.
|
||||
func RegisterRoute(path string, handler func(http.ResponseWriter, *http.Request))
|
||||
|
||||
func RegisterTopic(name string) EventPublisher
|
||||
|
||||
type EventPublisher func (context.Context, types.EventData, []abci.Event)
|
||||
```
|
||||
|
||||
This would give the components control over the information they want to expose and keep all relevant logic within that package. It accommodates more to a dynamic system where services can switch on and off. Each component would also receive access to the logger and metrics system for introspection and debuggability.
|
||||
|
||||
#### IPC Rubric
|
||||
|
||||
I'd like to aim to reach a state where we as a team have either an implicit or explicit rubric which can determine, in the event of some new need to communicate information, what tool it should use for doing this. In the case of inter node communication, this is obviously the p2p stack (with perhaps the exception of the light client). Metrics and logging also have clear usage patterns. RPC and the events system are less clear. The RPC is used for debugging data and fine tuned operator control as it is for general public querying and transaction submission. The RPC is also known to have been plumbed back into the application for historical queries. The events system, similarly, is used for consuming transaction events as it is for the testing of consensus state transitions.
|
||||
|
||||
Principally, I think we should look to change our language away from what the actual transport is and more towards what it's being used for and to whom. We call it a peer to peer layer and not the underlying tcp connection. In the same way, we should look to split RPC into an operator interface (RPC Internal), a public interface (RPC External) and a bidirectional ABCI.
|
||||
|
||||
### Separation of consumers and suppliers
|
||||
|
||||
When a service such as blocksync is turned on, it automatically begins requesting blocks to verify and apply them as it also tries to serve them to other peers catching up. We should look to distinguish these two aspects: supplying of information and consuming of information in many of these components. More concretely, I'd suggest:
|
||||
|
||||
- The blocksync and statesync service, i.e. supplying information for those trying to catch up should only start running once a node has caught up i.e. after running the blocksync and/or state sync *processes*
|
||||
- The blocksync and state sync processes have defined termination clauses that inform the orchestrator when they are done and where they finished.
|
||||
- One way of achieving this would be that every process both passes and returns the `State` object
|
||||
- In some cases, a node may specify that it wants to run blocksync indefinitely.
|
||||
- The mempool should also indicate whether it wants to receive transactions or to send them only (one-directional mempool)
|
||||
- Similarly, the light client itself only requests information whereas the light client service (currently part of state sync) can do both.
|
||||
- This distinction needs to be communicated in the p2p layer handshake itself but should also be changeable over the lifespan of the connection.
|
||||
571
docs/rfc/rfc-017-abci++-vote-extension-propag.md
Normal file
571
docs/rfc/rfc-017-abci++-vote-extension-propag.md
Normal file
@@ -0,0 +1,571 @@
|
||||
# RFC 017: ABCI++ Vote Extension Propagation
|
||||
|
||||
## Changelog
|
||||
|
||||
- 11-Apr-2022: Initial draft (@sergio-mena).
|
||||
- 15-Apr-2022: Addressed initial comments. First complete version (@sergio-mena).
|
||||
- 09-May-2022: Addressed all outstanding comments.
|
||||
|
||||
## Abstract
|
||||
|
||||
According to the
|
||||
[ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md)
|
||||
(as of 11-Apr-2022), a validator MUST provide a signed vote extension for each non-`nil` precommit vote
|
||||
of height *h* that it uses to propose a block in height *h+1*. When a validator is up to
|
||||
date, this is easy to do, but when a validator needs to catch up this is far from trivial as this data
|
||||
cannot be retrieved from the blockchain.
|
||||
|
||||
This RFC presents and compares the different options to address this problem, which have been proposed
|
||||
in several discussions by the Tendermint Core team.
|
||||
|
||||
## Document Structure
|
||||
|
||||
The RFC is structured as follows. In the [Background](#background) section,
|
||||
subsections [Problem Description](#problem-description) and [Cases to Address](#cases-to-address)
|
||||
explain the problem at hand from a high level perspective, i.e., abstracting away from the current
|
||||
Tendermint implementation. In contrast, subsection
|
||||
[Current Catch-up Mechanisms](#current-catch-up-mechanisms) delves into the details of the current
|
||||
Tendermint code.
|
||||
|
||||
In the [Discussion](#discussion) section, subsection [Solutions Proposed](#solutions-proposed) is also
|
||||
worded abstracting away from implementation details, whilst subsections
|
||||
[Feasibility of the Proposed Solutions](#feasibility-of-the-proposed-solutions) and
|
||||
[Current Limitations and Possible Implementations](#current-limitations-and-possible-implementations)
|
||||
analyze the viability of one of the proposed solutions in the context of Tendermint's architecture
|
||||
based on reactors. Finally, [Formalization Work](#formalization-work) briefly discusses the work
|
||||
still needed to demonstrate the correctness of the chosen solution.
|
||||
|
||||
The high level subsections are aimed at readers who are familiar with consensus algorithms, in
|
||||
particular with the one described in the Tendermint white paper, but who are not necessarily
|
||||
acquainted with the details of the Tendermint codebase. The other subsections, which go into
|
||||
implementation details, are best understood by engineers with deep knowledge of the implementation of
|
||||
Tendermint's blocksync and consensus reactors.
|
||||
|
||||
## Background
|
||||
|
||||
### Basic Definitions
|
||||
|
||||
This document assumes that all validators have equal voting power for the sake of simplicity. This is done
|
||||
without loss of generality.
|
||||
|
||||
There are two types of votes in Tendermint: *prevotes* and *precommits*. Votes can be `nil` or refer to
|
||||
a proposed block. This RFC focuses on precommits,
|
||||
also known as *precommit votes*. In this document we sometimes call them simply *votes*.
|
||||
|
||||
Validators send precommit votes to their peer nodes in *precommit messages*. According to the
|
||||
[ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md),
|
||||
a precommit message MUST also contain a *vote extension*.
|
||||
This mandatory vote extension can be empty, but MUST be signed with the same key as the precommit
|
||||
vote (i.e., the sending validator's).
|
||||
Nevertheless, the vote extension is signed independently from the vote, so a vote can be separated from
|
||||
its extension.
|
||||
The reason for vote extensions to be mandatory in precommit messages is that, otherwise, a (malicious)
|
||||
node can omit a vote extension while still providing/forwarding/sending the corresponding precommit vote.
|
||||
|
||||
The validator set at height *h* is denoted *valset<sub>h</sub>*. A *commit* for height *h* consists of more
|
||||
than *2n<sub>h</sub>/3* precommit votes voting for a block *b*, where *n<sub>h</sub>* denotes the size of
|
||||
*valset<sub>h</sub>*. A commit does not contain `nil` precommit votes, and all votes in it refer to the
|
||||
same block. An *extended commit* is a *commit* where every precommit vote has its respective vote extension
|
||||
attached.
|
||||
|
||||
### Problem Description
|
||||
|
||||
In the version of [ABCI](https://github.com/tendermint/spec/blob/4fb99af/spec/abci/README.md) present up to
|
||||
Tendermint v0.35, for any height *h*, a validator *v* MUST have the decided block *b* and a commit for
|
||||
height *h* in order to decide at height *h*. Then, *v* just needs a commit for height *h* to propose at
|
||||
height *h+1*, in the rounds of *h+1* where *v* is a proposer.
|
||||
|
||||
In [ABCI++](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md),
|
||||
the information that a validator *v* MUST have to be able to decide in *h* does not change with
|
||||
respect to pre-existing ABCI: the decided block *b* and a commit for *h*.
|
||||
In contrast, for proposing in *h+1*, a commit for *h* is not enough: *v* MUST now have an extended
|
||||
commit.
|
||||
|
||||
When a validator takes an active part in consensus at height *h*, it has all the data it needs in memory,
|
||||
in its consensus state, to decide on *h* and propose in *h+1*. Things are not so easy in the cases when
|
||||
*v* cannot take part in consensus because it is late (e.g., it falls behind, it crashes
|
||||
and recovers, or it just starts after the others). If *v* does not take part, it cannot actively
|
||||
gather precommit messages (which include vote extensions) in order to decide.
|
||||
Before ABCI++, this was not a problem: full nodes are supposed to persist past blocks in the block store,
|
||||
so other nodes would realise that *v* is late and send it the missing decided block at height *h* and
|
||||
the corresponding commit (kept in block *h+1*) so that *v* can catch up.
|
||||
However, we cannot apply this catch-up technique for ABCI++, as the vote extensions, which are part
|
||||
of the needed *extended commit* are not part of the blockchain.
|
||||
|
||||
### Cases to Address
|
||||
|
||||
Before we tackle the description of the possible cases we need to address, let us describe the following
|
||||
incremental improvement to the ABCI++ logic. Upon decision, a full node persists (e.g., in the block
|
||||
store) the extended commit that allowed the node to decide. For the moment, let us assume the node only
|
||||
needs to keep its *most recent* extended commit, and MAY remove any older extended commits from persistent
|
||||
storage.
|
||||
This improvement is so obvious that all solutions described in the [Discussion](#discussion) section use
|
||||
it as a building block. Moreover, it completely addresses by itself some of the cases described in this
|
||||
subsection.
|
||||
|
||||
We now describe the cases (i.e. possible *runs* of the system) that have been raised in different
|
||||
discussions and need to be addressed. They are (roughly) ordered from easiest to hardest to deal with.
|
||||
|
||||
- **(a)** *Happy path: all validators advance together, no crash*.
|
||||
|
||||
This case is included for completeness. All validators have taken part in height *h*.
|
||||
Even if some of them did not manage to send a precommit message for the decided block, they all
|
||||
receive enough precommit messages to be able to decide. As vote extensions are mandatory in
|
||||
precommit messages, every validator *v* trivially has all the information, namely the decided block
|
||||
and the extended commit, needed to propose in height *h+1* for the rounds in which *v* is the
|
||||
proposer.
|
||||
|
||||
No problem to solve here.
|
||||
|
||||
- **(b)** *All validators advance together, then all crash at the same height*.
|
||||
|
||||
This case has been raised in some discussions, the main concern being whether the vote extensions
|
||||
for the previous height would be lost across the network. With the improvement described above,
|
||||
namely persisting the latest extended commit at decision time, this case is solved.
|
||||
When a crashed validator recovers, it recovers the last extended commit from persistent storage
|
||||
and handshakes with the Application.
|
||||
If need be, it also reconstructs messages for the unfinished height
|
||||
(including all precommits received) from the WAL.
|
||||
Then, the validator can resume where it was at the time of the crash. Thus, as extensions are
|
||||
persisted, either in the WAL (in the form of received precommit messages), or in the latest
|
||||
extended commit, the only way that vote extensions needed to start the next height could be lost
|
||||
forever would be if all validators crashed and never recovered (e.g. disk corruption).
|
||||
Since a *correct* node MUST eventually recover, this violates Tendermint's assumption of more than
|
||||
*2n<sub>h</sub>/3* correct validators for every height *h*.
|
||||
|
||||
No problem to solve here.
|
||||
|
||||
- **(c)** *Lagging majority*.
|
||||
|
||||
Let us assume the validator set does not change between *h* and *h+1*.
|
||||
It is not possible by the nature of the Tendermint algorithm, which requires more
|
||||
than *2n<sub>h</sub>/3* precommit votes for some round of height *h* in order to make progress.
|
||||
So, only up to *n<sub>h</sub>/3* validators can lag behind.
|
||||
|
||||
On the other hand, for the case where there are changes to the validator set between *h* and
|
||||
*h+1* please see case (d) below, where the extreme case is discussed.
|
||||
|
||||
- **(d)** *Validator set changes completely between* h *and* h+1.
|
||||
|
||||
If sets *valset<sub>h</sub>* and *valset<sub>h+1</sub>* are disjoint,
|
||||
more than *2n<sub>h</sub>/3* of validators in height *h* should
|
||||
have actively participated in consensus in *h*. So, as of height *h*, only a minority of validators
|
||||
in *h* can be lagging behind, although they could all lag behind from *h+1* on, as they are no
|
||||
longer validators, only full nodes. This situation falls under the assumptions of case (h) below.
|
||||
|
||||
As for validators in *valset<sub>h+1</sub>*, as they were not validators as of height *h*, they
|
||||
could all be lagging behind by that time. However, by the time *h* finishes and *h+1* begins, the
|
||||
chain will halt until more than *2n<sub>h+1</sub>/3* of them have caught up and started consensus
|
||||
at height *h+1*. If set *valset<sub>h+1</sub>* does not change in *h+2* and subsequent
|
||||
heights, only up to *n<sub>h+1</sub>/3* validators will be able to lag behind. Thus, we have
|
||||
converted this case into case (h) below.
|
||||
|
||||
- **(e)** *Enough validators crash to block the rest*.
|
||||
|
||||
In this case, blockchain progress halts, i.e. surviving full nodes keep increasing rounds
|
||||
indefinitely, until some of the crashed validators are able to recover.
|
||||
Those validators that recover first will handshake with the Application and recover at the height
|
||||
they crashed, which is still the same the nodes that did not crash are stuck in, so they don't need
|
||||
to catch up.
|
||||
Further, they had persisted the extended commit for the previous height. Nothing to solve.
|
||||
|
||||
For those validators recovering later, we are in case (h) below.
|
||||
|
||||
- **(f)** *Some validators crash, but not enough to block progress*.
|
||||
|
||||
When the correct processes that crashed recover, they handshake with the Application and resume at
|
||||
the height they were at when they crashed. As the blockchain did not stop making progress, the
|
||||
recovered processes are likely to have fallen behind with respect to the progressing majority.
|
||||
|
||||
At this point, the recovered processes are in case (h) below.
|
||||
|
||||
- **(g)** *A new full node starts*.
|
||||
|
||||
The reasoning here also applies to the case when more than one full node are starting.
|
||||
When the full node starts from scratch, it has no state (its current height is 0). Ignoring
|
||||
statesync for the time being, the node just needs to catch up by applying past blocks one by one
|
||||
(after verifying them).
|
||||
|
||||
Thus, the node is in case (h) below.
|
||||
|
||||
- **(h)** *Advancing majority, lagging minority*
|
||||
|
||||
In this case, some nodes are late. More precisely, at the present time, a set of full nodes,
|
||||
denoted *L<sub>h<sub>p</sub></sub>*, are falling behind
|
||||
(e.g., temporary disconnection or network partition, memory thrashing, crashes, new nodes)
|
||||
an arbitrary
|
||||
number of heights:
|
||||
between *h<sub>s</sub>* and *h<sub>p</sub>*, where *h<sub>s</sub> < h<sub>p</sub>*, and
|
||||
*h<sub>p</sub>* is the highest height
|
||||
any correct full node has reached so far.
|
||||
|
||||
The correct full nodes that reached *h<sub>p</sub>* were able to decide for *h<sub>p</sub>-1*.
|
||||
Therefore, less than *n<sub>h<sub>p</sub>-1</sub>/3* validators of *h<sub>p</sub>-1* can be part
|
||||
of *L<sub>h<sub>p</sub></sub>*, since enough up-to-date validators needed to actively participate
|
||||
in consensus for *h<sub>p</sub>-1*.
|
||||
|
||||
Since, at the present time,
|
||||
no node in *L<sub>h<sub>p</sub></sub>* took part in any consensus between
|
||||
*h<sub>s</sub>* and *h<sub>p</sub>-1*,
|
||||
the reasoning above can be extended to validator set changes between *h<sub>s</sub>* and
|
||||
*h<sub>p</sub>-1*. This results in the following restriction on the full nodes that can be part of *L<sub>h<sub>p</sub></sub>*.
|
||||
|
||||
- ∀ *h*, where *h<sub>s</sub> ≤ h < h<sub>p</sub>*,
|
||||
| *valset<sub>h</sub>* ∩ *L<sub>h<sub>p</sub></sub>* | *< n<sub>h</sub>/3*
|
||||
|
||||
If this property does not hold for a particular height *h*, where
|
||||
*h<sub>s</sub> ≤ h < h<sub>p</sub>*, Tendermint could not have progressed beyond *h* and
|
||||
therefore no full node could have reached *h<sub>p</sub>* (a contradiction).
|
||||
|
||||
These lagging nodes in *L<sub>h<sub>p</sub></sub>* need to catch up. They have to obtain the
|
||||
information needed to make
|
||||
progress from other nodes. For each height *h* between *h<sub>s</sub>* and *h<sub>p</sub>-2*,
|
||||
this includes the decided block for *h*, and the
|
||||
precommit votes also for *deciding h* (which can be extracted from the block at height *h+1*).
|
||||
|
||||
At a given height *h<sub>c</sub>* (where possibly *h<sub>c</sub> << h<sub>p</sub>*),
|
||||
a full node in *L<sub>h<sub>p</sub></sub>* will consider itself *caught up*, based on the
|
||||
(maybe out of date) information it is getting from its peers. Then, the node needs to be ready to
|
||||
propose at height *h<sub>c</sub>+1*, which requires having received the vote extensions for
|
||||
*h<sub>c</sub>*.
|
||||
As the vote extensions are *not* stored in the blocks, and it is difficult to have strong
|
||||
guarantees on *when* a late node considers itself caught up, providing the late node with the right
|
||||
vote extensions for the right height poses a problem.
|
||||
|
||||
At this point, we have described and compared all cases raised in discussions leading up to this
|
||||
RFC. The list above aims at being exhaustive. The analysis of each case included above makes all of
|
||||
them converge into case (h).
|
||||
|
||||
### Current Catch-up Mechanisms
|
||||
|
||||
We now briefly describe the current catch-up mechanisms in the reactors concerned in Tendermint.
|
||||
|
||||
#### Statesync
|
||||
|
||||
Full nodes optionally run statesync just after starting, when they start from scratch.
|
||||
If statesync succeeds, an Application snapshot is installed, and Tendermint jumps from height 0 directly
|
||||
to the height the Application snapshot represents, without applying the block of any previous height.
|
||||
Some light blocks are received and stored in the block store for running light-client verification of
|
||||
all the skipped blocks. Light blocks are incomplete blocks, typically containing the header and the
|
||||
canonical commit but, e.g., no transactions. They are stored in the block store as "signed headers".
|
||||
|
||||
The statesync reactor is not really relevant for solving the problem discussed in this RFC. We will
|
||||
nevertheless mention it when needed; in particular, to understand some corner cases.
|
||||
|
||||
#### Blocksync
|
||||
|
||||
The blocksync reactor kicks in after start up or recovery (and, optionally, after statesync is done)
|
||||
and sends the following messages to its peers:
|
||||
|
||||
- `StatusRequest` to query the height its peers are currently at, and
|
||||
- `BlockRequest`, asking for blocks of heights the local node is missing.
|
||||
|
||||
Using `BlockResponse` messages received from peers, the blocksync reactor validates each received
|
||||
block using the block of the following height, saves the block in the block store, and sends the
|
||||
block to the Application for execution.
|
||||
|
||||
If blocksync has validated and applied the block for the height *previous* to the highest seen in
|
||||
a `StatusResponse` message, or if no progress has been made after a timeout, the node considers
|
||||
itself as caught up and switches to the consensus reactor.
|
||||
|
||||
#### Consensus Reactor
|
||||
|
||||
The consensus reactor runs the full Tendermint algorithm. For a validator this means it has to
|
||||
propose blocks, and send/receive prevote/precommit messages, as mandated by Tendermint, before it can
|
||||
decide and move on to the next height.
|
||||
|
||||
If a full node that is running the consensus reactor falls behind at height *h*, when a peer node
|
||||
realises this it will retrieve the canonical commit of *h+1* from the block store, and *convert*
|
||||
it into a set of precommit votes and will send those to the late node.
|
||||
|
||||
## Discussion
|
||||
|
||||
### Solutions Proposed
|
||||
|
||||
These are the solutions proposed in discussions leading up to this RFC.
|
||||
|
||||
- **Solution 0.** *Vote extensions are made **best effort** in the specification*.
|
||||
|
||||
This is the simplest solution, considered as a way to provide vote extensions in a simple enough
|
||||
way so that it can be part of v0.36.
|
||||
It consists in changing the specification so as to not *require* that precommit votes used upon
|
||||
`PrepareProposal` contain their corresponding vote extensions. In other words, we render vote
|
||||
extensions optional.
|
||||
There are strong implications stemming from such a relaxation of the original specification.
|
||||
|
||||
- As a vote extension is signed *separately* from the vote it is extending, an intermediate node
|
||||
can now remove (i.e., censor) vote extensions from precommit messages at will.
|
||||
- Further, there is no point anymore in the spec requiring the Application to accept a vote extension
|
||||
passed via `VerifyVoteExtension` to consider a precommit message valid in its entirety. Remember
|
||||
this behavior of `VerifyVoteExtension` is adding a constraint to Tendermint's conditions for
|
||||
liveness.
|
||||
In this situation, it is better and simpler to just drop the vote extension rejected by the
|
||||
Application via `VerifyVoteExtension`, but still consider the precommit vote itself valid as long
|
||||
as its signature verifies.
|
||||
|
||||
- **Solution 1.** *Include vote extensions in the blockchain*.
|
||||
|
||||
Another obvious solution, which has somehow been considered in the past, is to include the vote
|
||||
extensions and their signatures in the blockchain.
|
||||
The blockchain would thus include the extended commit, rather than a regular commit, as the structure
|
||||
to be canonicalized in the next block.
|
||||
With this solution, the current mechanisms implemented both in the blocksync and consensus reactors
|
||||
would still be correct, as all the information a node needs to catch up, and to start proposing when
|
||||
it considers itself as caught-up, can now be recovered from past blocks saved in the block store.
|
||||
|
||||
This solution has two main drawbacks.
|
||||
|
||||
- As the block format must change, upgrading a chain requires a hard fork. Furthermore,
|
||||
all existing light client implementations will stop working until they are upgraded to deal with
|
||||
the new format (e.g., how certain hashes calculated and/or how certain signatures are checked).
|
||||
For instance, let us consider IBC, which relies on light clients. An IBC connection between
|
||||
two chains will be broken if only one chain upgrades.
|
||||
- The extra information (i.e., the vote extensions) that is now kept in the blockchain is not really
|
||||
needed *at every height* for a late node to catch up.
|
||||
- This information is only needed to be able to *propose* at the height the validator considers
|
||||
itself as caught-up. If a validator is indeed late for height *h*, it is useless (although
|
||||
correct) for it to call `PrepareProposal`, or `ExtendVote`, since the block is already decided.
|
||||
- Moreover, some use cases require pretty sizeable vote extensions, which would result in an
|
||||
important waste of space in the blockchain.
|
||||
|
||||
- **Solution 2.** *Skip* propose *step in Tendermint algorithm*.
|
||||
|
||||
This solution consists in modifying the Tendermint algorithm to skip the *send proposal* step in
|
||||
heights where the node does not have the required vote extensions to populate the call to
|
||||
`PrepareProposal`. The main idea behind this is that it should only happen when the validator is late
|
||||
and, therefore, up-to-date validators have already proposed (and decided) for that height.
|
||||
A small variation of this solution is, rather than skipping the *send proposal* step, the validator
|
||||
sends a special *empty* or *bottom* (⊥) proposal to signal other nodes that it is not ready to propose
|
||||
at (any round of) the current height.
|
||||
|
||||
The appeal of this solution is its simplicity. A possible implementation does not need to extend
|
||||
the data structures, or change the current catch-up mechanisms implemented in the blocksync or
|
||||
in the consensus reactor. When we lack the needed information (vote extensions), we simply rely
|
||||
on another correct validator to propose a valid block in other rounds of the current height.
|
||||
|
||||
However, this solution can be attacked by a byzantine node in the network in the following way.
|
||||
Let us consider the following scenario:
|
||||
|
||||
- all validators in *valset<sub>h</sub>* send out precommit messages, with vote extensions,
|
||||
for height *h*, round 0, roughly at the same time,
|
||||
- all those precommit messages contain non-`nil` precommit votes, which vote for block *b*
|
||||
- all those precommit messages sent in height *h*, round 0, and all messages sent in
|
||||
height *h*, round *r > 0* get delayed indefinitely, so,
|
||||
- all validators in *valset<sub>h</sub>* keep waiting for enough precommit
|
||||
messages for height *h*, round 0, needed for deciding in height *h*
|
||||
- an intermediate (malicious) full node *m* manages to receive block *b*, and gather more than
|
||||
*2n<sub>h</sub>/3* precommit messages for height *h*, round 0,
|
||||
- one way or another, the solution should have either (a) a mechanism for a full node to *tell*
|
||||
another full node it is late, or (b) a mechanism for a full node to conclude it is late based
|
||||
on other full nodes' messages; any of these mechanisms should, at the very least,
|
||||
require the late node receiving the decided block and a commit (not necessarily an extended
|
||||
commit) for *h*,
|
||||
- node *m* uses the gathered precommit messages to build a commit for height *h*, round 0,
|
||||
- in order to convince full nodes that they are late, node *m* either (a) *tells* them they
|
||||
are late, or (b) shows them it (i.e. *m*) is ahead, by sending them block *b*, along with the
|
||||
commit for height *h*, round 0,
|
||||
- all full nodes conclude they are late from *m*'s behavior, and use block *b* and the commit for
|
||||
height *h*, round 0, to decide on height *h*, and proceed to height *h+1*.
|
||||
|
||||
At this point, *all* full nodes, including all validators in *valset<sub>h+1</sub>*, have advanced
|
||||
to height *h+1* believing they are late, and so, expecting the *hypothetical* leading majority of
|
||||
validators in *valset<sub>h+1</sub>* to propose for *h+1*. As a result, the blockchain
|
||||
grinds to a halt.
|
||||
A (rather complex) ad-hoc mechanism would need to be carried out by node operators to roll
|
||||
back all validators to the precommit step of height *h*, round *r*, so that they can regenerate
|
||||
vote extensions (remember vote extensions are non-deterministic) and continue execution.
|
||||
|
||||
- **Solution 3.** *Require extended commits to be available at switching time*.
|
||||
|
||||
This one is more involved than all previous solutions, and builds on an idea present in Solution 2:
|
||||
vote extensions are actually not needed for Tendermint to make progress as long as the
|
||||
validator is *certain* it is late.
|
||||
|
||||
We define two modes. The first is denoted *catch-up mode*, and Tendermint only calls
|
||||
`FinalizeBlock` for each height when in this mode. The second is denoted *consensus mode*, in
|
||||
which the validator considers itself up to date and fully participates in consensus and calls
|
||||
`PrepareProposal`/`ProcessProposal`, `ExtendVote`, and `VerifyVoteExtension`, before calling
|
||||
`FinalizeBlock`.
|
||||
|
||||
The catch-up mode does not need vote extension information to make progress, as all it needs is the
|
||||
decided block at each height to call `FinalizeBlock` and keep the state-machine replication making
|
||||
progress. The consensus mode, on the other hand, does need vote extension information when
|
||||
starting every height.
|
||||
|
||||
Validators are in consensus mode by default. When a validator in consensus mode falls behind
|
||||
for whatever reason, e.g. cases (b), (d), (e), (f), (g), or (h) above, we introduce the following
|
||||
key safety property:
|
||||
|
||||
- for every height *h<sub>p</sub>*, a full node *f* in *h<sub>p</sub>* refuses to switch to catch-up
|
||||
mode **until** there exists a height *h'* such that:
|
||||
- *f* has received and (light-client) verified the blocks of
|
||||
all heights *h*, where *h<sub>p</sub> ≤ h ≤ h'*
|
||||
- it has received an extended commit for *h'* and has verified:
|
||||
- the precommit vote signatures in the extended commit
|
||||
- the vote extension signatures in the extended commit: each is signed with the same
|
||||
key as the precommit vote it extends
|
||||
|
||||
If the condition above holds for *h<sub>p</sub>*, namely receiving a valid sequence of blocks in
|
||||
*f*'s future, and an extended commit corresponding to the last block in the sequence, then
|
||||
node *f*:
|
||||
|
||||
- switches to catch-up mode,
|
||||
- applies all blocks between *h<sub>p</sub>* and *h'* (calling `FinalizeBlock` only), and
|
||||
- switches back to consensus mode using the extended commit for *h'* to propose in the rounds of
|
||||
*h' + 1* where it is the proposer.
|
||||
|
||||
This mechanism, together with the invariant it uses, ensures that the node cannot be attacked by
|
||||
being fed a block without extensions to make it believe it is late, in a similar way as explained
|
||||
for Solution 2.
|
||||
|
||||
### Feasibility of the Proposed Solutions
|
||||
|
||||
Solution 0, besides the drawbacks described in the previous section, provides guarantees that are
|
||||
weaker than the rest. The Application does not have the assurance that more than *2n<sub>h</sub>/3* vote
|
||||
extensions will *always* be available when calling `PrepareProposal` at height *h+1*.
|
||||
This level of guarantees is probably not strong enough for vote extensions to be useful for some
|
||||
important use cases that motivated them in the first place, e.g., encrypted mempool transactions.
|
||||
|
||||
Solution 1, while being simple in that the changes needed in the current Tendermint codebase would
|
||||
be rather small, is changing the block format, and would therefore require all blockchains using
|
||||
Tendermint v0.35 or earlier to hard-fork when upgrading to v0.36.
|
||||
|
||||
Since Solution 2 can be attacked, one might prefer Solution 3, even if it is more involved
|
||||
to implement. Further, we must elaborate on how we can turn Solution 3, described in abstract
|
||||
terms in the previous section, into a concrete implementation compatible with the current
|
||||
Tendermint codebase.
|
||||
|
||||
### Current Limitations and Possible Implementations
|
||||
|
||||
The main limitations affecting the current version of Tendermint are the following.
|
||||
|
||||
- The current version of the blocksync reactor does not use the full
|
||||
[light client verification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/light-client/README.md)
|
||||
algorithm to validate blocks coming from other peers.
|
||||
- The code being structured into the blocksync and consensus reactors, only switching from the
|
||||
blocksync reactor to the consensus reactor is supported; switching in the opposite direction is
|
||||
not supported. Alternatively, the consensus reactor could have a mechanism allowing a late node
|
||||
to catch up by skipping calls to `PrepareProposal`/`ProcessProposal`, and
|
||||
`ExtendVote`/`VerifyVoteExtension` and only calling `FinalizeBlock` for each height.
|
||||
Such a mechanism does not exist at the time of writing this RFC.
|
||||
|
||||
The blocksync reactor featuring light client verification is being actively worked on (tentatively
|
||||
for v0.37). So it is best if this RFC does not try to delve into that problem, but just makes sure
|
||||
its outcomes are compatible with that effort.
|
||||
|
||||
In subsection [Cases to Address](#cases-to-address), we concluded that we can focus on
|
||||
solving case (h) in theoretical terms.
|
||||
However, as the current Tendermint version does not yet support switching back to blocksync once a
|
||||
node has switched to consensus, we need to split case (h) into two cases. When a full node needs to
|
||||
catch up...
|
||||
|
||||
- **(h.1)** ... it has not switched yet from the blocksync reactor to the consensus reactor, or
|
||||
|
||||
- **(h.2)** ... it has already switched to the consensus reactor.
|
||||
|
||||
This is important in order to discuss the different possible implementations.
|
||||
|
||||
#### Base Implementation: Persist and Propagate Extended Commit History
|
||||
|
||||
In order to circumvent the fact that we cannot switch from the consensus reactor back to blocksync,
|
||||
rather than just keeping the few most recent extended commits, nodes will need to keep
|
||||
and gossip a backlog of extended commits so that the consensus reactor can still propose and decide
|
||||
in out-of-date heights (even if those proposals will be useless).
|
||||
|
||||
The base implementation - for which an experimental patch exists - consists in the conservative
|
||||
approach of persisting in the block store *all* extended commits for which we have also stored
|
||||
the full block. Currently, when statesync is run at startup, it saves light blocks.
|
||||
This base implementation does not seek
|
||||
to receive or persist extended commits for those light blocks as they would not be of any use.
|
||||
|
||||
Then, we modify the blocksync reactor so that peers *always* send requested full blocks together
|
||||
with the corresponding extended commit in the `BlockResponse` messages. This guarantees that the
|
||||
block store being reconstructed by blocksync has the same information as that of peers that are
|
||||
up to date (at least starting from the latest snapshot applied by statesync before starting blocksync).
|
||||
Thus, blocksync has all the data it requires to switch to the consensus reactor, as long as one of
|
||||
the following exit conditions is met:
|
||||
|
||||
- The node is still at height 0 (where no commit or extended commit is needed)
|
||||
- The node has processed at least 1 block in blocksync
|
||||
|
||||
The second condition is needed in case the node has installed an Application snapshot during statesync.
|
||||
If that is the case, at the time blocksync starts, the block store only has the data statesync has saved:
|
||||
light blocks, and no extended commits.
|
||||
Hence we need to blocksync at least one block from another node, which will be sent with its corresponding extended commit, before we can switch to consensus.
|
||||
|
||||
As a side note, a chain might be started at a height *h<sub>i</sub> > 0*, all other heights
|
||||
*h < h<sub>i</sub>* being non-existent. In this case, the chain is still considered to be at height 0 before
|
||||
block *h<sub>i</sub>* is applied, so the first condition above allows the node to switch to consensus even
|
||||
if blocksync has not processed any block (which is always the case if all nodes are starting from scratch).
|
||||
|
||||
When a validator falls behind while having already switched to the consensus reactor, a peer node can
|
||||
simply retrieve the extended commit for the required height from the block store and reconstruct a set of
|
||||
precommit votes together with their extensions and send them in the form of precommit messages to the
|
||||
validator falling behind, regardless of whether the peer node holds the extended commit because it
|
||||
actually participated in that consensus and thus received the precommit messages, or it received the extended commit via a `BlockResponse` message while running blocksync.
|
||||
|
||||
This solution requires a few changes to the consensus reactor:
|
||||
|
||||
- upon saving the block for a given height in the block store at decision time, save the
|
||||
corresponding extended commit as well
|
||||
- in the catch-up mechanism, when a node realizes that another peer is more than 2 heights
|
||||
behind, it uses the extended commit (rather than the canonical commit as done previously) to
|
||||
reconstruct the precommit votes with their corresponding extensions
|
||||
|
||||
The changes to the blocksync reactor are more substantial:
|
||||
|
||||
- the `BlockResponse` message is extended to include the extended commit of the same height as
|
||||
the block included in the response (just as they are stored in the block store)
|
||||
- structure `bpRequester` is likewise extended to hold the received extended commits coming in
|
||||
`BlockResponse` messages
|
||||
- method `PeekTwoBlocks` is modified to also return the extended commit corresponding to the first block
|
||||
- when successfully verifying a received block, the reactor saves its corresponding extended commit in
|
||||
the block store
|
||||
|
||||
The two main drawbacks of this base implementation are:
|
||||
|
||||
- the increased size taken by the block store, in particular with big extensions
|
||||
- the increased bandwidth taken by the new format of `BlockResponse`
|
||||
|
||||
#### Possible Optimization: Pruning the Extended Commit History
|
||||
|
||||
If we cannot switch from the consensus reactor back to the blocksync reactor we cannot prune the extended commit backlog in the block store without sacrificing the implementation's correctness. The asynchronous
|
||||
nature of our distributed system model allows a process to fall behind an arbitrary number of
|
||||
heights, and thus all extended commits need to be kept *just in case* a node that far behind had
|
||||
previously switched to the consensus reactor.
|
||||
|
||||
However, there is a possibility to optimize the base implementation. Every time we enter a new height,
|
||||
we could prune from the block store all extended commits that are more than *d* heights in the past.
|
||||
Then, we need to handle two new situations, roughly equivalent to cases (h.1) and (h.2) described above.
|
||||
|
||||
- (h.1) A node starts from scratch or recovers after a crash. In this case, we need to modify the
|
||||
blocksync reactor's base implementation.
|
||||
- when receiving a `BlockResponse` message, it MUST accept that the extended commit is set to `nil`,
|
||||
- when sending a `BlockResponse` message, if the block store contains the extended commit for that
|
||||
height, it MUST set it in the message, otherwise it sets it to `nil`,
|
||||
- the exit conditions used for the base implementation are no longer valid; the only reliable exit
|
||||
condition now consists in making sure that the last block processed by blocksync was received with
|
||||
the corresponding commit, and not `nil`; this extended commit will allow the node to switch from
|
||||
the blocksync reactor to the consensus reactor and immediately act as a proposer if required.
|
||||
- (h.2) A node already running the consensus reactor falls behind beyond *d* heights. In principle,
|
||||
the node will be stuck forever as no other node can provide the vote extensions it needs to make
|
||||
progress (they all have pruned the corresponding extended commit).
|
||||
However we can manually have the node crash and recover as a workaround. This effectively converts
|
||||
this case into (h.1).
|
||||
|
||||
### Formalization Work
|
||||
|
||||
A formalization work to show or prove the correctness of the different use cases and solutions
|
||||
presented here (and any other that may be found) needs to be carried out.
|
||||
A question that needs a precise answer is how many extended commits (one?, two?) a node needs
|
||||
to keep in persistent memory when implementing Solution 3 described above without Tendermint's
|
||||
current limitations.
|
||||
Another important invariant we need to prove formally is that the set of vote extensions
|
||||
required to make progress will always be held somewhere in the network.
|
||||
|
||||
## References
|
||||
|
||||
- [ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md)
|
||||
- [ABCI as of v0.35](https://github.com/tendermint/spec/blob/4fb99af/spec/abci/README.md)
|
||||
- [Vote extensions issue](https://github.com/tendermint/tendermint/issues/8174)
|
||||
- [Light client verification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/light-client/README.md)
|
||||
555
docs/rfc/rfc-018-bls-agg-exploration.md
Normal file
555
docs/rfc/rfc-018-bls-agg-exploration.md
Normal file
@@ -0,0 +1,555 @@
|
||||
# RFC 018: BLS Signature Aggregation Exploration
|
||||
|
||||
## Changelog
|
||||
|
||||
- 01-April-2022: Initial draft (@williambanfield).
|
||||
- 15-April-2022: Draft complete (@williambanfield).
|
||||
|
||||
## Abstract
|
||||
|
||||
## Background
|
||||
|
||||
### Glossary
|
||||
|
||||
The terms that are attached to these types of cryptographic signing systems
|
||||
become confusing quickly. Different sources appear to use slightly different
|
||||
meanings of each term and this can certainly add to the confusion. Below is
|
||||
a brief glossary that may be helpful in understanding the discussion that follows.
|
||||
|
||||
* **Short Signature**: A signature that does not vary in length with the
|
||||
number of signers.
|
||||
* **Multi-Signature**: A signature generated over a single message
|
||||
where, given the message and signature, a verifier is able to determine that
|
||||
all parties signed the message. May be short or may vary with the number of signers.
|
||||
* **Aggregated Signature**: A _short_ signature generated over messages with
|
||||
possibly different content where, given the messages and signature, a verifier
|
||||
should be able to determine that all the parties signed the designated messages.
|
||||
* **Threshold Signature**: A _short_ signature generated from multiple signers
|
||||
where, given a message and the signature, a verifier is able to determine that
|
||||
a large enough share of the parties signed the message. The identities of the
|
||||
parties that contributed to the signature are not revealed.
|
||||
* **BLS Signature**: An elliptic-curve pairing-based signature system that
|
||||
has some nice properties for short multi-signatures. May stand for
|
||||
*Boneh-Lynn-Shacham* or *Barreto-Lynn-Scott* depending on the context. A
|
||||
BLS signature is a type of signature scheme that is distinct from other forms
|
||||
of elliptic-curve signatures such as ECDSA and EdDSA.
|
||||
* **Interactive**: Cryptographic scheme where parties need to perform one or
|
||||
more request-response cycles to produce the cryptographic material. For
|
||||
example, an interactive signature scheme may require the signer and the
|
||||
verifier to cooperate to create and/or verify the signature, rather than a
|
||||
signature being created ahead of time.
|
||||
* **Non-interactive**: Cryptographic scheme where parties do not need to
|
||||
perform any request-response cycles to produce the cryptographic material.
|
||||
|
||||
### Brief notes on pairing-based elliptic-curve cryptography
|
||||
|
||||
Pairing-based elliptic-curve cryptography is quite complex and relies on several
|
||||
types of high-level math. Cryptography, in general, relies on being able to find
|
||||
problems with an asymmetry between the difficulty of calculating the solution
|
||||
and verifying that a given solution is correct.
|
||||
|
||||
Pairing-based cryptography works by operating on mathematical functions that
|
||||
satisfy the property of **bilinear mapping**. This property is satisfied for
|
||||
functions `e` with values `P`, `Q`, `R` and `S` where `e(P, Q + R) = e(P, Q) * e(P, R)`
|
||||
and `e(P + S, Q) = e(P, Q) * e(S, Q)`. The most familiar example of this is
|
||||
exponentiation. Written in common notation, `g^P*(Q+R) = g^(P*Q) * g^(P*R)` for
|
||||
some value `g`.
|
||||
|
||||
Pairing-based elliptic-curve cryptography creates a bilinear mapping using
|
||||
elliptic curves over a finite field. With some original curve, you can define two groups,
|
||||
`G1` and `G2` which are points of the original curve _modulo_ different values.
|
||||
Finally, you define a third group `Gt`, where points from `G1` and `G2` satisfy
|
||||
the property of bilinearity with `Gt`. In this scheme, the function `e` takes
|
||||
as inputs points in `G1` and `G2` and outputs values in `Gt`. Succinctly, given
|
||||
some point `P` in `G1` and some point `Q` in `G2`, `e(P, Q) = C` where `C` is in `Gt`.
|
||||
You can efficiently compute the mapping of points in `G1` and `G2` into `Gt`,
|
||||
but you cannot efficiently determine what points were summed and paired to
|
||||
produce the value in `Gt`.
|
||||
|
||||
Functions are then defined to map digital signatures, messages, and keys into
|
||||
and out of points of `G1` or `G2` and signature verification is the process
|
||||
of calculating if a set of values representing a message, public key, and digital
|
||||
signature produce the same value in `Gt` through `e`.
|
||||
|
||||
Signatures can be created as either points in `G1` with public keys being
|
||||
created as points in `G2` or vice versa. For the case of BLS12-381, the popular
|
||||
curve used, points in `G1` are represented with 48 bytes and points in `G2` are
|
||||
represented with 96 bytes. It is up to the implementer of the cryptosystem to
|
||||
decide which should be larger, the public keys or the signatures.
|
||||
|
||||
BLS signatures rely on pairing-based elliptic-curve cryptography to produce
|
||||
various types of signatures. For a more in-depth but still high level discussion
|
||||
of pairing-based elliptic-curve cryptography, see Vitalik Buterin's post on
|
||||
[Exploring Elliptic Curve Pairings][vitalik-pairing-post]. For much more in
|
||||
depth discussion, see the specific paper on BLS12-381, [Short signatures from
|
||||
the Weil Pairing][bls-weil-pairing] and
|
||||
[Compact Multi-Signatures for Smaller Blockchains][multi-signatures-smaller-blockchains].
|
||||
|
||||
### Adoption
|
||||
|
||||
BLS signatures have already gained traction within several popular projects.
|
||||
|
||||
* Algorand is working on an implementation.
|
||||
* [Zcash][zcash-adoption] has adopted BLS12-381 into the protocol.
|
||||
* [Ethereum 2.0][eth-2-adoption] has adopted BLS12-381 into the protocol.
|
||||
* [Chia Network][chia-adoption] has adopted BLS for signing blocks.
|
||||
* [Ostracon][line-ostracon-pr], a fork of Tendermint has adopted BLS for signing blocks.
|
||||
|
||||
### What systems may be affected by adding aggregated signatures?
|
||||
|
||||
#### Gossip
|
||||
|
||||
Gossip could be updated to aggregate vote signatures during a consensus round.
|
||||
This appears to be of frankly little utility. Creating an aggregated signature
|
||||
incurs overhead, so frequently re-aggregating may incur a significant
|
||||
overhead. How costly this is is still subject to further investigation and
|
||||
performance testing.
|
||||
|
||||
Even if vote signatures were aggregated before gossip, each validator would still
|
||||
need to receive and verify vote extension data from each (individual) peer validator in
|
||||
order for consensus to proceed. That displaces any advantage gained by aggregating signatures across the vote message in the presence of vote extensions.
|
||||
|
||||
#### Block Creation
|
||||
|
||||
When creating a block, the proposer may create a small set of short
|
||||
multi-signatures and attach these to the block instead of including one
|
||||
signature per validator.
|
||||
|
||||
#### Block Verification
|
||||
|
||||
Currently, we verify each validator signature using the public key associated
|
||||
with that validator. With signature aggregation, verification of blocks would
|
||||
not verify many signatures individually, but would instead check the (single)
|
||||
multi-signature using the public keys stored by the validator. This would also
|
||||
require a mechanism for indicating which validators are included in the
|
||||
aggregated signature.
|
||||
|
||||
#### IBC Relaying
|
||||
|
||||
IBC would no longer need to transmit a large set of signatures when
|
||||
updating state. These state updates do not happen for every IBC packet, only
|
||||
when changing an IBC light client's view of the counterparty chain's state.
|
||||
General [IBC packets][ibc-packet] only contain enough information to correctly
|
||||
route the data to the counterparty chain.
|
||||
|
||||
IBC does persist commit signatures to the chain in these `MsgUpdateClient`
|
||||
message when updating state. This message would no longer need the full set
|
||||
of unique signatures and would instead only need one signature for all of the
|
||||
data in the header.
|
||||
|
||||
Adding BLS signatures would create a new signature type that must be
|
||||
understood by the IBC module and by the relayers. For some operations, such
|
||||
as state updates, the set of data written into the chain and received by the
|
||||
IBC module could be slightly smaller.
|
||||
|
||||
## Discussion
|
||||
|
||||
### What are the proposed benefits to aggregated signatures?
|
||||
|
||||
#### Reduce Block Size
|
||||
|
||||
At the moment, a commit contains a 64-byte (512-bit) signature for each validator
|
||||
that voted for the block. For the Cosmos Hub, which has 175 validators in the
|
||||
active set, this amounts to about 11 KiB per block. That gives an upper bound of
|
||||
around 113 GiB over the lifetime of the chain's 10.12M blocks. (Note, the Hub has
|
||||
increased the number of validators in the active set over time so the total
|
||||
signature size over the history of the chain is likely somewhat less than that).
|
||||
|
||||
Signature aggregation would only produce two signatures for the entire block.
|
||||
One for the yeas and one for the nays. Each BLS aggregated signature is 48
|
||||
bytes, per the [IETF standard of BLS signatures][bls-ietf-ecdsa-compare].
|
||||
Over the lifetime of the same Cosmos Hub chain, that would amount to about 1
|
||||
GB, a savings of 112 GB. While that is a large factor of reduction it's worth
|
||||
bearing in mind that, at [GCP's cost][gcp-storage-pricing] of $.026 USD per GB,
|
||||
that is a total savings of around $2.50 per month.
|
||||
|
||||
#### Reduce Signature Creation and Verification Time
|
||||
|
||||
From the [IETF draft standard on BLS Signatures][bls-ietf], BLS signatures can be
|
||||
created in 370 microseconds and verified in 2700 microseconds. Our current
|
||||
[Ed25519 implementation][voi-ed25519-perf] was benchmarked locally to take
|
||||
13.9 microseconds to produce a signature and 2.03 milliseconds to batch verify
|
||||
128 signatures, which is slightly fewer than the 175 in the Hub. blst, a popular
|
||||
implementation of BLS signature aggregation was benchmarked to perform verification
|
||||
on 100 signatures in 1.5 milliseconds [when run locally][blst-verify-bench]
|
||||
on an 8 thread machine and pre-aggregated public keys. It is worth noting that
|
||||
the `ed25519` library verification time grew steadily with the number of signatures,
|
||||
whereas the bls library verification time remains constant. This is because the
|
||||
number of operations used to verify a signature does not grow at all with the
|
||||
number of signatures included in the aggregate signature (as long as the signers
|
||||
signed over the same message data as is the case in Tendermint).
|
||||
|
||||
It is worth noting that this would also represent a _degradation_ in signature
|
||||
verification time for chains with small validator sets. When batch verifying
|
||||
only 32 signatures, our ed25519 library takes .57 milliseconds, whereas BLS
|
||||
would still require the same 1.5 milliseconds.
|
||||
|
||||
For massive validator sets, blst dominates, taking the same 1.5 milliseconds to
|
||||
check an aggregated signature from 1024 validators versus our ed25519 library's
|
||||
13.066 milliseconds to batch verify a set of that size.
|
||||
|
||||
#### Reduce Light-Client Verification Time
|
||||
|
||||
The light client aims to be a faster and lighter-weight way to verify that a
|
||||
block was voted on by a Tendermint network. The light client fetches
|
||||
Tendermint block headers and commit signatures, performing public key
|
||||
verification to ensure that the associated validator set signed the block.
|
||||
Reducing the size of the commit signature would allow the light client to fetch
|
||||
block data more quickly.
|
||||
|
||||
Additionally, the faster signature verification times of BLS signatures mean
|
||||
that light client verification would proceed more quickly.
|
||||
|
||||
However, verification of an aggregated signature is all-or-nothing. The verifier
|
||||
cannot check that some singular signer had a signature included in the block.
|
||||
Instead, the verifier must use all public keys to check if some signature
|
||||
was included. This does mean that any light client implementation must always
|
||||
be able to fetch all public keys for any height instead of potentially being
|
||||
able to check if some singular validator's key signed the block.
|
||||
|
||||
#### Reduce Gossip Bandwidth
|
||||
|
||||
##### Vote Gossip
|
||||
|
||||
It is possible to aggregate subsets of signatures during voting, so that the
|
||||
network need not gossip all *n* validator signatures to all *n* validators.
|
||||
Theoretically, subsets of the signatures could be aggregated during consensus
|
||||
and vote messages could carry those aggregated signatures. Implementing this
|
||||
would certainly increase the complexity of the gossip layer but could possibly
|
||||
reduce the total number of signatures required to be verified by each validator.
|
||||
|
||||
##### Block Gossip
|
||||
|
||||
A reduction in the block size as a result of signature aggregation would
|
||||
naturally lead to a reduction in the bandwidth required to gossip a block.
|
||||
Each validator would only send and receive the smaller aggregated signatures
|
||||
instead of the full list of multi-signatures as we have them now.
|
||||
|
||||
### What are the drawbacks to aggregated signatures?
|
||||
|
||||
#### Heterogeneous key types cannot be aggregated
|
||||
|
||||
Aggregation requires a specific signature algorithm, and our legacy signing schemes
|
||||
cannot be aggregated. In practice, this means that aggregated signatures could
|
||||
be created for a subset of validators using BLS signatures, and validators
|
||||
with other key types (such as Ed25519) would still have to be separately
|
||||
propagated in blocks and votes.
|
||||
|
||||
#### Many HSMs do not support aggregated signatures
|
||||
|
||||
**Hardware Signing Modules** (HSM) are a popular way to manage private keys.
|
||||
They provide additional security for key management and should be used when
|
||||
possible for storing highly sensitive private key material.
|
||||
|
||||
Below is a list of popular HSMs along with their support for BLS signatures.
|
||||
|
||||
* YubiKey
|
||||
* [No support][yubi-key-bls-support]
|
||||
* Amazon Cloud HSM
|
||||
* [No support][cloud-hsm-support]
|
||||
* Ledger
|
||||
* [Lists support for the BLS12-381 curve][ledger-bls-announce]
|
||||
|
||||
I cannot find support listed for Google Cloud, although perhaps it exists.
|
||||
|
||||
## Feasibility of implementation
|
||||
|
||||
This section outlines the various hurdles that would exist to implementing BLS
|
||||
signature aggregation into Tendermint. It aims to demonstrate that we _could_
|
||||
implement BLS signatures but that it would incur risk and require breaking changes for a
|
||||
reasonably unclear benefit.
|
||||
|
||||
### Can aggregated signatures be added as soft-upgrades?
|
||||
|
||||
In my estimation, yes. With the implementation of proposer-based timestamps,
|
||||
all validators now produce signatures on only one of two messages:
|
||||
|
||||
1. A [CanonicalVote][canonical-vote-proto] where the BlockID is the hash of the block or
|
||||
2. A `CanonicalVote` where the `BlockID` is nil.
|
||||
|
||||
The block structure can be updated to perform hashing and validation in a new
|
||||
way as a soft upgrade. This would look like adding a new section to the [Block.Commit][commit-proto] structure
|
||||
alongside the current `Commit.Signatures` field. This new field, tentatively named
|
||||
`AggregatedSignature` would contain the following structure:
|
||||
|
||||
```proto
|
||||
message AggregatedSignature {
|
||||
// yeas is a BitArray representing which validators in the active validator
|
||||
// set issued a 'yea' vote for the block.
|
||||
tendermint.libs.bits.BitArray yeas = 1;
|
||||
|
||||
// absent is a BitArray representing which validators in the active
|
||||
// validator set did not issue votes for the block.
|
||||
tendermint.libs.bits.BitArray absent = 2;
|
||||
|
||||
// yea_signature is an aggregated signature produced from all of the vote
|
||||
// signatures for the block.
|
||||
repeated bytes yea_signature = 3;
|
||||
|
||||
// nay_signature is an aggregated signature produced from all of the vote
|
||||
// signatures from votes for 'nil' for this block.
|
||||
// nay_signature should be made from all of the validators that were both not
|
||||
// in the 'yeas' BitArray and not in the 'absent' BitArray.
|
||||
repeated bytes nay_signature = 4;
|
||||
}
|
||||
```
|
||||
|
||||
Adding this new field as a soft upgrade would mean hashing this data structure
|
||||
into the blockID along with the old `Commit.Signatures` when both are present
|
||||
as well as ensuring that the voting power represented in the new
|
||||
`AggregatedSignature` and `Signatures` field was enough to commit the block
|
||||
during block validation. One can certainly imagine other possible schemes for
|
||||
implementing this but the above should serve as a simple enough proof of concept.
|
||||
|
||||
### Implementing vote-time and commit-time signature aggregation separately
|
||||
|
||||
Implementing aggregated BLS signatures as part of the block structure can easily be
|
||||
achieved without implementing any 'vote-time' signature aggregation.
|
||||
The block proposer would gather all of the votes, complete with signatures,
|
||||
as it does now, and produce a set of aggregate signatures from all of the
|
||||
individual vote signatures.
|
||||
|
||||
Implementing 'vote-time' signature aggregation cannot be achieved without
|
||||
also implementing commit-time signature aggregation. This is because such
|
||||
signatures cannot be dis-aggregated into their constituent pieces. Therefore,
|
||||
in order to implement 'vote-time' signature aggregation, we would need to
|
||||
either first implement 'commit-time' signature aggregation, or implement both
|
||||
'vote-time' signature aggregation while also updating the block creation and
|
||||
verification protocols to allow for aggregated signatures.
|
||||
|
||||
### Updating IBC clients
|
||||
|
||||
In order for IBC clients to function, they must be able to perform light-client
|
||||
verification of blocks on counterparty chains. Because BLS signatures are not
|
||||
currently part of light-clients, chains that transmit messages over IBC
|
||||
cannot update to using BLS signatures without their counterparties first
|
||||
being upgraded to parse and verify BLS. If chains upgrade without their
|
||||
counterparties first updating, they will lose the ability to interoperate with
|
||||
non-updated chains.
|
||||
|
||||
### New attack surfaces
|
||||
|
||||
BLS signatures and signature aggregation comes with a new set of attack surfaces.
|
||||
Additionally, it's not clear that all possible major attacks are currently known
|
||||
on the BLS aggregation schemes since new ones have been discovered since the IETF
|
||||
draft standard was written. The known attacks are manageable and are listed below.
|
||||
Our implementation would need to protect against these, but this does not appear
|
||||
to present a significant hurdle to implementation.
|
||||
|
||||
#### Rogue key attack prevention
|
||||
|
||||
Generating an aggregated signature requires guarding against what is called
|
||||
a [rogue key attack][bls-ietf-terms]. A rogue key attack is one in which a
|
||||
malicious actor can craft an _aggregate_ key that can produce signatures that
|
||||
appear to include a signature from a private key that the malicious actor
|
||||
does not actually know. In Tendermint terms, this would look like a Validator
|
||||
producing a vote signed by both itself and some other validator where the other
|
||||
validator did not actually produce the vote itself.
|
||||
|
||||
The main mechanisms for preventing this require that each entity prove that it
|
||||
can sign data with just their private key. The options involve either
|
||||
ensuring that each entity sign a _different_ message when producing every
|
||||
signature _or_ producing a [proof of possession][bls-ietf-pop] (PoP) when announcing
|
||||
their key to the network.
|
||||
|
||||
A PoP is a message that demonstrates ownership of a private
|
||||
key. A simple scheme for PoP is one where the entity announcing
|
||||
its new public key to the network includes a digital signature over the bytes
|
||||
of the public key generated using the associated private key. Everyone receiving
|
||||
the public key and associated proof-of-possession can easily verify the
|
||||
signature and be sure the entity owns the private key.
|
||||
|
||||
This PoP scheme suits the Tendermint use case quite well since
|
||||
validator keys change infrequently so the associated PoPs would not be onerous
|
||||
to produce, verify, and store. Using this scheme allows signature verification
|
||||
to proceed more quickly, since all signatures are over identical data and
|
||||
can therefore be checked using an aggregated public key instead of one at a
|
||||
time, public key by public key.
|
||||
|
||||
#### Summing Zero Attacks
|
||||
|
||||
[Summing zero attacks][summing-zero-paper] are attacks that rely on using the '0' point of an
|
||||
elliptic curve. For BLS signatures, if the point 0 is chosen as the private
|
||||
key, then the 0 point will also always be the public key and all signatures
|
||||
produced by the key will also be the 0 point. This is easy enough to
|
||||
detect when verifying each signature individually.
|
||||
|
||||
However, because BLS signature aggregation creates an aggregated signature and
|
||||
an aggregated public key, a set of colluding signers can create a pair or set
|
||||
of signatures that are non-zero but which aggregate ("sum") to 0. The signatures that sum zero along with the
|
||||
summed public key of the colluding signers will verify any message. This would
|
||||
allow the colluding signers to sign any block or message with the same signature.
|
||||
This would be reasonably easy to detect and create evidence for because, in
|
||||
all other cases, the same signature should not verify more than one message. It's
|
||||
not exactly clear how such an attack would advantage the colluding validators
|
||||
because the normal mechanisms of evidence gathering would still detect the
|
||||
double signing, regardless of the signatures on both blocks being identical.
|
||||
|
||||
### Backwards Compatibility
|
||||
|
||||
Backwards compatibility is an important consideration for signature verification.
|
||||
Specifically, it is important to consider whether chains using current versions
|
||||
of IBC would be able to interact with chains adopting BLS.
|
||||
|
||||
Because the `Block` shared by IBC and Tendermint is produced and parsed using
|
||||
protobuf, new structures can be added to the Block without breaking the
|
||||
ability of legacy users to parse the new structure. Breaking changes between
|
||||
current users of IBC and new Tendermint blocks only occur if data that is
|
||||
relied upon by the current users is no longer included in the current fields.
|
||||
|
||||
For the case of BLS aggregated signatures, a new `AggregatedSignature` field
|
||||
can therefore be added to the `Commit` field without breaking current users.
|
||||
Current users will be broken when counterparty chains upgrade to the new version
|
||||
and _begin using_ BLS signatures. Once counterparty chains begin using BLS
|
||||
signatures, the BlockID hashes will include hashes of the `AggregatedSignature`
|
||||
data structure that the legacy users will not be able to compute. Additionally,
|
||||
the legacy software will not be able to parse and verify the signatures to
|
||||
ensure that a supermajority of validators from the counterparty chain signed
|
||||
the block.
|
||||
|
||||
### Library Support
|
||||
|
||||
Libraries for BLS signature creation are limited in number, although active
|
||||
development appears to be ongoing. Cryptographic algorithms are difficult to
|
||||
implement correctly and correctness issues are extremely serious and dangerous.
|
||||
No further exploration of BLS should be undertaken without strong assurance of
|
||||
a well-tested library with continuing support for creating and verifying BLS
|
||||
signatures.
|
||||
|
||||
At the moment, there is one candidate, `blst`, that appears to be the most
|
||||
mature and well vetted. While this library is undergoing continuing auditing
|
||||
and is supported by funds from the Ethereum foundation, adopting a new cryptographic
|
||||
library presents some serious risks. Namely, if the support for the library were
|
||||
to be discontinued, Tendermint may become saddled with the requirement of supporting
|
||||
a very complex piece of software or force a massive ecosystem-wide migration away
|
||||
from BLS signatures.
|
||||
|
||||
This is one of the more serious reasons to avoid adopting BLS signatures at this
|
||||
time. There is no gold standard library. Some projects look promising, but no
|
||||
project has been formally verified with a long term promise of being supported
|
||||
well into the future.
|
||||
|
||||
#### Go Standard Library
|
||||
|
||||
The Go Standard library has no implementation of BLS signatures.
|
||||
|
||||
#### BLST
|
||||
|
||||
[blst][blst], or 'blast' is an implementation of BLS signatures written in C
|
||||
that provides bindings into Go as part of the repository. This library is
|
||||
actively undergoing formal verification by Galois and previously received an
|
||||
initial audit by NCC Group, a security consulting firm.
|
||||
|
||||
`blst` is [targeted for use in prysm][prysm-blst], the Go implementation of Ethereum 2.0.
|
||||
|
||||
#### Gnark-Crypto
|
||||
|
||||
[Gnark-Crypto][gnark] is a Go-native implementation of elliptic-curve pairing-based
|
||||
cryptography. It is not audited and is documented as 'as-is', although
|
||||
development appears to be active so formal verification may be forthcoming.
|
||||
|
||||
#### CIRCL
|
||||
|
||||
[CIRCL][circl] is a Go-native implementation of several cryptographic primitives,
|
||||
bls12-381 among them. The library is written and maintained by Cloudflare and
|
||||
appears to receive frequent contributions. However, it lists itself as experimental
|
||||
and urges users to take caution before using it in production.
|
||||
|
||||
### Added complexity to light client verification
|
||||
|
||||
Implementing BLS signature aggregation in Tendermint would pose issues for the
|
||||
light client. The light client currently validates a subset of the signatures
|
||||
on a block when performing the verification algorithm. This is no longer possible
|
||||
with an aggregated signature. Aggregated signature verification is all-or-nothing.
|
||||
The light client could no longer check that a subset of validators from some
|
||||
set of validators is represented in the signature. Instead, it would need to create
|
||||
a new aggregated key with all the stated signers for each height it verified where
|
||||
the validator set changed.
|
||||
|
||||
This means that the speed advantages gained by using BLS cannot be fully realized
|
||||
by the light client since the client needs to perform the expensive operation
|
||||
of re-aggregating the public key. Aggregation is _not_ constant time in the
|
||||
number of keys and instead grows linearly. When [benchmarked locally][blst-verify-bench-agg],
|
||||
blst public key aggregation of 128 keys took 2.43 milliseconds. This, along with
|
||||
the 1.5 milliseconds to verify a signature would raise light client signature
|
||||
verification time to 3.9 milliseconds, a time above the previously mentioned
|
||||
batch verification time using our ed25519 library of 2.0 milliseconds.
|
||||
|
||||
Schemes to cache aggregated subsets of keys could certainly cut this time down at the
|
||||
cost of adding complexity to the light client.
|
||||
|
||||
### Added complexity to evidence handling
|
||||
|
||||
Implementing BLS signature aggregation in Tendermint would add complexity to
|
||||
the evidence handling within Tendermint. Currently, the light client can submit
|
||||
evidence of a fork attempt to the chain. This evidence consists of the set of
|
||||
validators that double-signed, including their public keys, with the conflicting
|
||||
block.
|
||||
|
||||
We can quickly check that the listed validators double signed by verifying
|
||||
that each of their signatures are in the submitted conflicting block. A BLS
|
||||
signature scheme would change this by requiring the light client to submit
|
||||
the public keys of all of the validators that signed the conflicting block so
|
||||
that the aggregated signature may be checked against the full signature set.
|
||||
Again, aggregated signature verification is all-or-nothing, so without all of
|
||||
the public keys, we cannot verify the signature at all. These keys would be
|
||||
retrievable. Any party that wanted to create a fork would want to convince a
|
||||
network that its fork is legitimate, so it would need to gossip the public keys.
|
||||
This does not hamper the feasibility of implementing BLS signature aggregation
|
||||
into Tendermint, but does represent yet another piece of added complexity to
|
||||
the associated protocols.
|
||||
|
||||
## Open Questions
|
||||
|
||||
* *Q*: Can you aggregate Ed25519 signatures in Tendermint?
|
||||
* There is a suggested scheme in github issue [7892][suggested-ed25519-agg],
|
||||
but additional rigor would be required to fully verify its correctness.
|
||||
|
||||
## Current Consideration
|
||||
|
||||
Adopting a signature aggregation scheme presents some serious risks and costs
|
||||
to the Tendermint project. It requires multiple backwards-incompatible changes
|
||||
to the code, namely a change in the structure of the block and a new backwards-incompatible
|
||||
signature and key type. It risks adding a new signature type for which new attack
|
||||
types are still being discovered _and_ for which no industry standard, battle-tested
|
||||
library yet exists.
|
||||
|
||||
The gains boasted by this new signing scheme are modest: Verification time is
|
||||
marginally faster and block sizes shrink by a few kilobytes. These are relatively
|
||||
minor gains in exchange for the complexity of the change and the listed risks of the technology.
|
||||
We should take a wait-and-see approach to BLS signature aggregation, monitoring
|
||||
the up-and-coming projects and consider implementing it as the libraries and
|
||||
standards develop.
|
||||
|
||||
### References
|
||||
|
||||
[line-ostracon-repo]: https://github.com/line/ostracon
|
||||
[line-ostracon-pr]: https://github.com/line/ostracon/pull/117
|
||||
[mit-BLS-lecture]: https://youtu.be/BFwc2XA8rSk?t=2521
|
||||
[gcp-storage-pricing]: https://cloud.google.com/storage/pricing#north-america_2
|
||||
[yubi-key-bls-support]: https://github.com/Yubico/yubihsm-shell/issues/66
|
||||
[cloud-hsm-support]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/pkcs11-key-types.html
|
||||
[bls-ietf]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04
|
||||
[bls-ietf-terms]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.3
|
||||
[bls-ietf-pop]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-3.3
|
||||
[multi-signatures-smaller-blockchains]: https://eprint.iacr.org/2018/483.pdf
|
||||
[ibc-tendermint]: https://github.com/cosmos/ibc/tree/master/spec/client/ics-007-tendermint-client
|
||||
[zcash-adoption]: https://github.com/zcash/zcash/issues/2502
|
||||
[chia-adoption]: https://github.com/Chia-Network/chia-blockchain#chia-blockchain
|
||||
[bls-ietf-ecdsa-compare]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.1
|
||||
[voi-ed25519-perf]: https://github.com/williambanfield/curve25519-voi/blob/benchmark/primitives/ed25519/PERFORMANCE.txt#L79
|
||||
[blst-verify-bench]: https://github.com/williambanfield/blst/blame/bench/bindings/go/PERFORMANCE.md#L9
|
||||
[blst-verify-bench-agg]: https://github.com/williambanfield/blst/blame/bench/bindings/go/PERFORMANCE.md#L23
|
||||
[vitalik-pairing-post]: https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627
|
||||
[ledger-bls-announce]: https://www.ledger.com/first-ever-firmware-update-coming-to-the-ledger-nano-x
|
||||
[commit-proto]: https://github.com/tendermint/tendermint/blob/be7cb50bb3432ee652f88a443e8ee7b8ef7122bc/proto/tendermint/types/types.proto#L121
|
||||
[canonical-vote-proto]: https://github.com/tendermint/tendermint/blob/be7cb50bb3432ee652f88a443e8ee7b8ef7122bc/spec/core/encoding.md#L283
|
||||
[blst]: https://github.com/supranational/blst
|
||||
[prysm-blst]: https://github.com/prysmaticlabs/prysm/blob/develop/go.mod#L75
|
||||
[gnark]: https://github.com/ConsenSys/gnark-crypto/
|
||||
[eth-2-adoption]: https://notes.ethereum.org/@GW1ZUbNKR5iRjjKYx6_dJQ/Skxf3tNcg_
|
||||
[bls-weil-pairing]: https://www.iacr.org/archive/asiacrypt2001/22480516.pdf
|
||||
[summing-zero-paper]: https://eprint.iacr.org/2021/323.pdf
|
||||
[circl]: https://github.com/cloudflare/circl
|
||||
[light-client-evidence]: https://github.com/tendermint/tendermint/blob/a6fd1fe20116d4b1f7e819cded81cece8e5c1ac7/types/evidence.go#L245
|
||||
[suggested-ed25519-agg]: https://github.com/tendermint/tendermint/issues/7892
|
||||
68
go.mod
68
go.mod
@@ -40,18 +40,26 @@ require (
|
||||
require (
|
||||
github.com/creachadair/atomicfile v0.2.6
|
||||
github.com/creachadair/taskgroup v0.3.2
|
||||
github.com/golangci/golangci-lint v1.45.2
|
||||
github.com/golangci/golangci-lint v1.46.0
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/vektra/mockery/v2 v2.12.1
|
||||
github.com/vektra/mockery/v2 v2.12.2
|
||||
gotest.tools v2.2.0+incompatible
|
||||
)
|
||||
|
||||
require github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
|
||||
require (
|
||||
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect
|
||||
github.com/firefart/nonamedreturns v1.0.1 // indirect
|
||||
github.com/lufeee/execinquery v1.0.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.0 // indirect
|
||||
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
|
||||
github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
4d63.com/gochecknoglobals v0.1.0 // indirect
|
||||
github.com/Antonboom/errname v0.1.5 // indirect
|
||||
github.com/Antonboom/nilnil v0.1.0 // indirect
|
||||
github.com/Antonboom/errname v0.1.6 // indirect
|
||||
github.com/Antonboom/nilnil v0.1.1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/DataDog/zstd v1.4.1 // indirect
|
||||
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
|
||||
@@ -64,18 +72,18 @@ require (
|
||||
github.com/ashanbrown/makezero v1.1.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bkielbasa/cyclop v1.2.0 // indirect
|
||||
github.com/blizzy78/varnamelen v0.6.1 // indirect
|
||||
github.com/blizzy78/varnamelen v0.8.0 // indirect
|
||||
github.com/bombsimon/wsl/v3 v3.3.0 // indirect
|
||||
github.com/breml/bidichk v0.2.2 // indirect
|
||||
github.com/breml/errchkjson v0.2.3 // indirect
|
||||
github.com/breml/bidichk v0.2.3 // indirect
|
||||
github.com/breml/errchkjson v0.3.0 // indirect
|
||||
github.com/butuzov/ireturn v0.1.1 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/charithe/durationcheck v0.0.9 // indirect
|
||||
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect
|
||||
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect
|
||||
github.com/containerd/continuity v0.2.1 // indirect
|
||||
github.com/creachadair/tomledit v0.0.19
|
||||
github.com/creachadair/tomledit v0.0.22
|
||||
github.com/daixiang0/gci v0.3.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/denis-tingaikin/go-header v0.4.3 // indirect
|
||||
@@ -92,9 +100,9 @@ require (
|
||||
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fatih/structtag v1.2.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/fzipp/gocyclo v0.4.0 // indirect
|
||||
github.com/go-critic/go-critic v0.6.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/fzipp/gocyclo v0.5.1 // indirect
|
||||
github.com/go-critic/go-critic v0.6.3 // indirect
|
||||
github.com/go-toolsmith/astcast v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astcopy v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astequal v1.0.1 // indirect
|
||||
@@ -108,7 +116,7 @@ require (
|
||||
github.com/golang/snappy v0.0.3 // indirect
|
||||
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
|
||||
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
|
||||
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect
|
||||
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
|
||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
|
||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
|
||||
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
|
||||
@@ -134,10 +142,10 @@ require (
|
||||
github.com/julz/importas v0.1.0 // indirect
|
||||
github.com/kisielk/errcheck v1.6.0 // indirect
|
||||
github.com/kisielk/gotool v1.0.0 // indirect
|
||||
github.com/kulti/thelper v0.5.1 // indirect
|
||||
github.com/kulti/thelper v0.6.2 // indirect
|
||||
github.com/kunwardeep/paralleltest v1.0.3 // indirect
|
||||
github.com/kyoh86/exportloopref v0.1.8 // indirect
|
||||
github.com/ldez/gomoddirectives v0.2.2 // indirect
|
||||
github.com/ldez/gomoddirectives v0.2.3 // indirect
|
||||
github.com/ldez/tagliatelle v0.3.1 // indirect
|
||||
github.com/leonklingele/grouper v1.1.0 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
@@ -148,19 +156,19 @@ require (
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mbilski/exhaustivestruct v1.2.0 // indirect
|
||||
github.com/mgechev/revive v1.1.4 // indirect
|
||||
github.com/mgechev/revive v1.2.1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moricho/tparallel v0.2.1 // indirect
|
||||
github.com/nakabonne/nestif v0.3.1 // indirect
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
|
||||
github.com/nishanths/exhaustive v0.7.11 // indirect
|
||||
github.com/nishanths/predeclared v0.2.1 // indirect
|
||||
github.com/nishanths/predeclared v0.2.2 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.0.3 // indirect
|
||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
@@ -168,17 +176,17 @@ require (
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/quasilyte/go-ruleguard v0.3.15 // indirect
|
||||
github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 // indirect
|
||||
github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect
|
||||
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect
|
||||
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
|
||||
github.com/ryancurrah/gomodguard v1.2.3 // indirect
|
||||
github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
|
||||
github.com/securego/gosec/v2 v2.10.0 // indirect
|
||||
github.com/securego/gosec/v2 v2.11.0 // indirect
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/sivchari/containedctx v1.0.2 // indirect
|
||||
github.com/sivchari/tenv v1.4.7 // indirect
|
||||
github.com/sivchari/tenv v1.5.0 // indirect
|
||||
github.com/sonatard/noctx v0.0.1 // indirect
|
||||
github.com/sourcegraph/go-diff v0.6.1 // indirect
|
||||
github.com/spf13/afero v1.8.2 // indirect
|
||||
@@ -194,28 +202,28 @@ require (
|
||||
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
|
||||
github.com/tetafro/godot v1.4.11 // indirect
|
||||
github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect
|
||||
github.com/tomarrell/wrapcheck/v2 v2.5.0 // indirect
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.1 // indirect
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect
|
||||
github.com/ultraware/funlen v0.0.3 // indirect
|
||||
github.com/ultraware/whitespace v0.0.5 // indirect
|
||||
github.com/uudashr/gocognit v1.0.5 // indirect
|
||||
github.com/yagipy/maintidx v1.0.0 // indirect
|
||||
github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 // indirect
|
||||
github.com/yeya24/promlinter v0.2.0 // indirect
|
||||
gitlab.com/bosi/decorder v0.2.1 // indirect
|
||||
go.etcd.io/bbolt v1.3.6 // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/tools v0.1.10 // indirect
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
|
||||
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
honnef.co/go/tools v0.2.2 // indirect
|
||||
mvdan.cc/gofumpt v0.3.0 // indirect
|
||||
honnef.co/go/tools v0.3.1 // indirect
|
||||
mvdan.cc/gofumpt v0.3.1 // indirect
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
|
||||
mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect
|
||||
|
||||
159
go.sum
159
go.sum
@@ -46,7 +46,6 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW
|
||||
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
|
||||
cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
@@ -62,17 +61,17 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCUEg=
|
||||
github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
|
||||
github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74=
|
||||
github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
|
||||
github.com/Antonboom/errname v0.1.6 h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24=
|
||||
github.com/Antonboom/errname v0.1.6/go.mod h1:7lz79JAnuoMNDAWE9MeeIr1/c/VpSUWatBv2FH9NYpI=
|
||||
github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=
|
||||
github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
|
||||
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
@@ -83,6 +82,8 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
|
||||
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
|
||||
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
|
||||
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M=
|
||||
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0/go.mod h1:LGOGuvEgCfCQsy3JF2tRmpGDpzA53iZfyGEWSPwQ6/4=
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
@@ -148,14 +149,14 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
|
||||
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
|
||||
github.com/blizzy78/varnamelen v0.6.1 h1:kttPCLzXFa+0nt++Cw9fb7GrSSM4KkyIAoX/vXsbuqA=
|
||||
github.com/blizzy78/varnamelen v0.6.1/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8=
|
||||
github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
|
||||
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
|
||||
github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM=
|
||||
github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
|
||||
github.com/breml/bidichk v0.2.2 h1:w7QXnpH0eCBJm55zGCTJveZEkQBt6Fs5zThIdA6qQ9Y=
|
||||
github.com/breml/bidichk v0.2.2/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
|
||||
github.com/breml/errchkjson v0.2.3 h1:97eGTmR/w0paL2SwfRPI1jaAZHaH/fXnxWTw2eEIqE0=
|
||||
github.com/breml/errchkjson v0.2.3/go.mod h1:jZEATw/jF69cL1iy7//Yih8yp/mXp2CBoBr9GJwCAsY=
|
||||
github.com/breml/bidichk v0.2.3 h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI=
|
||||
github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A=
|
||||
github.com/breml/errchkjson v0.3.0 h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw=
|
||||
github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c=
|
||||
github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y=
|
||||
@@ -187,8 +188,8 @@ github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cb
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=
|
||||
github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
|
||||
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI=
|
||||
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
|
||||
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M=
|
||||
github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
@@ -229,10 +230,11 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg=
|
||||
github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc=
|
||||
github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY=
|
||||
github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM=
|
||||
github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk=
|
||||
github.com/creachadair/tomledit v0.0.19 h1:zbpfUtYFYFdpRjwJY9HJlto1iZ4M5YwYB6qqc37F6UM=
|
||||
github.com/creachadair/tomledit v0.0.19/go.mod h1:gvtfnSZLa+YNQD28vaPq0Nk12bRxEhmUdBzAWn+EGF4=
|
||||
github.com/creachadair/tomledit v0.0.22 h1:lRtepmrwhzDq+g1gv5ftVn5itgo7CjYbm6abKTToqJ4=
|
||||
github.com/creachadair/tomledit v0.0.22/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
@@ -299,6 +301,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
|
||||
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
|
||||
github.com/firefart/nonamedreturns v1.0.1 h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag=
|
||||
github.com/firefart/nonamedreturns v1.0.1/go.mod h1:D3dpIBojGGNh5UfElmwPu73SwDCm+VKhHYqwlNOk2uQ=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
|
||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
@@ -309,14 +313,15 @@ github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnX
|
||||
github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
|
||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
||||
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
|
||||
github.com/fzipp/gocyclo v0.4.0 h1:IykTnjwh2YLyYkGa0y92iTTEQcnyAz0r9zOo15EbJ7k=
|
||||
github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
|
||||
github.com/fzipp/gocyclo v0.5.1 h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw=
|
||||
github.com/fzipp/gocyclo v0.5.1/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-critic/go-critic v0.6.2 h1:L5SDut1N4ZfsWZY0sH4DCrsHLHnhuuWak2wa165t9gs=
|
||||
github.com/go-critic/go-critic v0.6.2/go.mod h1:td1s27kfmLpe5G/DPjlnFI7o1UCzePptwU7Az0V5iCM=
|
||||
github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw=
|
||||
github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -418,12 +423,12 @@ github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5
|
||||
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
|
||||
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
|
||||
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
|
||||
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw=
|
||||
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
|
||||
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
|
||||
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
|
||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
|
||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
|
||||
github.com/golangci/golangci-lint v1.45.2 h1:9I3PzkvscJkFAQpTQi5Ga0V4qWdJERajX1UZ7QqkW+I=
|
||||
github.com/golangci/golangci-lint v1.45.2/go.mod h1:f20dpzMmUTRp+oYnX0OGjV1Au3Jm2JeI9yLqHq1/xsI=
|
||||
github.com/golangci/golangci-lint v1.46.0 h1:uz9AtEcIP63FH+FIyuAXcQGVQO4vCUavEsMTJpPeD4s=
|
||||
github.com/golangci/golangci-lint v1.46.0/go.mod h1:IJpcNOUfx/XLRwE95FHQ6QtbhYwwqcm0H5QkwUfF4ZE=
|
||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
|
||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
|
||||
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
|
||||
@@ -655,15 +660,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kulti/thelper v0.5.1 h1:Uf4CUekH0OvzQTFPrWkstJvXgm6pnNEtQu3HiqEkpB0=
|
||||
github.com/kulti/thelper v0.5.1/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
|
||||
github.com/kulti/thelper v0.6.2 h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs=
|
||||
github.com/kulti/thelper v0.6.2/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
|
||||
github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI=
|
||||
github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
|
||||
github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
|
||||
github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg=
|
||||
github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
|
||||
github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
|
||||
github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
|
||||
github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM=
|
||||
github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
|
||||
github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg=
|
||||
@@ -677,7 +682,8 @@ github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ=
|
||||
github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
||||
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
|
||||
github.com/lufeee/execinquery v1.0.0 h1:1XUTuLIVPDlFvUU3LXmmZwHDsolsxXnY67lzhpeqe0I=
|
||||
github.com/lufeee/execinquery v1.0.0/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
@@ -718,8 +724,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
|
||||
github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
|
||||
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
|
||||
github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
|
||||
github.com/mgechev/revive v1.1.4 h1:sZOjY6GU35Kr9jKa/wsKSHgrFz8eASIB5i3tqWZMp0A=
|
||||
github.com/mgechev/revive v1.1.4/go.mod h1:ZZq2bmyssGh8MSPz3VVziqRNIMYTJXzP8MUKG90vZ9A=
|
||||
github.com/mgechev/revive v1.2.1 h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4=
|
||||
github.com/mgechev/revive v1.2.1/go.mod h1:+Ro3wqY4vakcYNtkBWdZC7dBg1xSB6sp054wWwmeFm0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
@@ -738,8 +744,9 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
|
||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
@@ -777,8 +784,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
|
||||
github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA=
|
||||
github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI=
|
||||
github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
|
||||
github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw=
|
||||
github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE=
|
||||
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
|
||||
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
@@ -832,10 +839,12 @@ github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0=
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0 h1:P7Bq0SaI8nsexyay5UAyDo+ICWy5MQPgEZ5+l8JQTKo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
|
||||
@@ -892,19 +901,23 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
|
||||
github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
|
||||
github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
|
||||
github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=
|
||||
github.com/quasilyte/go-ruleguard v0.3.15 h1:iWYzp1z72IlXTioET0+XI6SjQdPfMGfuAiZiKznOt7g=
|
||||
github.com/quasilyte/go-ruleguard v0.3.15/go.mod h1:NhuWhnlVEM1gT1A4VJHYfy9MuYSxxwHgxWoPsn9llB4=
|
||||
github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a h1:sWFavxtIctGrVs5SYZ5Ml1CvrDAs8Kf5kx2PI3C41dA=
|
||||
github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a/go.mod h1:VMX+OnnSw4LicdiEGtRSD/1X8kW7GuEscjYNr4cOIT4=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.12-0.20220101150716-969a394a9451/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.12/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.17/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.16/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.19/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
|
||||
github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
|
||||
github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 h1:P4QPNn+TK49zJjXKERt/vyPbv/mCHB/zQ4flDYOMN+M=
|
||||
github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM=
|
||||
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8=
|
||||
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM=
|
||||
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
|
||||
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
|
||||
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
|
||||
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/remyoudompheng/go-dbus v0.0.0-20121104212943-b7232d34b1d5/go.mod h1:+u151txRmLpwxBmpYn9z3d1sdJdjRPQpsXuYeY9jNls=
|
||||
github.com/remyoudompheng/go-liblzma v0.0.0-20190506200333-81bf2d431b96/go.mod h1:90HvCY7+oHHUKkbeMCiHt1WuFR2/hPJ9QrljDG+v6ls=
|
||||
github.com/remyoudompheng/go-misc v0.0.0-20190427085024-2d6ac652a50e/go.mod h1:80FQABjoFzZ2M5uEa6FUaJYEmqU2UOKojlFVak1UAwI=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
@@ -925,7 +938,6 @@ github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoL
|
||||
github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
|
||||
github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
|
||||
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
|
||||
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
|
||||
github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA=
|
||||
@@ -933,12 +945,12 @@ github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
github.com/securego/gosec/v2 v2.10.0 h1:l6BET4EzWtyUXCpY2v7N92v0DDCas0L7ngg3bpqbr8g=
|
||||
github.com/securego/gosec/v2 v2.10.0/go.mod h1:PVq8Ewh/nCN8l/kKC6zrGXSr7m2NmEK6ITIAWMtIaA0=
|
||||
github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI=
|
||||
github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
|
||||
github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY=
|
||||
github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
@@ -950,8 +962,8 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI=
|
||||
github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw=
|
||||
github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8=
|
||||
github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0=
|
||||
github.com/sivchari/tenv v1.5.0 h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ=
|
||||
github.com/sivchari/tenv v1.5.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
|
||||
@@ -989,13 +1001,14 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
|
||||
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
|
||||
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
|
||||
github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44=
|
||||
github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
|
||||
github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
|
||||
github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
@@ -1033,13 +1046,13 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
|
||||
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
|
||||
github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=
|
||||
github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
|
||||
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
|
||||
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.5.0 h1:g27SGGHNoQdvHz4KZA9o4v09RcWzylR+b1yueE5ECiw=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.5.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.1 h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.1/go.mod h1:Eo+Opt6pyMW1b6cNllOcDSSoHO0aTJ+iF6BfCUbHltA=
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
|
||||
@@ -1059,8 +1072,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
|
||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/vektra/mockery/v2 v2.12.1 h1:BAJk2fGjVg/P9Fi+BxZD1/ZeKTOclpeAb/SKCc12zXc=
|
||||
github.com/vektra/mockery/v2 v2.12.1/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U=
|
||||
github.com/vektra/mockery/v2 v2.12.2 h1:JbRx9F+XcCJiDTyCm3V5lXYwl56m5ZouV6I9eZa1Dj0=
|
||||
github.com/vektra/mockery/v2 v2.12.2/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U=
|
||||
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
@@ -1069,8 +1082,8 @@ github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1z
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
|
||||
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
|
||||
github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 h1:YAaOqqMTstELMMGblt6yJ/fcOt4owSYuw3IttMnKfAM=
|
||||
github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
|
||||
github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
|
||||
github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
|
||||
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
|
||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
|
||||
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
|
||||
@@ -1152,7 +1165,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1168,7 +1181,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo=
|
||||
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
@@ -1385,11 +1401,9 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1400,14 +1414,17 @@ golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
|
||||
golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
@@ -1435,6 +1452,7 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190228203856-589c23e65e65/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
@@ -1508,7 +1526,6 @@ golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4X
|
||||
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
@@ -1528,8 +1545,9 @@ golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
||||
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II=
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1733,7 +1751,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
|
||||
@@ -1765,10 +1782,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk=
|
||||
honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
|
||||
mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4=
|
||||
mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo=
|
||||
honnef.co/go/tools v0.3.1 h1:1kJlrWJLkaGXgcaeosRXViwviqjI7nkBvU2+sZW0AYc=
|
||||
honnef.co/go/tools v0.3.1/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70=
|
||||
mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8=
|
||||
mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
|
||||
|
||||
@@ -200,16 +200,20 @@ func (pool *BlockPool) IsCaughtUp() bool {
|
||||
return pool.height >= (pool.maxPeerHeight - 1)
|
||||
}
|
||||
|
||||
// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
|
||||
// We need to see the second block's Commit to validate the first block.
|
||||
// So we peek two blocks at a time.
|
||||
// PeekTwoBlocks returns blocks at pool.height and pool.height+1. We need to
|
||||
// see the second block's Commit to validate the first block. So we peek two
|
||||
// blocks at a time. We return an extended commit, containing vote extensions
|
||||
// and their associated signatures, as this is critical to consensus in ABCI++
|
||||
// as we switch from block sync to consensus mode.
|
||||
//
|
||||
// The caller will verify the commit.
|
||||
func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
|
||||
func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.ExtendedCommit) {
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
|
||||
if r := pool.requesters[pool.height]; r != nil {
|
||||
first = r.getBlock()
|
||||
firstExtCommit = r.getExtendedCommit()
|
||||
}
|
||||
if r := pool.requesters[pool.height+1]; r != nil {
|
||||
second = r.getBlock()
|
||||
@@ -218,7 +222,8 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block)
|
||||
}
|
||||
|
||||
// PopRequest pops the first block at pool.height.
|
||||
// It must have been validated by 'second'.Commit from PeekTwoBlocks().
|
||||
// It must have been validated by the second Commit from PeekTwoBlocks.
|
||||
// TODO(thane): (?) and its corresponding ExtendedCommit.
|
||||
func (pool *BlockPool) PopRequest() {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
@@ -262,16 +267,25 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
|
||||
return peerID
|
||||
}
|
||||
|
||||
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
|
||||
// AddBlock validates that the block comes from the peer it was expected from
|
||||
// and calls the requester to store it.
|
||||
//
|
||||
// This requires an extended commit at the same height as the supplied block -
|
||||
// the block contains the last commit, but we need the latest commit in case we
|
||||
// need to switch over from block sync to consensus at this height. If the
|
||||
// height of the extended commit and the height of the block do not match, we
|
||||
// do not add the block and return an error.
|
||||
// TODO: ensure that blocks come in order for each peer.
|
||||
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
|
||||
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, extCommit *types.ExtendedCommit, blockSize int) error {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
if block.Height != extCommit.Height {
|
||||
return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, extCommit.Height)
|
||||
}
|
||||
|
||||
requester := pool.requesters[block.Height]
|
||||
if requester == nil {
|
||||
pool.logger.Error("peer sent us a block we didn't expect",
|
||||
"peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
|
||||
diff := pool.height - block.Height
|
||||
if diff < 0 {
|
||||
diff *= -1
|
||||
@@ -279,10 +293,10 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi
|
||||
if diff > maxDiffBetweenCurrentAndReceivedBlockHeight {
|
||||
pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID)
|
||||
}
|
||||
return
|
||||
return fmt.Errorf("peer sent us a block we didn't expect (peer: %s, current height: %d, block height: %d)", peerID, pool.height, block.Height)
|
||||
}
|
||||
|
||||
if requester.setBlock(block, peerID) {
|
||||
if requester.setBlock(block, extCommit, peerID) {
|
||||
atomic.AddInt32(&pool.numPending, -1)
|
||||
peer := pool.peers[peerID]
|
||||
if peer != nil {
|
||||
@@ -290,9 +304,11 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi
|
||||
}
|
||||
} else {
|
||||
err := errors.New("requester is different or block already exists")
|
||||
pool.logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height)
|
||||
pool.sendError(err, peerID)
|
||||
return fmt.Errorf("%w (peer: %s, requester: %s, block height: %d)", err, peerID, requester.getPeerID(), block.Height)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MaxPeerHeight returns the highest reported height.
|
||||
@@ -456,6 +472,7 @@ func (pool *BlockPool) debug() string {
|
||||
} else {
|
||||
str += fmt.Sprintf("H(%v):", h)
|
||||
str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
|
||||
str += fmt.Sprintf("C?(%v) ", pool.requesters[h].extCommit != nil)
|
||||
}
|
||||
}
|
||||
return str
|
||||
@@ -544,9 +561,10 @@ type bpRequester struct {
|
||||
gotBlockCh chan struct{}
|
||||
redoCh chan types.NodeID // redo may send multitime, add peerId to identify repeat
|
||||
|
||||
mtx sync.Mutex
|
||||
peerID types.NodeID
|
||||
block *types.Block
|
||||
mtx sync.Mutex
|
||||
peerID types.NodeID
|
||||
block *types.Block
|
||||
extCommit *types.ExtendedCommit
|
||||
}
|
||||
|
||||
func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester {
|
||||
@@ -572,13 +590,14 @@ func (bpr *bpRequester) OnStart(ctx context.Context) error {
|
||||
func (*bpRequester) OnStop() {}
|
||||
|
||||
// Returns true if the peer matches and block doesn't already exist.
|
||||
func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
|
||||
func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCommit, peerID types.NodeID) bool {
|
||||
bpr.mtx.Lock()
|
||||
if bpr.block != nil || bpr.peerID != peerID {
|
||||
bpr.mtx.Unlock()
|
||||
return false
|
||||
}
|
||||
bpr.block = block
|
||||
bpr.extCommit = extCommit
|
||||
bpr.mtx.Unlock()
|
||||
|
||||
select {
|
||||
@@ -594,6 +613,12 @@ func (bpr *bpRequester) getBlock() *types.Block {
|
||||
return bpr.block
|
||||
}
|
||||
|
||||
func (bpr *bpRequester) getExtendedCommit() *types.ExtendedCommit {
|
||||
bpr.mtx.Lock()
|
||||
defer bpr.mtx.Unlock()
|
||||
return bpr.extCommit
|
||||
}
|
||||
|
||||
func (bpr *bpRequester) getPeerID() types.NodeID {
|
||||
bpr.mtx.Lock()
|
||||
defer bpr.mtx.Unlock()
|
||||
@@ -611,6 +636,7 @@ func (bpr *bpRequester) reset() {
|
||||
|
||||
bpr.peerID = ""
|
||||
bpr.block = nil
|
||||
bpr.extCommit = nil
|
||||
}
|
||||
|
||||
// Tells bpRequester to pick another peer and try again.
|
||||
|
||||
@@ -43,7 +43,10 @@ func (p testPeer) runInputRoutine() {
|
||||
// Request desired, pretend like we got the block immediately.
|
||||
func (p testPeer) simulateInput(input inputData) {
|
||||
block := &types.Block{Header: types.Header{Height: input.request.Height}}
|
||||
input.pool.AddBlock(input.request.PeerID, block, 123)
|
||||
extCommit := &types.ExtendedCommit{
|
||||
Height: input.request.Height,
|
||||
}
|
||||
_ = input.pool.AddBlock(input.request.PeerID, block, extCommit, 123)
|
||||
// TODO: uncommenting this creates a race which is detected by:
|
||||
// https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
|
||||
// see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
|
||||
@@ -110,7 +113,7 @@ func TestBlockPoolBasic(t *testing.T) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
first, second := pool.PeekTwoBlocks()
|
||||
first, second, _ := pool.PeekTwoBlocks()
|
||||
if first != nil && second != nil {
|
||||
pool.PopRequest()
|
||||
} else {
|
||||
@@ -164,7 +167,7 @@ func TestBlockPoolTimeout(t *testing.T) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
first, second := pool.PeekTwoBlocks()
|
||||
first, second, _ := pool.PeekTwoBlocks()
|
||||
if first != nil && second != nil {
|
||||
pool.PopRequest()
|
||||
} else {
|
||||
|
||||
@@ -76,7 +76,7 @@ type Reactor struct {
|
||||
stateStore sm.Store
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
store sm.BlockStore
|
||||
pool *BlockPool
|
||||
consReactor consensusReactor
|
||||
blockSync *atomicBool
|
||||
@@ -186,15 +186,21 @@ func (r *Reactor) OnStop() {
|
||||
func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error {
|
||||
block := r.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
extCommit := r.store.LoadBlockExtendedCommit(msg.Height)
|
||||
if extCommit == nil {
|
||||
return fmt.Errorf("found block in store without extended commit: %v", block)
|
||||
}
|
||||
blockProto, err := block.ToProto()
|
||||
if err != nil {
|
||||
r.logger.Error("failed to convert msg to protobuf", "err", err)
|
||||
return err
|
||||
return fmt.Errorf("failed to convert block to protobuf: %w", err)
|
||||
}
|
||||
|
||||
return blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.BlockResponse{Block: blockProto},
|
||||
To: peerID,
|
||||
Message: &bcproto.BlockResponse{
|
||||
Block: blockProto,
|
||||
ExtCommit: extCommit.ToProto(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -236,8 +242,17 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo
|
||||
"err", err)
|
||||
return err
|
||||
}
|
||||
extCommit, err := types.ExtendedCommitFromProto(msg.ExtCommit)
|
||||
if err != nil {
|
||||
r.logger.Error("failed to convert extended commit from proto",
|
||||
"peer", envelope.From,
|
||||
"err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
r.pool.AddBlock(envelope.From, block, block.Size())
|
||||
if err := r.pool.AddBlock(envelope.From, block, extCommit, block.Size()); err != nil {
|
||||
r.logger.Error("failed to add block", "err", err)
|
||||
}
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
return blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
@@ -448,6 +463,20 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
|
||||
)
|
||||
|
||||
switch {
|
||||
// TODO(sergio) Might be needed for implementing the upgrading solution. Remove after that
|
||||
//case state.LastBlockHeight > 0 && r.store.LoadBlockExtCommit(state.LastBlockHeight) == nil:
|
||||
case state.LastBlockHeight > 0 && blocksSynced == 0:
|
||||
// Having state-synced, we need to blocksync at least one block
|
||||
r.logger.Info(
|
||||
"no seen commit yet",
|
||||
"height", height,
|
||||
"last_block_height", state.LastBlockHeight,
|
||||
"initial_height", state.InitialHeight,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"timeout_in", syncTimeout-time.Since(lastAdvance),
|
||||
)
|
||||
continue
|
||||
|
||||
case r.pool.IsCaughtUp():
|
||||
r.logger.Info("switching to consensus reactor", "height", height)
|
||||
|
||||
@@ -490,9 +519,13 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
|
||||
// TODO: Uncouple from request routine.
|
||||
|
||||
// see if there are any blocks to sync
|
||||
first, second := r.pool.PeekTwoBlocks()
|
||||
if first == nil || second == nil {
|
||||
// we need both to sync the first block
|
||||
first, second, extCommit := r.pool.PeekTwoBlocks()
|
||||
if first == nil || second == nil || extCommit == nil {
|
||||
if first != nil && extCommit == nil {
|
||||
// See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631
|
||||
panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height))
|
||||
}
|
||||
// we need all to sync the first block
|
||||
continue
|
||||
} else {
|
||||
// try again quickly next loop
|
||||
@@ -517,8 +550,17 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
|
||||
// NOTE: We can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
if err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit); err != nil {
|
||||
err = fmt.Errorf("invalid last commit: %w", err)
|
||||
// TODO(sergio): Should we also validate against the extended commit?
|
||||
err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
|
||||
if err == nil {
|
||||
// validate the block before we persist it
|
||||
err = r.blockExec.ValidateBlock(ctx, state, first)
|
||||
}
|
||||
|
||||
// If either of the checks failed we log the error and request for a new block
|
||||
// at that height
|
||||
if err != nil {
|
||||
r.logger.Error(
|
||||
err.Error(),
|
||||
"last_commit", second.LastCommit,
|
||||
@@ -545,37 +587,35 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
r.pool.PopRequest()
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: batch saves so we do not persist to disk every block
|
||||
r.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
r.pool.PopRequest()
|
||||
|
||||
var err error
|
||||
// TODO: batch saves so we do not persist to disk every block
|
||||
r.store.SaveBlock(first, firstParts, extCommit)
|
||||
|
||||
// TODO: Same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
state, err = r.blockExec.ApplyBlock(ctx, state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO: This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
// TODO: Same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
state, err = r.blockExec.ApplyBlock(ctx, state, firstID, first)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
|
||||
r.metrics.RecordConsMetrics(first)
|
||||
r.metrics.RecordConsMetrics(first)
|
||||
|
||||
blocksSynced++
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
r.logger.Info(
|
||||
"block sync rate",
|
||||
"height", r.pool.height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"blocks/s", lastRate,
|
||||
)
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
r.logger.Info(
|
||||
"block sync rate",
|
||||
"height", r.pool.height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"blocks/s", lastRate,
|
||||
)
|
||||
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -147,39 +147,43 @@ func (rts *reactorTestSuite) addNode(
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
|
||||
var lastExtCommit *types.ExtendedCommit
|
||||
|
||||
// The commit we are building for the current height.
|
||||
seenExtCommit := &types.ExtendedCommit{}
|
||||
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
|
||||
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
|
||||
lastExtCommit = seenExtCommit.Clone()
|
||||
|
||||
if blockHeight > 1 {
|
||||
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStore.LoadBlock(blockHeight - 1)
|
||||
|
||||
vote, err := factory.MakeVote(
|
||||
ctx,
|
||||
privVal,
|
||||
lastBlock.Header.ChainID, 0,
|
||||
lastBlock.Header.Height, 0, 2,
|
||||
lastBlockMeta.BlockID,
|
||||
time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
lastCommit = types.NewCommit(
|
||||
vote.Height,
|
||||
vote.Round,
|
||||
lastBlockMeta.BlockID,
|
||||
[]types.CommitSig{vote.CommitSig()},
|
||||
)
|
||||
}
|
||||
|
||||
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
|
||||
thisBlock := sf.MakeBlock(state, blockHeight, lastExtCommit.StripExtensions())
|
||||
thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
require.NoError(t, err)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
// Simulate a commit for the current height
|
||||
vote, err := factory.MakeVote(
|
||||
ctx,
|
||||
privVal,
|
||||
thisBlock.Header.ChainID,
|
||||
0,
|
||||
thisBlock.Header.Height,
|
||||
0,
|
||||
2,
|
||||
blockID,
|
||||
time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
seenExtCommit = &types.ExtendedCommit{
|
||||
Height: vote.Height,
|
||||
Round: vote.Round,
|
||||
BlockID: blockID,
|
||||
ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()},
|
||||
}
|
||||
|
||||
state, err = blockExec.ApplyBlock(ctx, state, blockID, thisBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
|
||||
blockStore.SaveBlock(thisBlock, thisParts, seenExtCommit)
|
||||
}
|
||||
|
||||
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
|
||||
|
||||
@@ -178,22 +178,22 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
lazyNodeState.decideProposal = func(ctx context.Context, height int64, round int32) {
|
||||
require.NotNil(t, lazyNodeState.privValidator)
|
||||
|
||||
var commit *types.Commit
|
||||
var extCommit *types.ExtendedCommit
|
||||
switch {
|
||||
case lazyNodeState.Height == lazyNodeState.state.InitialHeight:
|
||||
// We're creating a proposal for the first block.
|
||||
// The commit is empty, but not nil.
|
||||
commit = types.NewCommit(0, 0, types.BlockID{}, nil)
|
||||
extCommit = &types.ExtendedCommit{}
|
||||
case lazyNodeState.LastCommit.HasTwoThirdsMajority():
|
||||
// Make the commit from LastCommit
|
||||
commit = lazyNodeState.LastCommit.MakeCommit()
|
||||
extCommit = lazyNodeState.LastCommit.MakeExtendedCommit()
|
||||
default: // This shouldn't happen.
|
||||
lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
|
||||
return
|
||||
}
|
||||
|
||||
// omit the last signature in the commit
|
||||
commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent()
|
||||
extCommit.ExtendedSignatures[len(extCommit.ExtendedSignatures)-1] = types.NewExtendedCommitSigAbsent()
|
||||
|
||||
if lazyNodeState.privValidatorPubKey == nil {
|
||||
// If this node is a validator & proposer in the current round, it will
|
||||
@@ -204,7 +204,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
proposerAddr := lazyNodeState.privValidatorPubKey.Address()
|
||||
|
||||
block, err := lazyNodeState.blockExec.CreateProposalBlock(
|
||||
ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, lazyNodeState.LastCommit.GetVotes())
|
||||
ctx, lazyNodeState.Height, lazyNodeState.state, extCommit, proposerAddr)
|
||||
require.NoError(t, err)
|
||||
blockParts, err := block.MakePartSet(types.BlockPartSizeBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -160,7 +160,8 @@ func signVote(
|
||||
blockID types.BlockID) *types.Vote {
|
||||
|
||||
var ext []byte
|
||||
if voteType == tmproto.PrecommitType {
|
||||
// Only non-nil precommits are allowed to carry vote extensions.
|
||||
if voteType == tmproto.PrecommitType && !blockID.IsNil() {
|
||||
ext = []byte("extension")
|
||||
}
|
||||
v, err := vs.signVote(ctx, voteType, chainID, blockID, ext)
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
|
||||
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
@@ -103,6 +104,33 @@ type Metrics struct {
|
||||
// the proposal message and the local time of the validator at the time
|
||||
// that the validator received the message.
|
||||
ProposalTimestampDifference metrics.Histogram
|
||||
|
||||
// VoteExtensionReceiveCount is the number of vote extensions received by this
|
||||
// node. The metric is annotated by the status of the vote extension from the
|
||||
// application, either 'accepted' or 'rejected'.
|
||||
VoteExtensionReceiveCount metrics.Counter
|
||||
|
||||
// ProposalReceiveCount is the total number of proposals received by this node
|
||||
// since process start.
|
||||
// The metric is annotated by the status of the proposal from the application,
|
||||
// either 'accepted' or 'rejected'.
|
||||
ProposalReceiveCount metrics.Counter
|
||||
|
||||
// ProposalCreationCount is the total number of proposals created by this node
|
||||
// since process start.
|
||||
// The metric is annotated by the status of the proposal from the application,
|
||||
// either 'accepted' or 'rejected'.
|
||||
ProposalCreateCount metrics.Counter
|
||||
|
||||
// RoundVotingPowerPercent is the percentage of the total voting power received
|
||||
// with a round. The value begins at 0 for each round and approaches 1.0 as
|
||||
// additional voting power is observed. The metric is labeled by vote type.
|
||||
RoundVotingPowerPercent metrics.Gauge
|
||||
|
||||
// LateVotes stores the number of votes that were received by this node that
|
||||
// correspond to earlier heights and rounds than this node is currently
|
||||
// in.
|
||||
LateVotes metrics.Counter
|
||||
}
|
||||
|
||||
// PrometheusMetrics returns Metrics build using Prometheus client library.
|
||||
@@ -280,6 +308,43 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
"Only calculated when a new block is proposed.",
|
||||
Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10},
|
||||
}, append(labels, "is_timely")).With(labelsAndValues...),
|
||||
VoteExtensionReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "vote_extension_receive_count",
|
||||
Help: "Number of vote extensions received by the node since process start, labeled by " +
|
||||
"the application's response to VerifyVoteExtension, either accept or reject.",
|
||||
}, append(labels, "status")).With(labelsAndValues...),
|
||||
|
||||
ProposalReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "proposal_receive_count",
|
||||
Help: "Number of vote proposals received by the node since process start, labeled by " +
|
||||
"the application's response to ProcessProposal, either accept or reject.",
|
||||
}, append(labels, "status")).With(labelsAndValues...),
|
||||
|
||||
ProposalCreateCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "proposal_create_count",
|
||||
Help: "Number of proposals created by the node since process start.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
RoundVotingPowerPercent: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "round_voting_power_percent",
|
||||
Help: "Percentage of the total voting power received with a round. " +
|
||||
"The value begins at 0 for each round and approaches 1.0 as additional " +
|
||||
"voting power is observed.",
|
||||
}, append(labels, "vote_type")).With(labelsAndValues...),
|
||||
LateVotes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "late_votes",
|
||||
Help: "Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in.",
|
||||
}, append(labels, "vote_type")).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -317,6 +382,11 @@ func NopMetrics() *Metrics {
|
||||
QuorumPrevoteDelay: discard.NewGauge(),
|
||||
FullPrevoteDelay: discard.NewGauge(),
|
||||
ProposalTimestampDifference: discard.NewHistogram(),
|
||||
VoteExtensionReceiveCount: discard.NewCounter(),
|
||||
ProposalReceiveCount: discard.NewCounter(),
|
||||
ProposalCreateCount: discard.NewCounter(),
|
||||
RoundVotingPowerPercent: discard.NewGauge(),
|
||||
LateVotes: discard.NewCounter(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,10 +406,45 @@ func (m *Metrics) MarkBlockGossipComplete() {
|
||||
m.BlockGossipReceiveLatency.Observe(time.Since(m.blockGossipStart).Seconds())
|
||||
}
|
||||
|
||||
func (m *Metrics) MarkProposalProcessed(accepted bool) {
|
||||
status := "accepted"
|
||||
if !accepted {
|
||||
status = "rejected"
|
||||
}
|
||||
m.ProposalReceiveCount.With("status", status).Add(1)
|
||||
}
|
||||
|
||||
func (m *Metrics) MarkVoteExtensionReceived(accepted bool) {
|
||||
status := "accepted"
|
||||
if !accepted {
|
||||
status = "rejected"
|
||||
}
|
||||
m.VoteExtensionReceiveCount.With("status", status).Add(1)
|
||||
}
|
||||
|
||||
func (m *Metrics) MarkVoteReceived(vt tmproto.SignedMsgType, power, totalPower int64) {
|
||||
p := float64(power) / float64(totalPower)
|
||||
n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_"))
|
||||
m.RoundVotingPowerPercent.With("vote_type", n).Add(p)
|
||||
}
|
||||
|
||||
func (m *Metrics) MarkRound(r int32, st time.Time) {
|
||||
m.Rounds.Set(float64(r))
|
||||
roundTime := time.Since(st).Seconds()
|
||||
m.RoundDuration.Observe(roundTime)
|
||||
|
||||
pvt := tmproto.PrevoteType
|
||||
pvn := strings.ToLower(strings.TrimPrefix(pvt.String(), "SIGNED_MSG_TYPE_"))
|
||||
m.RoundVotingPowerPercent.With("vote_type", pvn).Set(0)
|
||||
|
||||
pct := tmproto.PrecommitType
|
||||
pcn := strings.ToLower(strings.TrimPrefix(pct.String(), "SIGNED_MSG_TYPE_"))
|
||||
m.RoundVotingPowerPercent.With("vote_type", pcn).Set(0)
|
||||
}
|
||||
|
||||
func (m *Metrics) MarkLateVote(vt tmproto.SignedMsgType) {
|
||||
n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_"))
|
||||
m.LateVotes.With("vote_type", n).Add(1)
|
||||
}
|
||||
|
||||
func (m *Metrics) MarkStep(s cstypes.RoundStepType) {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
testing "testing"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
state "github.com/tendermint/tendermint/internal/state"
|
||||
)
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ func TestMsgToProto(t *testing.T) {
|
||||
|
||||
pv := types.NewMockPV()
|
||||
vote, err := factory.MakeVote(ctx, pv, factory.DefaultTestChainID,
|
||||
0, 1, 0, 2, types.BlockID{}, time.Now())
|
||||
0, 1, 0, 2, bi, time.Now())
|
||||
require.NoError(t, err)
|
||||
pbVote := vote.ToProto()
|
||||
|
||||
|
||||
@@ -794,10 +794,10 @@ func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh
|
||||
// catchup logic -- if peer is lagging by more than 1, send Commit
|
||||
blockStoreBase := r.state.blockStore.Base()
|
||||
if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase {
|
||||
// Load the block commit for prs.Height, which contains precommit
|
||||
// Load the block's extended commit for prs.Height, which contains precommit
|
||||
// signatures for prs.Height.
|
||||
if commit := r.state.blockStore.LoadBlockCommit(prs.Height); commit != nil {
|
||||
if ok, err := r.pickSendVote(ctx, ps, commit, voteCh); err != nil {
|
||||
if ec := r.state.blockStore.LoadBlockExtendedCommit(prs.Height); ec != nil {
|
||||
if ok, err := r.pickSendVote(ctx, ps, ec, voteCh); err != nil {
|
||||
return
|
||||
} else if ok {
|
||||
logger.Debug("picked Catchup commit to send", "height", prs.Height)
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -600,6 +601,106 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestSwitchToConsensusVoteExtensions(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
storedHeight int64
|
||||
initialRequiredHeight int64
|
||||
includeExtensions bool
|
||||
shouldPanic bool
|
||||
}{
|
||||
{
|
||||
name: "no vote extensions but not required",
|
||||
initialRequiredHeight: 0,
|
||||
storedHeight: 2,
|
||||
includeExtensions: false,
|
||||
shouldPanic: false,
|
||||
},
|
||||
{
|
||||
name: "no vote extensions but required this height",
|
||||
initialRequiredHeight: 2,
|
||||
storedHeight: 2,
|
||||
includeExtensions: false,
|
||||
shouldPanic: true,
|
||||
},
|
||||
{
|
||||
name: "no vote extensions and required in future",
|
||||
initialRequiredHeight: 3,
|
||||
storedHeight: 2,
|
||||
includeExtensions: false,
|
||||
shouldPanic: false,
|
||||
},
|
||||
{
|
||||
name: "no vote extensions and required previous height",
|
||||
initialRequiredHeight: 1,
|
||||
storedHeight: 2,
|
||||
includeExtensions: false,
|
||||
shouldPanic: true,
|
||||
},
|
||||
{
|
||||
name: "vote extensions and required previous height",
|
||||
initialRequiredHeight: 1,
|
||||
storedHeight: 2,
|
||||
includeExtensions: true,
|
||||
shouldPanic: false,
|
||||
},
|
||||
} {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
cs, vs := makeState(ctx, t, makeStateArgs{validators: 1})
|
||||
validator := vs[0]
|
||||
validator.Height = testCase.storedHeight
|
||||
|
||||
cs.state.LastBlockHeight = testCase.storedHeight
|
||||
cs.state.LastValidators = cs.state.Validators.Copy()
|
||||
cs.state.ConsensusParams.Vote.ExtensionRequireHeight = testCase.initialRequiredHeight
|
||||
|
||||
propBlock, err := cs.createProposalBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Consensus is preparing to do the next height after the stored height.
|
||||
cs.Height = testCase.storedHeight + 1
|
||||
propBlock.Height = testCase.storedHeight
|
||||
blockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
voteSet := types.NewVoteSet(cs.state.ChainID, testCase.storedHeight, 0, tmproto.PrecommitType, cs.state.Validators, false)
|
||||
signedVote := signVote(ctx, t, validator, tmproto.PrecommitType, cs.state.ChainID, types.BlockID{
|
||||
Hash: propBlock.Hash(),
|
||||
PartSetHeader: blockParts.Header(),
|
||||
})
|
||||
|
||||
if !testCase.includeExtensions {
|
||||
signedVote.Extension = nil
|
||||
signedVote.ExtensionSignature = nil
|
||||
}
|
||||
|
||||
added, err := voteSet.AddVote(signedVote)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
cs.blockStore.SaveBlock(propBlock, blockParts, voteSet.MakeExtendedCommit())
|
||||
reactor := NewReactor(
|
||||
log.NewNopLogger(),
|
||||
cs,
|
||||
nil,
|
||||
nil,
|
||||
cs.eventBus,
|
||||
true,
|
||||
NopMetrics(),
|
||||
)
|
||||
|
||||
if testCase.shouldPanic {
|
||||
assert.Panics(t, func() {
|
||||
reactor.SwitchToConsensus(ctx, cs.state, false)
|
||||
})
|
||||
} else {
|
||||
reactor.SwitchToConsensus(ctx, cs.state, false)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
|
||||
@@ -297,7 +297,7 @@ type simulatorTestSuite struct {
|
||||
GenesisState sm.State
|
||||
Config *config.Config
|
||||
Chain []*types.Block
|
||||
Commits []*types.Commit
|
||||
ExtCommits []*types.ExtendedCommit
|
||||
CleanupFunc cleanupFunc
|
||||
|
||||
Mempool mempool.Mempool
|
||||
@@ -578,11 +578,11 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
|
||||
}
|
||||
ensureNewRound(t, newRoundCh, height+1, 0)
|
||||
|
||||
sim.Chain = make([]*types.Block, 0)
|
||||
sim.Commits = make([]*types.Commit, 0)
|
||||
sim.Chain = []*types.Block{}
|
||||
sim.ExtCommits = []*types.ExtendedCommit{}
|
||||
for i := 1; i <= numBlocks; i++ {
|
||||
sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
|
||||
sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
|
||||
sim.ExtCommits = append(sim.ExtCommits, css[0].blockStore.LoadBlockExtendedCommit(int64(i)))
|
||||
}
|
||||
|
||||
return sim
|
||||
@@ -679,7 +679,7 @@ func testHandshakeReplay(
|
||||
testValidatorsChange bool,
|
||||
) {
|
||||
var chain []*types.Block
|
||||
var commits []*types.Commit
|
||||
var extCommits []*types.ExtendedCommit
|
||||
var store *mockBlockStore
|
||||
var stateDB dbm.DB
|
||||
var genesisState sm.State
|
||||
@@ -699,7 +699,7 @@ func testHandshakeReplay(
|
||||
genesisState = sim.GenesisState
|
||||
cfg = sim.Config
|
||||
chain = append([]*types.Block{}, sim.Chain...) // copy chain
|
||||
commits = sim.Commits
|
||||
extCommits = sim.ExtCommits
|
||||
store = newMockBlockStore(t, cfg, genesisState.ConsensusParams)
|
||||
} else { // test single node
|
||||
testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_s", t.Name(), mode))
|
||||
@@ -718,7 +718,7 @@ func testHandshakeReplay(
|
||||
err = wal.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { cancel(); wal.Wait() })
|
||||
chain, commits = makeBlockchainFromWAL(t, wal)
|
||||
chain, extCommits = makeBlockchainFromWAL(t, wal)
|
||||
pubKey, err := privVal.GetPubKey(ctx)
|
||||
require.NoError(t, err)
|
||||
stateDB, genesisState, store = stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion)
|
||||
@@ -726,7 +726,7 @@ func testHandshakeReplay(
|
||||
}
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
store.chain = chain
|
||||
store.commits = commits
|
||||
store.extCommits = extCommits
|
||||
|
||||
state := genesisState.Copy()
|
||||
// run the chain through state.ApplyBlock to build up the tendermint state
|
||||
@@ -1034,7 +1034,7 @@ func (app *badApp) Commit(context.Context) (*abci.ResponseCommit, error) {
|
||||
//--------------------------
|
||||
// utils for making blocks
|
||||
|
||||
func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Commit) {
|
||||
func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.ExtendedCommit) {
|
||||
t.Helper()
|
||||
var height int64
|
||||
|
||||
@@ -1047,10 +1047,10 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
|
||||
// log.Notice("Build a blockchain by reading from the WAL")
|
||||
|
||||
var (
|
||||
blocks []*types.Block
|
||||
commits []*types.Commit
|
||||
thisBlockParts *types.PartSet
|
||||
thisBlockCommit *types.Commit
|
||||
blocks []*types.Block
|
||||
extCommits []*types.ExtendedCommit
|
||||
thisBlockParts *types.PartSet
|
||||
thisBlockExtCommit *types.ExtendedCommit
|
||||
)
|
||||
|
||||
dec := NewWALDecoder(gr)
|
||||
@@ -1082,12 +1082,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
|
||||
require.Equal(t, block.Height, height+1,
|
||||
"read bad block from wal. got height %d, expected %d", block.Height, height+1)
|
||||
|
||||
commitHeight := thisBlockCommit.Height
|
||||
commitHeight := thisBlockExtCommit.Height
|
||||
require.Equal(t, commitHeight, height+1,
|
||||
"commit doesnt match. got height %d, expected %d", commitHeight, height+1)
|
||||
|
||||
blocks = append(blocks, block)
|
||||
commits = append(commits, thisBlockCommit)
|
||||
extCommits = append(extCommits, thisBlockExtCommit)
|
||||
height++
|
||||
}
|
||||
case *types.PartSetHeader:
|
||||
@@ -1097,8 +1097,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
|
||||
require.NoError(t, err)
|
||||
case *types.Vote:
|
||||
if p.Type == tmproto.PrecommitType {
|
||||
thisBlockCommit = types.NewCommit(p.Height, p.Round,
|
||||
p.BlockID, []types.CommitSig{p.CommitSig()})
|
||||
thisBlockExtCommit = &types.ExtendedCommit{
|
||||
Height: p.Height,
|
||||
Round: p.Round,
|
||||
BlockID: p.BlockID,
|
||||
ExtendedSignatures: []types.ExtendedCommitSig{p.ExtendedCommitSig()},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1113,12 +1117,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1)
|
||||
commitHeight := thisBlockCommit.Height
|
||||
commitHeight := thisBlockExtCommit.Height
|
||||
require.Equal(t, commitHeight, height+1, "commit does not match. got height %d, expected %d", commitHeight, height+1)
|
||||
|
||||
blocks = append(blocks, block)
|
||||
commits = append(commits, thisBlockCommit)
|
||||
return blocks, commits
|
||||
extCommits = append(extCommits, thisBlockExtCommit)
|
||||
return blocks, extCommits
|
||||
}
|
||||
|
||||
func readPieceFromWAL(msg *TimedWALMessage) interface{} {
|
||||
@@ -1162,14 +1166,16 @@ func stateAndStore(
|
||||
// mock block store
|
||||
|
||||
type mockBlockStore struct {
|
||||
cfg *config.Config
|
||||
params types.ConsensusParams
|
||||
chain []*types.Block
|
||||
commits []*types.Commit
|
||||
base int64
|
||||
t *testing.T
|
||||
cfg *config.Config
|
||||
params types.ConsensusParams
|
||||
chain []*types.Block
|
||||
extCommits []*types.ExtendedCommit
|
||||
base int64
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
var _ sm.BlockStore = &mockBlockStore{}
|
||||
|
||||
// TODO: NewBlockStore(db.NewMemDB) ...
|
||||
func newMockBlockStore(t *testing.T, cfg *config.Config, params types.ConsensusParams) *mockBlockStore {
|
||||
return &mockBlockStore{
|
||||
@@ -1198,20 +1204,24 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
}
|
||||
}
|
||||
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
|
||||
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
|
||||
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) {
|
||||
}
|
||||
|
||||
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
return bs.commits[height-1]
|
||||
return bs.extCommits[height-1].StripExtensions()
|
||||
}
|
||||
func (bs *mockBlockStore) LoadSeenCommit() *types.Commit {
|
||||
return bs.commits[len(bs.commits)-1]
|
||||
return bs.extCommits[len(bs.extCommits)-1].StripExtensions()
|
||||
}
|
||||
func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit {
|
||||
return bs.extCommits[height-1]
|
||||
}
|
||||
|
||||
func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
|
||||
pruned := uint64(0)
|
||||
for i := int64(0); i < height-1; i++ {
|
||||
bs.chain[i] = nil
|
||||
bs.commits[i] = nil
|
||||
bs.extCommits[i] = nil
|
||||
pruned++
|
||||
}
|
||||
bs.base = height
|
||||
|
||||
@@ -695,24 +695,48 @@ func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) {
|
||||
// Reconstruct LastCommit from SeenCommit, which we saved along with the block,
|
||||
// (which happens even before saving the state)
|
||||
func (cs *State) reconstructLastCommit(state sm.State) {
|
||||
requireExtensions := requireVoteExtensions(cs.state.ConsensusParams.Vote.ExtensionRequireHeight, state.LastBlockHeight)
|
||||
votes, err := cs.votesFromExtendedCommit(state, requireExtensions)
|
||||
if err == nil {
|
||||
cs.LastCommit = votes
|
||||
return
|
||||
}
|
||||
if requireExtensions {
|
||||
panic(fmt.Sprintf("failed to reconstruct last commit; %s", err))
|
||||
}
|
||||
votes, err = cs.votesFromSeenCommit(state)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to reconstruct last commit; %s", err))
|
||||
}
|
||||
cs.LastCommit = votes
|
||||
}
|
||||
|
||||
func (cs *State) votesFromExtendedCommit(state sm.State, requireExtensions bool) (*types.VoteSet, error) {
|
||||
ec := cs.blockStore.LoadBlockExtendedCommit(state.LastBlockHeight)
|
||||
if ec == nil {
|
||||
return nil, fmt.Errorf("commit for height %v not found", state.LastBlockHeight)
|
||||
}
|
||||
vs := ec.ToVoteSet(state.ChainID, state.LastValidators, requireExtensions)
|
||||
if !vs.HasTwoThirdsMajority() {
|
||||
return nil, errors.New("seen commit does not have +2/3 majority")
|
||||
}
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
func (cs *State) votesFromSeenCommit(state sm.State) (*types.VoteSet, error) {
|
||||
commit := cs.blockStore.LoadSeenCommit()
|
||||
if commit == nil || commit.Height != state.LastBlockHeight {
|
||||
commit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight)
|
||||
}
|
||||
|
||||
if commit == nil {
|
||||
panic(fmt.Sprintf(
|
||||
"failed to reconstruct last commit; commit for height %v not found",
|
||||
state.LastBlockHeight,
|
||||
))
|
||||
return nil, fmt.Errorf("commit for height %v not found", state.LastBlockHeight)
|
||||
}
|
||||
|
||||
lastPrecommits := types.CommitToVoteSet(state.ChainID, commit, state.LastValidators)
|
||||
if !lastPrecommits.HasTwoThirdsMajority() {
|
||||
panic("failed to reconstruct last commit; does not have +2/3 maj")
|
||||
vs := commit.ToVoteSet(state.ChainID, state.LastValidators)
|
||||
if !vs.HasTwoThirdsMajority() {
|
||||
return nil, errors.New("commit does not have +2/3 majority")
|
||||
}
|
||||
|
||||
cs.LastCommit = lastPrecommits
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
// Updates State and increments height to match that of state.
|
||||
@@ -814,7 +838,7 @@ func (cs *State) updateToState(state sm.State) {
|
||||
cs.ValidRound = -1
|
||||
cs.ValidBlock = nil
|
||||
cs.ValidBlockParts = nil
|
||||
cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
|
||||
cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators, requireVoteExtensions(state.ConsensusParams.Vote.ExtensionRequireHeight, height))
|
||||
cs.CommitRound = -1
|
||||
cs.LastValidators = state.LastValidators
|
||||
cs.TriggeredTimeoutPrecommit = false
|
||||
@@ -1334,6 +1358,7 @@ func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round
|
||||
} else if block == nil {
|
||||
return
|
||||
}
|
||||
cs.metrics.ProposalCreateCount.Add(1)
|
||||
blockParts, err = block.MakePartSet(types.BlockPartSizeBytes)
|
||||
if err != nil {
|
||||
cs.logger.Error("unable to create proposal block part set", "error", err)
|
||||
@@ -1400,16 +1425,17 @@ func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error)
|
||||
return nil, errors.New("entered createProposalBlock with privValidator being nil")
|
||||
}
|
||||
|
||||
var commit *types.Commit
|
||||
// TODO(sergio): wouldn't it be easier if CreateProposalBlock accepted cs.LastCommit directly?
|
||||
var lastExtCommit *types.ExtendedCommit
|
||||
switch {
|
||||
case cs.Height == cs.state.InitialHeight:
|
||||
// We're creating a proposal for the first block.
|
||||
// The commit is empty, but not nil.
|
||||
commit = types.NewCommit(0, 0, types.BlockID{}, nil)
|
||||
lastExtCommit = &types.ExtendedCommit{}
|
||||
|
||||
case cs.LastCommit.HasTwoThirdsMajority():
|
||||
// Make the commit from LastCommit
|
||||
commit = cs.LastCommit.MakeCommit()
|
||||
lastExtCommit = cs.LastCommit.MakeExtendedCommit()
|
||||
|
||||
default: // This shouldn't happen.
|
||||
cs.logger.Error("propose step; cannot propose anything without commit for the previous block")
|
||||
@@ -1425,7 +1451,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error)
|
||||
|
||||
proposerAddr := cs.privValidatorPubKey.Address()
|
||||
|
||||
ret, err := cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr, cs.LastCommit.GetVotes())
|
||||
ret, err := cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, lastExtCommit, proposerAddr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -1531,6 +1557,7 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("ProcessProposal: %v", err))
|
||||
}
|
||||
cs.metrics.MarkProposalProcessed(isAppValid)
|
||||
|
||||
// Vote nil if the Application rejected the block
|
||||
if !isAppValid {
|
||||
@@ -1923,8 +1950,7 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
|
||||
// NOTE: the seenCommit is local justification to commit this block,
|
||||
// but may differ from the LastCommit included in the next block
|
||||
precommits := cs.Votes.Precommits(cs.CommitRound)
|
||||
seenCommit := precommits.MakeCommit()
|
||||
cs.blockStore.SaveBlock(block, blockParts, seenCommit)
|
||||
cs.blockStore.SaveBlock(block, blockParts, precommits.MakeExtendedCommit())
|
||||
} else {
|
||||
// Happens during replay if we already saved the block but didn't commit
|
||||
logger.Debug("calling finalizeCommit on already stored block", "height", block.Height)
|
||||
@@ -2026,7 +2052,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
|
||||
|
||||
for i, val := range cs.LastValidators.Validators {
|
||||
commitSig := block.LastCommit.Signatures[i]
|
||||
if commitSig.Absent() {
|
||||
if commitSig.BlockIDFlag == types.BlockIDFlagAbsent {
|
||||
missingValidators++
|
||||
missingValidatorsPower += val.VotingPower
|
||||
}
|
||||
@@ -2036,7 +2062,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
|
||||
"validator_address", val.Address.String(),
|
||||
}
|
||||
cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower))
|
||||
if commitSig.ForBlock() {
|
||||
if commitSig.BlockIDFlag == types.BlockIDFlagCommit {
|
||||
cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height))
|
||||
} else {
|
||||
cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1))
|
||||
@@ -2297,6 +2323,10 @@ func (cs *State) addVote(
|
||||
"cs_height", cs.Height,
|
||||
)
|
||||
|
||||
if vote.Height < cs.Height || (vote.Height == cs.Height && vote.Round < cs.Round) {
|
||||
cs.metrics.MarkLateVote(vote.Type)
|
||||
}
|
||||
|
||||
// A precommit for the previous height?
|
||||
// These come in while we wait timeoutCommit
|
||||
if vote.Height+1 == cs.Height && vote.Type == tmproto.PrecommitType {
|
||||
@@ -2306,6 +2336,7 @@ func (cs *State) addVote(
|
||||
return
|
||||
}
|
||||
|
||||
// late votes still _must_ have extensions.
|
||||
added, err = cs.LastCommit.AddVote(vote)
|
||||
if !added {
|
||||
return
|
||||
@@ -2335,10 +2366,33 @@ func (cs *State) addVote(
|
||||
return
|
||||
}
|
||||
|
||||
// Verify VoteExtension if precommit
|
||||
if vote.Type == tmproto.PrecommitType {
|
||||
if err = cs.blockExec.VerifyVoteExtension(ctx, vote); err != nil {
|
||||
return false, err
|
||||
// Verify VoteExtension if precommit and not nil
|
||||
// https://github.com/tendermint/tendermint/issues/8487
|
||||
if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() &&
|
||||
!bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) {
|
||||
// The core fields of the vote message were already validated in the
|
||||
// consensus reactor when the vote was received.
|
||||
// Here, we valdiate that the vote extension was included in the vote
|
||||
// message.
|
||||
// Chains that are not configured to require vote extensions
|
||||
// will consider the vote valid even if the extension is absent.
|
||||
// VerifyVoteExtension will not be called in this case if the extension
|
||||
// is absent.
|
||||
err := vote.EnsureExtension()
|
||||
if err == nil {
|
||||
_, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex)
|
||||
err = vote.VerifyWithExtension(cs.state.ChainID, val.PubKey)
|
||||
}
|
||||
if err == nil {
|
||||
err := cs.blockExec.VerifyVoteExtension(ctx, vote)
|
||||
cs.metrics.MarkVoteExtensionReceived(err == nil)
|
||||
} else {
|
||||
if !errors.Is(err, types.ErrVoteExtensionAbsent) {
|
||||
return false, err
|
||||
}
|
||||
if requireVoteExtensions(cs.state.ConsensusParams.Vote.ExtensionRequireHeight, cs.Height) {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2348,6 +2402,11 @@ func (cs *State) addVote(
|
||||
// Either duplicate, or error upon cs.Votes.AddByIndex()
|
||||
return
|
||||
}
|
||||
if vote.Round == cs.Round {
|
||||
vals := cs.state.Validators
|
||||
_, val := vals.GetByIndex(vote.ValidatorIndex)
|
||||
cs.metrics.MarkVoteReceived(vote.Type, val.VotingPower, vals.TotalVotingPower())
|
||||
}
|
||||
|
||||
if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil {
|
||||
return added, err
|
||||
@@ -2484,15 +2543,15 @@ func (cs *State) signVote(
|
||||
// use our local precommit Timeout as the max wait time for getting a singed commit. The same goes for prevote.
|
||||
timeout := cs.voteTimeout(cs.Round)
|
||||
|
||||
switch msgType {
|
||||
case tmproto.PrecommitType:
|
||||
// if the signedMessage type is for a precommit, add VoteExtension
|
||||
if msgType == tmproto.PrecommitType && !vote.BlockID.IsNil() {
|
||||
// if the signedMessage type is for a non-nil precommit, add
|
||||
// VoteExtension
|
||||
ext, err := cs.blockExec.ExtendVote(ctx, vote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vote.Extension = ext
|
||||
default:
|
||||
} else {
|
||||
timeout = time.Second
|
||||
}
|
||||
|
||||
@@ -2731,6 +2790,13 @@ func (cs *State) calculateProposalTimestampDifferenceMetric() {
|
||||
}
|
||||
}
|
||||
|
||||
func requireVoteExtensions(requireHeight, currentHeight int64) bool {
|
||||
if requireHeight == 0 || currentHeight < requireHeight {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// proposerWaitTime determines how long the proposer should wait to propose its next block.
|
||||
// If the result is zero, a block can be proposed immediately.
|
||||
//
|
||||
|
||||
@@ -1950,7 +1950,7 @@ func TestFinalizeBlockCalled(t *testing.T) {
|
||||
expectCalled bool
|
||||
}{
|
||||
{
|
||||
name: "finalze block called when block committed",
|
||||
name: "finalize block called when block committed",
|
||||
voteNil: false,
|
||||
expectCalled: true,
|
||||
},
|
||||
@@ -1970,11 +1970,15 @@ func TestFinalizeBlockCalled(t *testing.T) {
|
||||
Status: abci.ResponseProcessProposal_ACCEPT,
|
||||
}, nil)
|
||||
m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil)
|
||||
m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{
|
||||
Status: abci.ResponseVerifyVoteExtension_ACCEPT,
|
||||
}, nil)
|
||||
// We only expect VerifyVoteExtension to be called on non-nil precommits.
|
||||
// https://github.com/tendermint/tendermint/issues/8487
|
||||
if !testCase.voteNil {
|
||||
m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil)
|
||||
m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{
|
||||
Status: abci.ResponseVerifyVoteExtension_ACCEPT,
|
||||
}, nil)
|
||||
}
|
||||
m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe()
|
||||
m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil)
|
||||
m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe()
|
||||
|
||||
cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m})
|
||||
@@ -2072,19 +2076,13 @@ func TestExtendVoteCalled(t *testing.T) {
|
||||
Hash: blockID.Hash,
|
||||
})
|
||||
|
||||
m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{
|
||||
Hash: blockID.Hash,
|
||||
ValidatorAddress: addr,
|
||||
Height: height,
|
||||
VoteExtension: []byte("extension"),
|
||||
})
|
||||
signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...)
|
||||
ensureNewRound(t, newRoundCh, height+1, 0)
|
||||
m.AssertExpectations(t)
|
||||
|
||||
// Only 3 of the vote extensions are seen, as consensus proceeds as soon as the +2/3 threshold
|
||||
// is observed by the consensus engine.
|
||||
for _, pv := range vss[:3] {
|
||||
for _, pv := range vss[1:3] {
|
||||
pv, err := pv.GetPubKey(ctx)
|
||||
require.NoError(t, err)
|
||||
addr := pv.Address()
|
||||
@@ -2144,13 +2142,6 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) {
|
||||
Hash: blockID.Hash,
|
||||
})
|
||||
|
||||
m.AssertCalled(t, "VerifyVoteExtension", mock.Anything, &abci.RequestVerifyVoteExtension{
|
||||
Hash: blockID.Hash,
|
||||
ValidatorAddress: addr,
|
||||
Height: height,
|
||||
VoteExtension: []byte("extension"),
|
||||
})
|
||||
|
||||
m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe()
|
||||
signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[2:]...)
|
||||
ensureNewRound(t, newRoundCh, height+1, 0)
|
||||
@@ -2262,6 +2253,118 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestVoteExtensionRequiredHeight tests that 'ExtensionRequireHeight' correctly
|
||||
// enforces that vote extensions be present in consensus for heights greater than
|
||||
// or equal to the configured value.
|
||||
func TestVoteExtensionRequiredHeight(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
initialRequiredHeight int64
|
||||
hasExtension bool
|
||||
expectSuccessfulRound bool
|
||||
}{
|
||||
{
|
||||
name: "extension present but not required",
|
||||
hasExtension: true,
|
||||
initialRequiredHeight: 0,
|
||||
expectSuccessfulRound: true,
|
||||
},
|
||||
{
|
||||
name: "extension absent but not required",
|
||||
hasExtension: false,
|
||||
initialRequiredHeight: 0,
|
||||
expectSuccessfulRound: true,
|
||||
},
|
||||
{
|
||||
name: "extension present and required",
|
||||
hasExtension: true,
|
||||
initialRequiredHeight: 1,
|
||||
expectSuccessfulRound: true,
|
||||
},
|
||||
{
|
||||
name: "extension absent but required",
|
||||
hasExtension: false,
|
||||
initialRequiredHeight: 1,
|
||||
expectSuccessfulRound: false,
|
||||
},
|
||||
{
|
||||
name: "extension absent but required in future height",
|
||||
hasExtension: false,
|
||||
initialRequiredHeight: 2,
|
||||
expectSuccessfulRound: true,
|
||||
},
|
||||
} {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
config := configSetup(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
numValidators := 3
|
||||
m := abcimocks.NewApplication(t)
|
||||
m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{
|
||||
Status: abci.ResponseProcessProposal_ACCEPT,
|
||||
}, nil)
|
||||
m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil)
|
||||
m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil)
|
||||
if testCase.hasExtension {
|
||||
m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{
|
||||
Status: abci.ResponseVerifyVoteExtension_ACCEPT,
|
||||
}, nil).Times(numValidators - 1)
|
||||
}
|
||||
m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe()
|
||||
m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe()
|
||||
cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m, validators: numValidators})
|
||||
cs1.state.ConsensusParams.Vote.ExtensionRequireHeight = testCase.initialRequiredHeight
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
timeoutCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
|
||||
pv1, err := cs1.privValidator.GetPubKey(ctx)
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
voteCh := subscribeToVoter(ctx, t, cs1, addr)
|
||||
|
||||
startTestRound(ctx, cs1, cs1.Height, round)
|
||||
ensureNewRound(t, newRoundCh, height, round)
|
||||
ensureNewProposal(t, proposalCh, height, round)
|
||||
rs := cs1.GetRoundState()
|
||||
|
||||
blockID := types.BlockID{
|
||||
Hash: rs.ProposalBlock.Hash(),
|
||||
PartSetHeader: rs.ProposalBlockParts.Header(),
|
||||
}
|
||||
|
||||
// sign all of the votes
|
||||
signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...)
|
||||
ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash())
|
||||
|
||||
var ext []byte
|
||||
if testCase.hasExtension {
|
||||
ext = []byte("extension")
|
||||
}
|
||||
|
||||
for _, vs := range vss[1:] {
|
||||
vote, err := vs.signVote(ctx, tmproto.PrecommitType, config.ChainID(), blockID, ext)
|
||||
if !testCase.hasExtension {
|
||||
vote.ExtensionSignature = nil
|
||||
}
|
||||
require.NoError(t, err)
|
||||
addVotes(cs1, vote)
|
||||
}
|
||||
if testCase.expectSuccessfulRound {
|
||||
ensurePrecommit(t, voteCh, height, round)
|
||||
height++
|
||||
ensureNewRound(t, newRoundCh, height, round)
|
||||
} else {
|
||||
ensureNoNewTimeout(t, timeoutCh, cs1.state.ConsensusParams.Timeout.VoteTimeout(round).Nanoseconds())
|
||||
}
|
||||
|
||||
m.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// 4 vals, 3 Nil Precommits at P0
|
||||
// What we want:
|
||||
// P0 waits for timeoutPrecommit before starting next round
|
||||
|
||||
@@ -38,9 +38,10 @@ We let each peer provide us with up to 2 unexpected "catchup" rounds.
|
||||
One for their LastCommit round, and another for the official commit round.
|
||||
*/
|
||||
type HeightVoteSet struct {
|
||||
chainID string
|
||||
height int64
|
||||
valSet *types.ValidatorSet
|
||||
chainID string
|
||||
height int64
|
||||
valSet *types.ValidatorSet
|
||||
requireExtensions bool
|
||||
|
||||
mtx sync.Mutex
|
||||
round int32 // max tracked round
|
||||
@@ -48,9 +49,10 @@ type HeightVoteSet struct {
|
||||
peerCatchupRounds map[types.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds
|
||||
}
|
||||
|
||||
func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet {
|
||||
func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet, requireExtensions bool) *HeightVoteSet {
|
||||
hvs := &HeightVoteSet{
|
||||
chainID: chainID,
|
||||
chainID: chainID,
|
||||
requireExtensions: requireExtensions,
|
||||
}
|
||||
hvs.Reset(height, valSet)
|
||||
return hvs
|
||||
@@ -107,8 +109,8 @@ func (hvs *HeightVoteSet) addRound(round int32) {
|
||||
panic("addRound() for an existing round")
|
||||
}
|
||||
// log.Debug("addRound(round)", "round", round)
|
||||
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet)
|
||||
precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet)
|
||||
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet, hvs.requireExtensions)
|
||||
precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet, hvs.requireExtensions)
|
||||
hvs.roundVoteSets[round] = RoundVoteSet{
|
||||
Prevotes: prevotes,
|
||||
Precommits: precommits,
|
||||
|
||||
@@ -27,7 +27,7 @@ func TestPeerCatchupRounds(t *testing.T) {
|
||||
valSet, privVals := factory.ValidatorSet(ctx, t, 10, 1)
|
||||
|
||||
chainID := cfg.ChainID()
|
||||
hvs := NewHeightVoteSet(chainID, 1, valSet)
|
||||
hvs := NewHeightVoteSet(chainID, 1, valSet, false)
|
||||
|
||||
vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals, chainID)
|
||||
added, err := hvs.AddVote(vote999_0, "peer1")
|
||||
|
||||
@@ -249,8 +249,8 @@ func TestEvidencePoolUpdate(t *testing.T) {
|
||||
evidenceChainID,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
lastCommit := makeCommit(height, val.PrivKey.PubKey().Address())
|
||||
block := types.MakeBlock(height+1, []types.Tx{}, lastCommit, []types.Evidence{ev})
|
||||
lastExtCommit := makeExtCommit(height, val.PrivKey.PubKey().Address())
|
||||
block := types.MakeBlock(height+1, []types.Tx{}, lastExtCommit.StripExtensions(), []types.Evidence{ev})
|
||||
|
||||
// update state (partially)
|
||||
state.LastBlockHeight = height + 1
|
||||
@@ -568,8 +568,8 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo
|
||||
blockStore := store.NewBlockStore(db)
|
||||
|
||||
for i := int64(1); i <= state.LastBlockHeight; i++ {
|
||||
lastCommit := makeCommit(i-1, valAddr)
|
||||
block := sf.MakeBlock(state, i, lastCommit)
|
||||
lastCommit := makeExtCommit(i-1, valAddr)
|
||||
block := sf.MakeBlock(state, i, lastCommit.StripExtensions())
|
||||
|
||||
block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute)
|
||||
block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1}
|
||||
@@ -579,22 +579,25 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo
|
||||
return nil, err
|
||||
}
|
||||
|
||||
seenCommit := makeCommit(i, valAddr)
|
||||
seenCommit := makeExtCommit(i, valAddr)
|
||||
blockStore.SaveBlock(block, partSet, seenCommit)
|
||||
}
|
||||
|
||||
return blockStore, nil
|
||||
}
|
||||
|
||||
func makeCommit(height int64, valAddr []byte) *types.Commit {
|
||||
commitSigs := []types.CommitSig{{
|
||||
BlockIDFlag: types.BlockIDFlagCommit,
|
||||
ValidatorAddress: valAddr,
|
||||
Timestamp: defaultEvidenceTime,
|
||||
Signature: []byte("Signature"),
|
||||
}}
|
||||
|
||||
return types.NewCommit(height, 0, types.BlockID{}, commitSigs)
|
||||
func makeExtCommit(height int64, valAddr []byte) *types.ExtendedCommit {
|
||||
return &types.ExtendedCommit{
|
||||
Height: height,
|
||||
ExtendedSignatures: []types.ExtendedCommitSig{{
|
||||
CommitSig: types.CommitSig{
|
||||
BlockIDFlag: types.BlockIDFlagCommit,
|
||||
ValidatorAddress: valAddr,
|
||||
Timestamp: defaultEvidenceTime,
|
||||
Signature: []byte("Signature"),
|
||||
},
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV, *eventbus.EventBus) {
|
||||
|
||||
@@ -233,9 +233,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
|
||||
// we are simulating a duplicate vote attack where all the validators in the conflictingVals set
|
||||
// except the last validator vote twice
|
||||
blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash())
|
||||
voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals)
|
||||
commit, err := factory.MakeCommit(ctx, blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime)
|
||||
voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals, false)
|
||||
extCommit, err := factory.MakeExtendedCommit(ctx, blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime)
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
ev := &types.LightClientAttackEvidence{
|
||||
ConflictingBlock: &types.LightBlock{
|
||||
@@ -252,10 +253,11 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
|
||||
}
|
||||
|
||||
trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash"))
|
||||
trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals)
|
||||
trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, 10, 1,
|
||||
trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals, false)
|
||||
trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, 10, 1,
|
||||
trustedVoteSet, conflictingPrivVals, defaultEvidenceTime)
|
||||
require.NoError(t, err)
|
||||
trustedCommit := trustedExtCommit.StripExtensions()
|
||||
|
||||
trustedSignedHeader := &types.SignedHeader{
|
||||
Header: trustedHeader,
|
||||
@@ -334,9 +336,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
|
||||
// we are simulating an amnesia attack where all the validators in the conflictingVals set
|
||||
// except the last validator vote twice. However this time the commits are of different rounds.
|
||||
blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash"))
|
||||
voteSet := types.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals)
|
||||
commit, err := factory.MakeCommit(ctx, blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime)
|
||||
voteSet := types.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals, false)
|
||||
extCommit, err := factory.MakeExtendedCommit(ctx, blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime)
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
ev := &types.LightClientAttackEvidence{
|
||||
ConflictingBlock: &types.LightBlock{
|
||||
@@ -353,10 +356,11 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
|
||||
}
|
||||
|
||||
trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash"))
|
||||
trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals)
|
||||
trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, height, 1,
|
||||
trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals, false)
|
||||
trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, height, 1,
|
||||
trustedVoteSet, conflictingPrivVals, defaultEvidenceTime)
|
||||
require.NoError(t, err)
|
||||
trustedCommit := trustedExtCommit.StripExtensions()
|
||||
|
||||
trustedSignedHeader := &types.SignedHeader{
|
||||
Header: trustedHeader,
|
||||
@@ -549,9 +553,10 @@ func makeLunaticEvidence(
|
||||
})
|
||||
|
||||
blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash())
|
||||
voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals)
|
||||
commit, err := factory.MakeCommit(ctx, blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime)
|
||||
voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals, false)
|
||||
extCommit, err := factory.MakeExtendedCommit(ctx, blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime)
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
ev = &types.LightClientAttackEvidence{
|
||||
ConflictingBlock: &types.LightBlock{
|
||||
@@ -577,9 +582,10 @@ func makeLunaticEvidence(
|
||||
}
|
||||
trustedBlockID := factory.MakeBlockIDWithHash(trustedHeader.Hash())
|
||||
trustedVals, privVals := factory.ValidatorSet(ctx, t, totalVals, defaultVotingPower)
|
||||
trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals)
|
||||
trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime)
|
||||
trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals, false)
|
||||
trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime)
|
||||
require.NoError(t, err)
|
||||
trustedCommit := trustedExtCommit.StripExtensions()
|
||||
|
||||
trusted = &types.LightBlock{
|
||||
SignedHeader: &types.SignedHeader{
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/crypto/merkle"
|
||||
"github.com/tendermint/tendermint/internal/eventbus"
|
||||
@@ -87,9 +88,8 @@ func (blockExec *BlockExecutor) CreateProposalBlock(
|
||||
ctx context.Context,
|
||||
height int64,
|
||||
state State,
|
||||
commit *types.Commit,
|
||||
lastExtCommit *types.ExtendedCommit,
|
||||
proposerAddr []byte,
|
||||
votes []*types.Vote,
|
||||
) (*types.Block, error) {
|
||||
|
||||
maxBytes := state.ConsensusParams.Block.MaxBytes
|
||||
@@ -101,15 +101,14 @@ func (blockExec *BlockExecutor) CreateProposalBlock(
|
||||
maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size())
|
||||
|
||||
txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas)
|
||||
commit := lastExtCommit.StripExtensions()
|
||||
block := state.MakeBlock(height, txs, commit, evidence, proposerAddr)
|
||||
|
||||
localLastCommit := buildLastCommitInfo(block, blockExec.store, state.InitialHeight)
|
||||
rpp, err := blockExec.appClient.PrepareProposal(
|
||||
ctx,
|
||||
&abci.RequestPrepareProposal{
|
||||
MaxTxBytes: maxDataBytes,
|
||||
Txs: block.Txs.ToSliceOfBytes(),
|
||||
LocalLastCommit: extendedCommitInfo(localLastCommit, votes),
|
||||
LocalLastCommit: buildExtendedCommitInfo(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.Vote.ExtensionRequireHeight),
|
||||
ByzantineValidators: block.Evidence.ToABCI(),
|
||||
Height: block.Height,
|
||||
Time: block.Time,
|
||||
@@ -247,6 +246,10 @@ func (blockExec *BlockExecutor) ApplyBlock(
|
||||
}
|
||||
if len(validatorUpdates) > 0 {
|
||||
blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates))
|
||||
blockExec.metrics.ValidatorSetUpdates.Add(1)
|
||||
}
|
||||
if finalizeBlockResponse.ConsensusParamUpdates != nil {
|
||||
blockExec.metrics.ConsensusParamUpdates.Add(1)
|
||||
}
|
||||
|
||||
// Update the state with the block and responses.
|
||||
@@ -318,7 +321,7 @@ func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *t
|
||||
}
|
||||
|
||||
if !resp.IsOK() {
|
||||
return types.ErrVoteInvalidExtension
|
||||
return errors.New("invalid vote extension")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -377,14 +380,14 @@ func (blockExec *BlockExecutor) Commit(
|
||||
|
||||
func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo {
|
||||
if block.Height == initialHeight {
|
||||
// there is no last commmit for the initial height.
|
||||
// there is no last commit for the initial height.
|
||||
// return an empty value.
|
||||
return abci.CommitInfo{}
|
||||
}
|
||||
|
||||
lastValSet, err := store.LoadValidators(block.Height - 1)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
panic(fmt.Errorf("failed to load validator set at height %d: %w", block.Height-1, err))
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -406,7 +409,7 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a
|
||||
commitSig := block.LastCommit.Signatures[i]
|
||||
votes[i] = abci.VoteInfo{
|
||||
Validator: types.TM2PB.Validator(val),
|
||||
SignedLastBlock: !commitSig.Absent(),
|
||||
SignedLastBlock: commitSig.BlockIDFlag != types.BlockIDFlagAbsent,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -416,44 +419,74 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a
|
||||
}
|
||||
}
|
||||
|
||||
// extendedCommitInfo expects a CommitInfo struct along with all of the
|
||||
// original votes relating to that commit, including their vote extensions. The
|
||||
// order of votes does not matter.
|
||||
func extendedCommitInfo(c abci.CommitInfo, votes []*types.Vote) abci.ExtendedCommitInfo {
|
||||
if len(c.Votes) != len(votes) {
|
||||
panic(fmt.Sprintf("extendedCommitInfo: number of votes from commit differ from the number of votes supplied (%d != %d)", len(c.Votes), len(votes)))
|
||||
// buildExtendedCommitInfo populates an ABCI extended commit from the
|
||||
// corresponding Tendermint extended commit ec, using the stored validator set
|
||||
// from ec. It requires ec to include the original precommit votes along with
|
||||
// the vote extensions from the last commit.
|
||||
//
|
||||
// For heights below the initial height, for which we do not have the required
|
||||
// data, it returns an empty record.
|
||||
//
|
||||
// Assumes that the commit signatures are sorted according to validator index.
|
||||
func buildExtendedCommitInfo(ec *types.ExtendedCommit, store Store, initialHeight, extensionRequireHeight int64) abci.ExtendedCommitInfo {
|
||||
if ec.Height < initialHeight {
|
||||
// There are no extended commits for heights below the initial height.
|
||||
return abci.ExtendedCommitInfo{}
|
||||
}
|
||||
votesByVal := make(map[string]*types.Vote)
|
||||
for _, vote := range votes {
|
||||
if vote != nil {
|
||||
valAddr := vote.ValidatorAddress.String()
|
||||
if _, ok := votesByVal[valAddr]; ok {
|
||||
panic(fmt.Sprintf("extendedCommitInfo: found duplicate vote for validator with address %s", valAddr))
|
||||
}
|
||||
votesByVal[valAddr] = vote
|
||||
|
||||
valSet, err := store.LoadValidators(ec.Height)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to load validator set at height %d, initial height %d: %w", ec.Height, initialHeight, err))
|
||||
}
|
||||
|
||||
var (
|
||||
ecSize = ec.Size()
|
||||
valSetLen = len(valSet.Validators)
|
||||
)
|
||||
|
||||
// Ensure that the size of the validator set in the extended commit matches
|
||||
// the size of the validator set in the state store.
|
||||
if ecSize != valSetLen {
|
||||
panic(fmt.Errorf(
|
||||
"extended commit size (%d) does not match validator set length (%d) at height %d\n\n%v\n\n%v",
|
||||
ecSize, valSetLen, ec.Height, ec.ExtendedSignatures, valSet.Validators,
|
||||
))
|
||||
}
|
||||
|
||||
votes := make([]abci.ExtendedVoteInfo, ecSize)
|
||||
for i, val := range valSet.Validators {
|
||||
ecs := ec.ExtendedSignatures[i]
|
||||
|
||||
// Absent signatures have empty validator addresses, but otherwise we
|
||||
// expect the validator addresses to be the same.
|
||||
if ecs.BlockIDFlag != types.BlockIDFlagAbsent && !bytes.Equal(ecs.ValidatorAddress, val.Address) {
|
||||
panic(fmt.Errorf("validator address of extended commit signature in position %d (%s) does not match the corresponding validator's at height %d (%s)",
|
||||
i, ecs.ValidatorAddress, ec.Height, val.Address,
|
||||
))
|
||||
}
|
||||
}
|
||||
vs := make([]abci.ExtendedVoteInfo, len(c.Votes))
|
||||
for i := range vs {
|
||||
|
||||
var ext []byte
|
||||
// votes[i] will be nil if c.Votes[i].SignedLastBlock is false
|
||||
if c.Votes[i].SignedLastBlock {
|
||||
valAddr := crypto.Address(c.Votes[i].Validator.Address).String()
|
||||
vote, ok := votesByVal[valAddr]
|
||||
if !ok || vote == nil {
|
||||
panic(fmt.Sprintf("extendedCommitInfo: validator with address %s signed last block, but could not find vote for it", valAddr))
|
||||
if ecs.BlockIDFlag == types.BlockIDFlagCommit {
|
||||
// We only care about vote extensions if a validator has voted to
|
||||
// commit.
|
||||
if ecs.Extension == nil && ecs.ExtensionSignature == nil &&
|
||||
extensionRequireHeight != 0 && ec.Height >= extensionRequireHeight {
|
||||
// TODO: this error is akward, make it better
|
||||
panic(fmt.Errorf("commit received with missing vote extension data"))
|
||||
}
|
||||
ext = vote.Extension
|
||||
ext = ecs.Extension
|
||||
}
|
||||
vs[i] = abci.ExtendedVoteInfo{
|
||||
Validator: c.Votes[i].Validator,
|
||||
SignedLastBlock: c.Votes[i].SignedLastBlock,
|
||||
|
||||
votes[i] = abci.ExtendedVoteInfo{
|
||||
Validator: types.TM2PB.Validator(val),
|
||||
SignedLastBlock: ecs.BlockIDFlag != types.BlockIDFlagAbsent,
|
||||
VoteExtension: ext,
|
||||
}
|
||||
}
|
||||
|
||||
return abci.ExtendedCommitInfo{
|
||||
Round: c.Round,
|
||||
Votes: vs,
|
||||
Round: ec.Round,
|
||||
Votes: votes,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -79,9 +79,10 @@ func TestApplyBlock(t *testing.T) {
|
||||
assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated")
|
||||
}
|
||||
|
||||
// TestFinalizeBlockDecidedLastCommit ensures we correctly send the DecidedLastCommit to the
|
||||
// application. The test ensures that the DecidedLastCommit properly reflects
|
||||
// which validators signed the preceding block.
|
||||
// TestFinalizeBlockDecidedLastCommit ensures we correctly send the
|
||||
// DecidedLastCommit to the application. The test ensures that the
|
||||
// DecidedLastCommit properly reflects which validators signed the preceding
|
||||
// block.
|
||||
func TestFinalizeBlockDecidedLastCommit(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -96,7 +97,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) {
|
||||
|
||||
state, stateDB, privVals := makeState(t, 7, 1)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
absentSig := types.NewCommitSigAbsent()
|
||||
absentSig := types.NewExtendedCommitSigAbsent()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
@@ -134,12 +135,12 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) {
|
||||
|
||||
for idx, isAbsent := range tc.absentCommitSigs {
|
||||
if isAbsent {
|
||||
lastCommit.Signatures[idx] = absentSig
|
||||
lastCommit.ExtendedSignatures[idx] = absentSig
|
||||
}
|
||||
}
|
||||
|
||||
// block for height 2
|
||||
block := sf.MakeBlock(state, 2, lastCommit)
|
||||
block := sf.MakeBlock(state, 2, lastCommit.StripExtensions())
|
||||
bps, err := block.MakePartSet(testPartSize)
|
||||
require.NoError(t, err)
|
||||
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}
|
||||
@@ -198,12 +199,15 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) {
|
||||
ConflictingBlock: &types.LightBlock{
|
||||
SignedHeader: &types.SignedHeader{
|
||||
Header: header,
|
||||
Commit: types.NewCommit(10, 0, makeBlockID(header.Hash(), 100, []byte("partshash")), []types.CommitSig{{
|
||||
BlockIDFlag: types.BlockIDFlagNil,
|
||||
ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
|
||||
Timestamp: defaultEvidenceTime,
|
||||
Signature: crypto.CRandBytes(types.MaxSignatureSize),
|
||||
}}),
|
||||
Commit: &types.Commit{
|
||||
Height: 10,
|
||||
BlockID: makeBlockID(header.Hash(), 100, []byte("partshash")),
|
||||
Signatures: []types.CommitSig{{
|
||||
BlockIDFlag: types.BlockIDFlagNil,
|
||||
ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
|
||||
Timestamp: defaultEvidenceTime,
|
||||
Signature: crypto.CRandBytes(types.MaxSignatureSize)}},
|
||||
},
|
||||
},
|
||||
ValidatorSet: state.Validators,
|
||||
},
|
||||
@@ -324,8 +328,10 @@ func TestProcessProposal(t *testing.T) {
|
||||
lastCommitSig = append(lastCommitSig, vote.CommitSig())
|
||||
}
|
||||
|
||||
lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, lastCommitSig)
|
||||
block1 := sf.MakeBlock(state, height, lastCommit)
|
||||
block1 := sf.MakeBlock(state, height, &types.Commit{
|
||||
Height: height - 1,
|
||||
Signatures: lastCommitSig,
|
||||
})
|
||||
block1.Txs = txs
|
||||
|
||||
expectedRpp := &abci.RequestProcessProposal{
|
||||
@@ -653,8 +659,8 @@ func TestEmptyPrepareProposal(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
pa, _ := state.Validators.GetByIndex(0)
|
||||
commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
_, err = blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes)
|
||||
commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
_, err = blockExec.CreateProposalBlock(ctx, height, state, commit, pa)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -708,8 +714,8 @@ func TestPrepareProposalErrorOnNonExistingRemoved(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
pa, _ := state.Validators.GetByIndex(0)
|
||||
commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes)
|
||||
commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa)
|
||||
require.ErrorContains(t, err, "new transaction incorrectly marked as removed")
|
||||
require.Nil(t, block)
|
||||
|
||||
@@ -764,8 +770,8 @@ func TestPrepareProposalRemoveTxs(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
pa, _ := state.Validators.GetByIndex(0)
|
||||
commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes)
|
||||
commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, block.Data.Txs.ToSliceOfBytes(), len(trs)-2)
|
||||
|
||||
@@ -823,8 +829,8 @@ func TestPrepareProposalAddedTxsIncluded(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
pa, _ := state.Validators.GetByIndex(0)
|
||||
commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes)
|
||||
commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, txs[0], block.Data.Txs[0])
|
||||
@@ -879,8 +885,8 @@ func TestPrepareProposalReorderTxs(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
pa, _ := state.Validators.GetByIndex(0)
|
||||
commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes)
|
||||
commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa)
|
||||
require.NoError(t, err)
|
||||
for i, tx := range block.Data.Txs {
|
||||
require.Equal(t, types.Tx(trs[i].Tx), tx)
|
||||
@@ -939,9 +945,8 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
pa, _ := state.Validators.GetByIndex(0)
|
||||
commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes)
|
||||
commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa)
|
||||
require.ErrorContains(t, err, "transaction data size exceeds maximum")
|
||||
require.Nil(t, block, "")
|
||||
|
||||
@@ -991,9 +996,8 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
pa, _ := state.Validators.GetByIndex(0)
|
||||
commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes)
|
||||
commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
|
||||
block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa)
|
||||
require.Nil(t, block)
|
||||
require.ErrorContains(t, err, "an injected error")
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ func makeAndCommitGoodBlock(
|
||||
blockExec *sm.BlockExecutor,
|
||||
privVals map[string]types.PrivValidator,
|
||||
evidence []types.Evidence,
|
||||
) (sm.State, types.BlockID, *types.Commit) {
|
||||
) (sm.State, types.BlockID, *types.ExtendedCommit) {
|
||||
t.Helper()
|
||||
|
||||
// A good block passes
|
||||
@@ -82,19 +82,23 @@ func makeValidCommit(
|
||||
blockID types.BlockID,
|
||||
vals *types.ValidatorSet,
|
||||
privVals map[string]types.PrivValidator,
|
||||
) (*types.Commit, []*types.Vote) {
|
||||
) (*types.ExtendedCommit, []*types.Vote) {
|
||||
t.Helper()
|
||||
sigs := make([]types.CommitSig, vals.Size())
|
||||
sigs := make([]types.ExtendedCommitSig, vals.Size())
|
||||
votes := make([]*types.Vote, vals.Size())
|
||||
for i := 0; i < vals.Size(); i++ {
|
||||
_, val := vals.GetByIndex(int32(i))
|
||||
vote, err := factory.MakeVote(ctx, privVals[val.Address.String()], chainID, int32(i), height, 0, 2, blockID, time.Now())
|
||||
require.NoError(t, err)
|
||||
sigs[i] = vote.CommitSig()
|
||||
sigs[i] = vote.ExtendedCommitSig()
|
||||
votes[i] = vote
|
||||
}
|
||||
|
||||
return types.NewCommit(height, 0, blockID, sigs), votes
|
||||
return &types.ExtendedCommit{
|
||||
Height: height,
|
||||
BlockID: blockID,
|
||||
ExtendedSignatures: sigs,
|
||||
}, votes
|
||||
}
|
||||
|
||||
func makeState(t *testing.T, nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) {
|
||||
|
||||
51
internal/state/indexer/metrics.gen.go
Normal file
51
internal/state/indexer/metrics.gen.go
Normal file
@@ -0,0 +1,51 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package indexer
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "block_events_seconds",
|
||||
Help: "Latency for indexing block events.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "tx_events_seconds",
|
||||
Help: "Latency for indexing transaction events.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "blocks_indexed",
|
||||
Help: "Number of complete blocks indexed.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "transactions_indexed",
|
||||
Help: "Number of transactions indexed.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
BlockEventsSeconds: discard.NewHistogram(),
|
||||
TxEventsSeconds: discard.NewHistogram(),
|
||||
BlocksIndexed: discard.NewCounter(),
|
||||
TransactionsIndexed: discard.NewCounter(),
|
||||
}
|
||||
}
|
||||
@@ -2,12 +2,10 @@ package indexer
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/tendermint/tendermint/scripts/metricsgen -struct=Metrics
|
||||
|
||||
// MetricsSubsystem is a the subsystem label for the indexer package.
|
||||
const MetricsSubsystem = "indexer"
|
||||
|
||||
@@ -25,49 +23,3 @@ type Metrics struct {
|
||||
// Number of transactions indexed.
|
||||
TransactionsIndexed metrics.Counter
|
||||
}
|
||||
|
||||
// PrometheusMetrics returns Metrics build using Prometheus client library.
|
||||
// Optionally, labels can be provided along with their values ("foo",
|
||||
// "fooValue").
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "block_events_seconds",
|
||||
Help: "Latency for indexing block events.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "tx_events_seconds",
|
||||
Help: "Latency for indexing transaction events.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "blocks_indexed",
|
||||
Help: "Number of complete blocks indexed.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "transactions_indexed",
|
||||
Help: "Number of transactions indexed.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
// NopMetrics returns an indexer metrics stub that discards all samples.
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
BlockEventsSeconds: discard.NewHistogram(),
|
||||
TxEventsSeconds: discard.NewHistogram(),
|
||||
BlocksIndexed: discard.NewCounter(),
|
||||
TransactionsIndexed: discard.NewCounter(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
indexer "github.com/tendermint/tendermint/internal/state/indexer"
|
||||
|
||||
query "github.com/tendermint/tendermint/internal/pubsub/query"
|
||||
|
||||
@@ -17,6 +17,14 @@ const (
|
||||
type Metrics struct {
|
||||
// Time between BeginBlock and EndBlock.
|
||||
BlockProcessingTime metrics.Histogram
|
||||
|
||||
// ConsensusParamUpdates is the total number of times the application has
|
||||
// udated the consensus params since process start.
|
||||
ConsensusParamUpdates metrics.Counter
|
||||
|
||||
// ValidatorSetUpdates is the total number of times the application has
|
||||
// udated the validator set since process start.
|
||||
ValidatorSetUpdates metrics.Counter
|
||||
}
|
||||
|
||||
// PrometheusMetrics returns Metrics build using Prometheus client library.
|
||||
@@ -35,12 +43,29 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Help: "Time between BeginBlock and EndBlock in ms.",
|
||||
Buckets: stdprometheus.LinearBuckets(1, 10, 10),
|
||||
}, labels).With(labelsAndValues...),
|
||||
ConsensusParamUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "consensus_param_updates",
|
||||
Help: "The total number of times the application as updated the consensus " +
|
||||
"parameters since process start.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
ValidatorSetUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "validator_set_updates",
|
||||
Help: "The total number of times the application as updated the validator " +
|
||||
"set since process start.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
// NopMetrics returns no-op Metrics.
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
BlockProcessingTime: discard.NewHistogram(),
|
||||
BlockProcessingTime: discard.NewHistogram(),
|
||||
ConsensusParamUpdates: discard.NewCounter(),
|
||||
ValidatorSetUpdates: discard.NewCounter(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,6 +107,22 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
return r0
|
||||
}
|
||||
|
||||
// LoadBlockExtendedCommit provides a mock function with given fields: height
|
||||
func (_m *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit {
|
||||
ret := _m.Called(height)
|
||||
|
||||
var r0 *types.ExtendedCommit
|
||||
if rf, ok := ret.Get(0).(func(int64) *types.ExtendedCommit); ok {
|
||||
r0 = rf(height)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ExtendedCommit)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// LoadBlockMeta provides a mock function with given fields: height
|
||||
func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
ret := _m.Called(height)
|
||||
@@ -193,7 +209,7 @@ func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) {
|
||||
}
|
||||
|
||||
// SaveBlock provides a mock function with given fields: block, blockParts, seenCommit
|
||||
func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
|
||||
func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) {
|
||||
_m.Called(block, blockParts, seenCommit)
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
state "github.com/tendermint/tendermint/internal/state"
|
||||
|
||||
testing "testing"
|
||||
|
||||
@@ -4,6 +4,7 @@ package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
state "github.com/tendermint/tendermint/internal/state"
|
||||
tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ type BlockStore interface {
|
||||
LoadBlockMeta(height int64) *types.BlockMeta
|
||||
LoadBlock(height int64) *types.Block
|
||||
|
||||
SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
|
||||
SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit)
|
||||
|
||||
PruneBlocks(height int64) (uint64, error)
|
||||
|
||||
@@ -36,6 +36,7 @@ type BlockStore interface {
|
||||
|
||||
LoadBlockCommit(height int64) *types.Commit
|
||||
LoadSeenCommit() *types.Commit
|
||||
LoadBlockExtendedCommit(height int64) *types.ExtendedCommit
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
@@ -26,6 +26,9 @@ const (
|
||||
|
||||
//------------------------------------------------------------------------
|
||||
|
||||
// NB: Before modifying these, cross-check them with those in
|
||||
// internal/store/store.go
|
||||
// TODO(thane): Move these and the ones in internal/store/store.go to their own package.
|
||||
const (
|
||||
// prefixes are unique across all tm db's
|
||||
prefixValidators = int64(5)
|
||||
|
||||
@@ -63,7 +63,7 @@ func makeBlockAndPartSet(
|
||||
) (*types.Block, *types.PartSet) {
|
||||
t.Helper()
|
||||
|
||||
lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
|
||||
lastCommit := &types.Commit{Height: height - 1}
|
||||
if height > 1 {
|
||||
vote, err := factory.MakeVote(
|
||||
ctx,
|
||||
@@ -73,8 +73,12 @@ func makeBlockAndPartSet(
|
||||
lastBlockMeta.BlockID,
|
||||
time.Now())
|
||||
require.NoError(t, err)
|
||||
lastCommit = types.NewCommit(vote.Height, vote.Round,
|
||||
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
|
||||
lastCommit = &types.Commit{
|
||||
Height: vote.Height,
|
||||
Round: vote.Round,
|
||||
BlockID: lastBlock.LastBlockID,
|
||||
Signatures: []types.CommitSig{vote.CommitSig()},
|
||||
}
|
||||
}
|
||||
|
||||
block := state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
|
||||
@@ -65,7 +65,8 @@ func TestValidateBlockHeader(t *testing.T) {
|
||||
eventBus,
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
|
||||
lastCommit := &types.Commit{}
|
||||
var lastExtCommit *types.ExtendedCommit
|
||||
|
||||
// some bad values
|
||||
wrongHash := crypto.Checksum([]byte("this hash is wrong"))
|
||||
@@ -100,7 +101,7 @@ func TestValidateBlockHeader(t *testing.T) {
|
||||
{"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }},
|
||||
|
||||
{"first LastCommit contains signatures", func(block *types.Block) {
|
||||
block.LastCommit = types.NewCommit(0, 0, types.BlockID{}, []types.CommitSig{types.NewCommitSigAbsent()})
|
||||
block.LastCommit = &types.Commit{Signatures: []types.CommitSig{types.NewCommitSigAbsent()}}
|
||||
block.LastCommitHash = block.LastCommit.Hash()
|
||||
}},
|
||||
}
|
||||
@@ -121,8 +122,9 @@ func TestValidateBlockHeader(t *testing.T) {
|
||||
/*
|
||||
A good block passes
|
||||
*/
|
||||
state, _, lastCommit = makeAndCommitGoodBlock(ctx, t,
|
||||
state, _, lastExtCommit = makeAndCommitGoodBlock(ctx, t,
|
||||
state, height, lastCommit, state.Validators.GetProposer().Address, blockExec, privVals, nil)
|
||||
lastCommit = lastExtCommit.StripExtensions()
|
||||
}
|
||||
|
||||
nextHeight := validationTestsStopHeight
|
||||
@@ -169,8 +171,9 @@ func TestValidateBlockCommit(t *testing.T) {
|
||||
eventBus,
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
|
||||
wrongSigsCommit := types.NewCommit(1, 0, types.BlockID{}, nil)
|
||||
lastCommit := &types.Commit{}
|
||||
var lastExtCommit *types.ExtendedCommit
|
||||
wrongSigsCommit := &types.Commit{Height: 1}
|
||||
badPrivVal := types.NewMockPV()
|
||||
|
||||
for height := int64(1); height < validationTestsStopHeight; height++ {
|
||||
@@ -192,12 +195,12 @@ func TestValidateBlockCommit(t *testing.T) {
|
||||
time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
wrongHeightCommit := types.NewCommit(
|
||||
wrongHeightVote.Height,
|
||||
wrongHeightVote.Round,
|
||||
state.LastBlockID,
|
||||
[]types.CommitSig{wrongHeightVote.CommitSig()},
|
||||
)
|
||||
wrongHeightCommit := &types.Commit{
|
||||
Height: wrongHeightVote.Height,
|
||||
Round: wrongHeightVote.Round,
|
||||
BlockID: state.LastBlockID,
|
||||
Signatures: []types.CommitSig{wrongHeightVote.CommitSig()},
|
||||
}
|
||||
block := statefactory.MakeBlock(state, height, wrongHeightCommit)
|
||||
err = blockExec.ValidateBlock(ctx, state, block)
|
||||
_, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight)
|
||||
@@ -220,7 +223,7 @@ func TestValidateBlockCommit(t *testing.T) {
|
||||
A good block passes
|
||||
*/
|
||||
var blockID types.BlockID
|
||||
state, blockID, lastCommit = makeAndCommitGoodBlock(
|
||||
state, blockID, lastExtCommit = makeAndCommitGoodBlock(
|
||||
ctx,
|
||||
t,
|
||||
state,
|
||||
@@ -231,6 +234,7 @@ func TestValidateBlockCommit(t *testing.T) {
|
||||
privVals,
|
||||
nil,
|
||||
)
|
||||
lastCommit = lastExtCommit.StripExtensions()
|
||||
|
||||
/*
|
||||
wrongSigsCommit is fine except for the extra bad precommit
|
||||
@@ -270,8 +274,12 @@ func TestValidateBlockCommit(t *testing.T) {
|
||||
|
||||
goodVote.Signature, badVote.Signature = g.Signature, b.Signature
|
||||
|
||||
wrongSigsCommit = types.NewCommit(goodVote.Height, goodVote.Round,
|
||||
blockID, []types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()})
|
||||
wrongSigsCommit = &types.Commit{
|
||||
Height: goodVote.Height,
|
||||
Round: goodVote.Round,
|
||||
BlockID: blockID,
|
||||
Signatures: []types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -319,7 +327,8 @@ func TestValidateBlockEvidence(t *testing.T) {
|
||||
eventBus,
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
|
||||
lastCommit := &types.Commit{}
|
||||
var lastExtCommit *types.ExtendedCommit
|
||||
|
||||
for height := int64(1); height < validationTestsStopHeight; height++ {
|
||||
proposerAddr := state.Validators.GetProposer().Address
|
||||
@@ -364,7 +373,7 @@ func TestValidateBlockEvidence(t *testing.T) {
|
||||
evidence = append(evidence, newEv)
|
||||
}
|
||||
|
||||
state, _, lastCommit = makeAndCommitGoodBlock(
|
||||
state, _, lastExtCommit = makeAndCommitGoodBlock(
|
||||
ctx,
|
||||
t,
|
||||
state,
|
||||
@@ -375,6 +384,7 @@ func TestValidateBlockEvidence(t *testing.T) {
|
||||
privVals,
|
||||
evidence,
|
||||
)
|
||||
lastCommit = lastExtCommit.StripExtensions()
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
state "github.com/tendermint/tendermint/internal/state"
|
||||
|
||||
testing "testing"
|
||||
|
||||
@@ -855,13 +855,13 @@ func mockLB(ctx context.Context, t *testing.T, height int64, time time.Time, las
|
||||
header.NextValidatorsHash = nextVals.Hash()
|
||||
header.ConsensusHash = types.DefaultConsensusParams().HashConsensusParams()
|
||||
lastBlockID = factory.MakeBlockIDWithHash(header.Hash())
|
||||
voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals)
|
||||
commit, err := factory.MakeCommit(ctx, lastBlockID, height, 0, voteSet, currentPrivVals, time)
|
||||
voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals, false)
|
||||
extCommit, err := factory.MakeExtendedCommit(ctx, lastBlockID, height, 0, voteSet, currentPrivVals, time)
|
||||
require.NoError(t, err)
|
||||
return nextVals, nextPrivVals, &types.LightBlock{
|
||||
SignedHeader: &types.SignedHeader{
|
||||
Header: header,
|
||||
Commit: commit,
|
||||
Commit: extCommit.StripExtensions(),
|
||||
},
|
||||
ValidatorSet: currentVals,
|
||||
}
|
||||
|
||||
@@ -273,11 +273,31 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
}
|
||||
commit, err := types.CommitFromProto(pbc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error reading block commit: %w", err))
|
||||
panic(fmt.Errorf("converting commit to proto: %w", err))
|
||||
}
|
||||
return commit
|
||||
}
|
||||
|
||||
func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit {
|
||||
pbec := new(tmproto.ExtendedCommit)
|
||||
bz, err := bs.db.Get(extCommitKey(height))
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("fetching extended commit: %w", err))
|
||||
}
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
err = proto.Unmarshal(bz, pbec)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("decoding extended commit: %w", err))
|
||||
}
|
||||
extCommit, err := types.ExtendedCommitFromProto(pbec)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("converting extended commit: %w", err))
|
||||
}
|
||||
return extCommit
|
||||
}
|
||||
|
||||
// LoadSeenCommit returns the last locally seen Commit before being
|
||||
// cannonicalized. This is useful when we've seen a commit, but there
|
||||
// has not yet been a new block at `height + 1` that includes this
|
||||
@@ -298,7 +318,7 @@ func (bs *BlockStore) LoadSeenCommit() *types.Commit {
|
||||
|
||||
commit, err := types.CommitFromProto(pbc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error from proto commit: %w", err))
|
||||
panic(fmt.Errorf("converting seen commit: %w", err))
|
||||
}
|
||||
return commit
|
||||
}
|
||||
@@ -446,7 +466,7 @@ func (bs *BlockStore) batchDelete(
|
||||
// If all the nodes restart after committing a block,
|
||||
// we need this to reload the precommits to catch-up nodes to the
|
||||
// most recent height. Otherwise they'd stall at H-1.
|
||||
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
|
||||
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) {
|
||||
if block == nil {
|
||||
panic("BlockStore can only save a non-nil block")
|
||||
}
|
||||
@@ -462,6 +482,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
|
||||
if !blockParts.IsComplete() {
|
||||
panic("BlockStore can only save complete block part sets")
|
||||
}
|
||||
if height != seenCommit.Height {
|
||||
panic(fmt.Sprintf("BlockStore cannot save seen commit of a different height (block: %d, commit: %d)",
|
||||
height, seenCommit.Height))
|
||||
}
|
||||
|
||||
// Save block parts. This must be done before the block meta, since callers
|
||||
// typically load the block meta first as an indication that the block exists
|
||||
@@ -494,12 +518,18 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
|
||||
}
|
||||
|
||||
// Save seen commit (seen +2/3 precommits for block)
|
||||
pbsc := seenCommit.ToProto()
|
||||
pbsc := seenCommit.StripExtensions().ToProto()
|
||||
seenCommitBytes := mustEncode(pbsc)
|
||||
if err := batch.Set(seenCommitKey(), seenCommitBytes); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pbec := seenCommit.ToProto()
|
||||
extCommitBytes := mustEncode(pbec)
|
||||
if err := batch.Set(extCommitKey(height), extCommitBytes); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := batch.WriteSync(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -579,6 +609,9 @@ func (bs *BlockStore) Close() error {
|
||||
//---------------------------------- KEY ENCODING -----------------------------------------
|
||||
|
||||
// key prefixes
|
||||
// NB: Before modifying these, cross-check them with those in
|
||||
// internal/state/store.go
|
||||
// TODO(thane): Move these and the ones in internal/state/store.go to their own package.
|
||||
const (
|
||||
// prefixes are unique across all tm db's
|
||||
prefixBlockMeta = int64(0)
|
||||
@@ -586,6 +619,7 @@ const (
|
||||
prefixBlockCommit = int64(2)
|
||||
prefixSeenCommit = int64(3)
|
||||
prefixBlockHash = int64(4)
|
||||
prefixExtCommit = int64(9) // 5..8 are used by state/store
|
||||
)
|
||||
|
||||
func blockMetaKey(height int64) []byte {
|
||||
@@ -635,6 +669,14 @@ func seenCommitKey() []byte {
|
||||
return key
|
||||
}
|
||||
|
||||
func extCommitKey(height int64) []byte {
|
||||
key, err := orderedcode.Append(nil, prefixExtCommit, height)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func blockHashKey(hash []byte) []byte {
|
||||
key, err := orderedcode.Append(nil, prefixBlockHash, string(hash))
|
||||
if err != nil {
|
||||
|
||||
@@ -2,7 +2,6 @@ package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
stdlog "log"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
@@ -27,22 +26,25 @@ import (
|
||||
// test.
|
||||
type cleanupFunc func()
|
||||
|
||||
// make a Commit with a single vote containing just the height and a timestamp
|
||||
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
|
||||
commitSigs := []types.CommitSig{{
|
||||
BlockIDFlag: types.BlockIDFlagCommit,
|
||||
ValidatorAddress: tmrand.Bytes(crypto.AddressSize),
|
||||
Timestamp: timestamp,
|
||||
Signature: []byte("Signature"),
|
||||
// make an extended commit with a single vote containing just the height and a
|
||||
// timestamp
|
||||
func makeTestExtCommit(height int64, timestamp time.Time) *types.ExtendedCommit {
|
||||
extCommitSigs := []types.ExtendedCommitSig{{
|
||||
CommitSig: types.CommitSig{
|
||||
BlockIDFlag: types.BlockIDFlagCommit,
|
||||
ValidatorAddress: tmrand.Bytes(crypto.AddressSize),
|
||||
Timestamp: timestamp,
|
||||
Signature: []byte("Signature"),
|
||||
},
|
||||
}}
|
||||
return types.NewCommit(
|
||||
height,
|
||||
0,
|
||||
types.BlockID{
|
||||
return &types.ExtendedCommit{
|
||||
Height: height,
|
||||
BlockID: types.BlockID{
|
||||
Hash: crypto.CRandBytes(32),
|
||||
PartSetHeader: types.PartSetHeader{Hash: crypto.CRandBytes(32), Total: 2},
|
||||
},
|
||||
commitSigs)
|
||||
ExtendedSignatures: extCommitSigs,
|
||||
}
|
||||
}
|
||||
|
||||
func makeStateAndBlockStore(dir string) (sm.State, *BlockStore, cleanupFunc, error) {
|
||||
@@ -59,47 +61,11 @@ func makeStateAndBlockStore(dir string) (sm.State, *BlockStore, cleanupFunc, err
|
||||
return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) }, nil
|
||||
}
|
||||
|
||||
func freshBlockStore() (*BlockStore, dbm.DB) {
|
||||
func newInMemoryBlockStore() (*BlockStore, dbm.DB) {
|
||||
db := dbm.NewMemDB()
|
||||
return NewBlockStore(db), db
|
||||
}
|
||||
|
||||
var (
|
||||
state sm.State
|
||||
block *types.Block
|
||||
partSet *types.PartSet
|
||||
part1 *types.Part
|
||||
part2 *types.Part
|
||||
seenCommit1 *types.Commit
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
dir, err := os.MkdirTemp("", "store_test")
|
||||
if err != nil {
|
||||
stdlog.Fatal(err)
|
||||
}
|
||||
var cleanup cleanupFunc
|
||||
|
||||
state, _, cleanup, err = makeStateAndBlockStore(dir)
|
||||
if err != nil {
|
||||
stdlog.Fatal(err)
|
||||
}
|
||||
|
||||
block = factory.MakeBlock(state, 1, new(types.Commit))
|
||||
|
||||
partSet, err = block.MakePartSet(2)
|
||||
if err != nil {
|
||||
stdlog.Fatal(err)
|
||||
}
|
||||
part1 = partSet.GetPart(0)
|
||||
part2 = partSet.GetPart(1)
|
||||
seenCommit1 = makeTestCommit(10, tmtime.Now())
|
||||
code := m.Run()
|
||||
cleanup()
|
||||
os.RemoveAll(dir) // best-effort
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// TODO: This test should be simplified ...
|
||||
func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir())
|
||||
@@ -120,8 +86,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit))
|
||||
validPartSet, err := block.MakePartSet(2)
|
||||
require.NoError(t, err)
|
||||
seenCommit := makeTestCommit(10, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
part2 := validPartSet.GetPart(1)
|
||||
|
||||
seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now())
|
||||
bs.SaveBlock(block, validPartSet, seenCommit)
|
||||
require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed")
|
||||
require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed")
|
||||
|
||||
@@ -139,11 +107,11 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
// End of setup, test data
|
||||
commitAtH10 := makeTestCommit(10, tmtime.Now())
|
||||
commitAtH10 := makeTestExtCommit(10, tmtime.Now()).StripExtensions()
|
||||
tuples := []struct {
|
||||
block *types.Block
|
||||
parts *types.PartSet
|
||||
seenCommit *types.Commit
|
||||
seenCommit *types.ExtendedCommit
|
||||
wantPanic string
|
||||
wantErr bool
|
||||
|
||||
@@ -156,7 +124,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
seenCommit: seenCommit,
|
||||
},
|
||||
|
||||
{
|
||||
@@ -172,10 +140,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
ChainID: "block_test",
|
||||
Time: tmtime.Now(),
|
||||
ProposerAddress: tmrand.Bytes(crypto.AddressSize)},
|
||||
makeTestCommit(5, tmtime.Now()),
|
||||
makeTestExtCommit(5, tmtime.Now()).StripExtensions(),
|
||||
),
|
||||
parts: validPartSet,
|
||||
seenCommit: makeTestCommit(5, tmtime.Now()),
|
||||
seenCommit: makeTestExtCommit(5, tmtime.Now()),
|
||||
},
|
||||
|
||||
{
|
||||
@@ -187,7 +155,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
seenCommit: seenCommit,
|
||||
corruptCommitInDB: true, // Corrupt the DB's commit entry
|
||||
wantPanic: "error reading block commit",
|
||||
},
|
||||
@@ -195,7 +163,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
seenCommit: seenCommit,
|
||||
wantPanic: "unmarshal to tmproto.BlockMeta",
|
||||
corruptBlockInDB: true, // Corrupt the DB's block entry
|
||||
},
|
||||
@@ -203,7 +171,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
seenCommit: seenCommit,
|
||||
|
||||
// Expecting no error and we want a nil back
|
||||
eraseSeenCommitInDB: true,
|
||||
@@ -212,7 +180,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
seenCommit: seenCommit,
|
||||
|
||||
corruptSeenCommitInDB: true,
|
||||
wantPanic: "error reading block seen commit",
|
||||
@@ -221,7 +189,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
seenCommit: seenCommit,
|
||||
|
||||
// Expecting no error and we want a nil back
|
||||
eraseCommitInDB: true,
|
||||
@@ -238,7 +206,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
|
||||
for i, tuple := range tuples {
|
||||
tuple := tuple
|
||||
bs, db := freshBlockStore()
|
||||
bs, db := newInMemoryBlockStore()
|
||||
// SaveBlock
|
||||
res, err, panicErr := doFn(func() (interface{}, error) {
|
||||
bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit)
|
||||
@@ -324,7 +292,7 @@ func TestLoadBaseMeta(t *testing.T) {
|
||||
block := factory.MakeBlock(state, h, new(types.Commit))
|
||||
partSet, err := block.MakePartSet(2)
|
||||
require.NoError(t, err)
|
||||
seenCommit := makeTestCommit(h, tmtime.Now())
|
||||
seenCommit := makeTestExtCommit(h, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
}
|
||||
|
||||
@@ -338,13 +306,19 @@ func TestLoadBaseMeta(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLoadBlockPart(t *testing.T) {
|
||||
bs, db := freshBlockStore()
|
||||
height, index := int64(10), 1
|
||||
cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test")
|
||||
require.NoError(t, err)
|
||||
|
||||
bs, db := newInMemoryBlockStore()
|
||||
const height, index = 10, 1
|
||||
loadPart := func() (interface{}, error) {
|
||||
part := bs.LoadBlockPart(height, index)
|
||||
return part, nil
|
||||
}
|
||||
|
||||
state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Initially no contents.
|
||||
// 1. Requesting for a non-existent block shouldn't fail
|
||||
res, _, panicErr := doFn(loadPart)
|
||||
@@ -352,13 +326,18 @@ func TestLoadBlockPart(t *testing.T) {
|
||||
require.Nil(t, res, "a non-existent block part should return nil")
|
||||
|
||||
// 2. Next save a corrupted block then try to load it
|
||||
err := db.Set(blockPartKey(height, index), []byte("Tendermint"))
|
||||
err = db.Set(blockPartKey(height, index), []byte("Tendermint"))
|
||||
require.NoError(t, err)
|
||||
res, _, panicErr = doFn(loadPart)
|
||||
require.NotNil(t, panicErr, "expecting a non-nil panic")
|
||||
require.Contains(t, panicErr.Error(), "unmarshal to tmproto.Part failed")
|
||||
|
||||
// 3. A good block serialized and saved to the DB should be retrievable
|
||||
block := factory.MakeBlock(state, height, new(types.Commit))
|
||||
partSet, err := block.MakePartSet(2)
|
||||
require.NoError(t, err)
|
||||
part1 := partSet.GetPart(0)
|
||||
|
||||
pb1, err := part1.ToProto()
|
||||
require.NoError(t, err)
|
||||
err = db.Set(blockPartKey(height, index), mustEncode(pb1))
|
||||
@@ -391,7 +370,7 @@ func TestPruneBlocks(t *testing.T) {
|
||||
block := factory.MakeBlock(state, h, new(types.Commit))
|
||||
partSet, err := block.MakePartSet(2)
|
||||
require.NoError(t, err)
|
||||
seenCommit := makeTestCommit(h, tmtime.Now())
|
||||
seenCommit := makeTestExtCommit(h, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
}
|
||||
|
||||
@@ -452,7 +431,7 @@ func TestPruneBlocks(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLoadBlockMeta(t *testing.T) {
|
||||
bs, db := freshBlockStore()
|
||||
bs, db := newInMemoryBlockStore()
|
||||
height := int64(10)
|
||||
loadMeta := func() (interface{}, error) {
|
||||
meta := bs.LoadBlockMeta(height)
|
||||
@@ -499,7 +478,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
|
||||
|
||||
partSet, err := block.MakePartSet(2)
|
||||
require.NoError(t, err)
|
||||
seenCommit := makeTestCommit(10, tmtime.Now())
|
||||
seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
|
||||
|
||||
@@ -521,9 +500,12 @@ func TestBlockFetchAtHeight(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSeenAndCanonicalCommit(t *testing.T) {
|
||||
bs, _ := freshBlockStore()
|
||||
state, store, cleanup, err := makeStateAndBlockStore(t.TempDir())
|
||||
defer cleanup()
|
||||
require.NoError(t, err)
|
||||
|
||||
loadCommit := func() (interface{}, error) {
|
||||
meta := bs.LoadSeenCommit()
|
||||
meta := store.LoadSeenCommit()
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
@@ -536,19 +518,19 @@ func TestSeenAndCanonicalCommit(t *testing.T) {
|
||||
// produce a few blocks and check that the correct seen and cannoncial commits
|
||||
// are persisted.
|
||||
for h := int64(3); h <= 5; h++ {
|
||||
blockCommit := makeTestCommit(h-1, tmtime.Now())
|
||||
blockCommit := makeTestExtCommit(h-1, tmtime.Now()).StripExtensions()
|
||||
block := factory.MakeBlock(state, h, blockCommit)
|
||||
partSet, err := block.MakePartSet(2)
|
||||
require.NoError(t, err)
|
||||
seenCommit := makeTestCommit(h, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
c3 := bs.LoadSeenCommit()
|
||||
seenCommit := makeTestExtCommit(h, tmtime.Now())
|
||||
store.SaveBlock(block, partSet, seenCommit)
|
||||
c3 := store.LoadSeenCommit()
|
||||
require.NotNil(t, c3)
|
||||
require.Equal(t, h, c3.Height)
|
||||
require.Equal(t, seenCommit.Hash(), c3.Hash())
|
||||
c5 := bs.LoadBlockCommit(h)
|
||||
require.Equal(t, seenCommit.StripExtensions().Hash(), c3.Hash())
|
||||
c5 := store.LoadBlockCommit(h)
|
||||
require.Nil(t, c5)
|
||||
c6 := bs.LoadBlockCommit(h - 1)
|
||||
c6 := store.LoadBlockCommit(h - 1)
|
||||
require.Equal(t, blockCommit.Hash(), c6.Hash())
|
||||
}
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func MakeCommit(ctx context.Context, blockID types.BlockID, height int64, round int32, voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.Commit, error) {
|
||||
func MakeExtendedCommit(ctx context.Context, blockID types.BlockID, height int64, round int32, voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.ExtendedCommit, error) {
|
||||
// all sign
|
||||
for i := 0; i < len(validators); i++ {
|
||||
pubKey, err := validators[i].GetPubKey(ctx)
|
||||
@@ -37,5 +37,5 @@ func MakeCommit(ctx context.Context, blockID types.BlockID, height int64, round
|
||||
}
|
||||
}
|
||||
|
||||
return voteSet.MakeCommit(), nil
|
||||
return voteSet.MakeExtendedCommit(), nil
|
||||
}
|
||||
|
||||
@@ -72,7 +72,12 @@ func (pkz privKeys) signHeader(t testing.TB, header *types.Header, valSet *types
|
||||
commitSigs[vote.ValidatorIndex] = vote.CommitSig()
|
||||
}
|
||||
|
||||
return types.NewCommit(header.Height, 1, blockID, commitSigs)
|
||||
return &types.Commit{
|
||||
Height: header.Height,
|
||||
Round: 1,
|
||||
BlockID: blockID,
|
||||
Signatures: commitSigs,
|
||||
}
|
||||
}
|
||||
|
||||
func makeVote(t testing.TB, header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey, blockID types.BlockID) *types.Vote {
|
||||
|
||||
@@ -279,8 +279,7 @@ func checkRequiredHeaderFields(h *types.SignedHeader) error {
|
||||
return errors.New("height in trusted header must be set (non zero")
|
||||
}
|
||||
|
||||
zeroTime := time.Time{}
|
||||
if h.Time == zeroTime {
|
||||
if h.Time.IsZero() {
|
||||
return errors.New("time in trusted header must be set")
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -339,13 +340,13 @@ func TestCreateProposalBlock(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
|
||||
commit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
|
||||
extCommit := &types.ExtendedCommit{Height: height - 1}
|
||||
block, err := blockExec.CreateProposalBlock(
|
||||
ctx,
|
||||
height,
|
||||
state, commit,
|
||||
state,
|
||||
extCommit,
|
||||
proposerAddr,
|
||||
nil,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -419,13 +420,13 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
|
||||
commit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
|
||||
extCommit := &types.ExtendedCommit{Height: height - 1}
|
||||
block, err := blockExec.CreateProposalBlock(
|
||||
ctx,
|
||||
height,
|
||||
state, commit,
|
||||
state,
|
||||
extCommit,
|
||||
proposerAddr,
|
||||
nil,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -525,38 +526,41 @@ func TestMaxProposalBlockSize(t *testing.T) {
|
||||
}
|
||||
state.ChainID = maxChainID
|
||||
|
||||
cs := types.CommitSig{
|
||||
BlockIDFlag: types.BlockIDFlagNil,
|
||||
ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
|
||||
Timestamp: timestamp,
|
||||
Signature: crypto.CRandBytes(types.MaxSignatureSize),
|
||||
}
|
||||
|
||||
commit := &types.Commit{
|
||||
Height: math.MaxInt64,
|
||||
Round: math.MaxInt32,
|
||||
BlockID: blockID,
|
||||
}
|
||||
|
||||
votes := make([]*types.Vote, types.MaxVotesCount)
|
||||
voteSet := types.NewVoteSet(state.ChainID, math.MaxInt64-1, math.MaxInt32, tmproto.PrecommitType, state.Validators, false)
|
||||
|
||||
// add maximum amount of signatures to a single commit
|
||||
for i := 0; i < types.MaxVotesCount; i++ {
|
||||
pubKey, err := privVals[i].GetPubKey(ctx)
|
||||
require.NoError(t, err)
|
||||
votes[i] = &types.Vote{
|
||||
ValidatorAddress: pubKey.Address(),
|
||||
valIdx, val := state.Validators.GetByAddress(pubKey.Address())
|
||||
require.NotNil(t, val)
|
||||
|
||||
vote := &types.Vote{
|
||||
Type: tmproto.PrecommitType,
|
||||
Height: math.MaxInt64 - 1,
|
||||
Round: math.MaxInt32,
|
||||
BlockID: blockID,
|
||||
Timestamp: timestamp,
|
||||
ValidatorAddress: val.Address,
|
||||
ValidatorIndex: valIdx,
|
||||
Extension: []byte("extension"),
|
||||
}
|
||||
commit.Signatures = append(commit.Signatures, cs)
|
||||
vpb := vote.ToProto()
|
||||
require.NoError(t, privVals[i].SignVote(ctx, state.ChainID, vpb))
|
||||
vote.Signature = vpb.Signature
|
||||
vote.ExtensionSignature = vpb.ExtensionSignature
|
||||
|
||||
added, err := voteSet.AddVote(vote)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
}
|
||||
|
||||
block, err := blockExec.CreateProposalBlock(
|
||||
ctx,
|
||||
math.MaxInt64,
|
||||
state,
|
||||
commit,
|
||||
voteSet.MakeExtendedCommit(),
|
||||
proposerAddr,
|
||||
votes,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
partSet, err := block.MakePartSet(types.BlockPartSizeBytes)
|
||||
|
||||
@@ -375,17 +375,17 @@ func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error {
|
||||
|
||||
// Vote extensions are non-deterministic, so it is possible that an
|
||||
// application may have created a different extension. We therefore always
|
||||
// re-sign the vote extensions of precommits. For prevotes, the extension
|
||||
// signature will always be empty.
|
||||
// re-sign the vote extensions of precommits. For prevotes and nil
|
||||
// precommits, the extension signature will always be empty.
|
||||
var extSig []byte
|
||||
if vote.Type == tmproto.PrecommitType {
|
||||
if vote.Type == tmproto.PrecommitType && !types.ProtoBlockIDIsNil(&vote.BlockID) {
|
||||
extSignBytes := types.VoteExtensionSignBytes(chainID, vote)
|
||||
extSig, err = pv.Key.PrivKey.Sign(extSignBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if len(vote.Extension) > 0 {
|
||||
return errors.New("unexpected vote extension - extensions are only allowed in precommits")
|
||||
return errors.New("unexpected vote extension - extensions are only allowed in non-nil precommits")
|
||||
}
|
||||
|
||||
// We might crash before writing to the wal,
|
||||
|
||||
@@ -116,7 +116,8 @@ func (m *NoBlockResponse) GetHeight() int64 {
|
||||
|
||||
// BlockResponse returns block to the requested
|
||||
type BlockResponse struct {
|
||||
Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"`
|
||||
Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"`
|
||||
ExtCommit *types.ExtendedCommit `protobuf:"bytes,2,opt,name=ext_commit,json=extCommit,proto3" json:"ext_commit,omitempty"`
|
||||
}
|
||||
|
||||
func (m *BlockResponse) Reset() { *m = BlockResponse{} }
|
||||
@@ -159,6 +160,13 @@ func (m *BlockResponse) GetBlock() *types.Block {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *BlockResponse) GetExtCommit() *types.ExtendedCommit {
|
||||
if m != nil {
|
||||
return m.ExtCommit
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StatusRequest requests the status of a peer.
|
||||
type StatusRequest struct {
|
||||
}
|
||||
@@ -385,30 +393,33 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) }
|
||||
|
||||
var fileDescriptor_19b397c236e0fa07 = []byte{
|
||||
// 368 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x4f, 0xfa, 0x40,
|
||||
0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x1b, 0xa3, 0xc4, 0x98, 0x86, 0xd4, 0x97,
|
||||
0xe8, 0xc1, 0x36, 0xc1, 0xa3, 0xc6, 0x03, 0x27, 0x4c, 0x7c, 0x49, 0x4a, 0xbc, 0x78, 0x21, 0x14,
|
||||
0x37, 0x40, 0x94, 0x2e, 0x32, 0xdb, 0x03, 0xdf, 0xc2, 0x2f, 0xe0, 0xf7, 0xf1, 0xc8, 0xd1, 0xa3,
|
||||
0x81, 0x2f, 0x62, 0x98, 0x2d, 0x65, 0x69, 0xb0, 0xb7, 0xdd, 0xe9, 0x33, 0xbf, 0x79, 0xfa, 0x64,
|
||||
0x16, 0xea, 0x82, 0x45, 0x2f, 0x6c, 0x32, 0x1a, 0x46, 0xc2, 0x0f, 0xdf, 0x78, 0xef, 0x15, 0xa7,
|
||||
0x51, 0xcf, 0x17, 0xd3, 0x31, 0x43, 0x6f, 0x3c, 0xe1, 0x82, 0xdb, 0xbb, 0x6b, 0x85, 0x97, 0x2a,
|
||||
0x0e, 0x0e, 0x95, 0x3e, 0x52, 0xcb, 0x6e, 0xd9, 0xe3, 0x9e, 0x42, 0xa5, 0xb9, 0xbc, 0x06, 0xec,
|
||||
0x3d, 0x66, 0x28, 0xec, 0x3d, 0x28, 0x0d, 0xd8, 0xb0, 0x3f, 0x10, 0x35, 0xbd, 0xae, 0x9f, 0x19,
|
||||
0x41, 0x72, 0x73, 0xcf, 0xc1, 0x7a, 0xe0, 0x89, 0x12, 0xc7, 0x3c, 0x42, 0xf6, 0xa7, 0xf4, 0x06,
|
||||
0xcc, 0x4d, 0xe1, 0x05, 0x14, 0x69, 0x24, 0xe9, 0xca, 0x8d, 0x7d, 0x4f, 0xf1, 0x29, 0xfd, 0x4b,
|
||||
0xbd, 0x54, 0xb9, 0x16, 0x98, 0x6d, 0xd1, 0x15, 0x31, 0x26, 0x9e, 0xdc, 0x6b, 0xa8, 0xae, 0x0a,
|
||||
0xf9, 0xa3, 0x6d, 0x1b, 0x0a, 0x61, 0x17, 0x59, 0xed, 0x1f, 0x55, 0xe9, 0xec, 0x7e, 0x1a, 0xf0,
|
||||
0xff, 0x9e, 0x21, 0x76, 0xfb, 0xcc, 0xbe, 0x05, 0x93, 0x66, 0x74, 0x26, 0x12, 0x9d, 0x38, 0x72,
|
||||
0xbd, 0x6d, 0xc9, 0x79, 0x6a, 0x30, 0x2d, 0x2d, 0xa8, 0x84, 0x6a, 0x50, 0x6d, 0xd8, 0x89, 0x78,
|
||||
0x67, 0x45, 0x93, 0xbe, 0x68, 0x6e, 0xb9, 0x71, 0xb2, 0x1d, 0x97, 0xc9, 0xaf, 0xa5, 0x05, 0x56,
|
||||
0x94, 0x89, 0xf4, 0x0e, 0xaa, 0x19, 0xa2, 0x41, 0xc4, 0xa3, 0x5c, 0x83, 0x29, 0xcf, 0x0c, 0xb3,
|
||||
0x34, 0xa4, 0xdc, 0xd2, 0xdf, 0x2d, 0xe4, 0xd1, 0x36, 0x42, 0x5f, 0xd2, 0x50, 0x2d, 0xd8, 0x8f,
|
||||
0x60, 0xa5, 0xb4, 0xc4, 0x5c, 0x91, 0x70, 0xc7, 0xf9, 0xb8, 0xd4, 0x5d, 0x15, 0x37, 0x2a, 0xcd,
|
||||
0x22, 0x18, 0x18, 0x8f, 0x9a, 0x4f, 0x5f, 0x73, 0x47, 0x9f, 0xcd, 0x1d, 0xfd, 0x67, 0xee, 0xe8,
|
||||
0x1f, 0x0b, 0x47, 0x9b, 0x2d, 0x1c, 0xed, 0x7b, 0xe1, 0x68, 0xcf, 0x57, 0xfd, 0xa1, 0x18, 0xc4,
|
||||
0xa1, 0xd7, 0xe3, 0x23, 0x5f, 0x5d, 0xe2, 0xf5, 0x91, 0x76, 0xd8, 0xdf, 0xf6, 0x30, 0xc2, 0x12,
|
||||
0x7d, 0xbb, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1c, 0xa3, 0x45, 0x37, 0x03, 0x00, 0x00,
|
||||
// 404 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xcd, 0x4a, 0xeb, 0x50,
|
||||
0x10, 0xc7, 0x93, 0x9b, 0xb6, 0x97, 0x3b, 0xb7, 0x69, 0xb8, 0xe1, 0xa2, 0x45, 0x24, 0x94, 0xf8,
|
||||
0x81, 0x2e, 0x4c, 0x40, 0x97, 0x0a, 0x42, 0x45, 0xa8, 0xe0, 0x07, 0xa4, 0xb8, 0x71, 0x53, 0x9a,
|
||||
0xf4, 0xd0, 0x06, 0x4d, 0x4e, 0xed, 0x39, 0x81, 0x76, 0xe5, 0x2b, 0xf8, 0x02, 0xbe, 0x8f, 0xcb,
|
||||
0x2e, 0x5d, 0x4a, 0xfb, 0x22, 0xd2, 0x39, 0x69, 0x9a, 0xc6, 0x98, 0xdd, 0x64, 0xce, 0x7f, 0x7e,
|
||||
0xf9, 0xcf, 0x0c, 0x03, 0x0d, 0x4e, 0xc2, 0x1e, 0x19, 0x05, 0x7e, 0xc8, 0x6d, 0xf7, 0x89, 0x7a,
|
||||
0x8f, 0x6c, 0x12, 0x7a, 0x36, 0x9f, 0x0c, 0x09, 0xb3, 0x86, 0x23, 0xca, 0xa9, 0xfe, 0x7f, 0xa5,
|
||||
0xb0, 0x12, 0xc5, 0xd6, 0x76, 0xaa, 0x0e, 0xd5, 0xa2, 0x5a, 0xd4, 0xe4, 0xbc, 0xa6, 0x88, 0xe6,
|
||||
0x3e, 0x54, 0x9b, 0x0b, 0xb1, 0x43, 0x9e, 0x23, 0xc2, 0xb8, 0xbe, 0x01, 0x95, 0x01, 0xf1, 0xfb,
|
||||
0x03, 0x5e, 0x97, 0x1b, 0xf2, 0x81, 0xe2, 0xc4, 0x5f, 0xe6, 0x21, 0x68, 0xb7, 0x34, 0x56, 0xb2,
|
||||
0x21, 0x0d, 0x19, 0xf9, 0x51, 0xfa, 0x02, 0xea, 0xba, 0xf0, 0x08, 0xca, 0x68, 0x08, 0x75, 0x7f,
|
||||
0x8f, 0x37, 0xad, 0x54, 0x17, 0xc2, 0x8b, 0xd0, 0x0b, 0x95, 0x7e, 0x0e, 0x40, 0xc6, 0xbc, 0xe3,
|
||||
0xd1, 0x20, 0xf0, 0x79, 0xfd, 0x17, 0xd6, 0x34, 0xbe, 0xd7, 0x5c, 0x8e, 0x31, 0xd5, 0xbb, 0x40,
|
||||
0x9d, 0xf3, 0x87, 0x8c, 0xb9, 0x08, 0x4d, 0x0d, 0xd4, 0x36, 0xef, 0xf2, 0x88, 0xc5, 0x4d, 0x99,
|
||||
0x67, 0x50, 0x5b, 0x26, 0x8a, 0xbd, 0xeb, 0x3a, 0x94, 0xdc, 0x2e, 0x23, 0xf8, 0x57, 0xc5, 0xc1,
|
||||
0xd8, 0x7c, 0x53, 0xe0, 0xf7, 0x0d, 0x61, 0xac, 0xdb, 0x27, 0xfa, 0x15, 0xa8, 0x68, 0xb2, 0x33,
|
||||
0x12, 0xe8, 0xb8, 0x25, 0xd3, 0xca, 0x5b, 0x8c, 0x95, 0x9e, 0x6c, 0x4b, 0x72, 0xaa, 0x6e, 0x7a,
|
||||
0xd2, 0x6d, 0xf8, 0x17, 0xd2, 0xce, 0x92, 0x26, 0x7c, 0xc5, 0xdd, 0xee, 0xe5, 0xe3, 0x32, 0x0b,
|
||||
0x68, 0x49, 0x8e, 0x16, 0x66, 0x76, 0x72, 0x0d, 0xb5, 0x0c, 0x51, 0x41, 0xe2, 0x4e, 0xa1, 0xc1,
|
||||
0x84, 0xa7, 0xba, 0x59, 0x1a, 0xc3, 0xb9, 0x25, 0xed, 0x96, 0x8a, 0x68, 0x6b, 0x43, 0x5f, 0xd0,
|
||||
0x58, 0x3a, 0xa1, 0xdf, 0x81, 0x96, 0xd0, 0x62, 0x73, 0x65, 0xc4, 0xed, 0x16, 0xe3, 0x12, 0x77,
|
||||
0x35, 0xb6, 0x96, 0x69, 0x96, 0x41, 0x61, 0x51, 0xd0, 0xbc, 0x7f, 0x9f, 0x19, 0xf2, 0x74, 0x66,
|
||||
0xc8, 0x9f, 0x33, 0x43, 0x7e, 0x9d, 0x1b, 0xd2, 0x74, 0x6e, 0x48, 0x1f, 0x73, 0x43, 0x7a, 0x38,
|
||||
0xed, 0xfb, 0x7c, 0x10, 0xb9, 0x96, 0x47, 0x03, 0x3b, 0x7d, 0x05, 0xab, 0x10, 0x8f, 0xc0, 0xce,
|
||||
0xbb, 0x3b, 0xb7, 0x82, 0x6f, 0x27, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x13, 0x4f, 0x42,
|
||||
0x96, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *BlockRequest) Marshal() (dAtA []byte, err error) {
|
||||
@@ -487,6 +498,18 @@ func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.ExtCommit != nil {
|
||||
{
|
||||
size, err := m.ExtCommit.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if m.Block != nil {
|
||||
{
|
||||
size, err := m.Block.MarshalToSizedBuffer(dAtA[:i])
|
||||
@@ -740,6 +763,10 @@ func (m *BlockResponse) Size() (n int) {
|
||||
l = m.Block.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
if m.ExtCommit != nil {
|
||||
l = m.ExtCommit.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@@ -1049,6 +1076,42 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ExtCommit", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.ExtCommit == nil {
|
||||
m.ExtCommit = &types.ExtendedCommit{}
|
||||
}
|
||||
if err := m.ExtCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
|
||||
@@ -4,6 +4,7 @@ package tendermint.blocksync;
|
||||
option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync";
|
||||
|
||||
import "tendermint/types/block.proto";
|
||||
import "tendermint/types/types.proto";
|
||||
|
||||
// BlockRequest requests a block for a specific height
|
||||
message BlockRequest {
|
||||
@@ -19,6 +20,7 @@ message NoBlockResponse {
|
||||
// BlockResponse returns block to the requested
|
||||
message BlockResponse {
|
||||
tendermint.types.Block block = 1;
|
||||
tendermint.types.ExtendedCommit ext_commit = 2;
|
||||
}
|
||||
|
||||
// StatusRequest requests the status of a peer.
|
||||
|
||||
@@ -726,6 +726,162 @@ func (m *CommitSig) GetSignature() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExtendedCommit struct {
|
||||
Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
|
||||
Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"`
|
||||
BlockID BlockID `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id"`
|
||||
ExtendedSignatures []ExtendedCommitSig `protobuf:"bytes,4,rep,name=extended_signatures,json=extendedSignatures,proto3" json:"extended_signatures"`
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) Reset() { *m = ExtendedCommit{} }
|
||||
func (m *ExtendedCommit) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtendedCommit) ProtoMessage() {}
|
||||
func (*ExtendedCommit) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{8}
|
||||
}
|
||||
func (m *ExtendedCommit) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ExtendedCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ExtendedCommit.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ExtendedCommit) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ExtendedCommit.Merge(m, src)
|
||||
}
|
||||
func (m *ExtendedCommit) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ExtendedCommit) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ExtendedCommit.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ExtendedCommit proto.InternalMessageInfo
|
||||
|
||||
func (m *ExtendedCommit) GetHeight() int64 {
|
||||
if m != nil {
|
||||
return m.Height
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) GetRound() int32 {
|
||||
if m != nil {
|
||||
return m.Round
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) GetBlockID() BlockID {
|
||||
if m != nil {
|
||||
return m.BlockID
|
||||
}
|
||||
return BlockID{}
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) GetExtendedSignatures() []ExtendedCommitSig {
|
||||
if m != nil {
|
||||
return m.ExtendedSignatures
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExtendedCommitSig retains all the same fields as CommitSig but adds vote
|
||||
// extension-related fields.
|
||||
type ExtendedCommitSig struct {
|
||||
BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"`
|
||||
ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"`
|
||||
Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
|
||||
Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"`
|
||||
// Vote extension data
|
||||
Extension []byte `protobuf:"bytes,5,opt,name=extension,proto3" json:"extension,omitempty"`
|
||||
// Vote extension signature
|
||||
ExtensionSignature []byte `protobuf:"bytes,6,opt,name=extension_signature,json=extensionSignature,proto3" json:"extension_signature,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) Reset() { *m = ExtendedCommitSig{} }
|
||||
func (m *ExtendedCommitSig) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtendedCommitSig) ProtoMessage() {}
|
||||
func (*ExtendedCommitSig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{9}
|
||||
}
|
||||
func (m *ExtendedCommitSig) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ExtendedCommitSig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ExtendedCommitSig.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ExtendedCommitSig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ExtendedCommitSig.Merge(m, src)
|
||||
}
|
||||
func (m *ExtendedCommitSig) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ExtendedCommitSig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ExtendedCommitSig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ExtendedCommitSig proto.InternalMessageInfo
|
||||
|
||||
func (m *ExtendedCommitSig) GetBlockIdFlag() BlockIDFlag {
|
||||
if m != nil {
|
||||
return m.BlockIdFlag
|
||||
}
|
||||
return BlockIDFlagUnknown
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) GetValidatorAddress() []byte {
|
||||
if m != nil {
|
||||
return m.ValidatorAddress
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) GetTimestamp() time.Time {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) GetSignature() []byte {
|
||||
if m != nil {
|
||||
return m.Signature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) GetExtension() []byte {
|
||||
if m != nil {
|
||||
return m.Extension
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) GetExtensionSignature() []byte {
|
||||
if m != nil {
|
||||
return m.ExtensionSignature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Proposal struct {
|
||||
Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"`
|
||||
Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
|
||||
@@ -740,7 +896,7 @@ func (m *Proposal) Reset() { *m = Proposal{} }
|
||||
func (m *Proposal) String() string { return proto.CompactTextString(m) }
|
||||
func (*Proposal) ProtoMessage() {}
|
||||
func (*Proposal) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{8}
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{10}
|
||||
}
|
||||
func (m *Proposal) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -827,7 +983,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} }
|
||||
func (m *SignedHeader) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignedHeader) ProtoMessage() {}
|
||||
func (*SignedHeader) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{9}
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{11}
|
||||
}
|
||||
func (m *SignedHeader) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -879,7 +1035,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} }
|
||||
func (m *LightBlock) String() string { return proto.CompactTextString(m) }
|
||||
func (*LightBlock) ProtoMessage() {}
|
||||
func (*LightBlock) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{10}
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{12}
|
||||
}
|
||||
func (m *LightBlock) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -933,7 +1089,7 @@ func (m *BlockMeta) Reset() { *m = BlockMeta{} }
|
||||
func (m *BlockMeta) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockMeta) ProtoMessage() {}
|
||||
func (*BlockMeta) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{11}
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{13}
|
||||
}
|
||||
func (m *BlockMeta) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1002,7 +1158,7 @@ func (m *TxProof) Reset() { *m = TxProof{} }
|
||||
func (m *TxProof) String() string { return proto.CompactTextString(m) }
|
||||
func (*TxProof) ProtoMessage() {}
|
||||
func (*TxProof) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{12}
|
||||
return fileDescriptor_d3a6e55e2345de56, []int{14}
|
||||
}
|
||||
func (m *TxProof) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1063,6 +1219,8 @@ func init() {
|
||||
proto.RegisterType((*Vote)(nil), "tendermint.types.Vote")
|
||||
proto.RegisterType((*Commit)(nil), "tendermint.types.Commit")
|
||||
proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig")
|
||||
proto.RegisterType((*ExtendedCommit)(nil), "tendermint.types.ExtendedCommit")
|
||||
proto.RegisterType((*ExtendedCommitSig)(nil), "tendermint.types.ExtendedCommitSig")
|
||||
proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal")
|
||||
proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader")
|
||||
proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock")
|
||||
@@ -1073,91 +1231,95 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) }
|
||||
|
||||
var fileDescriptor_d3a6e55e2345de56 = []byte{
|
||||
// 1341 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x73, 0xdb, 0xc4,
|
||||
0x17, 0x8f, 0x62, 0x25, 0xb6, 0x9f, 0xed, 0xc4, 0xd9, 0x6f, 0xda, 0xba, 0x6e, 0xe3, 0x68, 0xfc,
|
||||
0x1d, 0x20, 0x2d, 0x8c, 0x52, 0x52, 0x86, 0x1f, 0x07, 0x0e, 0xb6, 0x93, 0xb6, 0x9e, 0x26, 0x8e,
|
||||
0x91, 0xdd, 0x32, 0x70, 0xd1, 0xc8, 0xd6, 0xd6, 0x16, 0x95, 0x25, 0x8d, 0x76, 0x1d, 0x92, 0xfe,
|
||||
0x05, 0x4c, 0x4e, 0x3d, 0x71, 0xcb, 0x09, 0x0e, 0xdc, 0x39, 0x70, 0x65, 0x38, 0xf5, 0xd8, 0x1b,
|
||||
0x5c, 0x28, 0x4c, 0x3a, 0xc3, 0xdf, 0xc1, 0xec, 0x0f, 0xc9, 0x72, 0x9c, 0x40, 0xa7, 0xd3, 0xe1,
|
||||
0xe2, 0xd1, 0xbe, 0xf7, 0x79, 0x6f, 0xdf, 0x8f, 0xcf, 0xee, 0x5b, 0xc3, 0x75, 0x8a, 0x3d, 0x1b,
|
||||
0x87, 0x23, 0xc7, 0xa3, 0x9b, 0xf4, 0x28, 0xc0, 0x44, 0xfc, 0xea, 0x41, 0xe8, 0x53, 0x1f, 0x15,
|
||||
0x27, 0x5a, 0x9d, 0xcb, 0xcb, 0xab, 0x03, 0x7f, 0xe0, 0x73, 0xe5, 0x26, 0xfb, 0x12, 0xb8, 0xf2,
|
||||
0xfa, 0xc0, 0xf7, 0x07, 0x2e, 0xde, 0xe4, 0xab, 0xde, 0xf8, 0xd1, 0x26, 0x75, 0x46, 0x98, 0x50,
|
||||
0x6b, 0x14, 0x48, 0xc0, 0x5a, 0x62, 0x9b, 0x7e, 0x78, 0x14, 0x50, 0x9f, 0x61, 0xfd, 0x47, 0x52,
|
||||
0x5d, 0x49, 0xa8, 0x0f, 0x70, 0x48, 0x1c, 0xdf, 0x4b, 0xc6, 0x51, 0xd6, 0x66, 0xa2, 0x3c, 0xb0,
|
||||
0x5c, 0xc7, 0xb6, 0xa8, 0x1f, 0x0a, 0x44, 0xf5, 0x13, 0x28, 0xb4, 0xad, 0x90, 0x76, 0x30, 0xbd,
|
||||
0x87, 0x2d, 0x1b, 0x87, 0x68, 0x15, 0x16, 0xa8, 0x4f, 0x2d, 0xb7, 0xa4, 0x68, 0xca, 0x46, 0xc1,
|
||||
0x10, 0x0b, 0x84, 0x40, 0x1d, 0x5a, 0x64, 0x58, 0x9a, 0xd7, 0x94, 0x8d, 0xbc, 0xc1, 0xbf, 0xab,
|
||||
0x43, 0x50, 0x99, 0x29, 0xb3, 0x70, 0x3c, 0x1b, 0x1f, 0x46, 0x16, 0x7c, 0xc1, 0xa4, 0xbd, 0x23,
|
||||
0x8a, 0x89, 0x34, 0x11, 0x0b, 0xf4, 0x01, 0x2c, 0xf0, 0xf8, 0x4b, 0x29, 0x4d, 0xd9, 0xc8, 0x6d,
|
||||
0x95, 0xf4, 0x44, 0xa1, 0x44, 0x7e, 0x7a, 0x9b, 0xe9, 0xeb, 0xea, 0xb3, 0x17, 0xeb, 0x73, 0x86,
|
||||
0x00, 0x57, 0x5d, 0x48, 0xd7, 0x5d, 0xbf, 0xff, 0xb8, 0xb9, 0x1d, 0x07, 0xa2, 0x4c, 0x02, 0x41,
|
||||
0x7b, 0xb0, 0x1c, 0x58, 0x21, 0x35, 0x09, 0xa6, 0xe6, 0x90, 0x67, 0xc1, 0x37, 0xcd, 0x6d, 0xad,
|
||||
0xeb, 0x67, 0xfb, 0xa0, 0x4f, 0x25, 0x2b, 0x77, 0x29, 0x04, 0x49, 0x61, 0xf5, 0x2f, 0x15, 0x16,
|
||||
0x65, 0x31, 0x3e, 0x85, 0xb4, 0x2c, 0x2b, 0xdf, 0x30, 0xb7, 0xb5, 0x96, 0xf4, 0x28, 0x55, 0x7a,
|
||||
0xc3, 0xf7, 0x08, 0xf6, 0xc8, 0x98, 0x48, 0x7f, 0x91, 0x0d, 0x7a, 0x1b, 0x32, 0xfd, 0xa1, 0xe5,
|
||||
0x78, 0xa6, 0x63, 0xf3, 0x88, 0xb2, 0xf5, 0xdc, 0xe9, 0x8b, 0xf5, 0x74, 0x83, 0xc9, 0x9a, 0xdb,
|
||||
0x46, 0x9a, 0x2b, 0x9b, 0x36, 0xba, 0x0c, 0x8b, 0x43, 0xec, 0x0c, 0x86, 0x94, 0x97, 0x25, 0x65,
|
||||
0xc8, 0x15, 0xfa, 0x18, 0x54, 0x46, 0x88, 0x92, 0xca, 0xf7, 0x2e, 0xeb, 0x82, 0x2d, 0x7a, 0xc4,
|
||||
0x16, 0xbd, 0x1b, 0xb1, 0xa5, 0x9e, 0x61, 0x1b, 0x3f, 0xfd, 0x63, 0x5d, 0x31, 0xb8, 0x05, 0x6a,
|
||||
0x40, 0xc1, 0xb5, 0x08, 0x35, 0x7b, 0xac, 0x6c, 0x6c, 0xfb, 0x05, 0xee, 0xe2, 0xea, 0x6c, 0x41,
|
||||
0x64, 0x61, 0x65, 0xe8, 0x39, 0x66, 0x25, 0x44, 0x36, 0xda, 0x80, 0x22, 0x77, 0xd2, 0xf7, 0x47,
|
||||
0x23, 0x87, 0x9a, 0xbc, 0xee, 0x8b, 0xbc, 0xee, 0x4b, 0x4c, 0xde, 0xe0, 0xe2, 0x7b, 0xac, 0x03,
|
||||
0xd7, 0x20, 0x6b, 0x5b, 0xd4, 0x12, 0x90, 0x34, 0x87, 0x64, 0x98, 0x80, 0x2b, 0xdf, 0x81, 0xe5,
|
||||
0x98, 0x75, 0x44, 0x40, 0x32, 0xc2, 0xcb, 0x44, 0xcc, 0x81, 0xb7, 0x60, 0xd5, 0xc3, 0x87, 0xd4,
|
||||
0x3c, 0x8b, 0xce, 0x72, 0x34, 0x62, 0xba, 0x87, 0xd3, 0x16, 0x6f, 0xc1, 0x52, 0x3f, 0x2a, 0xbe,
|
||||
0xc0, 0x02, 0xc7, 0x16, 0x62, 0x29, 0x87, 0x5d, 0x85, 0x8c, 0x15, 0x04, 0x02, 0x90, 0xe3, 0x80,
|
||||
0xb4, 0x15, 0x04, 0x5c, 0x75, 0x13, 0x56, 0x78, 0x8e, 0x21, 0x26, 0x63, 0x97, 0x4a, 0x27, 0x79,
|
||||
0x8e, 0x59, 0x66, 0x0a, 0x43, 0xc8, 0x39, 0xf6, 0xff, 0x50, 0xc0, 0x07, 0x8e, 0x8d, 0xbd, 0x3e,
|
||||
0x16, 0xb8, 0x02, 0xc7, 0xe5, 0x23, 0x21, 0x07, 0xdd, 0x80, 0x62, 0x10, 0xfa, 0x81, 0x4f, 0x70,
|
||||
0x68, 0x5a, 0xb6, 0x1d, 0x62, 0x42, 0x4a, 0x4b, 0xc2, 0x5f, 0x24, 0xaf, 0x09, 0x71, 0xb5, 0x04,
|
||||
0xea, 0xb6, 0x45, 0x2d, 0x54, 0x84, 0x14, 0x3d, 0x24, 0x25, 0x45, 0x4b, 0x6d, 0xe4, 0x0d, 0xf6,
|
||||
0x59, 0xfd, 0x29, 0x05, 0xea, 0x43, 0x9f, 0x62, 0x74, 0x1b, 0x54, 0xd6, 0x26, 0xce, 0xbe, 0xa5,
|
||||
0xf3, 0xf8, 0xdc, 0x71, 0x06, 0x1e, 0xb6, 0xf7, 0xc8, 0xa0, 0x7b, 0x14, 0x60, 0x83, 0x83, 0x13,
|
||||
0x74, 0x9a, 0x9f, 0xa2, 0xd3, 0x2a, 0x2c, 0x84, 0xfe, 0xd8, 0xb3, 0x39, 0xcb, 0x16, 0x0c, 0xb1,
|
||||
0x40, 0x3b, 0x90, 0x89, 0x59, 0xa2, 0xfe, 0x1b, 0x4b, 0x96, 0x19, 0x4b, 0x18, 0x87, 0xa5, 0xc0,
|
||||
0x48, 0xf7, 0x24, 0x59, 0xea, 0x90, 0x8d, 0x2f, 0x2f, 0xc9, 0xb6, 0x57, 0x23, 0xec, 0xc4, 0x0c,
|
||||
0xbd, 0x0b, 0x2b, 0x71, 0xef, 0xe3, 0xe2, 0x09, 0xc6, 0x15, 0x63, 0x85, 0xac, 0xde, 0x14, 0xad,
|
||||
0x4c, 0x71, 0x01, 0xa5, 0x79, 0x5e, 0x13, 0x5a, 0x35, 0xf9, 0x4d, 0x74, 0x1d, 0xb2, 0xc4, 0x19,
|
||||
0x78, 0x16, 0x1d, 0x87, 0x58, 0x32, 0x6f, 0x22, 0x60, 0x5a, 0x7c, 0x48, 0xb1, 0xc7, 0x0f, 0xb9,
|
||||
0x60, 0xda, 0x44, 0x80, 0x36, 0xe1, 0x7f, 0xf1, 0xc2, 0x9c, 0x78, 0x11, 0x2c, 0x43, 0xb1, 0xaa,
|
||||
0x13, 0x69, 0xaa, 0x3f, 0x2b, 0xb0, 0x28, 0x0e, 0x46, 0xa2, 0x0d, 0xca, 0xf9, 0x6d, 0x98, 0xbf,
|
||||
0xa8, 0x0d, 0xa9, 0xd7, 0x6f, 0x43, 0x0d, 0x20, 0x0e, 0x93, 0x94, 0x54, 0x2d, 0xb5, 0x91, 0xdb,
|
||||
0xba, 0x36, 0xeb, 0x48, 0x84, 0xd8, 0x71, 0x06, 0xf2, 0xdc, 0x27, 0x8c, 0xaa, 0xbf, 0x2b, 0x90,
|
||||
0x8d, 0xf5, 0xa8, 0x06, 0x85, 0x28, 0x2e, 0xf3, 0x91, 0x6b, 0x0d, 0x24, 0x15, 0xd7, 0x2e, 0x0c,
|
||||
0xee, 0x8e, 0x6b, 0x0d, 0x8c, 0x9c, 0x8c, 0x87, 0x2d, 0xce, 0x6f, 0xeb, 0xfc, 0x05, 0x6d, 0x9d,
|
||||
0xe2, 0x51, 0xea, 0xf5, 0x78, 0x34, 0xd5, 0x71, 0xf5, 0x4c, 0xc7, 0xab, 0x3f, 0xce, 0x43, 0xa6,
|
||||
0xcd, 0x8f, 0xa2, 0xe5, 0xfe, 0x17, 0x07, 0xec, 0x1a, 0x64, 0x03, 0xdf, 0x35, 0x85, 0x46, 0xe5,
|
||||
0x9a, 0x4c, 0xe0, 0xbb, 0xc6, 0x4c, 0xdb, 0x17, 0xde, 0xd0, 0xe9, 0x5b, 0x7c, 0x03, 0x55, 0x4b,
|
||||
0x9f, 0xad, 0x5a, 0x08, 0x79, 0x51, 0x0a, 0x39, 0x1a, 0x6f, 0xb1, 0x1a, 0xf0, 0x59, 0xab, 0xcc,
|
||||
0x8e, 0x72, 0x11, 0xb6, 0x40, 0x1a, 0x12, 0xc7, 0x2c, 0xc4, 0x24, 0x91, 0xd3, 0xb9, 0x74, 0x11,
|
||||
0x2d, 0x0d, 0x89, 0xab, 0x7e, 0xab, 0x00, 0xec, 0xb2, 0xca, 0xf2, 0x7c, 0xd9, 0x50, 0x23, 0x3c,
|
||||
0x04, 0x73, 0x6a, 0xe7, 0xca, 0x45, 0x4d, 0x93, 0xfb, 0xe7, 0x49, 0x32, 0xee, 0x06, 0x14, 0x26,
|
||||
0x64, 0x24, 0x38, 0x0a, 0xe6, 0x1c, 0x27, 0xf1, 0xac, 0xe9, 0x60, 0x6a, 0xe4, 0x0f, 0x12, 0xab,
|
||||
0xea, 0x2f, 0x0a, 0x64, 0x79, 0x4c, 0x7b, 0x98, 0x5a, 0x53, 0x3d, 0x54, 0x5e, 0xbf, 0x87, 0x6b,
|
||||
0x00, 0xc2, 0x0d, 0x71, 0x9e, 0x60, 0xc9, 0xac, 0x2c, 0x97, 0x74, 0x9c, 0x27, 0x18, 0x7d, 0x18,
|
||||
0x17, 0x3c, 0xf5, 0xcf, 0x05, 0x97, 0x47, 0x3a, 0x2a, 0xfb, 0x15, 0x48, 0x7b, 0xe3, 0x91, 0xc9,
|
||||
0x26, 0x8c, 0x2a, 0xd8, 0xea, 0x8d, 0x47, 0xdd, 0x43, 0x52, 0xfd, 0x0a, 0xd2, 0xdd, 0x43, 0xfe,
|
||||
0xda, 0x62, 0x14, 0x0d, 0x7d, 0x5f, 0x8e, 0x78, 0xf1, 0xb4, 0xca, 0x30, 0x01, 0x9f, 0x68, 0x08,
|
||||
0x54, 0x36, 0xcb, 0xa3, 0xb7, 0x1f, 0xfb, 0x46, 0xfa, 0x2b, 0xbe, 0xe3, 0xe4, 0x0b, 0xee, 0xe6,
|
||||
0xaf, 0x0a, 0xe4, 0x12, 0xf7, 0x03, 0x7a, 0x1f, 0x2e, 0xd5, 0x77, 0xf7, 0x1b, 0xf7, 0xcd, 0xe6,
|
||||
0xb6, 0x79, 0x67, 0xb7, 0x76, 0xd7, 0x7c, 0xd0, 0xba, 0xdf, 0xda, 0xff, 0xbc, 0x55, 0x9c, 0x2b,
|
||||
0x5f, 0x3e, 0x3e, 0xd1, 0x50, 0x02, 0xfb, 0xc0, 0x7b, 0xec, 0xf9, 0x5f, 0xb3, 0xab, 0x78, 0x75,
|
||||
0xda, 0xa4, 0x56, 0xef, 0xec, 0xb4, 0xba, 0x45, 0xa5, 0x7c, 0xe9, 0xf8, 0x44, 0x5b, 0x49, 0x58,
|
||||
0xd4, 0x7a, 0x04, 0x7b, 0x74, 0xd6, 0xa0, 0xb1, 0xbf, 0xb7, 0xd7, 0xec, 0x16, 0xe7, 0x67, 0x0c,
|
||||
0xe4, 0x85, 0x7d, 0x03, 0x56, 0xa6, 0x0d, 0x5a, 0xcd, 0xdd, 0x62, 0xaa, 0x8c, 0x8e, 0x4f, 0xb4,
|
||||
0xa5, 0x04, 0xba, 0xe5, 0xb8, 0xe5, 0xcc, 0x37, 0xdf, 0x55, 0xe6, 0x7e, 0xf8, 0xbe, 0xa2, 0xb0,
|
||||
0xcc, 0x0a, 0x53, 0x77, 0x04, 0x7a, 0x0f, 0xae, 0x74, 0x9a, 0x77, 0x5b, 0x3b, 0xdb, 0xe6, 0x5e,
|
||||
0xe7, 0xae, 0xd9, 0xfd, 0xa2, 0xbd, 0x93, 0xc8, 0x6e, 0xf9, 0xf8, 0x44, 0xcb, 0xc9, 0x94, 0x2e,
|
||||
0x42, 0xb7, 0x8d, 0x9d, 0x87, 0xfb, 0xdd, 0x9d, 0xa2, 0x22, 0xd0, 0xed, 0x10, 0x1f, 0xf8, 0x14,
|
||||
0x73, 0xf4, 0x2d, 0xb8, 0x7a, 0x0e, 0x3a, 0x4e, 0x6c, 0xe5, 0xf8, 0x44, 0x2b, 0xb4, 0x43, 0x2c,
|
||||
0xce, 0x0f, 0xb7, 0xd0, 0xa1, 0x34, 0x6b, 0xb1, 0xdf, 0xde, 0xef, 0xd4, 0x76, 0x8b, 0x5a, 0xb9,
|
||||
0x78, 0x7c, 0xa2, 0xe5, 0xa3, 0xcb, 0x90, 0xe1, 0x27, 0x99, 0xd5, 0x3f, 0x7b, 0x76, 0x5a, 0x51,
|
||||
0x9e, 0x9f, 0x56, 0x94, 0x3f, 0x4f, 0x2b, 0xca, 0xd3, 0x97, 0x95, 0xb9, 0xe7, 0x2f, 0x2b, 0x73,
|
||||
0xbf, 0xbd, 0xac, 0xcc, 0x7d, 0xf9, 0xd1, 0xc0, 0xa1, 0xc3, 0x71, 0x4f, 0xef, 0xfb, 0xa3, 0xcd,
|
||||
0xe4, 0x3f, 0x8c, 0xc9, 0xa7, 0xf8, 0xa7, 0x73, 0xf6, 0xdf, 0x47, 0x6f, 0x91, 0xcb, 0x6f, 0xff,
|
||||
0x1d, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xc0, 0x81, 0x37, 0x3e, 0x0d, 0x00, 0x00,
|
||||
// 1396 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4b, 0x6f, 0x1b, 0xd5,
|
||||
0x17, 0xcf, 0xd8, 0xe3, 0xd7, 0xb1, 0x9d, 0x38, 0xf7, 0x9f, 0xb6, 0xae, 0xdb, 0x38, 0x96, 0xab,
|
||||
0x3f, 0xa4, 0x05, 0x39, 0x25, 0x45, 0x3c, 0x16, 0x2c, 0x6c, 0xc7, 0x6d, 0xad, 0x26, 0x8e, 0x19,
|
||||
0xbb, 0x45, 0x74, 0x33, 0x1a, 0x7b, 0x6e, 0xed, 0xa1, 0xf6, 0xcc, 0x68, 0xe6, 0x3a, 0x38, 0xfd,
|
||||
0x04, 0x28, 0xab, 0xae, 0xd8, 0x65, 0x05, 0x0b, 0xf6, 0x20, 0xb1, 0x45, 0xac, 0xba, 0xec, 0x0e,
|
||||
0x36, 0x14, 0x48, 0x25, 0x3e, 0x07, 0xba, 0x8f, 0x19, 0xcf, 0xc4, 0x31, 0x54, 0x51, 0x05, 0x12,
|
||||
0x9b, 0x68, 0xee, 0x39, 0xbf, 0x73, 0xee, 0x79, 0xfc, 0xee, 0xc9, 0x31, 0x5c, 0x25, 0xd8, 0xd4,
|
||||
0xb1, 0x33, 0x36, 0x4c, 0xb2, 0x45, 0x0e, 0x6d, 0xec, 0xf2, 0xbf, 0x15, 0xdb, 0xb1, 0x88, 0x85,
|
||||
0x72, 0x33, 0x6d, 0x85, 0xc9, 0x0b, 0x6b, 0x03, 0x6b, 0x60, 0x31, 0xe5, 0x16, 0xfd, 0xe2, 0xb8,
|
||||
0xc2, 0xc6, 0xc0, 0xb2, 0x06, 0x23, 0xbc, 0xc5, 0x4e, 0xbd, 0xc9, 0xa3, 0x2d, 0x62, 0x8c, 0xb1,
|
||||
0x4b, 0xb4, 0xb1, 0x2d, 0x00, 0xeb, 0x81, 0x6b, 0xfa, 0xce, 0xa1, 0x4d, 0x2c, 0x8a, 0xb5, 0x1e,
|
||||
0x09, 0x75, 0x31, 0xa0, 0x3e, 0xc0, 0x8e, 0x6b, 0x58, 0x66, 0x30, 0x8e, 0x42, 0x69, 0x2e, 0xca,
|
||||
0x03, 0x6d, 0x64, 0xe8, 0x1a, 0xb1, 0x1c, 0x8e, 0x28, 0x7f, 0x08, 0xd9, 0xb6, 0xe6, 0x90, 0x0e,
|
||||
0x26, 0x77, 0xb1, 0xa6, 0x63, 0x07, 0xad, 0x41, 0x8c, 0x58, 0x44, 0x1b, 0xe5, 0xa5, 0x92, 0xb4,
|
||||
0x99, 0x55, 0xf8, 0x01, 0x21, 0x90, 0x87, 0x9a, 0x3b, 0xcc, 0x47, 0x4a, 0xd2, 0x66, 0x46, 0x61,
|
||||
0xdf, 0xe5, 0x21, 0xc8, 0xd4, 0x94, 0x5a, 0x18, 0xa6, 0x8e, 0xa7, 0x9e, 0x05, 0x3b, 0x50, 0x69,
|
||||
0xef, 0x90, 0x60, 0x57, 0x98, 0xf0, 0x03, 0x7a, 0x17, 0x62, 0x2c, 0xfe, 0x7c, 0xb4, 0x24, 0x6d,
|
||||
0xa6, 0xb7, 0xf3, 0x95, 0x40, 0xa1, 0x78, 0x7e, 0x95, 0x36, 0xd5, 0xd7, 0xe4, 0x67, 0x2f, 0x36,
|
||||
0x96, 0x14, 0x0e, 0x2e, 0x8f, 0x20, 0x51, 0x1b, 0x59, 0xfd, 0xc7, 0xcd, 0x1d, 0x3f, 0x10, 0x69,
|
||||
0x16, 0x08, 0xda, 0x83, 0x15, 0x5b, 0x73, 0x88, 0xea, 0x62, 0xa2, 0x0e, 0x59, 0x16, 0xec, 0xd2,
|
||||
0xf4, 0xf6, 0x46, 0xe5, 0x74, 0x1f, 0x2a, 0xa1, 0x64, 0xc5, 0x2d, 0x59, 0x3b, 0x28, 0x2c, 0xff,
|
||||
0x21, 0x43, 0x5c, 0x14, 0xe3, 0x23, 0x48, 0x88, 0xb2, 0xb2, 0x0b, 0xd3, 0xdb, 0xeb, 0x41, 0x8f,
|
||||
0x42, 0x55, 0xa9, 0x5b, 0xa6, 0x8b, 0x4d, 0x77, 0xe2, 0x0a, 0x7f, 0x9e, 0x0d, 0x7a, 0x03, 0x92,
|
||||
0xfd, 0xa1, 0x66, 0x98, 0xaa, 0xa1, 0xb3, 0x88, 0x52, 0xb5, 0xf4, 0xc9, 0x8b, 0x8d, 0x44, 0x9d,
|
||||
0xca, 0x9a, 0x3b, 0x4a, 0x82, 0x29, 0x9b, 0x3a, 0xba, 0x08, 0xf1, 0x21, 0x36, 0x06, 0x43, 0xc2,
|
||||
0xca, 0x12, 0x55, 0xc4, 0x09, 0x7d, 0x00, 0x32, 0x25, 0x44, 0x5e, 0x66, 0x77, 0x17, 0x2a, 0x9c,
|
||||
0x2d, 0x15, 0x8f, 0x2d, 0x95, 0xae, 0xc7, 0x96, 0x5a, 0x92, 0x5e, 0xfc, 0xf4, 0xd7, 0x0d, 0x49,
|
||||
0x61, 0x16, 0xa8, 0x0e, 0xd9, 0x91, 0xe6, 0x12, 0xb5, 0x47, 0xcb, 0x46, 0xaf, 0x8f, 0x31, 0x17,
|
||||
0x97, 0xe7, 0x0b, 0x22, 0x0a, 0x2b, 0x42, 0x4f, 0x53, 0x2b, 0x2e, 0xd2, 0xd1, 0x26, 0xe4, 0x98,
|
||||
0x93, 0xbe, 0x35, 0x1e, 0x1b, 0x44, 0x65, 0x75, 0x8f, 0xb3, 0xba, 0x2f, 0x53, 0x79, 0x9d, 0x89,
|
||||
0xef, 0xd2, 0x0e, 0x5c, 0x81, 0x94, 0xae, 0x11, 0x8d, 0x43, 0x12, 0x0c, 0x92, 0xa4, 0x02, 0xa6,
|
||||
0x7c, 0x13, 0x56, 0x7c, 0xd6, 0xb9, 0x1c, 0x92, 0xe4, 0x5e, 0x66, 0x62, 0x06, 0xbc, 0x09, 0x6b,
|
||||
0x26, 0x9e, 0x12, 0xf5, 0x34, 0x3a, 0xc5, 0xd0, 0x88, 0xea, 0x1e, 0x84, 0x2d, 0xfe, 0x0f, 0xcb,
|
||||
0x7d, 0xaf, 0xf8, 0x1c, 0x0b, 0x0c, 0x9b, 0xf5, 0xa5, 0x0c, 0x76, 0x19, 0x92, 0x9a, 0x6d, 0x73,
|
||||
0x40, 0x9a, 0x01, 0x12, 0x9a, 0x6d, 0x33, 0xd5, 0x0d, 0x58, 0x65, 0x39, 0x3a, 0xd8, 0x9d, 0x8c,
|
||||
0x88, 0x70, 0x92, 0x61, 0x98, 0x15, 0xaa, 0x50, 0xb8, 0x9c, 0x61, 0xaf, 0x41, 0x16, 0x1f, 0x18,
|
||||
0x3a, 0x36, 0xfb, 0x98, 0xe3, 0xb2, 0x0c, 0x97, 0xf1, 0x84, 0x0c, 0x74, 0x1d, 0x72, 0xb6, 0x63,
|
||||
0xd9, 0x96, 0x8b, 0x1d, 0x55, 0xd3, 0x75, 0x07, 0xbb, 0x6e, 0x7e, 0x99, 0xfb, 0xf3, 0xe4, 0x55,
|
||||
0x2e, 0x2e, 0xe7, 0x41, 0xde, 0xd1, 0x88, 0x86, 0x72, 0x10, 0x25, 0x53, 0x37, 0x2f, 0x95, 0xa2,
|
||||
0x9b, 0x19, 0x85, 0x7e, 0x96, 0xbf, 0x8f, 0x82, 0xfc, 0xc0, 0x22, 0x18, 0xdd, 0x02, 0x99, 0xb6,
|
||||
0x89, 0xb1, 0x6f, 0xf9, 0x2c, 0x3e, 0x77, 0x8c, 0x81, 0x89, 0xf5, 0x3d, 0x77, 0xd0, 0x3d, 0xb4,
|
||||
0xb1, 0xc2, 0xc0, 0x01, 0x3a, 0x45, 0x42, 0x74, 0x5a, 0x83, 0x98, 0x63, 0x4d, 0x4c, 0x9d, 0xb1,
|
||||
0x2c, 0xa6, 0xf0, 0x03, 0x6a, 0x40, 0xd2, 0x67, 0x89, 0xfc, 0x77, 0x2c, 0x59, 0xa1, 0x2c, 0xa1,
|
||||
0x1c, 0x16, 0x02, 0x25, 0xd1, 0x13, 0x64, 0xa9, 0x41, 0xca, 0x1f, 0x5e, 0x82, 0x6d, 0xaf, 0x46,
|
||||
0xd8, 0x99, 0x19, 0x7a, 0x0b, 0x56, 0xfd, 0xde, 0xfb, 0xc5, 0xe3, 0x8c, 0xcb, 0xf9, 0x0a, 0x51,
|
||||
0xbd, 0x10, 0xad, 0x54, 0x3e, 0x80, 0x12, 0x2c, 0xaf, 0x19, 0xad, 0x9a, 0x6c, 0x12, 0x5d, 0x85,
|
||||
0x94, 0x6b, 0x0c, 0x4c, 0x8d, 0x4c, 0x1c, 0x2c, 0x98, 0x37, 0x13, 0x50, 0x2d, 0x9e, 0x12, 0x6c,
|
||||
0xb2, 0x47, 0xce, 0x99, 0x36, 0x13, 0xa0, 0x2d, 0xf8, 0x9f, 0x7f, 0x50, 0x67, 0x5e, 0x38, 0xcb,
|
||||
0x90, 0xaf, 0xea, 0x78, 0x9a, 0xf2, 0x0f, 0x12, 0xc4, 0xf9, 0xc3, 0x08, 0xb4, 0x41, 0x3a, 0xbb,
|
||||
0x0d, 0x91, 0x45, 0x6d, 0x88, 0x9e, 0xbf, 0x0d, 0x55, 0x00, 0x3f, 0x4c, 0x37, 0x2f, 0x97, 0xa2,
|
||||
0x9b, 0xe9, 0xed, 0x2b, 0xf3, 0x8e, 0x78, 0x88, 0x1d, 0x63, 0x20, 0xde, 0x7d, 0xc0, 0xa8, 0xfc,
|
||||
0x8b, 0x04, 0x29, 0x5f, 0x8f, 0xaa, 0x90, 0xf5, 0xe2, 0x52, 0x1f, 0x8d, 0xb4, 0x81, 0xa0, 0xe2,
|
||||
0xfa, 0xc2, 0xe0, 0x6e, 0x8f, 0xb4, 0x81, 0x92, 0x16, 0xf1, 0xd0, 0xc3, 0xd9, 0x6d, 0x8d, 0x2c,
|
||||
0x68, 0x6b, 0x88, 0x47, 0xd1, 0xf3, 0xf1, 0x28, 0xd4, 0x71, 0xf9, 0x54, 0xc7, 0xcb, 0xbf, 0x4b,
|
||||
0xb0, 0xdc, 0x98, 0xb2, 0xf0, 0xf5, 0x7f, 0xb3, 0x55, 0x0f, 0x05, 0xb7, 0x74, 0xac, 0xab, 0x73,
|
||||
0x3d, 0xbb, 0x36, 0xef, 0x31, 0x1c, 0xf3, 0xac, 0x77, 0xc8, 0xf3, 0xd2, 0x99, 0xf5, 0xf0, 0xbb,
|
||||
0x08, 0xac, 0xce, 0xe1, 0xff, 0x7b, 0xbd, 0x0c, 0xbf, 0xde, 0xd8, 0x2b, 0xbe, 0xde, 0xf8, 0xc2,
|
||||
0xd7, 0xfb, 0x6d, 0x04, 0x92, 0x6d, 0x36, 0xa5, 0xb5, 0xd1, 0x3f, 0x31, 0x7b, 0xaf, 0x40, 0xca,
|
||||
0xb6, 0x46, 0x2a, 0xd7, 0xc8, 0x4c, 0x93, 0xb4, 0xad, 0x91, 0x32, 0x47, 0xb3, 0xd8, 0x6b, 0x1a,
|
||||
0xcc, 0xf1, 0xd7, 0xd0, 0x84, 0xc4, 0xe9, 0x07, 0xe5, 0x40, 0x86, 0x97, 0x42, 0x6c, 0x4d, 0x37,
|
||||
0x69, 0x0d, 0xd8, 0x1a, 0x26, 0xcd, 0x6f, 0x79, 0x3c, 0x6c, 0x8e, 0x54, 0x04, 0x8e, 0x5a, 0xf0,
|
||||
0x25, 0x43, 0x2c, 0x6e, 0xf9, 0x45, 0x13, 0x4b, 0x11, 0xb8, 0xf2, 0x97, 0x12, 0xc0, 0x2e, 0xad,
|
||||
0x2c, 0xcb, 0x97, 0xee, 0x3b, 0x2e, 0x0b, 0x41, 0x0d, 0xdd, 0x5c, 0x5c, 0xd4, 0x34, 0x71, 0x7f,
|
||||
0xc6, 0x0d, 0xc6, 0x5d, 0x87, 0xec, 0x8c, 0xdb, 0x2e, 0xf6, 0x82, 0x39, 0xc3, 0x89, 0xbf, 0x86,
|
||||
0x74, 0x30, 0x51, 0x32, 0x07, 0x81, 0x53, 0xf9, 0x47, 0x09, 0x52, 0x2c, 0xa6, 0x3d, 0x4c, 0xb4,
|
||||
0x50, 0x0f, 0xa5, 0xf3, 0xf7, 0x70, 0x1d, 0x80, 0xbb, 0x71, 0x8d, 0x27, 0x58, 0x30, 0x2b, 0xc5,
|
||||
0x24, 0x1d, 0xe3, 0x09, 0x46, 0xef, 0xf9, 0x05, 0x8f, 0xfe, 0x75, 0xc1, 0xc5, 0xc4, 0xf0, 0xca,
|
||||
0x7e, 0x09, 0x12, 0xe6, 0x64, 0xac, 0xd2, 0xe5, 0x43, 0xe6, 0x6c, 0x35, 0x27, 0xe3, 0xee, 0xd4,
|
||||
0x2d, 0x7f, 0x06, 0x89, 0xee, 0x94, 0x2d, 0xe2, 0x94, 0xa2, 0x8e, 0x65, 0x89, 0xed, 0x8f, 0x6f,
|
||||
0xdd, 0x49, 0x2a, 0x60, 0xcb, 0x0e, 0x02, 0x99, 0xae, 0x79, 0xde, 0xcf, 0x02, 0xfa, 0x8d, 0x2a,
|
||||
0xaf, 0xb8, 0xe2, 0x8b, 0xe5, 0xfe, 0xc6, 0x4f, 0x12, 0xa4, 0x03, 0xe3, 0x06, 0xbd, 0x03, 0x17,
|
||||
0x6a, 0xbb, 0xfb, 0xf5, 0x7b, 0x6a, 0x73, 0x47, 0xbd, 0xbd, 0x5b, 0xbd, 0xa3, 0xde, 0x6f, 0xdd,
|
||||
0x6b, 0xed, 0x7f, 0xd2, 0xca, 0x2d, 0x15, 0x2e, 0x1e, 0x1d, 0x97, 0x50, 0x00, 0x7b, 0xdf, 0x7c,
|
||||
0x6c, 0x5a, 0x9f, 0xd3, 0x77, 0xbe, 0x16, 0x36, 0xa9, 0xd6, 0x3a, 0x8d, 0x56, 0x37, 0x27, 0x15,
|
||||
0x2e, 0x1c, 0x1d, 0x97, 0x56, 0x03, 0x16, 0xd5, 0x9e, 0x8b, 0x4d, 0x32, 0x6f, 0x50, 0xdf, 0xdf,
|
||||
0xdb, 0x6b, 0x76, 0x73, 0x91, 0x39, 0x03, 0xf1, 0x0f, 0xe2, 0x3a, 0xac, 0x86, 0x0d, 0x5a, 0xcd,
|
||||
0xdd, 0x5c, 0xb4, 0x80, 0x8e, 0x8e, 0x4b, 0xcb, 0x01, 0x74, 0xcb, 0x18, 0x15, 0x92, 0x5f, 0x7c,
|
||||
0x55, 0x5c, 0xfa, 0xe6, 0xeb, 0xa2, 0x44, 0x33, 0xcb, 0x86, 0x66, 0x04, 0x7a, 0x1b, 0x2e, 0x75,
|
||||
0x9a, 0x77, 0x5a, 0x8d, 0x1d, 0x75, 0xaf, 0x73, 0x47, 0xed, 0x7e, 0xda, 0x6e, 0x04, 0xb2, 0x5b,
|
||||
0x39, 0x3a, 0x2e, 0xa5, 0x45, 0x4a, 0x8b, 0xd0, 0x6d, 0xa5, 0xf1, 0x60, 0xbf, 0xdb, 0xc8, 0x49,
|
||||
0x1c, 0xdd, 0x76, 0xf0, 0x81, 0x45, 0x30, 0x43, 0xdf, 0x84, 0xcb, 0x67, 0xa0, 0xfd, 0xc4, 0x56,
|
||||
0x8f, 0x8e, 0x4b, 0xd9, 0xb6, 0x83, 0xf9, 0xfb, 0x61, 0x16, 0x15, 0xc8, 0xcf, 0x5b, 0xec, 0xb7,
|
||||
0xf7, 0x3b, 0xd5, 0xdd, 0x5c, 0xa9, 0x90, 0x3b, 0x3a, 0x2e, 0x65, 0xbc, 0x61, 0x48, 0xf1, 0xb3,
|
||||
0xcc, 0x6a, 0x1f, 0x3f, 0x3b, 0x29, 0x4a, 0xcf, 0x4f, 0x8a, 0xd2, 0x6f, 0x27, 0x45, 0xe9, 0xe9,
|
||||
0xcb, 0xe2, 0xd2, 0xf3, 0x97, 0xc5, 0xa5, 0x9f, 0x5f, 0x16, 0x97, 0x1e, 0xbe, 0x3f, 0x30, 0xc8,
|
||||
0x70, 0xd2, 0xab, 0xf4, 0xad, 0xf1, 0x56, 0xf0, 0xc7, 0xe7, 0xec, 0x93, 0xff, 0x08, 0x3e, 0xfd,
|
||||
0xc3, 0xb4, 0x17, 0x67, 0xf2, 0x5b, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x46, 0xcf, 0x37, 0x28,
|
||||
0x59, 0x0f, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *PartSetHeader) Marshal() (dAtA []byte, err error) {
|
||||
@@ -1634,6 +1796,127 @@ func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.ExtendedSignatures) > 0 {
|
||||
for iNdEx := len(m.ExtendedSignatures) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.ExtendedSignatures[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
}
|
||||
}
|
||||
{
|
||||
size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
if m.Round != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Round))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.Height != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Height))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.ExtensionSignature) > 0 {
|
||||
i -= len(m.ExtensionSignature)
|
||||
copy(dAtA[i:], m.ExtensionSignature)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature)))
|
||||
i--
|
||||
dAtA[i] = 0x32
|
||||
}
|
||||
if len(m.Extension) > 0 {
|
||||
i -= len(m.Extension)
|
||||
copy(dAtA[i:], m.Extension)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension)))
|
||||
i--
|
||||
dAtA[i] = 0x2a
|
||||
}
|
||||
if len(m.Signature) > 0 {
|
||||
i -= len(m.Signature)
|
||||
copy(dAtA[i:], m.Signature)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature)))
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
}
|
||||
n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
|
||||
if err11 != nil {
|
||||
return 0, err11
|
||||
}
|
||||
i -= n11
|
||||
i = encodeVarintTypes(dAtA, i, uint64(n11))
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
if len(m.ValidatorAddress) > 0 {
|
||||
i -= len(m.ValidatorAddress)
|
||||
copy(dAtA[i:], m.ValidatorAddress)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if m.BlockIdFlag != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Proposal) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@@ -1661,12 +1944,12 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i--
|
||||
dAtA[i] = 0x3a
|
||||
}
|
||||
n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
|
||||
if err10 != nil {
|
||||
return 0, err10
|
||||
n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
|
||||
if err12 != nil {
|
||||
return 0, err12
|
||||
}
|
||||
i -= n10
|
||||
i = encodeVarintTypes(dAtA, i, uint64(n10))
|
||||
i -= n12
|
||||
i = encodeVarintTypes(dAtA, i, uint64(n12))
|
||||
i--
|
||||
dAtA[i] = 0x32
|
||||
{
|
||||
@@ -2117,6 +2400,59 @@ func (m *CommitSig) Size() (n int) {
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ExtendedCommit) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Height != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Height))
|
||||
}
|
||||
if m.Round != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Round))
|
||||
}
|
||||
l = m.BlockID.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
if len(m.ExtendedSignatures) > 0 {
|
||||
for _, e := range m.ExtendedSignatures {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ExtendedCommitSig) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.BlockIdFlag != 0 {
|
||||
n += 1 + sovTypes(uint64(m.BlockIdFlag))
|
||||
}
|
||||
l = len(m.ValidatorAddress)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
l = len(m.Signature)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
l = len(m.Extension)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
l = len(m.ExtensionSignature)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Proposal) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
@@ -3823,6 +4159,399 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ExtendedCommit) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ExtendedCommit: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ExtendedCommit: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
|
||||
}
|
||||
m.Height = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Height |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType)
|
||||
}
|
||||
m.Round = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Round |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ExtendedSignatures", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ExtendedSignatures = append(m.ExtendedSignatures, ExtendedCommitSig{})
|
||||
if err := m.ExtendedSignatures[len(m.ExtendedSignatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ExtendedCommitSig: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ExtendedCommitSig: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType)
|
||||
}
|
||||
m.BlockIdFlag = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.ValidatorAddress == nil {
|
||||
m.ValidatorAddress = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Signature == nil {
|
||||
m.Signature = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 5:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Extension == nil {
|
||||
m.Extension = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 6:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.ExtensionSignature == nil {
|
||||
m.ExtensionSignature = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Proposal) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
||||
@@ -142,6 +142,28 @@ message CommitSig {
|
||||
bytes signature = 4;
|
||||
}
|
||||
|
||||
// ExtendedCommit is a commit for a block at a given height and round whose
// signature entries are ExtendedCommitSig values, which can additionally
// carry vote-extension data alongside the commit signature.
message ExtendedCommit {
  int64 height = 1;
  int32 round = 2;
  BlockID block_id = 3
      [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
  repeated ExtendedCommitSig extended_signatures = 4 [(gogoproto.nullable) = false];
}
|
||||
|
||||
// ExtendedCommitSig retains all the same fields as CommitSig but adds vote
|
||||
// extension-related fields.
|
||||
message ExtendedCommitSig {
|
||||
BlockIDFlag block_id_flag = 1;
|
||||
bytes validator_address = 2;
|
||||
google.protobuf.Timestamp timestamp = 3
|
||||
[(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
bytes signature = 4;
|
||||
// Vote extension data
|
||||
bytes extension = 5;
|
||||
// Vote extension signature
|
||||
bytes extension_signature = 6;
|
||||
}
|
||||
|
||||
message Proposal {
|
||||
SignedMsgType type = 1;
|
||||
int64 height = 2;
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/creachadair/tomledit"
|
||||
"github.com/creachadair/tomledit/transform"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
)
|
||||
|
||||
|
||||
334
scripts/metricsgen/metricsgen.go
Normal file
334
scripts/metricsgen/metricsgen.go
Normal file
@@ -0,0 +1,334 @@
|
||||
// metricsgen is a code generation tool for creating constructors for Tendermint
|
||||
// metrics types.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.Usage = func() {
|
||||
fmt.Fprintf(os.Stderr, `Usage: %[1]s -struct <struct>
|
||||
|
||||
Generate constructors for the metrics type specified by -struct contained in
|
||||
the current directory. The tool creates a new file in the current directory
|
||||
containing the generated code.
|
||||
|
||||
Options:
|
||||
`, filepath.Base(os.Args[0]))
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
}
|
||||
|
||||
const metricsPackageName = "github.com/go-kit/kit/metrics"
|
||||
|
||||
const (
|
||||
metricNameTag = "metrics_name"
|
||||
labelsTag = "metrics_labels"
|
||||
bucketTypeTag = "metrics_buckettype"
|
||||
bucketSizeTag = "metrics_bucketsizes"
|
||||
)
|
||||
|
||||
var (
|
||||
dir = flag.String("dir", ".", "Path to the directory containing the target package")
|
||||
strct = flag.String("struct", "Metrics", "Struct to parse for metrics")
|
||||
)
|
||||
|
||||
var bucketType = map[string]string{
|
||||
"exprange": "stdprometheus.ExponentialBucketsRange",
|
||||
"exp": "stdprometheus.ExponentialBuckets",
|
||||
"lin": "stdprometheus.LinearBuckets",
|
||||
}
|
||||
|
||||
var tmpl = template.Must(template.New("tmpl").Parse(`// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package {{ .Package }}
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
{{ range $metric := .ParsedMetrics }}
|
||||
{{- $metric.FieldName }}: prometheus.New{{ $metric.TypeName }}From(stdprometheus.{{$metric.TypeName }}Opts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "{{$metric.MetricName }}",
|
||||
Help: "{{ $metric.Description }}",
|
||||
{{ if ne $metric.HistogramOptions.BucketType "" }}
|
||||
Buckets: {{ $metric.HistogramOptions.BucketType }}({{ $metric.HistogramOptions.BucketSizes }}),
|
||||
{{ else if ne $metric.HistogramOptions.BucketSizes "" }}
|
||||
Buckets: []float64{ {{ $metric.HistogramOptions.BucketSizes }} },
|
||||
{{ end }}
|
||||
{{- if eq (len $metric.Labels) 0 }}
|
||||
}, labels).With(labelsAndValues...),
|
||||
{{ else }}
|
||||
}, append(labels, {{$metric.Labels | printf "%q" }})).With(labelsAndValues...),
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
{{- range $metric := .ParsedMetrics }}
|
||||
{{ $metric.FieldName }}: discard.New{{ $metric.TypeName }}(),
|
||||
{{- end }}
|
||||
}
|
||||
}
|
||||
`))
|
||||
|
||||
// ParsedMetricField is the data parsed for a single field of a metric struct.
type ParsedMetricField struct {
	TypeName    string // go-kit metric kind, e.g. "Counter", "Gauge", "Histogram"
	FieldName   string // Go field name exactly as written in the struct
	MetricName  string // emitted metric name: metrics_name tag value, else snake_case of FieldName
	Description string // help text accumulated from the field's doc comment
	Labels      string // raw value of the metrics_labels tag (comma-separated label names)

	// HistogramOptions holds bucket settings; it is populated only when
	// TypeName is "Histogram" (see parseMetricField).
	HistogramOptions HistogramOpts
}

// HistogramOpts carries histogram bucket configuration parsed from the
// metrics_buckettype and metrics_bucketsizes struct tags.
type HistogramOpts struct {
	BucketType  string // prometheus bucket-constructor name mapped from the metrics_buckettype tag
	BucketSizes string // verbatim comma-separated values from the metrics_bucketsizes tag
}

// TemplateData is all of the data required for rendering a metric file template.
type TemplateData struct {
	Package       string              // target package name for the generated file
	ParsedMetrics []ParsedMetricField // one entry per metric field discovered
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *strct == "" {
|
||||
log.Fatal("You must specify a non-empty -struct")
|
||||
}
|
||||
td, err := ParseMetricsDir(".", *strct)
|
||||
if err != nil {
|
||||
log.Fatalf("Parsing file: %v", err)
|
||||
}
|
||||
out := filepath.Join(*dir, "metrics.gen.go")
|
||||
f, err := os.Create(out)
|
||||
if err != nil {
|
||||
log.Fatalf("Opening file: %v", err)
|
||||
}
|
||||
err = GenerateMetricsFile(f, td)
|
||||
if err != nil {
|
||||
log.Fatalf("Generating code: %v", err)
|
||||
}
|
||||
}
|
||||
// ignoreTestFiles is a parser.ParseDir filter that keeps only non-test
// files: it reports true (include the file) when the name does not contain
// "_test.go".
func ignoreTestFiles(f fs.FileInfo) bool {
	return !strings.Contains(f.Name(), "_test.go")
}
|
||||
|
||||
// ParseMetricsDir parses the dir and scans for a struct matching structName,
|
||||
// ignoring all test files. ParseMetricsDir iterates the fields of the metrics
|
||||
// struct and builds a TemplateData using the data obtained from the abstract syntax tree.
|
||||
func ParseMetricsDir(dir string, structName string) (TemplateData, error) {
|
||||
fs := token.NewFileSet()
|
||||
d, err := parser.ParseDir(fs, dir, ignoreTestFiles, parser.ParseComments)
|
||||
if err != nil {
|
||||
return TemplateData{}, err
|
||||
}
|
||||
if len(d) > 1 {
|
||||
return TemplateData{}, fmt.Errorf("multiple packages found in %s", dir)
|
||||
}
|
||||
if len(d) == 0 {
|
||||
return TemplateData{}, fmt.Errorf("no go pacakges found in %s", dir)
|
||||
}
|
||||
|
||||
// Grab the package name.
|
||||
var pkgName string
|
||||
var pkg *ast.Package
|
||||
for pkgName, pkg = range d {
|
||||
}
|
||||
td := TemplateData{
|
||||
Package: pkgName,
|
||||
}
|
||||
// Grab the metrics struct
|
||||
m, mPkgName, err := findMetricsStruct(pkg.Files, structName)
|
||||
if err != nil {
|
||||
return TemplateData{}, err
|
||||
}
|
||||
for _, f := range m.Fields.List {
|
||||
if !isMetric(f.Type, mPkgName) {
|
||||
continue
|
||||
}
|
||||
pmf := parseMetricField(f)
|
||||
td.ParsedMetrics = append(td.ParsedMetrics, pmf)
|
||||
}
|
||||
|
||||
return td, err
|
||||
}
|
||||
|
||||
// GenerateMetricsFile executes the metrics file template, writing the result
|
||||
// into the io.Writer.
|
||||
func GenerateMetricsFile(w io.Writer, td TemplateData) error {
|
||||
b := []byte{}
|
||||
buf := bytes.NewBuffer(b)
|
||||
err := tmpl.Execute(buf, td)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(w, bytes.NewBuffer(b))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findMetricsStruct scans the package's files for a struct type declaration
// named structName. It returns the struct's AST node along with the local
// name of the metrics package import in the file where the struct was found,
// or an error if the identifier is not a struct or is absent from every file.
func findMetricsStruct(files map[string]*ast.File, structName string) (*ast.StructType, string, error) {
	var (
		st *ast.StructType
	)
	for _, file := range files {
		mPkgName, err := extractMetricsPackageName(file.Imports)
		if err != nil {
			return nil, "", fmt.Errorf("unable to determine metrics package name: %v", err)
		}
		// Skip files that do not declare the target identifier at all.
		// NOTE(review): ast.FilterFile also prunes the file's AST in place;
		// presumably acceptable here since only the struct is needed — verify.
		if !ast.FilterFile(file, func(name string) bool {
			return name == structName
		}) {
			continue
		}
		// Walk the file looking for the TypeSpec of the target name. The
		// closure writes into st and err from the enclosing scope.
		ast.Inspect(file, func(n ast.Node) bool {
			switch f := n.(type) {
			case *ast.TypeSpec:
				if f.Name.Name == structName {
					var ok bool
					st, ok = f.Type.(*ast.StructType)
					if !ok {
						err = fmt.Errorf("found identifier for %q of wrong type", structName)
					}
				}
				// Nothing of interest is nested inside a TypeSpec; stop
				// descending this branch.
				return false
			default:
				return true
			}
		})
		if err != nil {
			return nil, "", err
		}
		if st != nil {
			return st, mPkgName, nil
		}
	}
	return nil, "", fmt.Errorf("target struct %q not found in dir", structName)
}
|
||||
|
||||
// parseMetricField converts one struct field into a ParsedMetricField,
// taking the description from the field's doc comment and the metric name,
// labels, and histogram options from its struct tags.
func parseMetricField(f *ast.Field) ParsedMetricField {
	// Concatenate the doc-comment lines, stripping the leading "// " markers.
	var comment string
	if f.Doc != nil {
		for _, c := range f.Doc.List {
			comment += strings.TrimPrefix(c.Text, "// ")
		}
	}
	pmf := ParsedMetricField{
		Description: comment,
		MetricName:  extractFieldName(f.Names[0].String(), f.Tag),
		FieldName:   f.Names[0].String(),
		TypeName:    extractTypeName(f.Type),
		Labels:      extractLabels(f.Tag),
	}
	// Bucket options only apply to histograms.
	if pmf.TypeName == "Histogram" {
		pmf.HistogramOptions = extractHistogramOptions(f.Tag)
	}
	return pmf
}
|
||||
|
||||
func extractTypeName(e ast.Expr) string {
|
||||
return strings.TrimPrefix(path.Ext(types.ExprString(e)), ".")
|
||||
}
|
||||
|
||||
func isMetric(e ast.Expr, mPkgName string) bool {
|
||||
return strings.Contains(types.ExprString(e), fmt.Sprintf("%s.", mPkgName))
|
||||
}
|
||||
|
||||
func extractLabels(bl *ast.BasicLit) string {
|
||||
if bl != nil {
|
||||
t := reflect.StructTag(strings.Trim(bl.Value, "`"))
|
||||
if v := t.Get(labelsTag); v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// extractFieldName returns the metric name to emit for a struct field: the
// value of the metrics_name tag when present, otherwise the field name
// converted to snake_case.
func extractFieldName(name string, tag *ast.BasicLit) string {
	if tag != nil {
		// Struct tag literals carry surrounding backquotes; strip them
		// before parsing with reflect.StructTag.
		t := reflect.StructTag(strings.Trim(tag.Value, "`"))
		if v := t.Get(metricNameTag); v != "" {
			return v
		}
	}
	return toSnakeCase(name)
}
|
||||
|
||||
// extractHistogramOptions reads histogram bucket configuration from a
// field's struct tag: metrics_buckettype is mapped through the bucketType
// table to a prometheus bucket-constructor name, and metrics_bucketsizes is
// kept verbatim.
func extractHistogramOptions(tag *ast.BasicLit) HistogramOpts {
	h := HistogramOpts{}
	if tag != nil {
		t := reflect.StructTag(strings.Trim(tag.Value, "`"))
		if v := t.Get(bucketTypeTag); v != "" {
			// An unrecognized bucket type maps to "", leaving BucketType unset.
			h.BucketType = bucketType[v]
		}
		if v := t.Get(bucketSizeTag); v != "" {
			h.BucketSizes = v
		}
	}
	return h
}
|
||||
|
||||
func extractMetricsPackageName(imports []*ast.ImportSpec) (string, error) {
|
||||
for _, i := range imports {
|
||||
u, err := strconv.Unquote(i.Path.Value)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if u == metricsPackageName {
|
||||
if i.Name != nil {
|
||||
return i.Name.Name, nil
|
||||
}
|
||||
return path.Base(u), nil
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// capitalChange matches a lowercase letter or digit immediately followed by
// an uppercase letter — the word boundary inside a camelCase identifier.
var capitalChange = regexp.MustCompile("([a-z0-9])([A-Z])")

// toSnakeCase converts a camelCase identifier to snake_case by inserting an
// underscore at each word boundary and lowercasing the result.
func toSnakeCase(str string) string {
	withUnderscores := capitalChange.ReplaceAllString(str, "${1}_${2}")
	return strings.ToLower(withUnderscores)
}
|
||||
259
scripts/metricsgen/metricsgen_test.go
Normal file
259
scripts/metricsgen/metricsgen_test.go
Normal file
@@ -0,0 +1,259 @@
|
||||
package main_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metricsgen "github.com/tendermint/tendermint/scripts/metricsgen"
|
||||
)
|
||||
|
||||
const testDataDir = "./testdata"
|
||||
|
||||
// TestSimpleTemplate checks that a hand-built ParsedMetricField (including
// labels and a description) renders through the code-generation template
// without error.
func TestSimpleTemplate(t *testing.T) {
	m := metricsgen.ParsedMetricField{
		TypeName:    "Histogram",
		FieldName:   "MyMetric",
		MetricName:  "request_count",
		Description: "how many requests were made since the start of the process",
		Labels:      "first, second, third",
	}
	td := metricsgen.TemplateData{
		Package:       "mypack",
		ParsedMetrics: []metricsgen.ParsedMetricField{m},
	}
	b := bytes.NewBuffer([]byte{})
	err := metricsgen.GenerateMetricsFile(b, td)
	if err != nil {
		t.Fatalf("unable to parse template %v", err)
	}
}
|
||||
|
||||
func TestFromData(t *testing.T) {
|
||||
infos, err := ioutil.ReadDir(testDataDir)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open file %v", err)
|
||||
}
|
||||
for _, dir := range infos {
|
||||
t.Run(dir.Name(), func(t *testing.T) {
|
||||
if !dir.IsDir() {
|
||||
t.Fatalf("expected file %s to be directory", dir.Name())
|
||||
}
|
||||
dirName := path.Join(testDataDir, dir.Name())
|
||||
pt, err := metricsgen.ParseMetricsDir(dirName, "Metrics")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to parse from dir %q: %v", dir, err)
|
||||
}
|
||||
outFile := path.Join(dirName, "out.go")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open file %s: %v", outFile, err)
|
||||
}
|
||||
of, err := os.Create(outFile)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open file %s: %v", outFile, err)
|
||||
}
|
||||
defer os.Remove(outFile)
|
||||
if err := metricsgen.GenerateMetricsFile(of, pt); err != nil {
|
||||
t.Fatalf("unable to generate metrics file %s: %v", outFile, err)
|
||||
}
|
||||
if _, err := parser.ParseFile(token.NewFileSet(), outFile, nil, parser.AllErrors); err != nil {
|
||||
t.Fatalf("unable to parse generated file %s: %v", outFile, err)
|
||||
}
|
||||
bNew, err := ioutil.ReadFile(outFile)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to read generated file %s: %v", outFile, err)
|
||||
}
|
||||
goldenFile := path.Join(dirName, "metrics.gen.go")
|
||||
bOld, err := ioutil.ReadFile(goldenFile)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to read file %s: %v", goldenFile, err)
|
||||
}
|
||||
if !bytes.Equal(bNew, bOld) {
|
||||
t.Fatalf("newly generated code in file %s does not match golden file %s\n"+
|
||||
"if the output of the metricsgen tool is expected to change run the following make target: \n"+
|
||||
"\tmake metrics", outFile, goldenFile)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMetricsStruct(t *testing.T) {
|
||||
const pkgName = "mypkg"
|
||||
metricsTests := []struct {
|
||||
name string
|
||||
shouldError bool
|
||||
metricsStruct string
|
||||
expected metricsgen.TemplateData
|
||||
}{
|
||||
{
|
||||
name: "basic",
|
||||
metricsStruct: `type Metrics struct {
|
||||
myGauge metrics.Gauge
|
||||
}`,
|
||||
expected: metricsgen.TemplateData{
|
||||
Package: pkgName,
|
||||
ParsedMetrics: []metricsgen.ParsedMetricField{
|
||||
{
|
||||
TypeName: "Gauge",
|
||||
FieldName: "myGauge",
|
||||
MetricName: "my_gauge",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "histogram",
|
||||
metricsStruct: "type Metrics struct {\n" +
|
||||
"myHistogram metrics.Histogram `metrics_buckettype:\"exp\" metrics_bucketsizes:\"1, 100, .8\"`\n" +
|
||||
"}",
|
||||
expected: metricsgen.TemplateData{
|
||||
Package: pkgName,
|
||||
ParsedMetrics: []metricsgen.ParsedMetricField{
|
||||
{
|
||||
TypeName: "Histogram",
|
||||
FieldName: "myHistogram",
|
||||
MetricName: "my_histogram",
|
||||
|
||||
HistogramOptions: metricsgen.HistogramOpts{
|
||||
BucketType: "stdprometheus.ExponentialBuckets",
|
||||
BucketSizes: "1, 100, .8",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "labeled name",
|
||||
metricsStruct: "type Metrics struct {\n" +
|
||||
"myCounter metrics.Counter `metrics_name:\"new_name\"`\n" +
|
||||
"}",
|
||||
expected: metricsgen.TemplateData{
|
||||
Package: pkgName,
|
||||
ParsedMetrics: []metricsgen.ParsedMetricField{
|
||||
{
|
||||
TypeName: "Counter",
|
||||
FieldName: "myCounter",
|
||||
MetricName: "new_name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metric labels",
|
||||
metricsStruct: "type Metrics struct {\n" +
|
||||
"myCounter metrics.Counter `metrics_labels:\"label1, label2\"`\n" +
|
||||
"}",
|
||||
expected: metricsgen.TemplateData{
|
||||
Package: pkgName,
|
||||
ParsedMetrics: []metricsgen.ParsedMetricField{
|
||||
{
|
||||
TypeName: "Counter",
|
||||
FieldName: "myCounter",
|
||||
MetricName: "my_counter",
|
||||
Labels: "label1, label2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ignore non-metric field",
|
||||
metricsStruct: `type Metrics struct {
|
||||
myCounter metrics.Counter
|
||||
nonMetric string
|
||||
}`,
|
||||
expected: metricsgen.TemplateData{
|
||||
Package: pkgName,
|
||||
ParsedMetrics: []metricsgen.ParsedMetricField{
|
||||
{
|
||||
TypeName: "Counter",
|
||||
FieldName: "myCounter",
|
||||
MetricName: "my_counter",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, testCase := range metricsTests {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
dir, err := os.MkdirTemp(os.TempDir(), "metricsdir")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create directory: %v", err)
|
||||
}
|
||||
defer os.Remove(dir)
|
||||
f, err := os.Create(filepath.Join(dir, "metrics.go"))
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open file: %v", err)
|
||||
}
|
||||
pkgLine := fmt.Sprintf("package %s\n", pkgName)
|
||||
importClause := `
|
||||
import(
|
||||
"github.com/go-kit/kit/metrics"
|
||||
)
|
||||
`
|
||||
|
||||
_, err = io.WriteString(f, pkgLine)
|
||||
require.NoError(t, err)
|
||||
_, err = io.WriteString(f, importClause)
|
||||
require.NoError(t, err)
|
||||
_, err = io.WriteString(f, testCase.metricsStruct)
|
||||
require.NoError(t, err)
|
||||
|
||||
td, err := metricsgen.ParseMetricsDir(dir, "Metrics")
|
||||
if testCase.shouldError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, testCase.expected, td)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAliasedMetric(t *testing.T) {
|
||||
aliasedData := `
|
||||
package mypkg
|
||||
|
||||
import(
|
||||
mymetrics "github.com/go-kit/kit/metrics"
|
||||
)
|
||||
type Metrics struct {
|
||||
m mymetrics.Gauge
|
||||
}
|
||||
`
|
||||
dir, err := os.MkdirTemp(os.TempDir(), "metricsdir")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create directory: %v", err)
|
||||
}
|
||||
defer os.Remove(dir)
|
||||
f, err := os.Create(filepath.Join(dir, "metrics.go"))
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open file: %v", err)
|
||||
}
|
||||
_, err = io.WriteString(f, aliasedData)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to write to file: %v", err)
|
||||
}
|
||||
td, err := metricsgen.ParseMetricsDir(dir, "Metrics")
|
||||
require.NoError(t, err)
|
||||
|
||||
expected :=
|
||||
metricsgen.TemplateData{
|
||||
Package: "mypkg",
|
||||
ParsedMetrics: []metricsgen.ParsedMetricField{
|
||||
{
|
||||
TypeName: "Gauge",
|
||||
FieldName: "m",
|
||||
MetricName: "m",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.Equal(t, expected, td)
|
||||
}
|
||||
30
scripts/metricsgen/testdata/basic/metrics.gen.go
vendored
Normal file
30
scripts/metricsgen/testdata/basic/metrics.gen.go
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package basic
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "height",
|
||||
Help: "simple metric that tracks the height of the chain.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
Height: discard.NewGauge(),
|
||||
}
|
||||
}
|
||||
11
scripts/metricsgen/testdata/basic/metrics.go
vendored
Normal file
11
scripts/metricsgen/testdata/basic/metrics.go
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
package basic
|
||||
|
||||
import "github.com/go-kit/kit/metrics"
|
||||
|
||||
//go:generate go run ../../../../scripts/metricsgen -struct=Metrics
|
||||
|
||||
// Metrics contains metrics exposed by this package.
|
||||
type Metrics struct {
|
||||
// simple metric that tracks the height of the chain.
|
||||
Height metrics.Gauge
|
||||
}
|
||||
30
scripts/metricsgen/testdata/commented/metrics.gen.go
vendored
Normal file
30
scripts/metricsgen/testdata/commented/metrics.gen.go
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package commented
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
Field: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "field",
|
||||
Help: "Height of the chain.We expect multi-line comments to parse correctly.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
Field: discard.NewGauge(),
|
||||
}
|
||||
}
|
||||
11
scripts/metricsgen/testdata/commented/metrics.go
vendored
Normal file
11
scripts/metricsgen/testdata/commented/metrics.go
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
package commented
|
||||
|
||||
import "github.com/go-kit/kit/metrics"
|
||||
|
||||
//go:generate go run ../../../../scripts/metricsgen -struct=Metrics
|
||||
|
||||
type Metrics struct {
|
||||
// Height of the chain.
|
||||
// We expect multi-line comments to parse correctly.
|
||||
Field metrics.Gauge
|
||||
}
|
||||
54
scripts/metricsgen/testdata/tags/metrics.gen.go
vendored
Normal file
54
scripts/metricsgen/testdata/tags/metrics.gen.go
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package tags
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
WithLabels: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "with_labels",
|
||||
Help: "",
|
||||
}, append(labels, "step,time")).With(labelsAndValues...), WithExpBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "with_exp_buckets",
|
||||
Help: "",
|
||||
|
||||
Buckets: stdprometheus.ExponentialBuckets(.1, 100, 8),
|
||||
}, labels).With(labelsAndValues...),
|
||||
WithBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "with_buckets",
|
||||
Help: "",
|
||||
|
||||
Buckets: []float64{1, 2, 3, 4, 5},
|
||||
}, labels).With(labelsAndValues...),
|
||||
Named: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "metric_with_name",
|
||||
Help: "",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
WithLabels: discard.NewCounter(),
|
||||
WithExpBuckets: discard.NewHistogram(),
|
||||
WithBuckets: discard.NewHistogram(),
|
||||
Named: discard.NewCounter(),
|
||||
}
|
||||
}
|
||||
12
scripts/metricsgen/testdata/tags/metrics.go
vendored
Normal file
12
scripts/metricsgen/testdata/tags/metrics.go
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
package tags
|
||||
|
||||
import "github.com/go-kit/kit/metrics"
|
||||
|
||||
//go:generate go run ../../../../scripts/metricsgen -struct=Metrics
|
||||
|
||||
type Metrics struct {
|
||||
WithLabels metrics.Counter `metrics_labels:"step,time"`
|
||||
WithExpBuckets metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:".1,100,8"`
|
||||
WithBuckets metrics.Histogram `metrics_bucketsizes:"1, 2, 3, 4, 5"`
|
||||
Named metrics.Counter `metrics_name:"metric_with_name"`
|
||||
}
|
||||
@@ -6,47 +6,49 @@ title: Application Requirements
|
||||
# Application Requirements
|
||||
|
||||
This section specifies what Tendermint expects from the Application. It is structured as a set
|
||||
of formal requirement that can be used for testing and verification of the Application's logic.
|
||||
of formal requirements that can be used for testing and verification of the Application's logic.
|
||||
|
||||
Let $p$ and $q$ be two different correct proposers in rounds $r_p$ and $r_q$ respectively, in height $h$.
|
||||
Let $s_{p,h-1}$ be $p$'s Application's state committed for height $h-1$.
|
||||
Let $v_p$ (resp. $v_q$) be the block that $p$'s (resp. $q$'s) Tendermint passes on to the Application
|
||||
via `RequestPrepareProposal` as proposer of round $r_p$ (resp $r_q$), height $h$, also known as the
|
||||
raw proposal.
|
||||
Let $v'_p$ (resp. $v'_q$) the possibly modified block $p$'s (resp. $q$'s) Application returns via
|
||||
`ResponsePrepareProposal` to Tendermint, also known as the prepared proposal.
|
||||
Let *p* and *q* be two different correct proposers in rounds *r<sub>p</sub>* and *r<sub>q</sub>*
|
||||
respectively, in height *h*.
|
||||
Let *s<sub>p,h-1</sub>* be *p*'s Application's state committed for height *h-1*.
|
||||
Let *v<sub>p</sub>* (resp. *v<sub>q</sub>*) be the block that *p*'s (resp. *q*'s) Tendermint passes
|
||||
on to the Application
|
||||
via `RequestPrepareProposal` as proposer of round *r<sub>p</sub>* (resp *r<sub>q</sub>*), height *h*,
|
||||
also known as the raw proposal.
|
||||
Let *v'<sub>p</sub>* (resp. *v'<sub>q</sub>*) the possibly modified block *p*'s (resp. *q*'s) Application
|
||||
returns via `ResponsePrepareProposal` to Tendermint, also known as the prepared proposal.
|
||||
|
||||
Process $p$'s prepared proposal can differ in two different rounds where $p$ is the proposer.
|
||||
Process *p*'s prepared proposal can differ in two different rounds where *p* is the proposer.
|
||||
|
||||
* Requirement 1 [`PrepareProposal`, header-changes] When the blockchain is in same-block execution mode,
|
||||
$p$'s Application provides values for the following parameters in `ResponsePrepareProposal`:
|
||||
_AppHash_, _TxResults_, _ConsensusParams_, _ValidatorUpdates_. Provided values for
|
||||
_ConsensusParams_ and _ValidatorUpdates_ MAY be empty to denote that the Application
|
||||
* Requirement 1 [`PrepareProposal`, header-changes]: When the blockchain is in same-block execution mode,
|
||||
*p*'s Application provides values for the following parameters in `ResponsePrepareProposal`:
|
||||
`AppHash`, `TxResults`, `ConsensusParams`, `ValidatorUpdates`. Provided values for
|
||||
`ConsensusParams` and `ValidatorUpdates` MAY be empty to denote that the Application
|
||||
wishes to keep the current values.
|
||||
|
||||
Parameters _AppHash_, _TxResults_, _ConsensusParams_, and _ValidatorUpdates_ are used by Tendermint to
|
||||
Parameters `AppHash`, `TxResults`, `ConsensusParams`, and `ValidatorUpdates` are used by Tendermint to
|
||||
compute various hashes in the block header that will finally be part of the proposal.
|
||||
|
||||
* Requirement 2 [`PrepareProposal`, no-header-changes] When the blockchain is in next-block execution
|
||||
mode, $p$'s Application does not provide values for the following parameters in `ResponsePrepareProposal`:
|
||||
_AppHash_, _TxResults_, _ConsensusParams_, _ValidatorUpdates_.
|
||||
* Requirement 2 [`PrepareProposal`, no-header-changes]: When the blockchain is in next-block execution
|
||||
mode, *p*'s Application does not provide values for the following parameters in `ResponsePrepareProposal`:
|
||||
`AppHash`, `TxResults`, `ConsensusParams`, `ValidatorUpdates`.
|
||||
|
||||
In practical terms, Requirements 1 and 2 imply that Tendermint will (a) panic if the Application is in
|
||||
same-block execution mode and _does_ _not_ provide values for
|
||||
_AppHash_, _TxResults_, _ConsensusParams_, and _ValidatorUpdates_, or
|
||||
(b) log an error if the Application is in next-block execution mode and _does_ provide values for
|
||||
_AppHash_, _TxResults_, _ConsensusParams_, or _ValidatorUpdates_ (the values provided will be ignored).
|
||||
same-block execution mode and *does not* provide values for
|
||||
`AppHash`, `TxResults`, `ConsensusParams`, and `ValidatorUpdates`, or
|
||||
(b) log an error if the Application is in next-block execution mode and *does* provide values for
|
||||
`AppHash`, `TxResults`, `ConsensusParams`, or `ValidatorUpdates` (the values provided will be ignored).
|
||||
|
||||
* Requirement 3 [`PrepareProposal`, timeliness] If $p$'s Application fully executes prepared blocks in
|
||||
`PrepareProposal` and the network is in a synchronous period while processes $p$ and $q$ are in $r_p$, then
|
||||
the value of *TimeoutPropose* at $q$ must be such that $q$'s propose timer does not time out
|
||||
(which would result in $q$ prevoting *nil* in $r_p$).
|
||||
* Requirement 3 [`PrepareProposal`, timeliness]: If *p*'s Application fully executes prepared blocks in
|
||||
`PrepareProposal` and the network is in a synchronous period while processes *p* and *q* are in *r<sub>p</sub>*,
|
||||
then the value of *TimeoutPropose* at *q* must be such that *q*'s propose timer does not time out
|
||||
(which would result in *q* prevoting `nil` in *r<sub>p</sub>*).
|
||||
|
||||
Full execution of blocks at `PrepareProposal` time stands on Tendermint's critical path. Thus,
|
||||
Requirement 3 ensures the Application will set a value for _TimeoutPropose_ such that the time it takes
|
||||
Requirement 3 ensures the Application will set a value for `TimeoutPropose` such that the time it takes
|
||||
to fully execute blocks in `PrepareProposal` does not interfere with Tendermint's propose timer.
|
||||
|
||||
* Requirement 4 [`PrepareProposal`, tx-size] When $p$'s Application calls `ResponsePrepareProposal`, the
|
||||
* Requirement 4 [`PrepareProposal`, tx-size]: When *p*'s Application calls `ResponsePrepareProposal`, the
|
||||
total size in bytes of the transactions returned does not exceed `RequestPrepareProposal.max_tx_bytes`.
|
||||
|
||||
Busy blockchains might seek to maximize the amount of transactions included in each block. Under those conditions,
|
||||
@@ -54,29 +56,31 @@ Tendermint might choose to increase the transactions passed to the Application v
|
||||
beyond the `RequestPrepareProposal.max_tx_bytes` limit. The idea is that, if the Application drops some of
|
||||
those transactions, it can still return a transaction list whose byte size is as close to
|
||||
`RequestPrepareProposal.max_tx_bytes` as possible. Thus, Requirement 4 ensures that the size in bytes of the
|
||||
transaction list returned by the application will never cause the resulting block to go beyond its byte limit.
|
||||
transaction list returned by the application will never cause the resulting block to go beyond its byte size
|
||||
limit.
|
||||
|
||||
* Requirement 5 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes $p$ and $q$,
|
||||
if $q$'s Tendermint calls `RequestProcessProposal` on $v'_p$,
|
||||
$q$'s Application returns Accept in `ResponseProcessProposal`.
|
||||
* Requirement 5 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*,
|
||||
if *q*'s Tendermint calls `RequestProcessProposal` on *v'<sub>p</sub>*,
|
||||
*q*'s Application returns Accept in `ResponseProcessProposal`.
|
||||
|
||||
Requirement 5 makes sure that blocks proposed by correct processes _always_ pass the correct receiving process's
|
||||
Requirement 5 makes sure that blocks proposed by correct processes *always* pass the correct receiving process's
|
||||
`ProcessProposal` check.
|
||||
On the other hand, if there is a deterministic bug in `PrepareProposal` or `ProcessProposal` (or in both),
|
||||
strictly speaking, this makes all processes that hit the bug byzantine. This is a problem in practice,
|
||||
as very often validators are running the Application from the same codebase, so potentially _all_ would
|
||||
as very often validators are running the Application from the same codebase, so potentially *all* would
|
||||
likely hit the bug at the same time. This would result in most (or all) processes prevoting `nil`, with the
|
||||
serious consequences on Tendermint's liveness that this entails. Due to its criticality, Requirement 5 is a
|
||||
target for extensive testing and automated verification.
|
||||
|
||||
* Requirement 6 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current
|
||||
state and the block that is about to be applied. In other words, for any correct process $p$, and any arbitrary block $v'$,
|
||||
if $p$'s Tendermint calls `RequestProcessProposal` on $v'$ at height $h$,
|
||||
then $p$'s Application's acceptance or rejection **exclusively** depends on $v'$ and $s_{p,h-1}$.
|
||||
state and the block that is about to be applied. In other words, for any correct process *p*, and any arbitrary block *v'*,
|
||||
if *p*'s Tendermint calls `RequestProcessProposal` on *v'* at height *h*,
|
||||
then *p*'s Application's acceptance or rejection **exclusively** depends on *v'* and *s<sub>p,h-1</sub>*.
|
||||
|
||||
* Requirement 7 [`ProcessProposal`, determinism-2]: For any two correct processes $p$ and $q$, and any arbitrary block $v'$,
|
||||
if $p$'s (resp. $q$'s) Tendermint calls `RequestProcessProposal` on $v'$ at height $h$,
|
||||
then $p$'s Application accepts $v'$ if and only if $q$'s Application accepts $v'$.
|
||||
* Requirement 7 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary
|
||||
block *v'*,
|
||||
if *p*'s (resp. *q*'s) Tendermint calls `RequestProcessProposal` on *v'* at height *h*,
|
||||
then *p*'s Application accepts *v'* if and only if *q*'s Application accepts *v'*.
|
||||
Note that this requirement follows from Requirement 6 and the Agreement property of consensus.
|
||||
|
||||
Requirements 6 and 7 ensure that all correct processes will react in the same way to a proposed block, even
|
||||
@@ -87,20 +91,26 @@ In such a scenario, Tendermint's liveness cannot be guaranteed.
|
||||
Again, this is a problem in practice if most validators are running the same software, as they are likely
|
||||
to hit the bug at the same point. There is currently no clear solution to help with this situation, so
|
||||
the Application designers/implementors must proceed very carefully with the logic/implementation
|
||||
of `ProcessProposal`. As a general rule `ProcessProposal` _should_ always accept the block.
|
||||
of `ProcessProposal`. As a general rule `ProcessProposal` SHOULD always accept the block.
|
||||
|
||||
According to the Tendermint algorithm, a correct process can broadcast at most one precommit message in round $r$, height $h$.
|
||||
Since, as stated in the [Description](#description) section, `ResponseExtendVote` is only called when Tendermint
|
||||
is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension in round $r$, height $h$.
|
||||
Let $e^r_p$ be the vote extension that the Application of a correct process $p$ returns via `ResponseExtendVote` in round $r$, height $h$.
|
||||
Let $w^r_p$ be the proposed block that $p$'s Tendermint passes to the Application via `RequestExtendVote` in round $r$, height $h$.
|
||||
According to the Tendermint algorithm, a correct process can broadcast at most one precommit
|
||||
message in round *r*, height *h*.
|
||||
Since, as stated in the [Methods](./abci++_methods_002_draft.md#extendvote) section, `ResponseExtendVote`
|
||||
is only called when Tendermint
|
||||
is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension
|
||||
in round *r*, height *h*.
|
||||
Let *e<sup>r</sup><sub>p</sub>* be the vote extension that the Application of a correct process *p* returns via
|
||||
`ResponseExtendVote` in round *r*, height *h*.
|
||||
Let *w<sup>r</sup><sub>p</sub>* be the proposed block that *p*'s Tendermint passes to the Application via `RequestExtendVote`
|
||||
in round *r*, height *h*.
|
||||
|
||||
* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two correct processes $p$ and $q$, if $q$ receives $e^r_p$
|
||||
from $p$ in height $h$, $q$'s Application returns Accept in `ResponseVerifyVoteExtension`.
|
||||
* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two correct processes *p* and *q*, if *q*
|
||||
receives *e<sup>r</sup><sub>p</sub>*
|
||||
from *p* in height *h*, *q*'s Application returns Accept in `ResponseVerifyVoteExtension`.
|
||||
|
||||
Requirement 8 constrains the creation and handling of vote extensions in a similar way as Requirement 5
|
||||
constrains the creation and handling of proposed blocks.
|
||||
Requirement 8 ensures that extensions created by correct processes _always_ pass the `VerifyVoteExtension`
|
||||
constrains the creation and handling of proposed blocks.
|
||||
Requirement 8 ensures that extensions created by correct processes *always* pass the `VerifyVoteExtension`
|
||||
checks performed by correct processes receiving those extensions.
|
||||
However, if there is a (deterministic) bug in `ExtendVote` or `VerifyVoteExtension` (or in both),
|
||||
we will face the same liveness issues as described for Requirement 5, as Precommit messages with invalid vote
|
||||
@@ -108,58 +118,62 @@ extensions will be discarded.
|
||||
|
||||
* Requirement 9 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of
|
||||
the current state, the vote extension received, and the prepared proposal that the extension refers to.
|
||||
In other words, for any correct process $p$, and any arbitrary vote extension $e$, and any arbitrary
|
||||
block $w$, if $p$'s (resp. $q$'s) Tendermint calls `RequestVerifyVoteExtension` on $e$ and $w$ at height $h$,
|
||||
then $p$'s Application's acceptance or rejection **exclusively** depends on $e$, $w$ and $s_{p,h-1}$.
|
||||
In other words, for any correct process *p*, and any arbitrary vote extension *e*, and any arbitrary
|
||||
block *w*, if *p*'s (resp. *q*'s) Tendermint calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*,
|
||||
then *p*'s Application's acceptance or rejection **exclusively** depends on *e*, *w* and *s<sub>p,h-1</sub>*.
|
||||
|
||||
* Requirement 10 [`VerifyVoteExtension`, determinism-2]: For any two correct processes $p$ and $q$,
|
||||
and any arbitrary vote extension $e$, and any arbitrary block $w$,
|
||||
if $p$'s (resp. $q$'s) Tendermint calls `RequestVerifyVoteExtension` on $e$ and $w$ at height $h$,
|
||||
then $p$'s Application accepts $e$ if and only if $q$'s Application accepts $e$.
|
||||
* Requirement 10 [`VerifyVoteExtension`, determinism-2]: For any two correct processes *p* and *q*,
|
||||
and any arbitrary vote extension *e*, and any arbitrary block *w*,
|
||||
if *p*'s (resp. *q*'s) Tendermint calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*,
|
||||
then *p*'s Application accepts *e* if and only if *q*'s Application accepts *e*.
|
||||
Note that this requirement follows from Requirement 9 and the Agreement property of consensus.
|
||||
|
||||
Requirements 9 and 10 ensure that the validation of vote extensions will be deterministic at all
|
||||
correct processes.
|
||||
Requirements 9 and 10 protect against arbitrary vote extension data from Byzantine processes
|
||||
similarly to Requirements 6 and 7 and proposed blocks.
|
||||
Requirements 9 and 10 protect against arbitrary vote extension data from Byzantine processes,
|
||||
in a similar way as Requirements 6 and 7 protect against arbitrary proposed blocks.
|
||||
Requirements 9 and 10 can be violated by a bug inducing non-determinism in
|
||||
`VerifyVoteExtension`. In this case liveness can be compromised.
|
||||
Extra care should be put in the implementation of `ExtendVote` and `VerifyVoteExtension` and,
|
||||
as a general rule, `VerifyVoteExtension` _should_ always accept the vote extension.
|
||||
Extra care should be put in the implementation of `ExtendVote` and `VerifyVoteExtension`.
|
||||
As a general rule, `VerifyVoteExtension` SHOULD always accept the vote extension.
|
||||
|
||||
* Requirement 11 [_all_, no-side-effects]: $p$'s calls to `RequestPrepareProposal`,
|
||||
`RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height $h$ do
|
||||
not modify $s_{p,h-1}$.
|
||||
* Requirement 11 [*all*, no-side-effects]: *p*'s calls to `RequestPrepareProposal`,
|
||||
`RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height *h* do
|
||||
not modify *s<sub>p,h-1</sub>*.
|
||||
|
||||
* Requirement 12 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process $p$,
|
||||
and any vote extension $e$ that $p$ received at height $h$, the computation of
|
||||
$s_{p,h}$ does not depend on $e$.
|
||||
* Requirement 12 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process *p*,
|
||||
and any vote extension *e* that *p* received at height *h*, the computation of
|
||||
*s<sub>p,h</sub>* does not depend on *e*.
|
||||
|
||||
The call to correct process $p$'s `RequestFinalizeBlock` at height $h$, with block $v_{p,h}$
|
||||
passed as parameter, creates state $s_{p,h}$.
|
||||
The call to correct process *p*'s `RequestFinalizeBlock` at height *h*, with block *v<sub>p,h</sub>*
|
||||
passed as parameter, creates state *s<sub>p,h</sub>*.
|
||||
Additionally,
|
||||
|
||||
* in next-block execution mode, $p$'s `FinalizeBlock` creates a set of transaction results $T_{p,h}$,
|
||||
* in same-block execution mode, $p$'s `PrepareProposal` creates a set of transaction results $T_{p,h}$
|
||||
if $p$ was the proposer of $v_{p,h}$, otherwise `FinalizeBlock` creates $T_{p,h}$.
|
||||
* in next-block execution mode, *p*'s `FinalizeBlock` creates a set of transaction results *T<sub>p,h</sub>*,
|
||||
* in same-block execution mode, *p*'s `PrepareProposal` creates a set of transaction results *T<sub>p,h</sub>*
|
||||
if *p* was the proposer of *v<sub>p,h</sub>*. If *p* was not the proposer of *v<sub>p,h</sub>*,
|
||||
`ProcessProposal` creates *T<sub>p,h</sub>*. `FinalizeBlock` MAY re-create *T<sub>p,h</sub>* if it was
|
||||
removed from memory during the execution of height *h*.
|
||||
|
||||
* Requirement 13 [`FinalizeBlock`, determinism-1]: For any correct process $p$,
|
||||
$s_{p,h}$ exclusively depends on $s_{p,h-1}$ and $v_{p,h}$.
|
||||
* Requirement 13 [`FinalizeBlock`, determinism-1]: For any correct process *p*,
|
||||
*s<sub>p,h</sub>* exclusively depends on *s<sub>p,h-1</sub>* and *v<sub>p,h</sub>*.
|
||||
|
||||
* Requirement 14 [`FinalizeBlock`, determinism-2]: For any correct process $p$,
|
||||
the contents of $T_{p,h}$ exclusively depend on $s_{p,h-1}$ and $v_{p,h}$.
|
||||
* Requirement 14 [`FinalizeBlock`, determinism-2]: For any correct process *p*,
|
||||
the contents of *T<sub>p,h</sub>* exclusively depend on *s<sub>p,h-1</sub>* and *v<sub>p,h</sub>*.
|
||||
|
||||
Note that Requirements 13 and 14, combined with the Agreement property of consensus, ensure
|
||||
the Application state evolves consistently at all correct processes.
|
||||
state machine replication, i.e., the Application state evolves consistently at all correct processes.
|
||||
|
||||
Finally, notice that neither `PrepareProposal` nor `ExtendVote` have determinism-related
|
||||
requirements associated.
|
||||
Indeed, `PrepareProposal` is not required to be deterministic:
|
||||
|
||||
* $v'_p$ may depend on $v_p$ and $s_{p,h-1}$, but may also depend on other values or operations.
|
||||
* $v_p = v_q \nRightarrow v'_p = v'_q$.
|
||||
* *v'<sub>p</sub>* may depend on *v<sub>p</sub>* and *s<sub>p,h-1</sub>*, but may also depend on other values or operations.
|
||||
* *v<sub>p</sub> = v<sub>q</sub> ⇏ v'<sub>p</sub> = v'<sub>q</sub>*.
|
||||
|
||||
Likewise, `ExtendVote` can also be non-deterministic:
|
||||
|
||||
* $e^r_p$ may depend on $w^r_p$ and $s_{p,h-1}$, but may also depend on other values or operations.
|
||||
* $w^r_p = w^r_q \nRightarrow e^r_p = e^r_q$
|
||||
* *e<sup>r</sup><sub>p</sub>* may depend on *w<sup>r</sup><sub>p</sub>* and *s<sub>p,h-1</sub>*,
|
||||
but may also depend on other values or operations.
|
||||
* *w<sup>r</sup><sub>p</sub> = w<sup>r</sup><sub>q</sub> ⇏
|
||||
e<sup>r</sup><sub>p</sub> = e<sup>r</sup><sub>q</sub>*
|
||||
|
||||
@@ -165,9 +165,9 @@ func generateLightClientAttackEvidence(
|
||||
|
||||
// create a commit for the forged header
|
||||
blockID := makeBlockID(header.Hash(), 1000, []byte("partshash"))
|
||||
voteSet := types.NewVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals)
|
||||
voteSet := types.NewVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals, false)
|
||||
|
||||
commit, err := factory.MakeCommit(ctx, blockID, forgedHeight, 0, voteSet, pv, forgedTime)
|
||||
commit, err := factory.MakeExtendedCommit(ctx, blockID, forgedHeight, 0, voteSet, pv, forgedTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -176,7 +176,7 @@ func generateLightClientAttackEvidence(
|
||||
ConflictingBlock: &types.LightBlock{
|
||||
SignedHeader: &types.SignedHeader{
|
||||
Header: header,
|
||||
Commit: commit,
|
||||
Commit: commit.StripExtensions(),
|
||||
},
|
||||
ValidatorSet: conflictingVals,
|
||||
},
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# fuzz
|
||||
|
||||
Fuzzing for various packages in Tendermint using [go-fuzz](https://github.com/dvyukov/go-fuzz) library.
|
||||
Fuzzing for various packages in Tendermint using the fuzzing infrastructure included in
|
||||
Go 1.18.
|
||||
|
||||
Inputs:
|
||||
|
||||
|
||||
445
types/block.go
445
types/block.go
@@ -608,16 +608,6 @@ type CommitSig struct {
|
||||
Signature []byte `json:"signature"`
|
||||
}
|
||||
|
||||
// NewCommitSigForBlock returns new CommitSig with BlockIDFlagCommit.
|
||||
func NewCommitSigForBlock(signature []byte, valAddr Address, ts time.Time) CommitSig {
|
||||
return CommitSig{
|
||||
BlockIDFlag: BlockIDFlagCommit,
|
||||
ValidatorAddress: valAddr,
|
||||
Timestamp: ts,
|
||||
Signature: signature,
|
||||
}
|
||||
}
|
||||
|
||||
func MaxCommitBytes(valCount int) int64 {
|
||||
// From the repeated commit sig field
|
||||
var protoEncodingOverhead int64 = 2
|
||||
@@ -632,16 +622,6 @@ func NewCommitSigAbsent() CommitSig {
|
||||
}
|
||||
}
|
||||
|
||||
// ForBlock returns true if CommitSig is for the block.
|
||||
func (cs CommitSig) ForBlock() bool {
|
||||
return cs.BlockIDFlag == BlockIDFlagCommit
|
||||
}
|
||||
|
||||
// Absent returns true if CommitSig is absent.
|
||||
func (cs CommitSig) Absent() bool {
|
||||
return cs.BlockIDFlag == BlockIDFlagAbsent
|
||||
}
|
||||
|
||||
// CommitSig returns a string representation of CommitSig.
|
||||
//
|
||||
// 1. first 6 bytes of signature
|
||||
@@ -730,7 +710,6 @@ func (cs *CommitSig) ToProto() *tmproto.CommitSig {
|
||||
// FromProto sets a protobuf CommitSig to the given pointer.
|
||||
// It returns an error if the CommitSig is invalid.
|
||||
func (cs *CommitSig) FromProto(csp tmproto.CommitSig) error {
|
||||
|
||||
cs.BlockIDFlag = BlockIDFlag(csp.BlockIdFlag)
|
||||
cs.ValidatorAddress = csp.ValidatorAddress
|
||||
cs.Timestamp = csp.Timestamp
|
||||
@@ -741,6 +720,99 @@ func (cs *CommitSig) FromProto(csp tmproto.CommitSig) error {
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// ExtendedCommitSig contains a commit signature along with its corresponding
|
||||
// vote extension and vote extension signature.
|
||||
type ExtendedCommitSig struct {
|
||||
CommitSig // Commit signature
|
||||
Extension []byte // Vote extension
|
||||
ExtensionSignature []byte // Vote extension signature
|
||||
}
|
||||
|
||||
// NewExtendedCommitSigAbsent returns new ExtendedCommitSig with
|
||||
// BlockIDFlagAbsent. Other fields are all empty.
|
||||
func NewExtendedCommitSigAbsent() ExtendedCommitSig {
|
||||
return ExtendedCommitSig{CommitSig: NewCommitSigAbsent()}
|
||||
}
|
||||
|
||||
// String returns a string representation of an ExtendedCommitSig.
|
||||
//
|
||||
// 1. commit sig
|
||||
// 2. first 6 bytes of vote extension
|
||||
// 3. first 6 bytes of vote extension signature
|
||||
func (ecs ExtendedCommitSig) String() string {
|
||||
return fmt.Sprintf("ExtendedCommitSig{%s with %X %X}",
|
||||
ecs.CommitSig,
|
||||
tmbytes.Fingerprint(ecs.Extension),
|
||||
tmbytes.Fingerprint(ecs.ExtensionSignature),
|
||||
)
|
||||
}
|
||||
|
||||
// ValidateBasic checks whether the structure is well-formed.
|
||||
func (ecs ExtendedCommitSig) ValidateBasic() error {
|
||||
if err := ecs.CommitSig.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ecs.BlockIDFlag == BlockIDFlagCommit {
|
||||
if len(ecs.Extension) > MaxVoteExtensionSize {
|
||||
return fmt.Errorf("vote extension is too big (max: %d)", MaxVoteExtensionSize)
|
||||
}
|
||||
if len(ecs.ExtensionSignature) > MaxSignatureSize {
|
||||
return fmt.Errorf("vote extension signature is too big (max: %d)", MaxSignatureSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// We expect there to not be any vote extension or vote extension signature
|
||||
// on nil or absent votes.
|
||||
if len(ecs.Extension) != 0 {
|
||||
return fmt.Errorf("vote extension is present for commit sig with block ID flag %v", ecs.BlockIDFlag)
|
||||
}
|
||||
if len(ecs.ExtensionSignature) != 0 {
|
||||
return fmt.Errorf("vote extension signature is present for commit sig with block ID flag %v", ecs.BlockIDFlag)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ecs ExtendedCommitSig) ValidateExtension() error {
|
||||
if len(ecs.ExtensionSignature) == 0 {
|
||||
return errors.New("vote extension signature is missing")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToProto converts the ExtendedCommitSig to its Protobuf representation.
|
||||
func (ecs *ExtendedCommitSig) ToProto() *tmproto.ExtendedCommitSig {
|
||||
if ecs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &tmproto.ExtendedCommitSig{
|
||||
BlockIdFlag: tmproto.BlockIDFlag(ecs.BlockIDFlag),
|
||||
ValidatorAddress: ecs.ValidatorAddress,
|
||||
Timestamp: ecs.Timestamp,
|
||||
Signature: ecs.Signature,
|
||||
Extension: ecs.Extension,
|
||||
ExtensionSignature: ecs.ExtensionSignature,
|
||||
}
|
||||
}
|
||||
|
||||
// FromProto populates the ExtendedCommitSig with values from the given
|
||||
// Protobuf representation. Returns an error if the ExtendedCommitSig is
|
||||
// invalid.
|
||||
func (ecs *ExtendedCommitSig) FromProto(ecsp tmproto.ExtendedCommitSig) error {
|
||||
ecs.BlockIDFlag = BlockIDFlag(ecsp.BlockIdFlag)
|
||||
ecs.ValidatorAddress = ecsp.ValidatorAddress
|
||||
ecs.Timestamp = ecsp.Timestamp
|
||||
ecs.Signature = ecsp.Signature
|
||||
ecs.Extension = ecsp.Extension
|
||||
ecs.ExtensionSignature = ecsp.ExtensionSignature
|
||||
|
||||
return ecs.ValidateBasic()
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// Commit contains the evidence that a block was committed by a set of validators.
|
||||
// NOTE: Commit is empty for height 1, but never nil.
|
||||
type Commit struct {
|
||||
@@ -756,42 +828,12 @@ type Commit struct {
|
||||
// Memoized in first call to corresponding method.
|
||||
// NOTE: can't memoize in constructor because constructor isn't used for
|
||||
// unmarshaling.
|
||||
hash tmbytes.HexBytes
|
||||
bitArray *bits.BitArray
|
||||
hash tmbytes.HexBytes
|
||||
}
|
||||
|
||||
// NewCommit returns a new Commit.
|
||||
func NewCommit(height int64, round int32, blockID BlockID, commitSigs []CommitSig) *Commit {
|
||||
return &Commit{
|
||||
Height: height,
|
||||
Round: round,
|
||||
BlockID: blockID,
|
||||
Signatures: commitSigs,
|
||||
}
|
||||
}
|
||||
|
||||
// CommitToVoteSet constructs a VoteSet from the Commit and validator set.
|
||||
// Panics if signatures from the commit can't be added to the voteset.
|
||||
// Inverse of VoteSet.MakeCommit().
|
||||
func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSet {
|
||||
voteSet := NewVoteSet(chainID, commit.Height, commit.Round, tmproto.PrecommitType, vals)
|
||||
for idx, commitSig := range commit.Signatures {
|
||||
if commitSig.Absent() {
|
||||
continue // OK, some precommits can be missing.
|
||||
}
|
||||
vote := commit.GetVote(int32(idx))
|
||||
if err := vote.ValidateBasic(); err != nil {
|
||||
panic(fmt.Errorf("failed to validate vote reconstructed from LastCommit: %w", err))
|
||||
}
|
||||
added, err := voteSet.AddVote(vote)
|
||||
if !added || err != nil {
|
||||
panic(fmt.Errorf("failed to reconstruct LastCommit: %w", err))
|
||||
}
|
||||
}
|
||||
return voteSet
|
||||
}
|
||||
|
||||
// GetVote converts the CommitSig for the given valIdx to a Vote.
|
||||
// GetVote converts the CommitSig for the given valIdx to a Vote. Commits do
|
||||
// not contain vote extensions, so the vote extension and vote extension
|
||||
// signature will not be present in the returned vote.
|
||||
// Returns nil if the precommit at valIdx is nil.
|
||||
// Panics if valIdx >= commit.Size().
|
||||
func (commit *Commit) GetVote(valIdx int32) *Vote {
|
||||
@@ -822,26 +864,7 @@ func (commit *Commit) VoteSignBytes(chainID string, valIdx int32) []byte {
|
||||
return VoteSignBytes(chainID, v)
|
||||
}
|
||||
|
||||
// Type returns the vote type of the commit, which is always VoteTypePrecommit
|
||||
// Implements VoteSetReader.
|
||||
func (commit *Commit) Type() byte {
|
||||
return byte(tmproto.PrecommitType)
|
||||
}
|
||||
|
||||
// GetHeight returns height of the commit.
|
||||
// Implements VoteSetReader.
|
||||
func (commit *Commit) GetHeight() int64 {
|
||||
return commit.Height
|
||||
}
|
||||
|
||||
// GetRound returns height of the commit.
|
||||
// Implements VoteSetReader.
|
||||
func (commit *Commit) GetRound() int32 {
|
||||
return commit.Round
|
||||
}
|
||||
|
||||
// Size returns the number of signatures in the commit.
|
||||
// Implements VoteSetReader.
|
||||
func (commit *Commit) Size() int {
|
||||
if commit == nil {
|
||||
return 0
|
||||
@@ -849,33 +872,6 @@ func (commit *Commit) Size() int {
|
||||
return len(commit.Signatures)
|
||||
}
|
||||
|
||||
// BitArray returns a BitArray of which validators voted for BlockID or nil in this commit.
|
||||
// Implements VoteSetReader.
|
||||
func (commit *Commit) BitArray() *bits.BitArray {
|
||||
if commit.bitArray == nil {
|
||||
commit.bitArray = bits.NewBitArray(len(commit.Signatures))
|
||||
for i, commitSig := range commit.Signatures {
|
||||
// TODO: need to check the BlockID otherwise we could be counting conflicts,
|
||||
// not just the one with +2/3 !
|
||||
commit.bitArray.SetIndex(i, !commitSig.Absent())
|
||||
}
|
||||
}
|
||||
return commit.bitArray
|
||||
}
|
||||
|
||||
// GetByIndex returns the vote corresponding to a given validator index.
|
||||
// Panics if `index >= commit.Size()`.
|
||||
// Implements VoteSetReader.
|
||||
func (commit *Commit) GetByIndex(valIdx int32) *Vote {
|
||||
return commit.GetVote(valIdx)
|
||||
}
|
||||
|
||||
// IsCommit returns true if there is at least one signature.
|
||||
// Implements VoteSetReader.
|
||||
func (commit *Commit) IsCommit() bool {
|
||||
return len(commit.Signatures) != 0
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation that doesn't involve state data.
|
||||
// Does not actually check the cryptographic signatures.
|
||||
func (commit *Commit) ValidateBasic() error {
|
||||
@@ -999,7 +995,246 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) {
|
||||
return commit, commit.ValidateBasic()
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
//-------------------------------------
|
||||
|
||||
// ExtendedCommit is similar to Commit, except that its signatures also retain
|
||||
// their corresponding vote extensions and vote extension signatures.
|
||||
type ExtendedCommit struct {
|
||||
Height int64
|
||||
Round int32
|
||||
BlockID BlockID
|
||||
ExtendedSignatures []ExtendedCommitSig
|
||||
|
||||
bitArray *bits.BitArray
|
||||
}
|
||||
|
||||
// Clone creates a deep copy of this extended commit.
|
||||
func (ec *ExtendedCommit) Clone() *ExtendedCommit {
|
||||
sigs := make([]ExtendedCommitSig, len(ec.ExtendedSignatures))
|
||||
copy(sigs, ec.ExtendedSignatures)
|
||||
ecc := *ec
|
||||
ecc.ExtendedSignatures = sigs
|
||||
return &ecc
|
||||
}
|
||||
|
||||
// ToVoteSet constructs a VoteSet from the Commit and validator set.
|
||||
// Panics if signatures from the commit can't be added to the voteset.
|
||||
// Inverse of VoteSet.MakeExtendedCommit().
|
||||
func (ec *ExtendedCommit) ToVoteSet(chainID string, vals *ValidatorSet, requireExtensions bool) *VoteSet {
|
||||
voteSet := NewVoteSet(chainID, ec.Height, ec.Round, tmproto.PrecommitType, vals, requireExtensions)
|
||||
for idx, ecs := range ec.ExtendedSignatures {
|
||||
if ecs.BlockIDFlag == BlockIDFlagAbsent {
|
||||
continue // OK, some precommits can be missing.
|
||||
}
|
||||
vote := ec.GetExtendedVote(int32(idx))
|
||||
if err := vote.ValidateBasic(); err != nil {
|
||||
panic(fmt.Errorf("failed to validate vote reconstructed from LastCommit: %w", err))
|
||||
}
|
||||
added, err := voteSet.AddVote(vote)
|
||||
if !added || err != nil {
|
||||
panic(fmt.Errorf("failed to reconstruct vote set from extended commit: %w", err))
|
||||
}
|
||||
}
|
||||
return voteSet
|
||||
}
|
||||
|
||||
// ToVoteSet constructs a VoteSet from the Commit and validator set.
|
||||
// Panics if signatures from the commit can't be added to the voteset.
|
||||
// Inverse of VoteSet.MakeCommit().
|
||||
func (c *Commit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet {
|
||||
voteSet := NewVoteSet(chainID, c.Height, c.Round, tmproto.PrecommitType, vals, false)
|
||||
for idx, cs := range c.Signatures {
|
||||
if cs.BlockIDFlag == BlockIDFlagAbsent {
|
||||
continue // OK, some precommits can be missing.
|
||||
}
|
||||
vote := c.GetVote(int32(idx))
|
||||
if err := vote.ValidateBasic(); err != nil {
|
||||
panic(fmt.Errorf("failed to validate vote reconstructed from commit: %w", err))
|
||||
}
|
||||
added, err := voteSet.AddVote(vote)
|
||||
if !added || err != nil {
|
||||
panic(fmt.Errorf("failed to reconstruct vote set from commit: %w", err))
|
||||
}
|
||||
}
|
||||
return voteSet
|
||||
}
|
||||
|
||||
// TODO Comment
|
||||
// this should probably also verify the signature
|
||||
// probably want to change to just verify when present.
|
||||
func (ec *ExtendedCommit) EnsureExtensions() error {
|
||||
for idx, ecs := range ec.ExtendedSignatures {
|
||||
if ecs.BlockIDFlag == BlockIDFlagAbsent {
|
||||
continue
|
||||
}
|
||||
vote := ec.GetExtendedVote(int32(idx))
|
||||
if err := vote.EnsureExtension(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StripExtensions converts an ExtendedCommit to a Commit by removing all vote
|
||||
// extension-related fields.
|
||||
func (ec *ExtendedCommit) StripExtensions() *Commit {
|
||||
cs := make([]CommitSig, len(ec.ExtendedSignatures))
|
||||
for idx, ecs := range ec.ExtendedSignatures {
|
||||
cs[idx] = ecs.CommitSig
|
||||
}
|
||||
return &Commit{
|
||||
Height: ec.Height,
|
||||
Round: ec.Round,
|
||||
BlockID: ec.BlockID,
|
||||
Signatures: cs,
|
||||
}
|
||||
}
|
||||
|
||||
// GetExtendedVote converts the ExtendedCommitSig for the given validator
|
||||
// index to a Vote with a vote extensions.
|
||||
// It panics if valIndex is out of range.
|
||||
func (ec *ExtendedCommit) GetExtendedVote(valIndex int32) *Vote {
|
||||
ecs := ec.ExtendedSignatures[valIndex]
|
||||
return &Vote{
|
||||
Type: tmproto.PrecommitType,
|
||||
Height: ec.Height,
|
||||
Round: ec.Round,
|
||||
BlockID: ecs.BlockID(ec.BlockID),
|
||||
Timestamp: ecs.Timestamp,
|
||||
ValidatorAddress: ecs.ValidatorAddress,
|
||||
ValidatorIndex: valIndex,
|
||||
Signature: ecs.Signature,
|
||||
Extension: ecs.Extension,
|
||||
ExtensionSignature: ecs.ExtensionSignature,
|
||||
}
|
||||
}
|
||||
|
||||
// Type returns the vote type of the extended commit, which is always
|
||||
// VoteTypePrecommit
|
||||
// Implements VoteSetReader.
|
||||
func (ec *ExtendedCommit) Type() byte { return byte(tmproto.PrecommitType) }
|
||||
|
||||
// GetHeight returns height of the extended commit.
|
||||
// Implements VoteSetReader.
|
||||
func (ec *ExtendedCommit) GetHeight() int64 { return ec.Height }
|
||||
|
||||
// GetRound returns height of the extended commit.
|
||||
// Implements VoteSetReader.
|
||||
func (ec *ExtendedCommit) GetRound() int32 { return ec.Round }
|
||||
|
||||
// Size returns the number of signatures in the extended commit.
|
||||
// Implements VoteSetReader.
|
||||
func (ec *ExtendedCommit) Size() int {
|
||||
if ec == nil {
|
||||
return 0
|
||||
}
|
||||
return len(ec.ExtendedSignatures)
|
||||
}
|
||||
|
||||
// BitArray returns a BitArray of which validators voted for BlockID or nil in
|
||||
// this extended commit.
|
||||
// Implements VoteSetReader.
|
||||
func (ec *ExtendedCommit) BitArray() *bits.BitArray {
|
||||
if ec.bitArray == nil {
|
||||
ec.bitArray = bits.NewBitArray(len(ec.ExtendedSignatures))
|
||||
for i, extCommitSig := range ec.ExtendedSignatures {
|
||||
// TODO: need to check the BlockID otherwise we could be counting conflicts,
|
||||
// not just the one with +2/3 !
|
||||
ec.bitArray.SetIndex(i, extCommitSig.BlockIDFlag != BlockIDFlagAbsent)
|
||||
}
|
||||
}
|
||||
return ec.bitArray
|
||||
}
|
||||
|
||||
// GetByIndex returns the vote corresponding to a given validator index.
|
||||
// Panics if `index >= extCommit.Size()`.
|
||||
// Implements VoteSetReader.
|
||||
func (ec *ExtendedCommit) GetByIndex(valIdx int32) *Vote {
|
||||
return ec.GetExtendedVote(valIdx)
|
||||
}
|
||||
|
||||
// IsCommit returns true if there is at least one signature.
|
||||
// Implements VoteSetReader.
|
||||
func (ec *ExtendedCommit) IsCommit() bool {
|
||||
return len(ec.ExtendedSignatures) != 0
|
||||
}
|
||||
|
||||
// ValidateBasic checks whether the extended commit is well-formed. Does not
|
||||
// actually check the cryptographic signatures.
|
||||
func (ec *ExtendedCommit) ValidateBasic() error {
|
||||
if ec.Height < 0 {
|
||||
return errors.New("negative Height")
|
||||
}
|
||||
if ec.Round < 0 {
|
||||
return errors.New("negative Round")
|
||||
}
|
||||
|
||||
if ec.Height >= 1 {
|
||||
if ec.BlockID.IsNil() {
|
||||
return errors.New("commit cannot be for nil block")
|
||||
}
|
||||
|
||||
if len(ec.ExtendedSignatures) == 0 {
|
||||
return errors.New("no signatures in commit")
|
||||
}
|
||||
for i, extCommitSig := range ec.ExtendedSignatures {
|
||||
if err := extCommitSig.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("wrong ExtendedCommitSig #%d: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToProto converts ExtendedCommit to protobuf
|
||||
func (ec *ExtendedCommit) ToProto() *tmproto.ExtendedCommit {
|
||||
if ec == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
c := new(tmproto.ExtendedCommit)
|
||||
sigs := make([]tmproto.ExtendedCommitSig, len(ec.ExtendedSignatures))
|
||||
for i := range ec.ExtendedSignatures {
|
||||
sigs[i] = *ec.ExtendedSignatures[i].ToProto()
|
||||
}
|
||||
c.ExtendedSignatures = sigs
|
||||
|
||||
c.Height = ec.Height
|
||||
c.Round = ec.Round
|
||||
c.BlockID = ec.BlockID.ToProto()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// ExtendedCommitFromProto constructs an ExtendedCommit from the given Protobuf
|
||||
// representation. It returns an error if the extended commit is invalid.
|
||||
func ExtendedCommitFromProto(ecp *tmproto.ExtendedCommit) (*ExtendedCommit, error) {
|
||||
if ecp == nil {
|
||||
return nil, errors.New("nil ExtendedCommit")
|
||||
}
|
||||
|
||||
extCommit := new(ExtendedCommit)
|
||||
|
||||
bi, err := BlockIDFromProto(&ecp.BlockID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sigs := make([]ExtendedCommitSig, len(ecp.ExtendedSignatures))
|
||||
for i := range ecp.ExtendedSignatures {
|
||||
if err := sigs[i].FromProto(ecp.ExtendedSignatures[i]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
extCommit.ExtendedSignatures = sigs
|
||||
extCommit.Height = ecp.Height
|
||||
extCommit.Round = ecp.Round
|
||||
extCommit.BlockID = *bi
|
||||
|
||||
return extCommit, extCommit.ValidateBasic()
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// Data contains the set of transactions included in the block
|
||||
type Data struct {
|
||||
@@ -1170,3 +1405,9 @@ func BlockIDFromProto(bID *tmproto.BlockID) (*BlockID, error) {
|
||||
|
||||
return blockID, blockID.ValidateBasic()
|
||||
}
|
||||
|
||||
// ProtoBlockIDIsNil is similar to the IsNil function on BlockID, but for the
|
||||
// Protobuf representation.
|
||||
func ProtoBlockIDIsNil(bID *tmproto.BlockID) bool {
|
||||
return len(bID.Hash) == 0 && ProtoPartSetHeaderIsZero(&bID.PartSetHeader)
|
||||
}
|
||||
|
||||
@@ -42,14 +42,14 @@ func TestBlockAddEvidence(t *testing.T) {
|
||||
h := int64(3)
|
||||
|
||||
voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
|
||||
ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain")
|
||||
require.NoError(t, err)
|
||||
evList := []Evidence{ev}
|
||||
|
||||
block := MakeBlock(h, txs, commit, evList)
|
||||
block := MakeBlock(h, txs, extCommit.StripExtensions(), evList)
|
||||
require.NotNil(t, block)
|
||||
require.Equal(t, 1, len(block.Evidence))
|
||||
require.NotNil(t, block.EvidenceHash)
|
||||
@@ -66,9 +66,9 @@ func TestBlockValidateBasic(t *testing.T) {
|
||||
h := int64(3)
|
||||
|
||||
voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
|
||||
extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain")
|
||||
require.NoError(t, err)
|
||||
@@ -104,7 +104,10 @@ func TestBlockValidateBasic(t *testing.T) {
|
||||
blk.LastCommit = nil
|
||||
}, true},
|
||||
{"Invalid LastCommit", func(blk *Block) {
|
||||
blk.LastCommit = NewCommit(-1, 0, *voteSet.maj23, nil)
|
||||
blk.LastCommit = &Commit{
|
||||
Height: -1,
|
||||
BlockID: *voteSet.maj23,
|
||||
}
|
||||
}, true},
|
||||
{"Invalid Evidence", func(blk *Block) {
|
||||
emptyEv := &DuplicateVoteEvidence{}
|
||||
@@ -153,15 +156,14 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) {
|
||||
h := int64(3)
|
||||
|
||||
voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
|
||||
extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
|
||||
ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain")
|
||||
require.NoError(t, err)
|
||||
evList := []Evidence{ev}
|
||||
|
||||
partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(512)
|
||||
partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.StripExtensions(), evList).MakePartSet(512)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, partSet)
|
||||
@@ -178,14 +180,14 @@ func TestBlockHashesTo(t *testing.T) {
|
||||
h := int64(3)
|
||||
|
||||
voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
|
||||
ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain")
|
||||
require.NoError(t, err)
|
||||
evList := []Evidence{ev}
|
||||
|
||||
block := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList)
|
||||
block := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.StripExtensions(), evList)
|
||||
block.ValidatorsHash = valSet.Hash()
|
||||
assert.False(t, block.HashesTo([]byte{}))
|
||||
assert.False(t, block.HashesTo([]byte("something else")))
|
||||
@@ -260,7 +262,7 @@ func TestCommit(t *testing.T) {
|
||||
lastID := makeBlockIDRandom()
|
||||
h := int64(3)
|
||||
voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
commit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, h-1, commit.Height)
|
||||
@@ -273,7 +275,7 @@ func TestCommit(t *testing.T) {
|
||||
require.NotNil(t, commit.BitArray())
|
||||
assert.Equal(t, bits.NewBitArray(10).Size(), commit.BitArray().Size())
|
||||
|
||||
assert.Equal(t, voteWithoutExtension(voteSet.GetByIndex(0)), commit.GetByIndex(0))
|
||||
assert.Equal(t, voteSet.GetByIndex(0), commit.GetByIndex(0))
|
||||
assert.True(t, commit.IsCommit())
|
||||
}
|
||||
|
||||
@@ -477,11 +479,11 @@ func randCommit(ctx context.Context, t *testing.T, now time.Time) *Commit {
|
||||
lastID := makeBlockIDRandom()
|
||||
h := int64(3)
|
||||
voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, now)
|
||||
commit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, now)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
return commit
|
||||
return commit.StripExtensions()
|
||||
}
|
||||
|
||||
func hexBytesFromString(t *testing.T, s string) bytes.HexBytes {
|
||||
@@ -554,34 +556,127 @@ func TestBlockMaxDataBytesNoEvidence(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitToVoteSet(t *testing.T) {
|
||||
lastID := makeBlockIDRandom()
|
||||
h := int64(3)
|
||||
// TestVoteSetToExtendedCommit tests that the extended commit produced from a
|
||||
// vote set contains the same vote information as the vote set. The test ensures
|
||||
// that the MakeExtendedCommit method behaves as expected, whether vote extensions
|
||||
// are present in the original votes or not.
|
||||
func TestVoteSetToExtendedCommit(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
includeExtension bool
|
||||
}{
|
||||
{
|
||||
name: "no extensions",
|
||||
includeExtension: false,
|
||||
},
|
||||
{
|
||||
name: "with extensions",
|
||||
includeExtension: true,
|
||||
},
|
||||
} {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
blockID := makeBlockIDRandom()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
voteSet, _, vals := randVoteSet(ctx, t, 3, 1, tmproto.PrecommitType, 10, 1)
|
||||
for i := 0; i < len(vals); i++ {
|
||||
pubKey, err := vals[i].GetPubKey(ctx)
|
||||
require.NoError(t, err)
|
||||
vote := &Vote{
|
||||
ValidatorAddress: pubKey.Address(),
|
||||
ValidatorIndex: int32(i),
|
||||
Height: 3,
|
||||
Round: 1,
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: blockID,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
v := vote.ToProto()
|
||||
err = vals[i].SignVote(ctx, voteSet.ChainID(), v)
|
||||
require.NoError(t, err)
|
||||
vote.Signature = v.Signature
|
||||
if testCase.includeExtension {
|
||||
vote.ExtensionSignature = v.ExtensionSignature
|
||||
}
|
||||
added, err := voteSet.AddVote(vote)
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
}
|
||||
ec := voteSet.MakeExtendedCommit()
|
||||
|
||||
assert.NoError(t, err)
|
||||
for i := int32(0); int(i) < len(vals); i++ {
|
||||
vote1 := voteSet.GetByIndex(i)
|
||||
vote2 := ec.GetExtendedVote(i)
|
||||
|
||||
chainID := voteSet.ChainID()
|
||||
voteSet2 := CommitToVoteSet(chainID, commit, valSet)
|
||||
vote1bz, err := vote1.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
vote2bz, err := vote2.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, vote1bz, vote2bz)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for i := int32(0); int(i) < len(vals); i++ {
|
||||
vote1 := voteWithoutExtension(voteSet.GetByIndex(i))
|
||||
vote2 := voteSet2.GetByIndex(i)
|
||||
vote3 := commit.GetVote(i)
|
||||
// TestExtendedCommitToVoteSet tests that the vote set produced from an extended commit
|
||||
// contains the same vote information as the extended commit. The test ensures
|
||||
// that the ToVoteSet method behaves as expected, whether vote extensions
|
||||
// are present in the original votes or not.
|
||||
func TestExtendedCommitToVoteSet(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
includeExtension bool
|
||||
}{
|
||||
{
|
||||
name: "no extensions",
|
||||
includeExtension: false,
|
||||
},
|
||||
{
|
||||
name: "with extensions",
|
||||
includeExtension: true,
|
||||
},
|
||||
} {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
lastID := makeBlockIDRandom()
|
||||
h := int64(3)
|
||||
|
||||
vote1bz, err := vote1.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
vote2bz, err := vote2.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
vote3bz, err := vote3.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, vote1bz, vote2bz)
|
||||
assert.Equal(t, vote1bz, vote3bz)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1)
|
||||
extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now())
|
||||
assert.NoError(t, err)
|
||||
|
||||
if !testCase.includeExtension {
|
||||
for i := 0; i < len(vals); i++ {
|
||||
v := voteSet.GetByIndex(int32(i))
|
||||
v.Extension = nil
|
||||
v.ExtensionSignature = nil
|
||||
extCommit.ExtendedSignatures[i].Extension = nil
|
||||
extCommit.ExtendedSignatures[i].ExtensionSignature = nil
|
||||
}
|
||||
}
|
||||
|
||||
chainID := voteSet.ChainID()
|
||||
voteSet2 := extCommit.ToVoteSet(chainID, valSet, true)
|
||||
|
||||
for i := int32(0); int(i) < len(vals); i++ {
|
||||
vote1 := voteSet.GetByIndex(i)
|
||||
vote2 := voteSet2.GetByIndex(i)
|
||||
vote3 := extCommit.GetExtendedVote(i)
|
||||
|
||||
vote1bz, err := vote1.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
vote2bz, err := vote2.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
vote3bz, err := vote3.ToProto().Marshal()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, vote1bz, vote2bz)
|
||||
assert.Equal(t, vote1bz, vote3bz)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -634,12 +729,12 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
if tc.valid {
|
||||
commit := voteSet.MakeCommit() // panics without > 2/3 valid votes
|
||||
assert.NotNil(t, commit)
|
||||
err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, commit)
|
||||
extCommit := voteSet.MakeExtendedCommit() // panics without > 2/3 valid votes
|
||||
assert.NotNil(t, extCommit)
|
||||
err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, extCommit.StripExtensions())
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
assert.Panics(t, func() { voteSet.MakeCommit() })
|
||||
assert.Panics(t, func() { voteSet.MakeExtendedCommit() })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -309,7 +309,7 @@ func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *Validator
|
||||
// validators who are in the commonVals and voted for the lunatic header
|
||||
if l.ConflictingHeaderIsInvalid(trusted.Header) {
|
||||
for _, commitSig := range l.ConflictingBlock.Commit.Signatures {
|
||||
if !commitSig.ForBlock() {
|
||||
if commitSig.BlockIDFlag != BlockIDFlagCommit {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -329,12 +329,12 @@ func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *Validator
|
||||
// only need a single loop to find the validators that voted twice.
|
||||
for i := 0; i < len(l.ConflictingBlock.Commit.Signatures); i++ {
|
||||
sigA := l.ConflictingBlock.Commit.Signatures[i]
|
||||
if !sigA.ForBlock() {
|
||||
if sigA.BlockIDFlag != BlockIDFlagCommit {
|
||||
continue
|
||||
}
|
||||
|
||||
sigB := trusted.Commit.Signatures[i]
|
||||
if !sigB.ForBlock() {
|
||||
if sigB.BlockIDFlag != BlockIDFlagCommit {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -153,8 +153,10 @@ func TestLightClientAttackEvidenceBasic(t *testing.T) {
|
||||
header := makeHeaderRandom()
|
||||
header.Height = height
|
||||
blockID := makeBlockID(crypto.Checksum([]byte("blockhash")), math.MaxInt32, crypto.Checksum([]byte("partshash")))
|
||||
commit, err := makeCommit(ctx, blockID, height, 1, voteSet, privVals, defaultVoteTime)
|
||||
extCommit, err := makeExtCommit(ctx, blockID, height, 1, voteSet, privVals, defaultVoteTime)
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
lcae := &LightClientAttackEvidence{
|
||||
ConflictingBlock: &LightBlock{
|
||||
SignedHeader: &SignedHeader{
|
||||
@@ -217,8 +219,10 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) {
|
||||
header.Height = height
|
||||
header.ValidatorsHash = valSet.Hash()
|
||||
blockID := makeBlockID(header.Hash(), math.MaxInt32, crypto.Checksum([]byte("partshash")))
|
||||
commit, err := makeCommit(ctx, blockID, height, 1, voteSet, privVals, time.Now())
|
||||
extCommit, err := makeExtCommit(ctx, blockID, height, 1, voteSet, privVals, time.Now())
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
lcae := &LightClientAttackEvidence{
|
||||
ConflictingBlock: &LightBlock{
|
||||
SignedHeader: &SignedHeader{
|
||||
@@ -424,13 +428,13 @@ func TestEvidenceVectors(t *testing.T) {
|
||||
ProposerAddress: []byte("2915b7b15f979e48ebc61774bb1d86ba3136b7eb"),
|
||||
}
|
||||
blockID3 := makeBlockID(header.Hash(), math.MaxInt32, crypto.Checksum([]byte("partshash")))
|
||||
commit, err := makeCommit(ctx, blockID3, height, 1, voteSet, privVals, defaultVoteTime)
|
||||
extCommit, err := makeExtCommit(ctx, blockID3, height, 1, voteSet, privVals, defaultVoteTime)
|
||||
require.NoError(t, err)
|
||||
lcae := &LightClientAttackEvidence{
|
||||
ConflictingBlock: &LightBlock{
|
||||
SignedHeader: &SignedHeader{
|
||||
Header: header,
|
||||
Commit: commit,
|
||||
Commit: extCommit.StripExtensions(),
|
||||
},
|
||||
ValidatorSet: valSet,
|
||||
},
|
||||
|
||||
@@ -43,6 +43,7 @@ type ConsensusParams struct {
|
||||
Version VersionParams `json:"version"`
|
||||
Synchrony SynchronyParams `json:"synchrony"`
|
||||
Timeout TimeoutParams `json:"timeout"`
|
||||
Vote VoteParams `json:"vote"`
|
||||
}
|
||||
|
||||
// HashedParams is a subset of ConsensusParams.
|
||||
@@ -96,6 +97,11 @@ type TimeoutParams struct {
|
||||
BypassCommitTimeout bool `json:"bypass_commit_timeout"`
|
||||
}
|
||||
|
||||
// VoteParams configure validity rules of the votes within Tendermint consensus.
|
||||
type VoteParams struct {
|
||||
ExtensionRequireHeight int64 `json:"extension_require_height"`
|
||||
}
|
||||
|
||||
// DefaultConsensusParams returns a default ConsensusParams.
|
||||
func DefaultConsensusParams() *ConsensusParams {
|
||||
return &ConsensusParams{
|
||||
@@ -105,6 +111,7 @@ func DefaultConsensusParams() *ConsensusParams {
|
||||
Version: DefaultVersionParams(),
|
||||
Synchrony: DefaultSynchronyParams(),
|
||||
Timeout: DefaultTimeoutParams(),
|
||||
Vote: DefaultVoteParams(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,6 +183,13 @@ func DefaultTimeoutParams() TimeoutParams {
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultVoteParams() VoteParams {
|
||||
return VoteParams{
|
||||
// When set to 0, vote extensions are not required.
|
||||
ExtensionRequireHeight: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// TimeoutParamsOrDefaults returns the SynchronyParams, filling in any zero values
|
||||
// with the Tendermint defined default values.
|
||||
func (t TimeoutParams) TimeoutParamsOrDefaults() TimeoutParams {
|
||||
|
||||
@@ -145,6 +145,12 @@ func PartSetHeaderFromProto(ppsh *tmproto.PartSetHeader) (*PartSetHeader, error)
|
||||
return psh, psh.ValidateBasic()
|
||||
}
|
||||
|
||||
// ProtoPartSetHeaderIsZero is similar to the IsZero function for
|
||||
// PartSetHeader, but for the Protobuf representation.
|
||||
func ProtoPartSetHeaderIsZero(ppsh *tmproto.PartSetHeader) bool {
|
||||
return ppsh.Total == 0 && len(ppsh.Hash) == 0
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
type PartSet struct {
|
||||
|
||||
@@ -90,7 +90,6 @@ func (pv MockPV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vot
|
||||
}
|
||||
|
||||
signBytes := VoteSignBytes(useChainID, vote)
|
||||
extSignBytes := VoteExtensionSignBytes(useChainID, vote)
|
||||
sig, err := pv.PrivKey.Sign(signBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -98,14 +97,15 @@ func (pv MockPV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vot
|
||||
vote.Signature = sig
|
||||
|
||||
var extSig []byte
|
||||
// We only sign vote extensions for precommits
|
||||
if vote.Type == tmproto.PrecommitType {
|
||||
// We only sign vote extensions for non-nil precommits
|
||||
if vote.Type == tmproto.PrecommitType && !ProtoBlockIDIsNil(&vote.BlockID) {
|
||||
extSignBytes := VoteExtensionSignBytes(useChainID, vote)
|
||||
extSig, err = pv.PrivKey.Sign(extSignBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if len(vote.Extension) > 0 {
|
||||
return errors.New("unexpected vote extension - vote extensions are only allowed in precommits")
|
||||
return errors.New("unexpected vote extension - vote extensions are only allowed in non-nil precommits")
|
||||
}
|
||||
vote.ExtensionSignature = extSig
|
||||
return nil
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
func makeCommit(ctx context.Context, blockID BlockID, height int64, round int32,
|
||||
voteSet *VoteSet, validators []PrivValidator, now time.Time) (*Commit, error) {
|
||||
func makeExtCommit(ctx context.Context, blockID BlockID, height int64, round int32,
|
||||
voteSet *VoteSet, validators []PrivValidator, now time.Time) (*ExtendedCommit, error) {
|
||||
|
||||
// all sign
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -33,7 +33,7 @@ func makeCommit(ctx context.Context, blockID BlockID, height int64, round int32,
|
||||
}
|
||||
}
|
||||
|
||||
return voteSet.MakeCommit(), nil
|
||||
return voteSet.MakeExtendedCommit(), nil
|
||||
}
|
||||
|
||||
func signAddVote(ctx context.Context, privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bool, err error) {
|
||||
@@ -46,13 +46,3 @@ func signAddVote(ctx context.Context, privVal PrivValidator, vote *Vote, voteSet
|
||||
vote.ExtensionSignature = v.ExtensionSignature
|
||||
return voteSet.AddVote(vote)
|
||||
}
|
||||
|
||||
// Votes constructed from commits don't have extensions, because we don't store
|
||||
// the extensions themselves in the commit. This method is used to construct a
|
||||
// copy of a vote, but nil its extension and signature.
|
||||
func voteWithoutExtension(v *Vote) *Vote {
|
||||
vc := v.Copy()
|
||||
vc.Extension = nil
|
||||
vc.ExtensionSignature = nil
|
||||
return vc
|
||||
}
|
||||
|
||||
@@ -36,10 +36,10 @@ func VerifyCommit(chainID string, vals *ValidatorSet, blockID BlockID,
|
||||
votingPowerNeeded := vals.TotalVotingPower() * 2 / 3
|
||||
|
||||
// ignore all absent signatures
|
||||
ignore := func(c CommitSig) bool { return c.Absent() }
|
||||
ignore := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagAbsent }
|
||||
|
||||
// only count the signatures that are for the block
|
||||
count := func(c CommitSig) bool { return c.ForBlock() }
|
||||
count := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagCommit }
|
||||
|
||||
// attempt to batch verify
|
||||
if shouldBatchVerify(vals, commit) {
|
||||
@@ -69,7 +69,7 @@ func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID,
|
||||
votingPowerNeeded := vals.TotalVotingPower() * 2 / 3
|
||||
|
||||
// ignore all commit signatures that are not for the block
|
||||
ignore := func(c CommitSig) bool { return !c.ForBlock() }
|
||||
ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit }
|
||||
|
||||
// count all the remaining signatures
|
||||
count := func(c CommitSig) bool { return true }
|
||||
@@ -113,7 +113,7 @@ func VerifyCommitLightTrusting(chainID string, vals *ValidatorSet, commit *Commi
|
||||
votingPowerNeeded := totalVotingPowerMulByNumerator / int64(trustLevel.Denominator)
|
||||
|
||||
// ignore all commit signatures that are not for the block
|
||||
ignore := func(c CommitSig) bool { return !c.ForBlock() }
|
||||
ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit }
|
||||
|
||||
// count all the remaining signatures
|
||||
count := func(c CommitSig) bool { return true }
|
||||
|
||||
@@ -99,7 +99,12 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) {
|
||||
|
||||
vi++
|
||||
}
|
||||
commit := NewCommit(tc.height, round, tc.blockID, sigs)
|
||||
commit := &Commit{
|
||||
Height: tc.height,
|
||||
Round: round,
|
||||
BlockID: tc.blockID,
|
||||
Signatures: sigs,
|
||||
}
|
||||
|
||||
err := valSet.VerifyCommit(chainID, blockID, height, commit)
|
||||
if tc.expErr {
|
||||
@@ -146,9 +151,10 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10)
|
||||
commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
|
||||
extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit))
|
||||
|
||||
// malleate 4th signature
|
||||
@@ -176,9 +182,10 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign
|
||||
defer cancel()
|
||||
|
||||
voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10)
|
||||
commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
|
||||
extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit))
|
||||
|
||||
// malleate 4th signature (3 signatures are enough for 2/3+)
|
||||
@@ -203,9 +210,10 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin
|
||||
defer cancel()
|
||||
|
||||
voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10)
|
||||
commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
|
||||
extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit))
|
||||
|
||||
// malleate 3rd signature (2 signatures are enough for 1/3+ trust level)
|
||||
@@ -227,10 +235,11 @@ func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) {
|
||||
var (
|
||||
blockID = makeBlockIDRandom()
|
||||
voteSet, originalValset, vals = randVoteSet(ctx, t, 1, 1, tmproto.PrecommitType, 6, 1)
|
||||
commit, err = makeCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now())
|
||||
extCommit, err = makeExtCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now())
|
||||
newValSet, _ = randValidatorPrivValSet(ctx, t, 2, 1)
|
||||
)
|
||||
require.NoError(t, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
testCases := []struct {
|
||||
valSet *ValidatorSet
|
||||
@@ -271,11 +280,11 @@ func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) {
|
||||
var (
|
||||
blockID = makeBlockIDRandom()
|
||||
voteSet, valSet, vals = randVoteSet(ctx, t, 1, 1, tmproto.PrecommitType, 1, MaxTotalVotingPower)
|
||||
commit, err = makeCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now())
|
||||
extCommit, err = makeExtCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now())
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = valSet.VerifyCommitLightTrusting("test_chain_id", commit,
|
||||
err = valSet.VerifyCommitLightTrusting("test_chain_id", extCommit.StripExtensions(),
|
||||
tmmath.Fraction{Numerator: 25, Denominator: 55})
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), "int64 overflow")
|
||||
|
||||
@@ -1539,8 +1539,9 @@ func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { // nolint
|
||||
// generate n validators
|
||||
voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5))
|
||||
// create a commit with n validators
|
||||
commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
require.NoError(b, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
for i := 0; i < b.N/n; i++ {
|
||||
err = valSet.VerifyCommit(chainID, blockID, h, commit)
|
||||
@@ -1567,8 +1568,9 @@ func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) { // nolint
|
||||
voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5))
|
||||
|
||||
// create a commit with n validators
|
||||
commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
require.NoError(b, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
for i := 0; i < b.N/n; i++ {
|
||||
err = valSet.VerifyCommitLight(chainID, blockID, h, commit)
|
||||
@@ -1594,8 +1596,9 @@ func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) {
|
||||
// generate n validators
|
||||
voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5))
|
||||
// create a commit with n validators
|
||||
commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now())
|
||||
require.NoError(b, err)
|
||||
commit := extCommit.StripExtensions()
|
||||
|
||||
for i := 0; i < b.N/n; i++ {
|
||||
err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3})
|
||||
|
||||
@@ -14,6 +14,9 @@ import (
|
||||
|
||||
const (
|
||||
nilVoteStr string = "nil-Vote"
|
||||
|
||||
// The maximum supported number of bytes in a vote extension.
|
||||
MaxVoteExtensionSize int = 1024 * 1024
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -24,7 +27,7 @@ var (
|
||||
ErrVoteInvalidBlockHash = errors.New("invalid block hash")
|
||||
ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature")
|
||||
ErrVoteNil = errors.New("nil vote")
|
||||
ErrVoteInvalidExtension = errors.New("invalid vote extension")
|
||||
ErrVoteExtensionAbsent = errors.New("vote extension absent")
|
||||
)
|
||||
|
||||
type ErrVoteConflictingVotes struct {
|
||||
@@ -109,6 +112,22 @@ func (vote *Vote) CommitSig() CommitSig {
|
||||
}
|
||||
}
|
||||
|
||||
// ExtendedCommitSig attempts to construct an ExtendedCommitSig from this vote.
|
||||
// Panics if either the vote extension signature is missing or if the block ID
|
||||
// is not either empty or complete.
|
||||
func (vote *Vote) ExtendedCommitSig() ExtendedCommitSig {
|
||||
if vote == nil {
|
||||
return NewExtendedCommitSigAbsent()
|
||||
}
|
||||
|
||||
cs := vote.CommitSig()
|
||||
return ExtendedCommitSig{
|
||||
CommitSig: cs,
|
||||
Extension: vote.Extension,
|
||||
ExtensionSignature: vote.ExtensionSignature,
|
||||
}
|
||||
}
|
||||
|
||||
// VoteSignBytes returns the proto-encoding of the canonicalized Vote, for
|
||||
// signing. Panics if the marshaling fails.
|
||||
//
|
||||
@@ -216,12 +235,10 @@ func (vote *Vote) VerifyWithExtension(chainID string, pubKey crypto.PubKey) erro
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// We only verify vote extension signatures for precommits.
|
||||
if vote.Type == tmproto.PrecommitType {
|
||||
// We only verify vote extension signatures for non-nil precommits.
|
||||
if vote.Type == tmproto.PrecommitType && !ProtoBlockIDIsNil(&v.BlockID) {
|
||||
extSignBytes := VoteExtensionSignBytes(chainID, v)
|
||||
// TODO: Remove extension signature nil check to enforce vote extension
|
||||
// signing once we resolve https://github.com/tendermint/tendermint/issues/8272
|
||||
if vote.ExtensionSignature != nil && !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
|
||||
if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
|
||||
return ErrVoteInvalidSignature
|
||||
}
|
||||
}
|
||||
@@ -273,8 +290,10 @@ func (vote *Vote) ValidateBasic() error {
|
||||
return fmt.Errorf("signature is too big (max: %d)", MaxSignatureSize)
|
||||
}
|
||||
|
||||
// We should only ever see vote extensions in precommits.
|
||||
if vote.Type != tmproto.PrecommitType {
|
||||
// We should only ever see vote extensions in non-nil precommits, otherwise
|
||||
// this is a violation of the specification.
|
||||
// https://github.com/tendermint/tendermint/issues/8487
|
||||
if vote.Type != tmproto.PrecommitType || (vote.Type == tmproto.PrecommitType && vote.BlockID.IsNil()) {
|
||||
if len(vote.Extension) > 0 {
|
||||
return errors.New("unexpected vote extension")
|
||||
}
|
||||
@@ -294,22 +313,24 @@ func (vote *Vote) ValidateWithExtension() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// We should always see vote extension signatures in precommits
|
||||
if vote.Type == tmproto.PrecommitType {
|
||||
// TODO(thane): Remove extension length check once
|
||||
// https://github.com/tendermint/tendermint/issues/8272 is
|
||||
// resolved.
|
||||
if len(vote.Extension) > 0 && len(vote.ExtensionSignature) == 0 {
|
||||
return errors.New("vote extension signature is missing")
|
||||
}
|
||||
if len(vote.ExtensionSignature) > MaxSignatureSize {
|
||||
return fmt.Errorf("vote extension signature is too big (max: %d)", MaxSignatureSize)
|
||||
}
|
||||
if err := vote.EnsureExtension(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//
|
||||
func (vote *Vote) EnsureExtension() error {
|
||||
// We should always see vote extension signatures in non-nil precommits
|
||||
if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() {
|
||||
if len(vote.ExtensionSignature) == 0 {
|
||||
return ErrVoteExtensionAbsent
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToProto converts the handwritten type to proto generated type
|
||||
// return type, nil if everything converts safely, otherwise nil, error
|
||||
func (vote *Vote) ToProto() *tmproto.Vote {
|
||||
|
||||
@@ -53,11 +53,12 @@ const (
|
||||
NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64.
|
||||
*/
|
||||
type VoteSet struct {
|
||||
chainID string
|
||||
height int64
|
||||
round int32
|
||||
signedMsgType tmproto.SignedMsgType
|
||||
valSet *ValidatorSet
|
||||
chainID string
|
||||
height int64
|
||||
round int32
|
||||
signedMsgType tmproto.SignedMsgType
|
||||
valSet *ValidatorSet
|
||||
requireExtensions bool
|
||||
|
||||
mtx sync.Mutex
|
||||
votesBitArray *bits.BitArray
|
||||
@@ -70,22 +71,23 @@ type VoteSet struct {
|
||||
|
||||
// Constructs a new VoteSet struct used to accumulate votes for given height/round.
|
||||
func NewVoteSet(chainID string, height int64, round int32,
|
||||
signedMsgType tmproto.SignedMsgType, valSet *ValidatorSet) *VoteSet {
|
||||
signedMsgType tmproto.SignedMsgType, valSet *ValidatorSet, requireExtensions bool) *VoteSet {
|
||||
if height == 0 {
|
||||
panic("Cannot make VoteSet for height == 0, doesn't make sense.")
|
||||
}
|
||||
return &VoteSet{
|
||||
chainID: chainID,
|
||||
height: height,
|
||||
round: round,
|
||||
signedMsgType: signedMsgType,
|
||||
valSet: valSet,
|
||||
votesBitArray: bits.NewBitArray(valSet.Size()),
|
||||
votes: make([]*Vote, valSet.Size()),
|
||||
sum: 0,
|
||||
maj23: nil,
|
||||
votesByBlock: make(map[string]*blockVotes, valSet.Size()),
|
||||
peerMaj23s: make(map[string]BlockID),
|
||||
chainID: chainID,
|
||||
height: height,
|
||||
round: round,
|
||||
signedMsgType: signedMsgType,
|
||||
valSet: valSet,
|
||||
requireExtensions: requireExtensions,
|
||||
votesBitArray: bits.NewBitArray(valSet.Size()),
|
||||
votes: make([]*Vote, valSet.Size()),
|
||||
sum: 0,
|
||||
maj23: nil,
|
||||
votesByBlock: make(map[string]*blockVotes, valSet.Size()),
|
||||
peerMaj23s: make(map[string]BlockID),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -194,8 +196,20 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
|
||||
}
|
||||
|
||||
// Check signature.
|
||||
if err := vote.VerifyWithExtension(voteSet.chainID, val.PubKey); err != nil {
|
||||
return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err)
|
||||
if voteSet.requireExtensions {
|
||||
if err := vote.VerifyWithExtension(voteSet.chainID, val.PubKey); err != nil {
|
||||
return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err)
|
||||
}
|
||||
} else {
|
||||
if len(vote.ExtensionSignature) != 0 {
|
||||
if err := vote.VerifyWithExtension(voteSet.chainID, val.PubKey); err != nil {
|
||||
return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err)
|
||||
}
|
||||
} else {
|
||||
if err := vote.Verify(voteSet.chainID, val.PubKey); err != nil {
|
||||
return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add vote and get conflicting vote if any.
|
||||
@@ -220,13 +234,6 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (voteSet *VoteSet) GetVotes() []*Vote {
|
||||
if voteSet == nil {
|
||||
return nil
|
||||
}
|
||||
return voteSet.votes
|
||||
}
|
||||
|
||||
// Assumes signature is valid.
|
||||
// If conflicting vote exists, returns it.
|
||||
func (voteSet *VoteSet) addVerifiedVote(
|
||||
@@ -606,36 +613,41 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) {
|
||||
//--------------------------------------------------------------------------------
|
||||
// Commit
|
||||
|
||||
// MakeCommit constructs a Commit from the VoteSet. It only includes precommits
|
||||
// for the block, which has 2/3+ majority, and nil.
|
||||
// MakeExtendedCommit constructs a Commit from the VoteSet. It only includes
|
||||
// precommits for the block, which has 2/3+ majority, and nil.
|
||||
//
|
||||
// Panics if the vote type is not PrecommitType or if there's no +2/3 votes for
|
||||
// a single block.
|
||||
func (voteSet *VoteSet) MakeCommit() *Commit {
|
||||
func (voteSet *VoteSet) MakeExtendedCommit() *ExtendedCommit {
|
||||
if voteSet.signedMsgType != tmproto.PrecommitType {
|
||||
panic("Cannot MakeCommit() unless VoteSet.Type is PrecommitType")
|
||||
panic("Cannot MakeExtendCommit() unless VoteSet.Type is PrecommitType")
|
||||
}
|
||||
voteSet.mtx.Lock()
|
||||
defer voteSet.mtx.Unlock()
|
||||
|
||||
// Make sure we have a 2/3 majority
|
||||
if voteSet.maj23 == nil {
|
||||
panic("Cannot MakeCommit() unless a blockhash has +2/3")
|
||||
panic("Cannot MakeExtendCommit() unless a blockhash has +2/3")
|
||||
}
|
||||
|
||||
// For every validator, get the precommit
|
||||
commitSigs := make([]CommitSig, len(voteSet.votes))
|
||||
// For every validator, get the precommit with extensions
|
||||
sigs := make([]ExtendedCommitSig, len(voteSet.votes))
|
||||
for i, v := range voteSet.votes {
|
||||
commitSig := v.CommitSig()
|
||||
sig := v.ExtendedCommitSig()
|
||||
// if block ID exists but doesn't match, exclude sig
|
||||
if commitSig.ForBlock() && !v.BlockID.Equals(*voteSet.maj23) {
|
||||
commitSig = NewCommitSigAbsent()
|
||||
if sig.BlockIDFlag == BlockIDFlagCommit && !v.BlockID.Equals(*voteSet.maj23) {
|
||||
sig = NewExtendedCommitSigAbsent()
|
||||
}
|
||||
|
||||
commitSigs[i] = commitSig
|
||||
sigs[i] = sig
|
||||
}
|
||||
|
||||
return NewCommit(voteSet.GetHeight(), voteSet.GetRound(), *voteSet.maj23, commitSigs)
|
||||
return &ExtendedCommit{
|
||||
Height: voteSet.GetHeight(),
|
||||
Round: voteSet.GetRound(),
|
||||
BlockID: *voteSet.maj23,
|
||||
ExtendedSignatures: sigs,
|
||||
}
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
@@ -450,7 +450,7 @@ func TestVoteSet_MakeCommit(t *testing.T) {
|
||||
}
|
||||
|
||||
// MakeCommit should fail.
|
||||
assert.Panics(t, func() { voteSet.MakeCommit() }, "Doesn't have +2/3 majority")
|
||||
assert.Panics(t, func() { voteSet.MakeExtendedCommit() }, "Doesn't have +2/3 majority")
|
||||
|
||||
// 7th voted for some other block.
|
||||
{
|
||||
@@ -487,13 +487,13 @@ func TestVoteSet_MakeCommit(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
commit := voteSet.MakeCommit()
|
||||
extCommit := voteSet.MakeExtendedCommit()
|
||||
|
||||
// Commit should have 10 elements
|
||||
assert.Equal(t, 10, len(commit.Signatures))
|
||||
assert.Equal(t, 10, len(extCommit.ExtendedSignatures))
|
||||
|
||||
// Ensure that Commit is good.
|
||||
if err := commit.ValidateBasic(); err != nil {
|
||||
if err := extCommit.ValidateBasic(); err != nil {
|
||||
t.Errorf("error in Commit.ValidateBasic(): %v", err)
|
||||
}
|
||||
}
|
||||
@@ -510,7 +510,7 @@ func randVoteSet(
|
||||
) (*VoteSet, *ValidatorSet, []PrivValidator) {
|
||||
t.Helper()
|
||||
valSet, privValidators := randValidatorPrivValSet(ctx, t, numValidators, votingPower)
|
||||
return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators
|
||||
return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet, false), valSet, privValidators
|
||||
}
|
||||
|
||||
func deterministicVoteSet(
|
||||
@@ -523,7 +523,7 @@ func deterministicVoteSet(
|
||||
) (*VoteSet, *ValidatorSet, []PrivValidator) {
|
||||
t.Helper()
|
||||
valSet, privValidators := deterministicValidatorSet(ctx, t)
|
||||
return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators
|
||||
return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet, false), valSet, privValidators
|
||||
}
|
||||
|
||||
func randValidatorPrivValSet(ctx context.Context, t testing.TB, numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) {
|
||||
|
||||
@@ -223,26 +223,22 @@ func TestVoteExtension(t *testing.T) {
|
||||
includeSignature: true,
|
||||
expectError: false,
|
||||
},
|
||||
// TODO(thane): Re-enable once
|
||||
// https://github.com/tendermint/tendermint/issues/8272 is resolved
|
||||
//{
|
||||
// name: "no extension signature",
|
||||
// extension: []byte("extension"),
|
||||
// includeSignature: false,
|
||||
// expectError: true,
|
||||
//},
|
||||
{
|
||||
name: "no extension signature",
|
||||
extension: []byte("extension"),
|
||||
includeSignature: false,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "empty extension",
|
||||
includeSignature: true,
|
||||
expectError: false,
|
||||
},
|
||||
// TODO: Re-enable once
|
||||
// https://github.com/tendermint/tendermint/issues/8272 is resolved.
|
||||
//{
|
||||
// name: "no extension and no signature",
|
||||
// includeSignature: false,
|
||||
// expectError: true,
|
||||
//},
|
||||
{
|
||||
name: "no extension and no signature",
|
||||
includeSignature: false,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -497,11 +493,11 @@ func getSampleCommit(ctx context.Context, t testing.TB) *Commit {
|
||||
|
||||
lastID := makeBlockIDRandom()
|
||||
voteSet, _, vals := randVoteSet(ctx, t, 2, 1, tmproto.PrecommitType, 10, 1)
|
||||
commit, err := makeCommit(ctx, lastID, 2, 1, voteSet, vals, time.Now())
|
||||
commit, err := makeExtCommit(ctx, lastID, 2, 1, voteSet, vals, time.Now())
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
return commit
|
||||
return commit.StripExtensions()
|
||||
}
|
||||
|
||||
func BenchmarkVoteSignBytes(b *testing.B) {
|
||||
|
||||
Reference in New Issue
Block a user