Compare commits


5 Commits

Author            SHA1        Message                        Date
William Banfield  78443687a6  uncomment envelope methods     2022-11-03 16:58:17 -04:00
William Banfield  f7218259c5  fix metric to be seconds       2022-11-03 16:03:08 -04:00
William Banfield  5fa49830e4  add send and receive timings   2022-11-03 15:53:57 -04:00
William Banfield  e91616c15f  add allocate in Receive calls  2022-11-03 15:20:41 -04:00
William Banfield  fbad107210  comment out new functions      2022-11-03 15:09:28 -04:00
34 changed files with 194 additions and 791 deletions

.github/CODEOWNERS vendored
View File

@@ -7,5 +7,5 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
* @ebuchman @tendermint/tendermint-engineering

View File

@@ -1,65 +0,0 @@
name: "Pre-release"
on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10
jobs:
prerelease:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: actions/setup-go@v3
with:
go-version: '1.18'
- name: Build
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
# Link to CHANGELOG_PENDING.md as release notes.
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md
- name: Release
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
prerelease-success:
needs: prerelease
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon pre-release
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
with:
payload: |
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
}
}
]
}

View File

@@ -3,10 +3,10 @@ name: "Release"
on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
jobs:
release:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -18,45 +18,12 @@ jobs:
with:
go-version: '1.18'
- name: Build
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- name: Release
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
release-success:
needs: release
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon release
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
with:
payload: |
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
}
}
]
}

View File

@@ -2,64 +2,6 @@
Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
## v0.34.24
*Nov 21, 2022*
Apart from one minor bug fix, this release aims to optimize the output of the
RPC (both HTTP and WebSocket endpoints). See our [upgrading
guidelines](./UPGRADING.md#v03424) for more details.
### IMPROVEMENTS
- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
useless whitespace in RPC output (@adizere, @thanethomson)
### BUG FIXES
- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
`Cache-Control` header response from `/check_tx` endpoint (@JayT106)
## v0.34.23
*Nov 9, 2022*
This release introduces some new Prometheus metrics to help in determining what
kinds of messages are consuming the most P2P bandwidth. This builds towards our
broader goal of optimizing Tendermint bandwidth consumption, and will give us
meaningful insights once we can establish these metrics for a number of chains.
We now also return `Cache-Control` headers for select RPC endpoints to help
facilitate caching.
Special thanks to external contributors on this release: @JayT106
### IMPROVEMENTS
- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
Envelope type and associated methods for sending and receiving Envelopes
instead of raw bytes. This also adds new metrics,
`tendermint_p2p_message_send_bytes_total` and
`tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
each message type have been sent.
- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
caching of RPC responses (@JayT106)
The following RPC endpoints will return `Cache-Control` headers with a maximum
age of 1 day (see the example after this list):
- `/abci_info`
- `/block`, if `height` is supplied
- `/block_by_hash`
- `/block_results`, if `height` is supplied
- `/blockchain`
- `/check_tx`
- `/commit`, if `height` is supplied
- `/consensus_params`, if `height` is supplied
- `/genesis`
- `/genesis_chunked`
- `/tx`
- `/validators`, if `height` is supplied
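As a quick check, an argument-free cacheable endpoint such as `/genesis` should respond with `Cache-Control: public, max-age=86400` (one day). A minimal Go sketch, assuming a local node with the RPC listener on the default `localhost:26657`:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Query a cacheable endpoint and print its Cache-Control header.
	resp, err := http.Get("http://localhost:26657/genesis")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Header.Get("Cache-Control")) // expect: public, max-age=86400
}
```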
## v0.34.22
This release includes several bug fixes, [one of

View File

@@ -1,6 +1,6 @@
# Unreleased Changes
## v0.34.25
## v0.34.23
### BREAKING CHANGES
@@ -17,6 +17,7 @@
### FEATURES
### IMPROVEMENTS
- [p2p] \#9641 Add new Envelope type and associated methods for sending and receiving Envelopes instead of raw bytes.
This also adds new metrics, `tendermint_p2p_message_send_bytes_total` and `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of each message type have been sent.
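
A minimal sketch of the Envelope-based send path, using the hypothetical helper `sendStatusRequest` (the peer, channel ID, and choice of `bcproto.StatusRequest` as the message are illustrative assumptions):

```go
package example

import (
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
)

// sendStatusRequest sends a StatusRequest over the given channel.
// SendEnvelopeShim uses the peer's SendEnvelope method when available
// (which records the per-message-type byte metrics) and falls back to
// the legacy byte-based Send otherwise.
func sendStatusRequest(peer p2p.Peer, chID byte, logger log.Logger) bool {
	return p2p.SendEnvelopeShim(peer, p2p.Envelope{
		ChannelID: chID,
		Message:   &bcproto.StatusRequest{},
	}, logger)
}
```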
### BUG FIXES

View File

@@ -1,9 +1,5 @@
# Tendermint
_UPDATE: TendermintCore featureset is frozen for LTS, see issue https://github.com/tendermint/tendermint/issues/9972_<br/>
_This is the latest stable release used by cosmoshub-4, version 0.34.24_<br/>
_The previous main branch (v0.38.xx) can now be found under "main_backup"_<br/>
![banner](docs/tendermint-core-image.jpg)
[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
@@ -46,15 +42,20 @@ since we are making breaking changes to the protocol and the APIs. See below for
more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help.
You can contact us [over email](mailto:hello@newtendermint.org) or [join the
chat](https://discord.gg/gnoland).
You can contact us [over email](mailto:hello@interchain.io) or [join the
chat](https://discord.gg/cosmosnetwork).
More on how releases are conducted can be found [here](./RELEASES.md).
## Security
To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're
looking for, see [our security policy](SECURITY.md).
We also maintain a dedicated mailing list for security updates. We will only
ever use this mailing list to notify you of vulnerabilities and fixes in
Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
## Minimum requirements
@@ -144,10 +145,14 @@ Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
## Join us!
The development of Tendermint Core was led primarily by All in Bits, Inc. The
Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
full-time on Tendermint2 or [gno.land](https://gno.land), [we're
hiring](mailto:hiring@newtendermint.org)!
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
If you'd like to work full-time on Tendermint Core,
[we're hiring](https://interchain-gmbh.breezy.hr/)!
Funding for Tendermint Core development comes primarily from the
[Interchain Foundation](https://interchain.io), a Swiss non-profit. The
Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the
for-profit entity that also maintains [tendermint.com](https://tendermint.com).
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
[smr]: https://en.wikipedia.org/wiki/State_machine_replication

View File

@@ -3,14 +3,6 @@
This guide provides instructions for upgrading to specific versions of
Tendermint Core.
## v0.34.24
Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
WebSocket RPC for performance and subscription stability reasons. We recommend
using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
output if you rely on that prettified output in some way.
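
jq is the simplest option on the command line; for client code, the same re-indentation can be done in Go with `json.Indent`. A minimal sketch, assuming a local node on the default RPC port:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Fetch the compact RPC output and pretty-print it client-side.
	resp, err := http.Get("http://localhost:26657/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	var pretty bytes.Buffer
	if err := json.Indent(&pretty, raw, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(pretty.String())
}
```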
## v0.34.20
### Feature: Priority Mempool

View File

@@ -7,7 +7,6 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -18,7 +17,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
@@ -194,25 +192,6 @@ func TestNoBlockResponse(t *testing.T) {
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
reactor := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 10).reactor
peer := p2p.CreateRandomPeer(false)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &bcproto.StatusRequest{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(BlockchainChannel, peer, msg)
})
}
// NOTE: This is too hard to test without
// an easy way to add test peer to switch
// or without significant refactoring of the module.

View File

@@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -19,7 +18,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
@@ -351,25 +349,6 @@ outerFor:
assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
reactor := newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 10)
peer := p2p.CreateRandomPeer(false)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &bcproto.StatusRequest{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(BlockchainChannel, peer, msg)
})
}
//----------------------------------------------
// utility funcs

View File

@@ -415,34 +415,6 @@ func TestReactorHelperMode(t *testing.T) {
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
params := testReactorParams{
logger: log.TestingLogger(),
genDoc: genDoc,
privVals: privVals,
startHeight: 20,
mockA: true,
}
reactor := newTestReactor(params)
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
reactor.io = mockSwitch
peer := p2p.CreateRandomPeer(false)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &bcproto.StatusRequest{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(BlockchainChannel, peer, msg)
})
}
func TestReactorSetSwitchNil(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)

View File

@@ -11,7 +11,6 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -256,7 +255,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
}, css)
}
func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) {
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
@@ -282,35 +281,6 @@ func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) {
})
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
)
reactor.InitPeer(peer)
v := &tmcons.HasVote{
Height: 1,
Round: 1,
Index: 1,
Type: tmproto.PrevoteType,
}
w := v.Wrap()
msg, err := proto.Marshal(w)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(StateChannel, peer, msg)
reactor.AddPeer(peer)
})
}
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)

View File

@@ -92,7 +92,7 @@ module.exports = {
}
],
smallprint:
'The development of Tendermint Core was led primarily by All in Bits, Inc. The Tendermint trademark is owned by New Tendermint, LLC.'
'The development of Tendermint Core is led primarily by [Interchain GmbH](https://interchain.berlin/). Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit. The Tendermint trademark is owned by Tendermint Inc, the for-profit entity that also maintains this website.',
links: [
{
title: 'Documentation',

View File

@@ -349,8 +349,8 @@ the same results as for the Go version.
Want to write the counter app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
TODO link to bounties page.
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.
The `abci-cli` is designed strictly for testing and debugging. In a real
deployment, the role of sending messages is taken by Tendermint, which

View File

@@ -18,40 +18,38 @@ Listen address can be changed in the config file (see
The following metrics are available:
| **Name** | **Type** | **Tags** | **Description** |
|------------------------------------------|-----------|-------------------|------------------------------------------------------------------------|
| `consensus_height` | Gauge | | Height of the chain |
| `consensus_validators` | Gauge | | Number of validators |
| `consensus_validators_power` | Gauge | | Total voting power of all validators |
| `consensus_validator_power` | Gauge | | Voting power of the node if in the validator set |
| `consensus_validator_last_signed_height` | Gauge | | Last height the node signed a block, if the node is a validator |
| `consensus_validator_missed_blocks` | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| `consensus_missing_validators` | Gauge | | Number of validators who did not sign |
| `consensus_missing_validators_power` | Gauge | | Total voting power of the missing validators |
| `consensus_byzantine_validators` | Gauge | | Number of validators who tried to double sign |
| `consensus_byzantine_validators_power` | Gauge | | Total voting power of the byzantine validators |
| `consensus_block_interval_seconds` | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| `consensus_rounds` | Gauge | | Number of rounds |
| `consensus_num_txs` | Gauge | | Number of transactions |
| `consensus_total_txs` | Gauge | | Total number of transactions committed |
| `consensus_block_parts` | Counter | `peer_id` | Number of blockparts transmitted by peer |
| `consensus_latest_block_height` | Gauge | | /status sync\_info number |
| `consensus_fast_syncing` | Gauge | | Either 0 (not fast syncing) or 1 (syncing) |
| `consensus_state_syncing` | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
| `consensus_block_size_bytes` | Gauge | | Block size in bytes |
| `p2p_message_send_bytes_total` | Counter | `message_type` | Number of bytes sent to all peers per message type |
| `p2p_message_receive_bytes_total` | Counter | `message_type` | Number of bytes received from all peers per message type |
| `p2p_peers` | Gauge | | Number of peers node's connected to |
| `p2p_peer_receive_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel received from a given peer |
| `p2p_peer_send_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel sent to a given peer |
| `p2p_peer_pending_send_bytes` | Gauge | `peer_id` | Number of pending bytes to be sent to a given peer |
| `p2p_num_txs` | Gauge | `peer_id` | Number of transactions submitted by each peer\_id |
| `p2p_pending_send_bytes` | Gauge | `peer_id` | Amount of data pending to be sent to peer |
| `mempool_size` | Gauge | | Number of uncommitted transactions |
| `mempool_tx_size_bytes` | Histogram | | Transaction sizes in bytes |
| `mempool_failed_txs` | Counter | | Number of failed transactions |
| `mempool_recheck_times` | Counter | | Number of transactions rechecked in the mempool |
| `state_block_processing_time` | Histogram | | Time between BeginBlock and EndBlock in ms |
| **Name** | **Type** | **Tags** | **Description** |
| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus_rounds | Gauge | | Number of rounds |
| consensus_num_txs | Gauge | | Number of transactions |
| consensus_total_txs | Gauge | | Total number of transactions committed |
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
| consensus_latest_block_height | gauge | | /status sync_info number |
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer |
| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id |
| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer |
| mempool_size | Gauge | | Number of uncommitted transactions |
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
| mempool_failed_txs | counter | | number of failed transactions |
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |
## Useful queries

View File

@@ -97,7 +97,7 @@ More Information can be found at these links:
### Validator keys
Protecting a validator's consensus key is the most important factor to take in when designing your setup. The key that a validator is given upon creation of the node is called a consensus key, it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`).
Protecting a validator's consensus key is the most important factor to take in when designing your setup. The key that a validator is given upon creation of the node is called a consensus key, it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io/) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms), it is used extensively in production. You are not limited to using this tool, there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), there is not a recommended HSM.
Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are widely supported across the security sector and HSMs.

View File

@@ -9,7 +9,6 @@ import (
"github.com/fortytw2/leaktest"
"github.com/go-kit/log/term"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -370,33 +369,6 @@ func exampleVote(t byte) *types.Vote {
ValidatorIndex: 56789,
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config := cfg.TestConfig()
N := 1
stateDBs := make([]sm.Store, N)
val := types.NewMockPV()
stateDBs[0] = initializeValidatorState(val, 1)
reactors, _ := makeAndConnectReactorsAndPools(config, stateDBs)
var (
reactor = reactors[0]
peer = &p2pmocks.Peer{}
)
quitChan := make(<-chan struct{})
peer.On("Quit").Return(quitChan)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
e := &tmproto.EvidenceList{}
msg, err := proto.Marshal(e)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(evidence.EvidenceChannel, peer, msg)
})
}
//nolint:lll //ignore line length for tests
func TestEvidenceVectors(t *testing.T) {

View File

@@ -21,20 +21,20 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
"health": rpcserver.NewRPCFunc(makeHealthFunc(c), ""),
"status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""),
"net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), ""),
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", rpcserver.Cacheable()),
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", rpcserver.Cacheable()),
"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", rpcserver.Cacheable()),
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height", rpcserver.Cacheable("height")),
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", rpcserver.Cacheable()),
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")),
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")),
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()),
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"),
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""),
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"),
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"),
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"),
"tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by"),
"block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by"),
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", rpcserver.Cacheable("height")),
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page"),
"dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""),
"consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""),
"consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")),
"consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height"),
"unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"),
"num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""),
@@ -45,7 +45,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
// abci API
"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove"),
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", rpcserver.Cacheable()),
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""),
// evidence API
"broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"),

View File

@@ -10,7 +10,6 @@ import (
"github.com/fortytw2/leaktest"
"github.com/go-kit/log/term"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -294,31 +293,6 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config := cfg.TestConfig()
const N = 1
reactors := makeAndConnectReactors(config, N)
var (
reactor = reactors[0]
peer = mock.NewPeer(nil)
)
defer func() {
err := reactor.Stop()
assert.NoError(t, err)
}()
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &memproto.Txs{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(mempool.MempoolChannel, peer, msg)
})
}
// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {

View File

@@ -8,12 +8,10 @@ import (
"time"
"github.com/go-kit/log/term"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/p2p/mock"
cfg "github.com/tendermint/tendermint/config"
@@ -95,35 +93,6 @@ func TestMempoolVectors(t *testing.T) {
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config := cfg.TestConfig()
// if there were more than two reactors, the order of transactions could not be
// asserted in waitForTxsOnReactors (due to transactions gossiping). If we
// replace Connect2Switches (full mesh) with a func, which connects first
// reactor to others and nothing else, this test should also pass with >2 reactors.
const N = 1
reactors := makeAndConnectReactors(config, N)
var (
reactor = reactors[0]
peer = mock.NewPeer(nil)
)
defer func() {
err := reactor.Stop()
assert.NoError(t, err)
}()
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &memproto.Txs{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(mempool.MempoolChannel, peer, msg)
})
}
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
reactors := make([]*Reactor, n)
logger := mempoolLogger()

View File

@@ -39,8 +39,13 @@ type Metrics struct {
NumTxs metrics.Gauge
// Number of bytes of each message type received.
MessageReceiveBytesTotal metrics.Counter
// Number of bytes of each message type sent.
MessageSendBytesTotal metrics.Counter
// Histogram of message receive duration.
MessageReceiveTime metrics.Histogram
// Histogram of message send duration.
MessageSendTime metrics.Histogram
}
// PrometheusMetrics returns Metrics build using Prometheus client library.
@@ -94,6 +99,22 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "message_send_bytes_total",
Help: "Number of bytes of each message type sent.",
}, append(labels, "message_type")).With(labelsAndValues...),
MessageReceiveTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "message_receive_time",
Help: "Histogram of message receive duration.",
Buckets: stdprometheus.ExponentialBucketsRange(0.05, 50, 8),
}, labels).With(labelsAndValues...),
MessageSendTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "message_send_time",
Help: "Histogram of message send duration.",
Buckets: stdprometheus.ExponentialBucketsRange(0.05, 50, 8),
}, labels).With(labelsAndValues...),
}
}
@@ -106,6 +127,8 @@ func NopMetrics() *Metrics {
NumTxs: discard.NewGauge(),
MessageReceiveBytesTotal: discard.NewCounter(),
MessageSendBytesTotal: discard.NewCounter(),
MessageReceiveTime: discard.NewHistogram(),
MessageSendTime: discard.NewHistogram(),
}
}

View File

@@ -64,6 +64,12 @@ type EnvelopeSender interface {
//
// Deprecated: Will be removed in v0.37.
func SendEnvelopeShim(p Peer, e Envelope, lg log.Logger) bool {
before := time.Now()
defer func() {
if pp, ok := p.(*peer); ok {
pp.metrics.MessageSendTime.Observe(time.Since(before).Seconds())
}
}()
if es, ok := p.(EnvelopeSender); ok {
return es.SendEnvelope(e)
}
@@ -86,6 +92,12 @@ func SendEnvelopeShim(p Peer, e Envelope, lg log.Logger) bool {
//
// Deprecated: Will be removed in v0.37.
func TrySendEnvelopeShim(p Peer, e Envelope, lg log.Logger) bool {
before := time.Now()
defer func() {
if pp, ok := p.(*peer); ok {
pp.metrics.MessageSendTime.Observe(time.Since(before).Seconds())
}
}()
if es, ok := p.(EnvelopeSender); ok {
return es.SendEnvelope(e)
}
@@ -510,6 +522,10 @@ func createMConnection(
) *tmconn.MConnection {
onReceive := func(chID byte, msgBytes []byte) {
before := time.Now()
defer func() {
p.metrics.MessageReceiveTime.Observe(time.Since(before).Seconds())
}()
reactor := reactorsByCh[chID]
if reactor == nil {
// Note that its ok to panic here as it's caught in the conn._recover,

View File

@@ -498,22 +498,6 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
assert.Equal(t, size, book.Size())
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
pexR, _ := createReactor(&ReactorConfig{})
peer := p2p.CreateRandomPeer(false)
pexR.InitPeer(peer)
pexR.AddPeer(peer)
m := &tmp2p.PexAddrs{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
pexR.Receive(PexChannel, peer, msg)
})
}
func TestPEXReactorDialPeer(t *testing.T) {
pexR, book := createReactor(&ReactorConfig{})
defer teardownReactor(book)

View File

@@ -17,21 +17,21 @@ var Routes = map[string]*rpc.RPCFunc{
"health": rpc.NewRPCFunc(Health, ""),
"status": rpc.NewRPCFunc(Status, ""),
"net_info": rpc.NewRPCFunc(NetInfo, ""),
"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()),
"genesis": rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()),
"genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()),
"block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")),
"block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()),
"block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")),
"commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")),
"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"),
"genesis": rpc.NewRPCFunc(Genesis, ""),
"genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk"),
"block": rpc.NewRPCFunc(Block, "height"),
"block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash"),
"block_results": rpc.NewRPCFunc(BlockResults, "height"),
"commit": rpc.NewRPCFunc(Commit, "height"),
"check_tx": rpc.NewRPCFunc(CheckTx, "tx"),
"tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()),
"tx": rpc.NewRPCFunc(Tx, "hash,prove"),
"tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"),
"block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"),
"validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")),
"validators": rpc.NewRPCFunc(Validators, "height,page,per_page"),
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""),
"consensus_state": rpc.NewRPCFunc(ConsensusState, ""),
"consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")),
"consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"),
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"),
"num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""),
@@ -42,7 +42,7 @@ var Routes = map[string]*rpc.RPCFunc{
// abci API
"abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"),
"abci_info": rpc.NewRPCFunc(ABCIInfo, "", rpc.Cacheable()),
"abci_info": rpc.NewRPCFunc(ABCIInfo, ""),
// evidence API
"broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"),

View File

@@ -7,10 +7,8 @@ import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"testing"
"time"
@@ -39,7 +37,9 @@ const (
testVal = "acbd"
)
var ctx = context.Background()
var (
ctx = context.Background()
)
type ResultEcho struct {
Value string `json:"value"`
@@ -57,10 +57,6 @@ type ResultEchoDataBytes struct {
Value tmbytes.HexBytes `json:"value"`
}
type ResultEchoWithDefault struct {
Value int `json:"value"`
}
// Define some routes
var Routes = map[string]*server.RPCFunc{
"echo": server.NewRPCFunc(EchoResult, "arg"),
@@ -68,7 +64,6 @@ var Routes = map[string]*server.RPCFunc{
"echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg"),
"echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"),
"echo_int": server.NewRPCFunc(EchoIntResult, "arg"),
"echo_default": server.NewRPCFunc(EchoWithDefault, "arg", server.Cacheable("arg")),
}
func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) {
@@ -91,14 +86,6 @@ func EchoDataBytesResult(ctx *types.Context, v tmbytes.HexBytes) (*ResultEchoDat
return &ResultEchoDataBytes{v}, nil
}
func EchoWithDefault(ctx *types.Context, v *int) (*ResultEchoWithDefault, error) {
val := -1
if v != nil {
val = *v
}
return &ResultEchoWithDefault{val}, nil
}
func TestMain(m *testing.M) {
setup()
code := m.Run()
@@ -212,47 +199,26 @@ func echoDataBytesViaHTTP(cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.Hex
return result.Value, nil
}
func echoWithDefaultViaHTTP(cl client.Caller, v *int) (int, error) {
params := map[string]interface{}{}
if v != nil {
params["arg"] = *v
}
result := new(ResultEchoWithDefault)
if _, err := cl.Call(ctx, "echo_default", params, result); err != nil {
return 0, err
}
return result.Value, nil
}
func testWithHTTPClient(t *testing.T, cl client.HTTPClient) {
val := testVal
got, err := echoViaHTTP(cl, val)
require.NoError(t, err)
require.Nil(t, err)
assert.Equal(t, got, val)
val2 := randBytes(t)
got2, err := echoBytesViaHTTP(cl, val2)
require.NoError(t, err)
require.Nil(t, err)
assert.Equal(t, got2, val2)
val3 := tmbytes.HexBytes(randBytes(t))
got3, err := echoDataBytesViaHTTP(cl, val3)
require.NoError(t, err)
require.Nil(t, err)
assert.Equal(t, got3, val3)
val4 := tmrand.Intn(10000)
got4, err := echoIntViaHTTP(cl, val4)
require.NoError(t, err)
require.Nil(t, err)
assert.Equal(t, got4, val4)
got5, err := echoWithDefaultViaHTTP(cl, nil)
require.NoError(t, err)
assert.Equal(t, got5, -1)
val6 := tmrand.Intn(10000)
got6, err := echoWithDefaultViaHTTP(cl, &val6)
require.NoError(t, err)
assert.Equal(t, got6, val6)
}
func echoViaWS(cl *client.WSClient, val string) (string, error) {
@@ -267,6 +233,7 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) {
msg := <-cl.ResponsesCh
if msg.Error != nil {
return "", err
}
result := new(ResultEcho)
err = json.Unmarshal(msg.Result, result)
@@ -288,6 +255,7 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) {
msg := <-cl.ResponsesCh
if msg.Error != nil {
return []byte{}, msg.Error
}
result := new(ResultEchoBytes)
err = json.Unmarshal(msg.Result, result)
@@ -431,74 +399,6 @@ func TestWSClientPingPong(t *testing.T) {
time.Sleep(6 * time.Second)
}
func TestJSONRPCCaching(t *testing.T) {
httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1)
cl, err := client.DefaultHTTPClient(httpAddr)
require.NoError(t, err)
// Not supplying the arg should result in not caching
params := make(map[string]interface{})
req, err := types.MapToRequest(types.JSONRPCIntID(1000), "echo_default", params)
require.NoError(t, err)
res1, err := rawJSONRPCRequest(t, cl, httpAddr, req)
defer func() { _ = res1.Body.Close() }()
require.NoError(t, err)
assert.Equal(t, "", res1.Header.Get("Cache-control"))
// Supplying the arg should result in caching
params["arg"] = tmrand.Intn(10000)
req, err = types.MapToRequest(types.JSONRPCIntID(1001), "echo_default", params)
require.NoError(t, err)
res2, err := rawJSONRPCRequest(t, cl, httpAddr, req)
defer func() { _ = res2.Body.Close() }()
require.NoError(t, err)
assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control"))
}
func rawJSONRPCRequest(t *testing.T, cl *http.Client, url string, req interface{}) (*http.Response, error) {
reqBytes, err := json.Marshal(req)
require.NoError(t, err)
reqBuf := bytes.NewBuffer(reqBytes)
httpReq, err := http.NewRequest(http.MethodPost, url, reqBuf)
require.NoError(t, err)
httpReq.Header.Set("Content-type", "application/json")
return cl.Do(httpReq)
}
func TestURICaching(t *testing.T) {
httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1)
cl, err := client.DefaultHTTPClient(httpAddr)
require.NoError(t, err)
// Not supplying the arg should result in not caching
args := url.Values{}
res1, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args)
defer func() { _ = res1.Body.Close() }()
require.NoError(t, err)
assert.Equal(t, "", res1.Header.Get("Cache-control"))
// Supplying the arg should result in caching
args.Set("arg", fmt.Sprintf("%d", tmrand.Intn(10000)))
res2, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args)
defer func() { _ = res2.Body.Close() }()
require.NoError(t, err)
assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control"))
}
func rawURIRequest(t *testing.T, cl *http.Client, url string, args url.Values) (*http.Response, error) {
req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(args.Encode()))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return cl.Do(req)
}
func randBytes(t *testing.T) []byte {
n := tmrand.Intn(10) + 2
buf := make([]byte, n)

View File

@@ -55,11 +55,6 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
requests = []types.RPCRequest{request}
}
// Set the default response cache to true unless
// 1. Any RPC request error.
// 2. Any RPC request doesn't allow to be cached.
// 3. Any RPC request has the height argument and the value is 0 (the default).
cache := true
for _, request := range requests {
request := request
@@ -77,13 +72,11 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
responses,
types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)),
)
cache = false
continue
}
rpcFunc, ok := funcMap[request.Method]
if !ok || (rpcFunc.ws) {
if !ok || rpcFunc.ws {
responses = append(responses, types.RPCMethodNotFoundError(request.ID))
cache = false
continue
}
ctx := &types.Context{JSONReq: &request, HTTPReq: r}
@@ -95,16 +88,11 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
responses,
types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)),
)
cache = false
continue
}
args = append(args, fnArgs...)
}
if cache && !rpcFunc.cacheableWithArgs(args) {
cache = false
}
returns := rpcFunc.f.Call(args)
result, err := unreflectResult(returns)
if err != nil {
@@ -115,13 +103,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
}
if len(responses) > 0 {
var wErr error
if cache {
wErr = WriteCacheableRPCResponseHTTP(w, responses...)
} else {
wErr = WriteRPCResponseHTTP(w, responses...)
}
if wErr != nil {
if wErr := WriteRPCResponseHTTP(w, responses...); wErr != nil {
logger.Error("failed to write responses", "res", responses, "err", wErr)
}
}

View File

@@ -18,8 +18,7 @@ import (
func testMux() *http.ServeMux {
funcMap := map[string]*RPCFunc{
"c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"),
"block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", Cacheable("height")),
"c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"),
}
mux := http.NewServeMux()
buf := new(bytes.Buffer)
@@ -228,52 +227,3 @@ func TestUnknownRPCPath(t *testing.T) {
require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404")
res.Body.Close()
}
func TestRPCResponseCache(t *testing.T) {
mux := testMux()
body := strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["1"]}`)
req, _ := http.NewRequest("Get", "http://localhost/", body)
rec := httptest.NewRecorder()
mux.ServeHTTP(rec, req)
res := rec.Result()
// Always expecting back a JSONRPCResponse
require.True(t, statusOK(res.StatusCode), "should always return 2XX")
require.Equal(t, "public, max-age=86400", res.Header.Get("Cache-control"))
_, err := io.ReadAll(res.Body)
res.Body.Close()
require.Nil(t, err, "reading from the body should not give back an error")
// send a request with default height.
body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`)
req, _ = http.NewRequest("Get", "http://localhost/", body)
rec = httptest.NewRecorder()
mux.ServeHTTP(rec, req)
res = rec.Result()
// Always expecting back a JSONRPCResponse
require.True(t, statusOK(res.StatusCode), "should always return 2XX")
require.Equal(t, "", res.Header.Get("Cache-control"))
_, err = io.ReadAll(res.Body)
res.Body.Close()
require.Nil(t, err, "reading from the body should not give back an error")
// send a request with default height, but as empty set of parameters.
body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": []}`)
req, _ = http.NewRequest("Get", "http://localhost/", body)
rec = httptest.NewRecorder()
mux.ServeHTTP(rec, req)
res = rec.Result()
// Always expecting back a JSONRPCResponse
require.True(t, statusOK(res.StatusCode), "should always return 2XX")
require.Equal(t, "", res.Header.Get("Cache-control"))
_, err = io.ReadAll(res.Body)
res.Body.Close()
require.Nil(t, err, "reading from the body should not give back an error")
}

View File

@@ -104,7 +104,7 @@ func WriteRPCResponseHTTPError(
panic("tried to write http error response without RPC error")
}
jsonBytes, err := json.Marshal(res)
jsonBytes, err := json.MarshalIndent(res, "", " ")
if err != nil {
return fmt.Errorf("json marshal: %w", err)
}
@@ -117,22 +117,6 @@ func WriteRPCResponseHTTPError(
// WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w.
func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error {
return writeRPCResponseHTTP(w, []httpHeader{}, res...)
}
// WriteCacheableRPCResponseHTTP marshals res as JSON (with indent) and writes
// it to w. Adds cache-control to the response header and sets the expiry to
// one day.
func WriteCacheableRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error {
return writeRPCResponseHTTP(w, []httpHeader{{"Cache-Control", "public, max-age=86400"}}, res...)
}
type httpHeader struct {
name string
value string
}
func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...types.RPCResponse) error {
var v interface{}
if len(res) == 1 {
v = res[0]
@@ -140,14 +124,11 @@ func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...ty
v = res
}
jsonBytes, err := json.Marshal(v)
jsonBytes, err := json.MarshalIndent(v, "", " ")
if err != nil {
return fmt.Errorf("json marshal: %w", err)
}
w.Header().Set("Content-Type", "application/json")
for _, header := range headers {
w.Header().Set(header.name, header.value)
}
w.WriteHeader(200)
_, err = w.Write(jsonBytes)
return err
@@ -185,6 +166,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler
// Without this, Chrome & Firefox were retrying aborted ajax requests,
// at least to my localhost.
if e := recover(); e != nil {
// If RPCResponse
if res, ok := e.(types.RPCResponse); ok {
if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil {

View File

@@ -112,7 +112,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
// one argument
w := httptest.NewRecorder()
err := WriteCacheableRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"}))
err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"}))
require.NoError(t, err)
resp := w.Result()
body, err := io.ReadAll(resp.Body)
@@ -120,8 +120,13 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
assert.Equal(t, "public, max-age=86400", resp.Header.Get("Cache-control"))
assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}}`, string(body))
assert.Equal(t, `{
"jsonrpc": "2.0",
"id": -1,
"result": {
"value": "hello"
}
}`, string(body))
// multiple arguments
w = httptest.NewRecorder()
@@ -136,7 +141,22 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
assert.Equal(t, 200, resp.StatusCode)
assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
assert.Equal(t, `[{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}},{"jsonrpc":"2.0","id":-1,"result":{"value":"world"}}]`, string(body))
assert.Equal(t, `[
{
"jsonrpc": "2.0",
"id": -1,
"result": {
"value": "hello"
}
},
{
"jsonrpc": "2.0",
"id": -1,
"result": {
"value": "world"
}
}
]`, string(body))
}
func TestWriteRPCResponseHTTPError(t *testing.T) {
@@ -152,5 +172,13 @@ func TestWriteRPCResponseHTTPError(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"error":{"code":-32603,"message":"Internal error","data":"foo"}}`, string(body))
assert.Equal(t, `{
"jsonrpc": "2.0",
"id": -1,
"error": {
"code": -32603,
"message": "Internal error",
"data": "foo"
}
}`, string(body))
}

View File

@@ -63,14 +63,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit
}
return
}
resp := types.NewRPCSuccessResponse(dummyID, result)
if rpcFunc.cacheableWithArgs(args) {
err = WriteCacheableRPCResponseHTTP(w, resp)
} else {
err = WriteRPCResponseHTTP(w, resp)
}
if err != nil {
if err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(dummyID, result)); err != nil {
logger.Error("failed to write response", "res", result, "err", err)
return
}

View File

@@ -23,96 +23,40 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo
mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)))
}
type Option func(*RPCFunc)
// Cacheable enables returning a cache control header from RPC functions to
// which it is applied.
//
// `noCacheDefArgs` is a list of argument names that, if omitted or set to
// their defaults when calling the RPC function, will skip the response
// caching.
func Cacheable(noCacheDefArgs ...string) Option {
return func(r *RPCFunc) {
r.cacheable = true
r.noCacheDefArgs = make(map[string]interface{})
for _, arg := range noCacheDefArgs {
r.noCacheDefArgs[arg] = nil
}
}
}
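// Example usage, as in the route map shown earlier in this diff:
// /block responses are cacheable unless the "height" argument is
// omitted or left at its zero default:
//
//	"block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")),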
// Ws enables WebSocket communication.
func Ws() Option {
return func(r *RPCFunc) {
r.ws = true
}
}
// Function introspection
// RPCFunc contains the introspected type information for a function
type RPCFunc struct {
f reflect.Value // underlying rpc function
args []reflect.Type // type of each function arg
returns []reflect.Type // type of each return arg
argNames []string // name of each argument
cacheable bool // enable cache control
ws bool // enable websocket communication
noCacheDefArgs map[string]interface{} // a lookup table of args that, if not supplied or are set to default values, cause us to not cache
f reflect.Value // underlying rpc function
args []reflect.Type // type of each function arg
returns []reflect.Type // type of each return arg
argNames []string // name of each argument
ws bool // websocket only
}
// NewRPCFunc wraps a function for introspection.
// f is the function, args are comma separated argument names
func NewRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
return newRPCFunc(f, args, options...)
func NewRPCFunc(f interface{}, args string) *RPCFunc {
return newRPCFunc(f, args, false)
}
// NewWSRPCFunc wraps a function for introspection and use in the websockets.
func NewWSRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
options = append(options, Ws())
return newRPCFunc(f, args, options...)
func NewWSRPCFunc(f interface{}, args string) *RPCFunc {
return newRPCFunc(f, args, true)
}
// cacheableWithArgs returns whether or not a call to this function is cacheable,
// given the specified arguments.
func (f *RPCFunc) cacheableWithArgs(args []reflect.Value) bool {
if !f.cacheable {
return false
}
// Skip the context variable common to all RPC functions
for i := 1; i < len(f.args); i++ {
// f.argNames does not include the context variable
argName := f.argNames[i-1]
if _, hasDefault := f.noCacheDefArgs[argName]; hasDefault {
// Argument with default value was not supplied
if i >= len(args) {
return false
}
// Argument with default value is set to its zero value
if args[i].IsZero() {
return false
}
}
}
return true
}
func newRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
func newRPCFunc(f interface{}, args string, ws bool) *RPCFunc {
var argNames []string
if args != "" {
argNames = strings.Split(args, ",")
}
r := &RPCFunc{
return &RPCFunc{
f: reflect.ValueOf(f),
args: funcArgTypes(f),
returns: funcReturnTypes(f),
argNames: argNames,
ws: ws,
}
for _, opt := range options {
opt(r)
}
return r
}
// return a function's argument types

View File

@@ -431,10 +431,7 @@ func (wsc *wsConnection) writeRoutine() {
return
}
case msg := <-wsc.writeChan:
// Use json.MarshalIndent instead of Marshal for pretty output.
// Pretty output not necessary, since most consumers of WS events are
// automated processes, not humans.
jsonBytes, err := json.Marshal(msg)
jsonBytes, err := json.MarshalIndent(msg, "", " ")
if err != nil {
wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err)
continue

View File

@@ -216,9 +216,6 @@ paths:
Please refer to
https://docs.tendermint.com/v0.34/tendermint-core/using-tendermint.html#formatting
for formatting/encoding rules.
Upon success, the `Cache-Control` header will be set with the default
maximum age.
parameters:
- in: query
name: tx
@@ -626,12 +623,9 @@ paths:
tags:
- Info
description: |
Get block headers for minHeight <= height <= maxHeight.
Get block headers for minHeight <= height maxHeight.
At most 20 items will be returned.
Upon success, the `Cache-Control` header will be set with the default
maximum age.
responses:
"200":
description: Block headers, returned in descending order (highest first).
@@ -661,9 +655,6 @@ paths:
- Info
description: |
Get Block.
If the `height` field is set to a non-default value, upon success, the
`Cache-Control` header will be set with the default maximum age.
responses:
"200":
description: Block informations.
@@ -693,9 +684,6 @@ paths:
- Info
description: |
Get Block By Hash.
Upon success, the `Cache-Control` header will be set with the default
maximum age.
responses:
"200":
description: Block informations.
@@ -716,7 +704,7 @@ paths:
parameters:
- in: query
name: height
description: height to return. If no height is provided, it will fetch information regarding the latest block.
description: height to return. If no height is provided, it will fetch informations regarding the latest block.
schema:
type: integer
default: 0
@@ -726,9 +714,6 @@ paths:
description: |
Get block_results. When the `discard_abci_responses` storage flag is
enabled, this endpoint will return an error.
If the `height` field is set to a non-default value, upon success, the
`Cache-Control` header will be set with the default maximum age.
responses:
"200":
description: Block results.
@@ -758,9 +743,6 @@ paths:
- Info
description: |
Get Commit.
If the `height` field is set to a non-default value, upon success, the
`Cache-Control` header will be set with the default maximum age.
responses:
"200":
description: |
@@ -809,9 +791,6 @@ paths:
- Info
description: |
Get Validators. Validators are sorted by voting power.
If the `height` field is set to a non-default value, upon success, the
`Cache-Control` header will be set with the default maximum age.
responses:
"200":
description: Commit results.
@@ -833,9 +812,6 @@ paths:
- Info
description: |
Get genesis.
Upon success, the `Cache-Control` header will be set with the default
maximum age.
responses:
"200":
description: Genesis results.
@@ -914,9 +890,6 @@ paths:
- Info
description: |
Get consensus parameters.
If the `height` field is set to a non-default value, upon success, the
`Cache-Control` header will be set with the default maximum age.
responses:
"200":
description: consensus parameters results.
@@ -1107,14 +1080,14 @@ paths:
parameters:
- in: query
name: hash
description: hash of transaction to retrieve
description: transaction Hash to retrive
required: true
schema:
type: string
example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED"
- in: query
name: prove
description: Include proofs of the transaction's inclusion in the block
description: Include proofs of the transactions inclusion in the block
required: false
schema:
type: boolean
@@ -1123,10 +1096,7 @@ paths:
tags:
- Info
description: |
Get a transaction
Upon success, the `Cache-Control` header will be set with the default
maximum age.
Get a trasasction
responses:
"200":
description: Get a transaction`
@@ -1142,15 +1112,12 @@ paths:
$ref: "#/components/schemas/ErrorResponse"
/abci_info:
get:
summary: Get info about the application.
summary: Get some info about the application.
operationId: abci_info
tags:
- ABCI
description: |
Get info about the application.
Upon success, the `Cache-Control` header will be set with the default
maximum age.
Get some info about the application.
responses:
"200":
description: Get some info about the application.

View File

@@ -183,21 +183,3 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) {
})
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
cfg := config.DefaultStateSyncConfig()
conn := &proxymocks.AppConnSnapshot{}
reactor := NewReactor(*cfg, conn, nil, "")
peer := p2p.CreateRandomPeer(false)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(ChunkChannel, peer, msg)
})
}

View File

@@ -5,7 +5,7 @@ var TMCoreSemVer = TMVersionDefault
const (
// TMVersionDefault is the used as the fallback version of Tendermint Core
// when not using git describe. It is formatted with semantic versioning.
TMVersionDefault = "0.34.24"
TMVersionDefault = "0.34.22"
// ABCISemVer is the semantic version of the ABCI library
ABCISemVer = "0.17.0"