mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-19 11:12:50 +00:00)

Compare: wb/initial...marko/brin (12 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5f6dc0502a | |
| | dd4fee88ef | |
| | c8e336f2e9 | |
| | ceca73a873 | |
| | e31c1e3622 | |
| | 161496bfca | |
| | fd3c397c69 | |
| | c430624e1b | |
| | 2a0147515f | |
| | 44988943ba | |
| | 9089b2aed5 | |
| | 5119d16d5c | |
.github/workflows/docs.yaml (vendored, new file, 32 lines)

@@ -0,0 +1,32 @@
name: Documentation
# This workflow builds the static documentation site, and publishes the results to GitHub Pages.
# It runs on every push to the main branch, with changes in the docs and spec directories
on:
  workflow_dispatch: # allow manual updates
  push:
    branches:
      - master
    paths:
      - "docs/**"
      - "spec/**"

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    container:
      image: tendermintdev/docker-website-deployment
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v3

      - name: Install and Build 🔧
        run: |
          apk add rsync
          make build-docs
      - name: Deploy 🚀
        uses: JamesIves/github-pages-deploy-action@v4.3.0
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          BRANCH: gh-pages
          FOLDER: ~/output
          single-commit: true
UPGRADING.md (21 lines)
@@ -212,22 +212,25 @@ and one function have moved to the Tendermint `crypto` package:
The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
-script provided in this release. The script located in
-`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
-provides the function `Migrate(context.Context, db.DB)` which you can
-operationalize as makes sense for your deployment.
+script provided in this release.
+
+The script located in
+`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the
+function `Migrate(context.Context, db.DB)` which you can operationalize as
+makes sense for your deployment.

For ease of use the `tendermint` command includes a CLI version of the
migration script, which you can invoke, as in:

    tendermint key-migrate

-This reads the configuration file as normal and allows the
-`--db-backend` and `--db-dir` flags to change database operations as
-needed.
+This reads the configuration file as normal and allows the `--db-backend` and
+`--db-dir` flags to override the database location as needed.

-The migration operation is idempotent and can be run more than once,
-if needed.
+The migration operation is intended to be idempotent, and should be safe to
+rerun on the same database multiple times. As a safety measure, however, we
+recommend that operators test out the migration on a copy of the database
+first, if it is practical to do so, before applying it to the production data.

### CLI Changes

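Alongside the `tendermint key-migrate` CLI shown in the hunk above, the `Migrate(context.Context, db.DB)` function can be called directly. Below is a minimal sketch (not part of this change) of how an operator might wire that up with `tm-db`; the backend, database name, and path are placeholders that must match the node's own configuration, and each of the node's databases would need the same treatment.

```go
package main

import (
	"context"
	"log"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Placeholder: open one of the node's databases in place. The backend
	// (GoLevelDB here) and the directory must match config.toml.
	db, err := dbm.NewGoLevelDB("blockstore", "/path/to/tendermint/data")
	if err != nil {
		log.Fatalf("opening database: %v", err)
	}
	defer db.Close()

	// Migrate rewrites legacy-format keys into the new key format in place.
	if err := keymigrate.Migrate(context.Background(), db); err != nil {
		log.Fatalf("migrating keys: %v", err)
	}
}
```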
@@ -13,14 +13,15 @@ order: 3
The PBTS algorithm defines a way for a Tendermint blockchain to create block
timestamps that are within a reasonable bound of the clocks of the validators on
the network. This replaces the original BFTTime algorithm for timestamp
-assignment that relied on the timestamps included in precommit messages.
+assignment that computed a timestamp using the timestamps included in precommit
+messages.

## Algorithm Parameters

The functionality of the PBTS algorithm is governed by two parameters within
Tendermint. These two parameters are [consensus
parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291),
-meaning they are configured by the ABCI application and are expected to be the
-same across all nodes on the network.
+meaning they are configured by the ABCI application and are therefore the same
+across all nodes on the network.

### `Precision`
@@ -51,7 +52,7 @@ useful for the protocols and applications built on top of Tendermint.
The following protocols and application features require a reliable source of time:

* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
-* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
+* Tendermint Evidence expiration is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21
  days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet
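For orientation (not part of this diff): the two consensus parameters referenced above are `Precision` and, in the PBTS spec, `MessageDelay`. The sketch below shows roughly how they bound an acceptable proposal timestamp, following the spec's "timely" predicate; the function and variable names here are illustrative only.

```go
package main

import (
	"fmt"
	"time"
)

// isTimely sketches the PBTS "timely" check: a proposal timestamp is accepted
// if it is at most precision ahead of the local receive time, and at most
// messageDelay+precision behind it.
func isTimely(proposalTime, receiveTime time.Time, precision, messageDelay time.Duration) bool {
	lower := receiveTime.Add(-(messageDelay + precision)) // oldest acceptable timestamp
	upper := receiveTime.Add(precision)                   // newest acceptable timestamp
	return !proposalTime.Before(lower) && !proposalTime.After(upper)
}

func main() {
	now := time.Now()
	precision := 500 * time.Millisecond
	messageDelay := 2 * time.Second

	fmt.Println(isTimely(now.Add(-1*time.Second), now, precision, messageDelay)) // true: within bounds
	fmt.Println(isTimely(now.Add(10*time.Second), now, precision, messageDelay)) // false: too far in the future
}
```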
@@ -86,27 +86,30 @@ const (
var prefixes = []struct {
	prefix []byte
	ktype  keyType
+	check  func(keyID) bool
}{
-	{[]byte("consensusParamsKey:"), consensusParamsKey},
-	{[]byte("abciResponsesKey:"), abciResponsesKey},
-	{[]byte("validatorsKey:"), validatorsKey},
-	{[]byte("stateKey"), stateStoreKey},
-	{[]byte("H:"), blockMetaKey},
-	{[]byte("P:"), blockPartKey},
-	{[]byte("C:"), commitKey},
-	{[]byte("SC:"), seenCommitKey},
-	{[]byte("BH:"), blockHashKey},
-	{[]byte("size"), lightSizeKey},
-	{[]byte("lb/"), lightBlockKey},
-	{[]byte("\x00"), evidenceCommittedKey},
-	{[]byte("\x01"), evidencePendingKey},
+	{[]byte("consensusParamsKey:"), consensusParamsKey, nil},
+	{[]byte("abciResponsesKey:"), abciResponsesKey, nil},
+	{[]byte("validatorsKey:"), validatorsKey, nil},
+	{[]byte("stateKey"), stateStoreKey, nil},
+	{[]byte("H:"), blockMetaKey, nil},
+	{[]byte("P:"), blockPartKey, nil},
+	{[]byte("C:"), commitKey, nil},
+	{[]byte("SC:"), seenCommitKey, nil},
+	{[]byte("BH:"), blockHashKey, nil},
+	{[]byte("size"), lightSizeKey, nil},
+	{[]byte("lb/"), lightBlockKey, nil},
+	{[]byte("\x00"), evidenceCommittedKey, checkEvidenceKey},
+	{[]byte("\x01"), evidencePendingKey, checkEvidenceKey},
}

// checkKeyType classifies a candidate key based on its structure.
func checkKeyType(key keyID) keyType {
	for _, p := range prefixes {
		if bytes.HasPrefix(key, p.prefix) {
-			return p.ktype
+			if p.check == nil || p.check(key) {
+				return p.ktype
+			}
		}
	}
@@ -342,6 +345,35 @@ func convertEvidence(key keyID, newPrefix int64) ([]byte, error) {
	return orderedcode.Append(nil, newPrefix, binary.BigEndian.Uint64(hb), string(evidenceHash))
}

+// checkEvidenceKey reports whether a candidate key with one of the legacy
+// evidence prefixes has the correct structure for a legacy evidence key.
+//
+// This check is needed because transaction hashes are stored without a prefix,
+// so checking the one-byte prefix alone is not enough to distinguish them.
+// Legacy evidence keys are suffixed with a string of the format:
+//
+//	"%0.16X/%X"
+//
+// where the first element is the height and the second is the hash. Thus, we
+// check that the remainder of the key matches this structure.
+func checkEvidenceKey(key keyID) bool {
+	parts := bytes.SplitN(key[1:], []byte("/"), 2)
+	if len(parts) != 2 || len(parts[0]) != 16 || !isHex(parts[0]) || !isHex(parts[1]) {
+		return false
+	}
+	return true
+}
+
+func isHex(data []byte) bool {
+	for _, b := range data {
+		if ('0' <= b && b <= '9') || ('a' <= b && b <= 'f') || ('A' <= b && b <= 'F') {
+			continue
+		}
+		return false
+	}
+	return len(data) != 0
+}
+
func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error {
	exists, err := db.Has(key)
	if err != nil {
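To make the comment above concrete, here is a small standalone sketch (not from this change) of the ambiguity `checkEvidenceKey` resolves: a legacy evidence key and a transaction hash can share the same one-byte prefix, but only the evidence key has the `"%0.16X/%X"` suffix structure.

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	height, hash := int64(42), []byte("some-evidence-hash")

	// Legacy pending-evidence key: prefix byte 0x01 followed by "%0.16X/%X".
	evidenceKey := append([]byte("\x01"), fmt.Sprintf("%0.16X/%X", height, hash)...)

	// A transaction hash that happens to start with the same byte.
	txHash := append([]byte{0x01}, bytes.Repeat([]byte{0xAB}, 31)...)

	for _, key := range [][]byte{evidenceKey, txHash} {
		// The same structural test the migration applies: split off the prefix
		// byte and require a 16-character height segment before the "/".
		parts := bytes.SplitN(key[1:], []byte("/"), 2)
		fmt.Println(len(parts) == 2 && len(parts[0]) == 16) // true for the evidence key, false for the tx hash
	}
}
```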
@@ -1,11 +1,11 @@
package keymigrate

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"math"
	"strings"
	"testing"

	"github.com/google/orderedcode"
@@ -21,6 +21,7 @@ func makeKey(t *testing.T, elems ...interface{}) []byte {
}

func getLegacyPrefixKeys(val int) map[string][]byte {
+	vstr := fmt.Sprintf("%02x", byte(val))
	return map[string][]byte{
		"Height":    []byte(fmt.Sprintf("H:%d", val)),
		"BlockPart": []byte(fmt.Sprintf("P:%d:%d", val, val)),
@@ -40,14 +41,19 @@ func getLegacyPrefixKeys(val int) map[string][]byte {
		"UserKey1":  []byte(fmt.Sprintf("foo/bar/baz/%d/%d", val, val)),
		"TxHeight":  []byte(fmt.Sprintf("tx.height/%s/%d/%d", fmt.Sprint(val), val, val)),
		"TxHash": append(
-			bytes.Repeat([]byte{fmt.Sprint(val)[0]}, 16),
-			bytes.Repeat([]byte{fmt.Sprint(val)[len([]byte(fmt.Sprint(val)))-1]}, 16)...,
+			[]byte(strings.Repeat(vstr[:1], 16)),
+			[]byte(strings.Repeat(vstr[1:], 16))...,
		),
+
+		// Transaction hashes that could be mistaken for evidence keys.
+		"TxHashMimic0": append([]byte{0}, []byte(strings.Repeat(vstr, 16)[:31])...),
+		"TxHashMimic1": append([]byte{1}, []byte(strings.Repeat(vstr, 16)[:31])...),
	}
}

func getNewPrefixKeys(t *testing.T, val int) map[string][]byte {
	t.Helper()
+	vstr := fmt.Sprintf("%02x", byte(val))
	return map[string][]byte{
		"Height":    makeKey(t, int64(0), int64(val)),
		"BlockPart": makeKey(t, int64(1), int64(val), int64(val)),
@@ -66,7 +72,9 @@ func getNewPrefixKeys(t *testing.T, val int) map[string][]byte {
		"UserKey0":  makeKey(t, "foo", "bar", int64(val), int64(val)),
		"UserKey1":  makeKey(t, "foo", "bar/baz", int64(val), int64(val)),
		"TxHeight":  makeKey(t, "tx.height", fmt.Sprint(val), int64(val), int64(val+2), int64(val+val)),
-		"TxHash":    makeKey(t, "tx.hash", string(bytes.Repeat([]byte{[]byte(fmt.Sprint(val))[0]}, 32))),
+		"TxHash":       makeKey(t, "tx.hash", strings.Repeat(vstr, 16)),
+		"TxHashMimic0": makeKey(t, "tx.hash", "\x00"+strings.Repeat(vstr, 16)[:31]),
+		"TxHashMimic1": makeKey(t, "tx.hash", "\x01"+strings.Repeat(vstr, 16)[:31]),
	}
}
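As a side-by-side illustration of the key formats these fixtures encode (not part of the diff): assuming `makeKey` wraps `orderedcode.Append`, which the `migrate.go` hunk above also uses, a legacy block-meta key and its migrated counterpart look roughly like this. The prefix `int64(0)` for "Height" is taken from the `getNewPrefixKeys` table.

```go
package main

import (
	"fmt"

	"github.com/google/orderedcode"
)

func main() {
	// Legacy block-meta key for height 123: a plain prefixed string.
	legacy := []byte("H:123")

	// New-format key: an orderedcode tuple of (prefix, height), preserving
	// lexicographic ordering of the encoded heights.
	migrated, err := orderedcode.Append(nil, int64(0), int64(123))
	if err != nil {
		panic(err)
	}

	fmt.Printf("legacy:   %q\n", legacy)
	fmt.Printf("migrated: %x\n", migrated)
}
```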