mirror of https://github.com/tendermint/tendermint.git
Merge branch 'develop' into jae/literefactor4
8  .github/ISSUE_TEMPLATE/bug-report.md  vendored
@@ -1,9 +1,9 @@
---
name: Bug Report
about: Create a report to help us squash bugs!

---
<!--
Please fill in as much of the template below as you can.

Be ready for followup questions, and please respond in a timely
@@ -27,12 +27,16 @@ manner. We might ask you to provide additional logs and data (tendermint & app).
**What you expected to happen**:


**Have you tried the latest version**: yes/no

**How to reproduce it** (as minimally and precisely as possible):

**Logs (paste a small part showing an error (< 10 lines) or link a pastebin, gist, etc. containing more of the log file)**:

**Config (you can paste only the changes you've made)**:

**node command runtime flags**:

**`/dump_consensus_state` output for consensus bugs**

**Anything else we need to know**:

@@ -32,3 +32,5 @@ BUG FIXES:
- [common] Safely handle cases where atomic write files already exist [#2109](https://github.com/tendermint/tendermint/issues/2109)
- [privval] fix a deadline for accepting new connections in socket private
  validator.
- [p2p] Allow startup if a configured seed node's IP can't be resolved ([#1716](https://github.com/tendermint/tendermint/issues/1716))
- [node] Fully exit when CTRL-C is pressed even if consensus state panics [#2072](https://github.com/tendermint/tendermint/issues/2072)

13  Gopkg.lock  generated
@@ -15,7 +15,7 @@
  name = "github.com/btcsuite/btcd"
  packages = ["btcec"]
  pruneopts = "UT"
  revision = "cf05f92c3f815bbd5091ed6c73eff51f7b1945e8"
  revision = "f5e261fc9ec3437697fb31d8b38453c293204b29"

[[projects]]
  digest = "1:df684ed7fed3fb406ec421424aaf5fc9c63ccc2f428b25b842da78e634482e4b"
@@ -244,7 +244,7 @@

[[projects]]
  branch = "master"
  digest = "1:dad2e5a2153ee7a6c9ab8fc13673a16ee4fb64434a7da980965a3741b0c981a3"
  digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
@@ -418,14 +418,14 @@

[[projects]]
  branch = "master"
  digest = "1:8cf61f10625f94b618d574224a437fc22ca0f300a3bc03ecab23ab81d478e95c"
  digest = "1:bb0fe59917bdd5b89f49b9a8b26e5f465e325d9223b3a8e32254314bdf51e0f1"
  name = "golang.org/x/sys"
  packages = [
    "cpu",
    "unix",
  ]
  pruneopts = "UT"
  revision = "0ffbfd41fbef8ffcf9b62b0b0aa3a5873ed7a4fe"
  revision = "3dc4335d56c789b04b0ba99b7a37249d9b614314"

[[projects]]
  digest = "1:7509ba4347d1f8de6ae9be8818b0cd1abc3deeffe28aeaf4be6d4b6b5178d9ca"
@@ -451,11 +451,12 @@
  version = "v0.3.0"

[[projects]]
  digest = "1:cd018653a358d4b743a9d3bee89e825521f2ab2f2ec0770164bf7632d8d73ab7"
  branch = "master"
  digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
  pruneopts = "UT"
  revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
  revision = "daca94659cb50e9f37c1b834680f2e46358f10b0"

[[projects]]
  digest = "1:4515e3030c440845b046354fd5d57671238428b820deebce2e9dabb5cd3c51ac"

14  Gopkg.toml
@@ -72,20 +72,6 @@
## Some repos dont have releases.
## Pin to revision

## We can remove this one by updating protobuf to v1.1.0
## but then the grpc tests break with
#--- FAIL: TestBroadcastTx (0.01s)
#panic: message/group field common.KVPair:bytes without pointer [recovered]
# panic: message/group field common.KVPair:bytes without pointer
#
# ...
#
# github.com/tendermint/tendermint/rpc/grpc_test.TestBroadcastTx(0xc420a5ab40)
# /go/src/github.com/tendermint/tendermint/rpc/grpc/grpc_test.go:29 +0x141
[[override]]
name = "google.golang.org/genproto"
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"

[[override]]
name = "github.com/jmhodges/levigo"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"

@@ -561,9 +561,30 @@ func (cs *ConsensusState) newStep() {
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
// ConsensusState must be locked before any internal state is updated.
func (cs *ConsensusState) receiveRoutine(maxSteps int) {
	onExit := func(cs *ConsensusState) {
		// NOTE: the internalMsgQueue may have signed messages from our
		// priv_val that haven't hit the WAL, but its ok because
		// priv_val tracks LastSig

		// close wal now that we're done writing to it
		cs.wal.Stop()
		cs.wal.Wait()

		close(cs.done)
	}

	defer func() {
		if r := recover(); r != nil {
			cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
			// stop gracefully
			//
			// NOTE: We most probably shouldn't be running any further when there is
			// some unexpected panic. Some unknown error happened, and so we don't
			// know if that will result in the validator signing an invalid thing. It
			// might be worthwhile to explore a mechanism for manual resuming via
			// some console or secure RPC system, but for now, halting the chain upon
			// unexpected consensus bugs sounds like the better option.
			onExit(cs)
		}
	}()

@@ -596,16 +617,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
			// go to the next step
			cs.handleTimeout(ti, rs)
		case <-cs.Quit():

			// NOTE: the internalMsgQueue may have signed messages from our
			// priv_val that haven't hit the WAL, but its ok because
			// priv_val tracks LastSig

			// close wal now that we're done writing to it
			cs.wal.Stop()
			cs.wal.Wait()

			close(cs.done)
			onExit(cs)
			return
		}
	}

93  docs/architecture/adr-013-symmetric-crypto.md  Normal file
@@ -0,0 +1,93 @@
# ADR 013: Need for symmetric cryptography

## Context

We require symmetric ciphers to handle how we encrypt keys in the sdk,
and to potentially encrypt `priv_validator.json` in tendermint.

Currently we use AEAD's to support symmetric encryption,
which is great since we want data integrity in addition to privacy and authenticity.
We don't currently have a scenario where we want to encrypt without data integrity,
so it is fine to optimize our code to just use AEAD's.
Currently there is no way to switch out AEAD's easily; this ADR outlines a way
to easily swap these out.

### How do we encrypt with AEAD's

AEAD's typically require a nonce in addition to the key.
For the purposes we require symmetric cryptography for,
we need encryption to be stateless.
Because of this we use random nonces.
(Thus the AEAD must support random nonces.)

We currently construct a random nonce, and encrypt the data with it.
The returned value is `nonce || encrypted data`.
The limitation of this is that it does not provide a way to identify
which algorithm was used in encryption.
Consequently decryption with multiple algorithms is sub-optimal.
(You have to try them all.)

## Decision

We should create the following three functions in a new `crypto/encoding/symmetric` package:
```golang
func Encrypt(aead cipher.AEAD, plaintext []byte) (ciphertext []byte, err error)
func Decrypt(key []byte, ciphertext []byte) (plaintext []byte, err error)
func Register(aead cipher.AEAD, algo_name string, NewAead func(key []byte) (cipher.AEAD, error)) error
```

This allows you to specify the algorithm in encryption, but not have to specify
it in decryption.
This is intended for ease of use in downstream applications, in addition to people
looking at the file directly.
One downside is that for the encrypt function you must have already initialized an AEAD,
but I don't really see this as an issue.

If there is no error in encryption, Encrypt will return `algo_name || nonce || aead_ciphertext`.
`algo_name` should be length-prefixed, using standard varuint encoding.
This will be binary data, but that's not a problem considering the nonce and ciphertext are also binary.

This solution requires a mapping from AEAD type to name.
We can achieve this via reflection.
```golang
func getType(myvar interface{}) string {
	if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr {
		return "*" + t.Elem().Name()
	} else {
		return t.Name()
	}
}
```
Then we maintain a map from the name returned from `getType(aead)` to `algo_name`.

In decryption, we read the `algo_name`, and then instantiate a new AEAD with the key.
Then we call the AEAD's decrypt method on the provided nonce/ciphertext.

`Register` allows a downstream user to add their own desired AEAD to the symmetric package.
It will error if the AEAD name is already registered.
This prevents a malicious import from modifying / nullifying an AEAD at runtime.
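
For concreteness, here is a minimal sketch of how the proposed package could be wired together. The registry variables, helper names, and error messages below are illustrative assumptions for this sketch, not part of the proposal itself.

```golang
package symmetric

import (
	"crypto/cipher"
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"reflect"
)

// Illustrative registries (not specified by this ADR):
// nameFromType maps getType(aead) -> registered algo_name;
// constructors maps algo_name -> a function that rebuilds the AEAD from a key.
var (
	nameFromType = map[string]string{}
	constructors = map[string]func(key []byte) (cipher.AEAD, error){}
)

func getType(myvar interface{}) string {
	if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr {
		return "*" + t.Elem().Name()
	} else {
		return t.Name()
	}
}

// Register records an AEAD under algoName, erroring if the name is taken,
// so a malicious import cannot replace or nullify an existing algorithm.
func Register(aead cipher.AEAD, algoName string, newAEAD func(key []byte) (cipher.AEAD, error)) error {
	if _, exists := constructors[algoName]; exists {
		return fmt.Errorf("symmetric algorithm %q already registered", algoName)
	}
	nameFromType[getType(aead)] = algoName
	constructors[algoName] = newAEAD
	return nil
}

// Encrypt seals plaintext with a random nonce and returns
// algo_name || nonce || aead_ciphertext, with algo_name length-prefixed as a varuint.
func Encrypt(aead cipher.AEAD, plaintext []byte) ([]byte, error) {
	algoName, ok := nameFromType[getType(aead)]
	if !ok {
		return nil, errors.New("AEAD is not registered")
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	out := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(out, uint64(len(algoName)))
	out = append(out[:n], algoName...)
	out = append(out, nonce...)
	// Seal appends the AEAD ciphertext (and tag) to out and returns it.
	return aead.Seal(out, nonce, plaintext, nil), nil
}

// Decrypt reads the algo_name prefix, rebuilds the matching AEAD from key,
// and opens the nonce || ciphertext that follows.
func Decrypt(key []byte, ciphertext []byte) ([]byte, error) {
	nameLen, n := binary.Uvarint(ciphertext)
	if n <= 0 || uint64(len(ciphertext)-n) < nameLen {
		return nil, errors.New("malformed ciphertext")
	}
	name := string(ciphertext[n : n+int(nameLen)])
	newAEAD, ok := constructors[name]
	if !ok {
		return nil, fmt.Errorf("unknown symmetric algorithm %q", name)
	}
	aead, err := newAEAD(key)
	if err != nil {
		return nil, err
	}
	rest := ciphertext[n+int(nameLen):]
	if len(rest) < aead.NonceSize() {
		return nil, errors.New("ciphertext shorter than nonce")
	}
	return aead.Open(nil, rest[:aead.NonceSize()], rest[aead.NonceSize():], nil)
}
```

With this shape, a downstream application that registers, say, a ChaCha20-Poly1305 constructor under a short name gets decryption that picks the right AEAD from the prefix alone, while encryption still requires an already-initialized AEAD as described above.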

## Implementation strategy

The golang implementation of what is proposed is rather straightforward.
The concern is that we will break existing private keys if we just switch to this.
If this is concerning, we can make a simple script which doesn't require decoding privkeys,
for converting from the old format to the new one.

## Status

Proposed.

## Consequences

### Positive
* Allows us to support new AEAD's, in a way that makes decryption easier
* Allows downstream users to add their own AEAD

### Negative
* We will have to break all private keys stored on disk.
  They can be recovered using seed words, and upgrade scripts are simple.

### Neutral
* Caller has to instantiate the AEAD with the private key.
  However, it forces them to be aware of what signing algorithm they are using, which is a positive.
61  docs/architecture/adr-014-secp-malleability.md  Normal file
@@ -0,0 +1,61 @@
# ADR 014: Secp256k1 Signature Malleability

## Context

Secp256k1 has two layers of malleability.
The signer has a random nonce, and thus can produce many different valid signatures.
This ADR is not concerned with that.
The second layer of malleability basically allows one who is given a signature
to produce exactly one more valid signature for the same message from the same public key.
(They don't even have to know the message!)
The math behind this will be explained in the subsequent section.

Note that in many downstream applications, signatures will appear in a transaction, and therefore in the tx hash.
This means that if someone broadcasts a transaction with a secp256k1 signature, the signature can be altered into the other form by anyone in the p2p network.
Thus the tx hash will change, and this altered tx hash may be committed instead.
This breaks the assumption that you can broadcast a valid transaction and just wait for its hash to be included on chain.
One example is if you are broadcasting a tx in cosmos,
and you wait for it to appear on chain before incrementing your sequence number.
You may never increment your sequence number if a different tx hash got committed.
Removing this second layer of signature malleability concerns could ease downstream development.

### ECDSA context

Secp256k1 is ECDSA over a particular curve.
The signature is of the form `(r, s)`, where `s` is a field element.
(The particular field is `Z_n`, where the elliptic curve has order `n`.)
However `(r, -s)` is also a valid solution.
Note that anyone can negate a group element, and therefore can get this second signature.

## Decision

We can just distinguish a canonical form for the ECDSA signatures.
Then we require that all ECDSA signatures be in the form which we defined as canonical.
We reject signatures in non-canonical form.

A canonical form is rather easy to define and check.
It would just be the smaller of the two values for `s`, defined lexicographically.
This is a simple check: instead of checking `s < n`, check `s <= (n - 1)/2`.
An example of another cryptosystem using this
is the parity definition here https://github.com/zkcrypto/pairing/pull/30#issuecomment-372910663.

This is the same solution Ethereum has chosen for solving secp malleability.
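
As an illustration, the low-s check and normalization can be written in a few lines of Go; the function names below are hypothetical, and the hard-coded constant is the well-known secp256k1 group order `n`.

```golang
package main

import (
	"fmt"
	"math/big"
)

// secp256k1 group order n.
var curveOrder, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// halfOrder = (n - 1) / 2, the boundary of the canonical ("low-s") range.
var halfOrder = new(big.Int).Rsh(new(big.Int).Sub(curveOrder, big.NewInt(1)), 1)

// isCanonicalS reports whether s is in the canonical half, i.e. s <= (n - 1)/2.
func isCanonicalS(s *big.Int) bool {
	return s.Cmp(halfOrder) <= 0
}

// canonicalizeS maps s to its canonical form, negating it mod n when needed.
func canonicalizeS(s *big.Int) *big.Int {
	if isCanonicalS(s) {
		return new(big.Int).Set(s)
	}
	return new(big.Int).Sub(curveOrder, s)
}

func main() {
	// n - 1 is a valid field element but sits in the high half of the range.
	s, _ := new(big.Int).SetString(
		"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140", 16)
	fmt.Println(isCanonicalS(s))                // false
	fmt.Println(isCanonicalS(canonicalizeS(s))) // true: s was replaced by n - s
}
```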

## Proposed Implementation

Fork https://github.com/btcsuite/btcd, and just update the [parse sig method](https://github.com/btcsuite/btcd/blob/master/btcec/signature.go#195) and serialize functions to enforce our canonical form.

## Status

Proposed.

## Consequences

### Positive
* Lets us maintain the ability to expect a tx hash to appear in the blockchain.

### Negative
* More work in all future implementations (Though this is a very simple check)
* Requires us to maintain another fork

### Neutral
@@ -59,8 +59,8 @@ Use the canonical representation for signatures.
#### Secp256k1
There isn't a clear canonical representation here.
Signatures have two elements `r,s`.
We should encode these bytes as `r || s`, where `r` and `s` are both exactly
32 bytes long.
These bytes are encoded as `r || s`, where `r` and `s` are both exactly
32 bytes long, encoded big-endian.
This is basically Ethereum's encoding, but without the leading recovery bit.
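
For illustration, producing this fixed 64-byte `r || s` layout from big integers looks roughly like the following; the helper name is hypothetical.

```golang
package main

import (
	"fmt"
	"math/big"
)

// encodeSig packs r and s into the 64-byte r || s form:
// each value is left-padded with zeros to exactly 32 bytes, big-endian.
func encodeSig(r, s *big.Int) []byte {
	sig := make([]byte, 64)
	r.FillBytes(sig[:32])
	s.FillBytes(sig[32:])
	return sig
}

func main() {
	sig := encodeSig(big.NewInt(1), big.NewInt(2))
	fmt.Printf("%x\n", sig) // 31 zero bytes then 01, 31 zero bytes then 02
}
```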

## Status

@@ -35,18 +35,20 @@ const autoFileOpenDuration = 1000 * time.Millisecond
// Automatically closes and re-opens file for writing.
// This is useful for using a log file with the logrotate tool.
type AutoFile struct {
	ID     string
	Path   string
	ticker *time.Ticker
	mtx    sync.Mutex
	file   *os.File
	ID            string
	Path          string
	ticker        *time.Ticker
	tickerStopped chan struct{} // closed when ticker is stopped
	mtx           sync.Mutex
	file          *os.File
}

func OpenAutoFile(path string) (af *AutoFile, err error) {
	af = &AutoFile{
		ID:     cmn.RandStr(12) + ":" + path,
		Path:   path,
		ticker: time.NewTicker(autoFileOpenDuration),
		ID:            cmn.RandStr(12) + ":" + path,
		Path:          path,
		ticker:        time.NewTicker(autoFileOpenDuration),
		tickerStopped: make(chan struct{}),
	}
	if err = af.openFile(); err != nil {
		return
@@ -58,18 +60,18 @@ func OpenAutoFile(path string) (af *AutoFile, err error) {

func (af *AutoFile) Close() error {
	af.ticker.Stop()
	close(af.tickerStopped)
	err := af.closeFile()
	sighupWatchers.removeAutoFile(af)
	return err
}

func (af *AutoFile) processTicks() {
	for {
		_, ok := <-af.ticker.C
		if !ok {
			return // Done.
		}
		select {
		case <-af.ticker.C:
			af.closeFile()
		case <-af.tickerStopped:
			return
		}
	}

@@ -85,7 +85,6 @@ func OpenGroup(headPath string) (g *Group, err error) {
		Head:           head,
		headBuf:        bufio.NewWriterSize(head, 4096*10),
		Dir:            dir,
		ticker:         time.NewTicker(groupCheckDuration),
		headSizeLimit:  defaultHeadSizeLimit,
		totalSizeLimit: defaultTotalSizeLimit,
		minIndex:       0,
@@ -102,6 +101,7 @@ func OpenGroup(headPath string) (g *Group, err error) {
// OnStart implements Service by starting the goroutine that checks file and
// group limits.
func (g *Group) OnStart() error {
	g.ticker = time.NewTicker(groupCheckDuration)
	go g.processTicks()
	return nil
}
@@ -199,21 +199,15 @@ func (g *Group) Flush() error {
}

func (g *Group) processTicks() {
	for {
		_, ok := <-g.ticker.C
		if !ok {
			return // Done.
		}
		select {
		case <-g.ticker.C:
			g.checkHeadSizeLimit()
			g.checkTotalSizeLimit()
		case <-g.Quit():
			return
		}
	}

// NOTE: for testing
func (g *Group) stopTicker() {
	g.ticker.Stop()
}

// NOTE: this function is called manually in tests.
func (g *Group) checkHeadSizeLimit() {
	limit := g.HeadSizeLimit()

@@ -16,23 +16,25 @@ import (
	cmn "github.com/tendermint/tendermint/libs/common"
)

// NOTE: Returned group has ticker stopped
func createTestGroup(t *testing.T, headSizeLimit int64) *Group {
func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group {
	testID := cmn.RandStr(12)
	testDir := "_test_" + testID
	err := cmn.EnsureDir(testDir, 0700)
	require.NoError(t, err, "Error creating dir")

	headPath := testDir + "/myfile"
	g, err := OpenGroup(headPath)
	require.NoError(t, err, "Error opening Group")
	g.SetHeadSizeLimit(headSizeLimit)
	g.stopTicker()
	require.NotEqual(t, nil, g, "Failed to create Group")

	g.SetHeadSizeLimit(headSizeLimit)

	return g
}

func destroyTestGroup(t *testing.T, g *Group) {
	g.Close()

	err := os.RemoveAll(g.Dir)
	require.NoError(t, err, "Error removing test Group directory")
}
@@ -45,7 +47,7 @@ func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, tota
}

func TestCheckHeadSizeLimit(t *testing.T) {
	g := createTestGroup(t, 1000*1000)
	g := createTestGroupWithHeadSizeLimit(t, 1000*1000)

	// At first, there are no files.
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0)
@@ -107,7 +109,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
}

func TestSearch(t *testing.T) {
	g := createTestGroup(t, 10*1000)
	g := createTestGroupWithHeadSizeLimit(t, 10*1000)

	// Create some files in the group that have several INFO lines in them.
	// Try to put the INFO lines in various spots.
@@ -208,7 +210,7 @@ func TestSearch(t *testing.T) {
}

func TestRotateFile(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)
	g.WriteLine("Line 1")
	g.WriteLine("Line 2")
	g.WriteLine("Line 3")
@@ -238,7 +240,7 @@ func TestRotateFile(t *testing.T) {
}

func TestFindLast1(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	g.WriteLine("Line 1")
	g.WriteLine("Line 2")
@@ -262,7 +264,7 @@ func TestFindLast1(t *testing.T) {
}

func TestFindLast2(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	g.WriteLine("Line 1")
	g.WriteLine("Line 2")
@@ -286,7 +288,7 @@ func TestFindLast2(t *testing.T) {
}

func TestFindLast3(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	g.WriteLine("Line 1")
	g.WriteLine("# a")
@@ -310,7 +312,7 @@ func TestFindLast3(t *testing.T) {
}

func TestFindLast4(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	g.WriteLine("Line 1")
	g.WriteLine("Line 2")
@@ -332,7 +334,7 @@ func TestFindLast4(t *testing.T) {
}

func TestWrite(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	written := []byte("Medusa")
	g.Write(written)
@@ -353,7 +355,7 @@ func TestWrite(t *testing.T) {
// test that Read reads the required amount of bytes from all the files in the
// group and returns no error if n == size of the given slice.
func TestGroupReaderRead(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	professor := []byte("Professor Monster")
	g.Write(professor)
@@ -382,7 +384,7 @@ func TestGroupReaderRead(t *testing.T) {
// test that Read returns an error if number of bytes read < size of
// the given slice. Subsequent call should return 0, io.EOF.
func TestGroupReaderRead2(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	professor := []byte("Professor Monster")
	g.Write(professor)
@@ -413,7 +415,7 @@ func TestGroupReaderRead2(t *testing.T) {
}

func TestMinIndex(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning")

@@ -422,7 +424,7 @@ func TestMinIndex(t *testing.T) {
}

func TestMaxIndex(t *testing.T) {
	g := createTestGroup(t, 0)
	g := createTestGroupWithHeadSizeLimit(t, 0)

	assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning")

@@ -18,13 +18,19 @@ var sighupCounter int32 // For testing
func initSighupWatcher() {
	sighupWatchers = newSighupWatcher()

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)

	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt, syscall.SIGTERM)

	go func() {
		for range c {
		select {
		case <-hup:
			sighupWatchers.closeAll()
			atomic.AddInt32(&sighupCounter, 1)
		case <-quit:
			return
		}

	}()
}
@@ -45,6 +45,9 @@ type AddrBook interface {

	// Do we need more peers?
	NeedMoreAddrs() bool
	// Is Address Book Empty? Answer should not depend on being in your own
	// address book, or private peers
	Empty() bool

	// Pick an address to dial
	PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress
@@ -223,6 +226,12 @@ func (a *addrBook) NeedMoreAddrs() bool {
	return a.Size() < needAddressThreshold
}

// Empty implements AddrBook - returns true if there are no addresses in the address book.
// Does not count the peer appearing in its own address book, or private peers.
func (a *addrBook) Empty() bool {
	return a.Size() == 0
}

// PickAddress implements AddrBook. It picks an address to connect to.
// The address is picked randomly from an old or new bucket according
// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
@@ -496,7 +505,6 @@ out:
	}
	saveFileTicker.Stop()
	a.saveToFile(a.filePath)
	a.Logger.Info("Address handler done")
}

//----------------------------------------------------------

@@ -7,6 +7,8 @@ import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/stretchr/testify/assert"

	cmn "github.com/tendermint/tendermint/libs/common"
@@ -355,6 +357,40 @@ func TestAddrBookHasAddress(t *testing.T) {
	assert.False(t, book.HasAddress(addr))
}

func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) {
	addrs := make([]*p2p.NetAddress, numAddrs)
	for i := 0; i < numAddrs; i++ {
		addrs[i] = randIPv4Address(t)
	}

	private := make([]string, numAddrs)
	for i, addr := range addrs {
		private[i] = string(addr.ID)
	}
	return addrs, private
}

func TestAddrBookEmpty(t *testing.T) {
	fname := createTempFileName("addrbook_test")
	defer deleteTempFile(fname)

	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	// Check that empty book is empty
	require.True(t, book.Empty())
	// Check that book with our address is empty
	book.AddOurAddress(randIPv4Address(t))
	require.True(t, book.Empty())
	// Check that book with private addrs is empty
	_, privateIds := testCreatePrivateAddrs(t, 5)
	book.AddPrivateIDs(privateIds)
	require.True(t, book.Empty())

	// Check that book with address is not empty
	book.AddAddress(randIPv4Address(t), randIPv4Address(t))
	require.False(t, book.Empty())
}

func TestPrivatePeers(t *testing.T) {
	fname := createTempFileName("addrbook_test")
	defer deleteTempFile(fname)
@@ -362,15 +398,7 @@ func TestPrivatePeers(t *testing.T) {
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())

	addrs := make([]*p2p.NetAddress, 10)
	for i := 0; i < 10; i++ {
		addrs[i] = randIPv4Address(t)
	}

	private := make([]string, 10)
	for i, addr := range addrs {
		private[i] = string(addr.ID)
	}
	addrs, private := testCreatePrivateAddrs(t, 10)
	book.AddPrivateIDs(private)

	// private addrs must not be added

@@ -7,9 +7,10 @@ import (
	"sync"
	"time"

	"github.com/pkg/errors"

	amino "github.com/tendermint/go-amino"
	cmn "github.com/tendermint/tendermint/libs/common"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/conn"
)
@@ -120,11 +121,11 @@ func (r *PEXReactor) OnStart() error {
		return err
	}

	// return err if user provided a bad seed address
	// or a host name that we cant resolve
	seedAddrs, err := r.checkSeeds()
	numOnline, seedAddrs, err := r.checkSeeds()
	if err != nil {
		return err
	} else if numOnline == 0 && r.book.Empty() {
		return errors.New("Address book is empty, and could not connect to any seed nodes")
	}

	r.seedAddrs = seedAddrs
@@ -478,19 +479,27 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
	}
}

// check seed addresses are well formed
func (r *PEXReactor) checkSeeds() ([]*p2p.NetAddress, error) {
// checkSeeds checks that addresses are well formed.
// Returns number of seeds we can connect to, along with all seeds addrs.
// return err if user provided any badly formatted seed addresses.
// Doesn't error if the seed node can't be reached.
// numOnline returns -1 if no seed nodes were in the initial configuration.
func (r *PEXReactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) {
	lSeeds := len(r.config.Seeds)
	if lSeeds == 0 {
		return nil, nil
		return -1, nil, nil
	}
	netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds)
	numOnline = lSeeds - len(errs)
	for _, err := range errs {
		if err != nil {
			return nil, err
		switch e := err.(type) {
		case p2p.ErrNetAddressLookup:
			r.Logger.Error("Connecting to seed failed", "err", e)
		default:
			return 0, nil, errors.Wrap(e, "seed node configuration has error")
		}
	}
	return netAddrs, nil
	return
}

// randomly dial seeds until we connect to one or exhaust them

@@ -204,6 +204,45 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
	assert.False(t, sw.Peers().Has(peer.ID()))
}

func TestCheckSeeds(t *testing.T) {
	// directory to store address books
	dir, err := ioutil.TempDir("", "pex_reactor")
	require.Nil(t, err)
	defer os.RemoveAll(dir) // nolint: errcheck

	// 1. test creating peer with no seeds works
	peer := testCreateDefaultPeer(dir, 0)
	require.Nil(t, peer.Start())
	peer.Stop()

	// 2. create seed
	seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{})

	// 3. test create peer with online seed works
	peer = testCreatePeerWithSeed(dir, 2, seed)
	require.Nil(t, peer.Start())
	peer.Stop()

	// 4. test create peer with all seeds having unresolvable DNS fails
	badPeerConfig := &PEXReactorConfig{
		Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
			"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"},
	}
	peer = testCreatePeerWithConfig(dir, 2, badPeerConfig)
	require.Error(t, peer.Start())
	peer.Stop()

	// 5. test create peer with one good seed address succeeds
	badPeerConfig = &PEXReactorConfig{
		Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
			"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657",
			seed.NodeInfo().NetAddress().String()},
	}
	peer = testCreatePeerWithConfig(dir, 2, badPeerConfig)
	require.Nil(t, peer.Start())
	peer.Stop()
}

func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
	// directory to store address books
	dir, err := ioutil.TempDir("", "pex_reactor")
@@ -231,30 +270,7 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) {
	defer os.RemoveAll(dir) // nolint: errcheck

	// 1. create peer
	peer := p2p.MakeSwitch(
		cfg,
		1,
		"127.0.0.1",
		"123.123.123",
		func(i int, sw *p2p.Switch) *p2p.Switch {
			book := NewAddrBook(filepath.Join(dir, "addrbook1.json"), false)
			book.SetLogger(log.TestingLogger())
			sw.SetAddrBook(book)

			sw.SetLogger(log.TestingLogger())

			r := NewPEXReactor(
				book,
				&PEXReactorConfig{},
			)
			r.SetLogger(log.TestingLogger())
			sw.AddReactor("pex", r)
			return sw
		},
	)
	peer.AddListener(
		p2p.NewDefaultListener("tcp://"+peer.NodeInfo().ListenAddr, "", false, log.TestingLogger()),
	)
	peer := testCreateDefaultPeer(dir, 1)
	require.Nil(t, peer.Start())
	defer peer.Stop()

@@ -435,6 +451,40 @@ func assertPeersWithTimeout(
	}
}

// Creates a peer with the provided config
func testCreatePeerWithConfig(dir string, id int, config *PEXReactorConfig) *p2p.Switch {
	peer := p2p.MakeSwitch(
		cfg,
		id,
		"127.0.0.1",
		"123.123.123",
		func(i int, sw *p2p.Switch) *p2p.Switch {
			book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false)
			book.SetLogger(log.TestingLogger())
			sw.SetAddrBook(book)

			sw.SetLogger(log.TestingLogger())

			r := NewPEXReactor(
				book,
				config,
			)
			r.SetLogger(log.TestingLogger())
			sw.AddReactor("pex", r)
			return sw
		},
	)
	peer.AddListener(
		p2p.NewDefaultListener("tcp://"+peer.NodeInfo().ListenAddr, "", false, log.TestingLogger()),
	)
	return peer
}

// Creates a peer with the default config
func testCreateDefaultPeer(dir string, id int) *p2p.Switch {
	return testCreatePeerWithConfig(dir, id, &PEXReactorConfig{})
}

// Creates a seed which knows about the provided addresses / source address pairs.
// Starting and stopping the seed is left to the caller
func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch {
@@ -469,33 +519,10 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress)
// Creates a peer which knows about the provided seed.
// Starting and stopping the peer is left to the caller
func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch {
	peer := p2p.MakeSwitch(
		cfg,
		id,
		"127.0.0.1",
		"123.123.123",
		func(i int, sw *p2p.Switch) *p2p.Switch {
			book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false)
			book.SetLogger(log.TestingLogger())
			sw.SetAddrBook(book)

			sw.SetLogger(log.TestingLogger())

			r := NewPEXReactor(
				book,
				&PEXReactorConfig{
					Seeds: []string{seed.NodeInfo().NetAddress().String()},
				},
			)
			r.SetLogger(log.TestingLogger())
			sw.AddReactor("pex", r)
			return sw
		},
	)
	peer.AddListener(
		p2p.NewDefaultListener("tcp://"+peer.NodeInfo().ListenAddr, "", false, log.TestingLogger()),
	)
	return peer
	conf := &PEXReactorConfig{
		Seeds: []string{seed.NodeInfo().NetAddress().String()},
	}
	return testCreatePeerWithConfig(dir, id, conf)
}

func createReactor(conf *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
