diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5dc06753..967fdef65 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,24 @@ BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness
+## 0.19.0 (April 13th, 2018)
+
+BREAKING:
+- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details)
+- [cmd] `show_node_id` now returns an error if there is no node key
+- [rpc] changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status)
+
+Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is
+serialized to disk or over the network.
+
+See github.com/tendermint/go-amino for details on the new format.
+
+See `scripts/wire2amino.go` for a tool to upgrade
+genesis/priv_validator/node_key JSON files.
+
+FEATURES:
+- [cmd] added `gen_node_key` command
+
## 0.18.0 (April 6th, 2018)
BREAKING:
diff --git a/Gopkg.lock b/Gopkg.lock
index 9461b319b..0701a1faa 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -159,7 +159,7 @@
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
- revision = "8732c616f52954686704c8645fe1a9d59e9df7c1"
+ revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
[[projects]]
name = "github.com/spf13/afero"
@@ -191,8 +191,8 @@
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
- revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
- version = "v1.0.0"
+ revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
+ version = "v1.0.1"
[[projects]]
name = "github.com/spf13/viper"
@@ -238,8 +238,8 @@
"server",
"types"
]
- revision = "46686763ba8ea595ede16530ed4a40fb38f49f94"
- version = "v0.10.2"
+ revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
+ version = "v0.10.3"
[[projects]]
branch = "master"
@@ -251,20 +251,22 @@
]
revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
+[[projects]]
+ name = "github.com/tendermint/go-amino"
+ packages = ["."]
+ revision = "42246108ff925a457fb709475070a03dfd3e2b5c"
+ version = "0.9.6"
+
[[projects]]
name = "github.com/tendermint/go-crypto"
packages = ["."]
- revision = "c3e19f3ea26f5c3357e0bcbb799b0761ef923755"
- version = "v0.5.0"
+ revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
+ version = "v0.6.2"
[[projects]]
name = "github.com/tendermint/go-wire"
- packages = [
- ".",
- "data"
- ]
+ packages = ["."]
revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c"
- source = "github.com/tendermint/go-amino"
version = "v0.7.3"
[[projects]]
@@ -283,8 +285,8 @@
"pubsub/query",
"test"
]
- revision = "2e24b64fc121dcdf1cabceab8dc2f7257675483c"
- version = "0.8.1"
+ revision = "97e1f1ad3f510048929a51475811a18686c894df"
+ version = "0.8.2-rc0"
[[projects]]
branch = "master"
@@ -299,7 +301,7 @@
"ripemd160",
"salsa20/salsa"
]
- revision = "b2aa35443fbc700ab74c586ae79b81c171851023"
+ revision = "d6449816ce06963d9d136eee5a56fca5b0616e7e"
[[projects]]
branch = "master"
@@ -313,13 +315,13 @@
"lex/httplex",
"trace"
]
- revision = "b3c676e531a6dc479fa1b35ac961c13f5e2b4d2e"
+ revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
- revision = "1d206c9fa8975fb4cf00df1dc8bf3283dc24ba0e"
+ revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"
[[projects]]
name = "golang.org/x/text"
@@ -346,7 +348,7 @@
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
- revision = "35de2414665fc36f56b72d982c5af480d86de5ab"
+ revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
[[projects]]
name = "google.golang.org/grpc"
@@ -381,6 +383,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "b7c02a311569ec5fe2197614444fb231ea60f3e65a11a20e318421f1752054d7"
+ inputs-digest = "e70f8692c825e80ae8510546e297840b9560d00e11b2272749a55cc2ffd147f0"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index f6846793d..8e65a41a3 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -26,12 +26,12 @@
[[constraint]]
- branch = "master"
name = "github.com/ebuchman/fail-test"
+ branch = "master"
[[constraint]]
- branch = "master"
name = "github.com/fortytw2/leaktest"
+ branch = "master"
[[constraint]]
name = "github.com/go-kit/kit"
@@ -54,8 +54,8 @@
version = "~0.8.0"
[[constraint]]
- branch = "master"
name = "github.com/rcrowley/go-metrics"
+ branch = "master"
[[constraint]]
name = "github.com/spf13/cobra"
@@ -71,22 +71,19 @@
[[constraint]]
name = "github.com/tendermint/abci"
- version = "~0.10.2"
+ version = "~0.10.3"
[[constraint]]
name = "github.com/tendermint/go-crypto"
- version = "~0.5.0"
+ version = "~0.6.2"
[[constraint]]
- name = "github.com/tendermint/go-wire"
- source = "github.com/tendermint/go-amino"
- version = "~0.7.3"
+ name = "github.com/tendermint/go-amino"
+ version = "~0.9.6"
-[[override]]
-# [[constraint]]
+[[constraint]]
name = "github.com/tendermint/tmlibs"
- version = "~0.8.1"
- # branch = "develop"
+ version = "~0.8.2-rc0"
[[constraint]]
name = "google.golang.org/grpc"
diff --git a/Makefile b/Makefile
index 3e51c834e..da94627d3 100644
--- a/Makefile
+++ b/Makefile
@@ -178,7 +178,25 @@ metalinter_all:
@echo "--> Running linter (all)"
gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
+###########################################################
+### Local testnet using docker
+
+# Build linux binary on other platforms
+build-linux:
+ GOOS=linux GOARCH=amd64 $(MAKE) build
+
+# Run a 4-node testnet locally
+docker-start:
+ @echo "Wait until 'Attaching to node0, node1, node2, node3' message appears"
+ @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v `pwd`/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
+ docker-compose up
+
+# Stop testnet
+docker-stop:
+ docker-compose down
+
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
-.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt
+.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux docker-start docker-stop
+
diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go
index 8ac62a247..d8e4bc82c 100644
--- a/benchmarks/codec_test.go
+++ b/benchmarks/codec_test.go
@@ -4,8 +4,8 @@ import (
"testing"
"time"
+ "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
- "github.com/tendermint/go-wire"
proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/p2p"
@@ -14,26 +14,35 @@ import (
func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer()
- pubKey := crypto.GenPrivKeyEd25519().PubKey()
+ cdc := amino.NewCodec()
+ ctypes.RegisterAmino(cdc)
+ nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
status := &ctypes.ResultStatus{
NodeInfo: p2p.NodeInfo{
- PubKey: pubKey,
+ ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
},
- PubKey: pubKey,
- LatestBlockHash: []byte("SOMEBYTES"),
- LatestBlockHeight: 123,
- LatestBlockTime: time.Unix(0, 1234),
+ SyncInfo: ctypes.SyncInfo{
+ LatestBlockHash: []byte("SOMEBYTES"),
+ LatestBlockHeight: 123,
+ LatestBlockTime: time.Unix(0, 1234),
+ },
+ ValidatorInfo: ctypes.ValidatorInfo{
+ PubKey: nodeKey.PubKey(),
+ },
}
b.StartTimer()
counter := 0
for i := 0; i < b.N; i++ {
- jsonBytes := wire.JSONBytes(status)
+ jsonBytes, err := cdc.MarshalJSON(status)
+ if err != nil {
+ panic(err)
+ }
counter += len(jsonBytes)
}
@@ -41,9 +50,11 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
func BenchmarkEncodeNodeInfoWire(b *testing.B) {
b.StopTimer()
- pubKey := crypto.GenPrivKeyEd25519().PubKey()
+ cdc := amino.NewCodec()
+ ctypes.RegisterAmino(cdc)
+ nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{
- PubKey: pubKey,
+ ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
@@ -54,16 +65,21 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
counter := 0
for i := 0; i < b.N; i++ {
- jsonBytes := wire.JSONBytes(nodeInfo)
+ jsonBytes, err := cdc.MarshalJSON(nodeInfo)
+ if err != nil {
+ panic(err)
+ }
counter += len(jsonBytes)
}
}
func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
b.StopTimer()
- pubKey := crypto.GenPrivKeyEd25519().PubKey()
+ cdc := amino.NewCodec()
+ ctypes.RegisterAmino(cdc)
+ nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{
- PubKey: pubKey,
+ ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
@@ -74,7 +90,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
counter := 0
for i := 0; i < b.N; i++ {
- jsonBytes := wire.BinaryBytes(nodeInfo)
+ jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo)
counter += len(jsonBytes)
}
@@ -82,15 +98,20 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
func BenchmarkEncodeNodeInfoProto(b *testing.B) {
b.StopTimer()
- pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
- pubKey2 := &proto.PubKey{Ed25519: &proto.PubKeyEd25519{Bytes: pubKey[:]}}
+ nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
+ nodeID := string(nodeKey.ID())
+ someName := "SOMENAME"
+ someAddr := "SOMEADDR"
+ someVer := "SOMEVER"
+ someString := "SOMESTRING"
+ otherString := "OTHERSTRING"
nodeInfo := proto.NodeInfo{
- PubKey: pubKey2,
- Moniker: "SOMENAME",
- Network: "SOMENAME",
- ListenAddr: "SOMEADDR",
- Version: "SOMEVER",
- Other: []string{"SOMESTRING", "OTHERSTRING"},
+ Id: &proto.ID{Id: &nodeID},
+ Moniker: &someName,
+ Network: &someName,
+ ListenAddr: &someAddr,
+ Version: &someVer,
+ Other: []string{someString, otherString},
}
b.StartTimer()
diff --git a/benchmarks/proto/test.pb.go b/benchmarks/proto/test.pb.go
index dc21a2a82..d430eeb08 100644
--- a/benchmarks/proto/test.pb.go
+++ b/benchmarks/proto/test.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: test.proto
-// DO NOT EDIT!
/*
Package test is a generated protocol buffer package.
@@ -11,6 +10,7 @@
It has these top-level messages:
ResultStatus
NodeInfo
+ ID
PubKey
PubKeyEd25519
*/
@@ -20,11 +20,6 @@ import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
-import bytes "bytes"
-
-import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
@@ -32,16 +27,25 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
type ResultStatus struct {
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo" json:"nodeInfo,omitempty"`
PubKey *PubKey `protobuf:"bytes,2,req,name=pubKey" json:"pubKey,omitempty"`
- LatestBlockHash []byte `protobuf:"bytes,3,req,name=latestBlockHash" json:"latestBlockHash"`
- LatestBlockHeight int64 `protobuf:"varint,4,req,name=latestBlockHeight" json:"latestBlockHeight"`
- LatestBlocktime int64 `protobuf:"varint,5,req,name=latestBlocktime" json:"latestBlocktime"`
+ LatestBlockHash []byte `protobuf:"bytes,3,req,name=latestBlockHash" json:"latestBlockHash,omitempty"`
+ LatestBlockHeight *int64 `protobuf:"varint,4,req,name=latestBlockHeight" json:"latestBlockHeight,omitempty"`
+ LatestBlocktime *int64 `protobuf:"varint,5,req,name=latestBlocktime" json:"latestBlocktime,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
}
-func (m *ResultStatus) Reset() { *m = ResultStatus{} }
-func (*ResultStatus) ProtoMessage() {}
+func (m *ResultStatus) Reset() { *m = ResultStatus{} }
+func (m *ResultStatus) String() string { return proto.CompactTextString(m) }
+func (*ResultStatus) ProtoMessage() {}
+func (*ResultStatus) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{0} }
func (m *ResultStatus) GetNodeInfo() *NodeInfo {
if m != nil {
@@ -65,70 +69,73 @@ func (m *ResultStatus) GetLatestBlockHash() []byte {
}
func (m *ResultStatus) GetLatestBlockHeight() int64 {
- if m != nil {
- return m.LatestBlockHeight
+ if m != nil && m.LatestBlockHeight != nil {
+ return *m.LatestBlockHeight
}
return 0
}
func (m *ResultStatus) GetLatestBlocktime() int64 {
- if m != nil {
- return m.LatestBlocktime
+ if m != nil && m.LatestBlocktime != nil {
+ return *m.LatestBlocktime
}
return 0
}
type NodeInfo struct {
- PubKey *PubKey `protobuf:"bytes,1,req,name=pubKey" json:"pubKey,omitempty"`
- Moniker string `protobuf:"bytes,2,req,name=moniker" json:"moniker"`
- Network string `protobuf:"bytes,3,req,name=network" json:"network"`
- RemoteAddr string `protobuf:"bytes,4,req,name=remoteAddr" json:"remoteAddr"`
- ListenAddr string `protobuf:"bytes,5,req,name=listenAddr" json:"listenAddr"`
- Version string `protobuf:"bytes,6,req,name=version" json:"version"`
- Other []string `protobuf:"bytes,7,rep,name=other" json:"other,omitempty"`
+ Id *ID `protobuf:"bytes,1,req,name=id" json:"id,omitempty"`
+ Moniker *string `protobuf:"bytes,2,req,name=moniker" json:"moniker,omitempty"`
+ Network *string `protobuf:"bytes,3,req,name=network" json:"network,omitempty"`
+ RemoteAddr *string `protobuf:"bytes,4,req,name=remoteAddr" json:"remoteAddr,omitempty"`
+ ListenAddr *string `protobuf:"bytes,5,req,name=listenAddr" json:"listenAddr,omitempty"`
+ Version *string `protobuf:"bytes,6,req,name=version" json:"version,omitempty"`
+ Other []string `protobuf:"bytes,7,rep,name=other" json:"other,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
}
-func (m *NodeInfo) Reset() { *m = NodeInfo{} }
-func (*NodeInfo) ProtoMessage() {}
+func (m *NodeInfo) Reset() { *m = NodeInfo{} }
+func (m *NodeInfo) String() string { return proto.CompactTextString(m) }
+func (*NodeInfo) ProtoMessage() {}
+func (*NodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{1} }
-func (m *NodeInfo) GetPubKey() *PubKey {
+func (m *NodeInfo) GetId() *ID {
if m != nil {
- return m.PubKey
+ return m.Id
}
return nil
}
func (m *NodeInfo) GetMoniker() string {
- if m != nil {
- return m.Moniker
+ if m != nil && m.Moniker != nil {
+ return *m.Moniker
}
return ""
}
func (m *NodeInfo) GetNetwork() string {
- if m != nil {
- return m.Network
+ if m != nil && m.Network != nil {
+ return *m.Network
}
return ""
}
func (m *NodeInfo) GetRemoteAddr() string {
- if m != nil {
- return m.RemoteAddr
+ if m != nil && m.RemoteAddr != nil {
+ return *m.RemoteAddr
}
return ""
}
func (m *NodeInfo) GetListenAddr() string {
- if m != nil {
- return m.ListenAddr
+ if m != nil && m.ListenAddr != nil {
+ return *m.ListenAddr
}
return ""
}
func (m *NodeInfo) GetVersion() string {
- if m != nil {
- return m.Version
+ if m != nil && m.Version != nil {
+ return *m.Version
}
return ""
}
@@ -140,12 +147,32 @@ func (m *NodeInfo) GetOther() []string {
return nil
}
-type PubKey struct {
- Ed25519 *PubKeyEd25519 `protobuf:"bytes,1,opt,name=ed25519" json:"ed25519,omitempty"`
+type ID struct {
+ Id *string `protobuf:"bytes,1,req,name=id" json:"id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
}
-func (m *PubKey) Reset() { *m = PubKey{} }
-func (*PubKey) ProtoMessage() {}
+func (m *ID) Reset() { *m = ID{} }
+func (m *ID) String() string { return proto.CompactTextString(m) }
+func (*ID) ProtoMessage() {}
+func (*ID) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{2} }
+
+func (m *ID) GetId() string {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return ""
+}
+
+type PubKey struct {
+ Ed25519 *PubKeyEd25519 `protobuf:"bytes,1,opt,name=ed25519" json:"ed25519,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PubKey) Reset() { *m = PubKey{} }
+func (m *PubKey) String() string { return proto.CompactTextString(m) }
+func (*PubKey) ProtoMessage() {}
+func (*PubKey) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{3} }
func (m *PubKey) GetEd25519() *PubKeyEd25519 {
if m != nil {
@@ -155,11 +182,14 @@ func (m *PubKey) GetEd25519() *PubKeyEd25519 {
}
type PubKeyEd25519 struct {
- Bytes []byte `protobuf:"bytes,1,req,name=bytes" json:"bytes"`
+ Bytes []byte `protobuf:"bytes,1,req,name=bytes" json:"bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
}
-func (m *PubKeyEd25519) Reset() { *m = PubKeyEd25519{} }
-func (*PubKeyEd25519) ProtoMessage() {}
+func (m *PubKeyEd25519) Reset() { *m = PubKeyEd25519{} }
+func (m *PubKeyEd25519) String() string { return proto.CompactTextString(m) }
+func (*PubKeyEd25519) ProtoMessage() {}
+func (*PubKeyEd25519) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{4} }
func (m *PubKeyEd25519) GetBytes() []byte {
if m != nil {
@@ -171,400 +201,259 @@ func (m *PubKeyEd25519) GetBytes() []byte {
func init() {
proto.RegisterType((*ResultStatus)(nil), "ResultStatus")
proto.RegisterType((*NodeInfo)(nil), "NodeInfo")
+ proto.RegisterType((*ID)(nil), "ID")
proto.RegisterType((*PubKey)(nil), "PubKey")
proto.RegisterType((*PubKeyEd25519)(nil), "PubKeyEd25519")
}
-func (this *ResultStatus) Equal(that interface{}) bool {
- if that == nil {
- if this == nil {
- return true
- }
- return false
- }
-
- that1, ok := that.(*ResultStatus)
- if !ok {
- that2, ok := that.(ResultStatus)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- if this == nil {
- return true
- }
- return false
- } else if this == nil {
- return false
- }
- if !this.NodeInfo.Equal(that1.NodeInfo) {
- return false
- }
- if !this.PubKey.Equal(that1.PubKey) {
- return false
- }
- if !bytes.Equal(this.LatestBlockHash, that1.LatestBlockHash) {
- return false
- }
- if this.LatestBlockHeight != that1.LatestBlockHeight {
- return false
- }
- if this.LatestBlocktime != that1.LatestBlocktime {
- return false
- }
- return true
-}
-func (this *NodeInfo) Equal(that interface{}) bool {
- if that == nil {
- if this == nil {
- return true
- }
- return false
- }
-
- that1, ok := that.(*NodeInfo)
- if !ok {
- that2, ok := that.(NodeInfo)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- if this == nil {
- return true
- }
- return false
- } else if this == nil {
- return false
- }
- if !this.PubKey.Equal(that1.PubKey) {
- return false
- }
- if this.Moniker != that1.Moniker {
- return false
- }
- if this.Network != that1.Network {
- return false
- }
- if this.RemoteAddr != that1.RemoteAddr {
- return false
- }
- if this.ListenAddr != that1.ListenAddr {
- return false
- }
- if this.Version != that1.Version {
- return false
- }
- if len(this.Other) != len(that1.Other) {
- return false
- }
- for i := range this.Other {
- if this.Other[i] != that1.Other[i] {
- return false
- }
- }
- return true
-}
-func (this *PubKey) Equal(that interface{}) bool {
- if that == nil {
- if this == nil {
- return true
- }
- return false
- }
-
- that1, ok := that.(*PubKey)
- if !ok {
- that2, ok := that.(PubKey)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- if this == nil {
- return true
- }
- return false
- } else if this == nil {
- return false
- }
- if !this.Ed25519.Equal(that1.Ed25519) {
- return false
- }
- return true
-}
-func (this *PubKeyEd25519) Equal(that interface{}) bool {
- if that == nil {
- if this == nil {
- return true
- }
- return false
- }
-
- that1, ok := that.(*PubKeyEd25519)
- if !ok {
- that2, ok := that.(PubKeyEd25519)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- if this == nil {
- return true
- }
- return false
- } else if this == nil {
- return false
- }
- if !bytes.Equal(this.Bytes, that1.Bytes) {
- return false
- }
- return true
-}
-func (this *ResultStatus) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 9)
- s = append(s, "&test.ResultStatus{")
- if this.NodeInfo != nil {
- s = append(s, "NodeInfo: "+fmt.Sprintf("%#v", this.NodeInfo)+",\n")
- }
- if this.PubKey != nil {
- s = append(s, "PubKey: "+fmt.Sprintf("%#v", this.PubKey)+",\n")
- }
- s = append(s, "LatestBlockHash: "+fmt.Sprintf("%#v", this.LatestBlockHash)+",\n")
- s = append(s, "LatestBlockHeight: "+fmt.Sprintf("%#v", this.LatestBlockHeight)+",\n")
- s = append(s, "LatestBlocktime: "+fmt.Sprintf("%#v", this.LatestBlocktime)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *NodeInfo) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 11)
- s = append(s, "&test.NodeInfo{")
- if this.PubKey != nil {
- s = append(s, "PubKey: "+fmt.Sprintf("%#v", this.PubKey)+",\n")
- }
- s = append(s, "Moniker: "+fmt.Sprintf("%#v", this.Moniker)+",\n")
- s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n")
- s = append(s, "RemoteAddr: "+fmt.Sprintf("%#v", this.RemoteAddr)+",\n")
- s = append(s, "ListenAddr: "+fmt.Sprintf("%#v", this.ListenAddr)+",\n")
- s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n")
- if this.Other != nil {
- s = append(s, "Other: "+fmt.Sprintf("%#v", this.Other)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *PubKey) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&test.PubKey{")
- if this.Ed25519 != nil {
- s = append(s, "Ed25519: "+fmt.Sprintf("%#v", this.Ed25519)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *PubKeyEd25519) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&test.PubKeyEd25519{")
- s = append(s, "Bytes: "+fmt.Sprintf("%#v", this.Bytes)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (m *ResultStatus) Marshal() (data []byte, err error) {
+func (m *ResultStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
- return data[:n], nil
+ return dAtA[:n], nil
}
-func (m *ResultStatus) MarshalTo(data []byte) (int, error) {
+func (m *ResultStatus) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.NodeInfo != nil {
- data[i] = 0xa
+ dAtA[i] = 0xa
i++
- i = encodeVarintTest(data, i, uint64(m.NodeInfo.Size()))
- n1, err := m.NodeInfo.MarshalTo(data[i:])
+ i = encodeVarintTest(dAtA, i, uint64(m.NodeInfo.Size()))
+ n1, err := m.NodeInfo.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.PubKey == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("pubKey")
+ return 0, proto.NewRequiredNotSetError("pubKey")
} else {
- data[i] = 0x12
+ dAtA[i] = 0x12
i++
- i = encodeVarintTest(data, i, uint64(m.PubKey.Size()))
- n2, err := m.PubKey.MarshalTo(data[i:])
+ i = encodeVarintTest(dAtA, i, uint64(m.PubKey.Size()))
+ n2, err := m.PubKey.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
- if m.LatestBlockHash != nil {
- data[i] = 0x1a
+ if m.LatestBlockHash == nil {
+ return 0, proto.NewRequiredNotSetError("latestBlockHash")
+ } else {
+ dAtA[i] = 0x1a
i++
- i = encodeVarintTest(data, i, uint64(len(m.LatestBlockHash)))
- i += copy(data[i:], m.LatestBlockHash)
+ i = encodeVarintTest(dAtA, i, uint64(len(m.LatestBlockHash)))
+ i += copy(dAtA[i:], m.LatestBlockHash)
+ }
+ if m.LatestBlockHeight == nil {
+ return 0, proto.NewRequiredNotSetError("latestBlockHeight")
+ } else {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(*m.LatestBlockHeight))
+ }
+ if m.LatestBlocktime == nil {
+ return 0, proto.NewRequiredNotSetError("latestBlocktime")
+ } else {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(*m.LatestBlocktime))
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
}
- data[i] = 0x20
- i++
- i = encodeVarintTest(data, i, uint64(m.LatestBlockHeight))
- data[i] = 0x28
- i++
- i = encodeVarintTest(data, i, uint64(m.LatestBlocktime))
return i, nil
}
-func (m *NodeInfo) Marshal() (data []byte, err error) {
+func (m *NodeInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
- return data[:n], nil
+ return dAtA[:n], nil
}
-func (m *NodeInfo) MarshalTo(data []byte) (int, error) {
+func (m *NodeInfo) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
- if m.PubKey == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("pubKey")
+ if m.Id == nil {
+ return 0, proto.NewRequiredNotSetError("id")
} else {
- data[i] = 0xa
+ dAtA[i] = 0xa
i++
- i = encodeVarintTest(data, i, uint64(m.PubKey.Size()))
- n3, err := m.PubKey.MarshalTo(data[i:])
+ i = encodeVarintTest(dAtA, i, uint64(m.Id.Size()))
+ n3, err := m.Id.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n3
}
- data[i] = 0x12
- i++
- i = encodeVarintTest(data, i, uint64(len(m.Moniker)))
- i += copy(data[i:], m.Moniker)
- data[i] = 0x1a
- i++
- i = encodeVarintTest(data, i, uint64(len(m.Network)))
- i += copy(data[i:], m.Network)
- data[i] = 0x22
- i++
- i = encodeVarintTest(data, i, uint64(len(m.RemoteAddr)))
- i += copy(data[i:], m.RemoteAddr)
- data[i] = 0x2a
- i++
- i = encodeVarintTest(data, i, uint64(len(m.ListenAddr)))
- i += copy(data[i:], m.ListenAddr)
- data[i] = 0x32
- i++
- i = encodeVarintTest(data, i, uint64(len(m.Version)))
- i += copy(data[i:], m.Version)
+ if m.Moniker == nil {
+ return 0, proto.NewRequiredNotSetError("moniker")
+ } else {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(len(*m.Moniker)))
+ i += copy(dAtA[i:], *m.Moniker)
+ }
+ if m.Network == nil {
+ return 0, proto.NewRequiredNotSetError("network")
+ } else {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(len(*m.Network)))
+ i += copy(dAtA[i:], *m.Network)
+ }
+ if m.RemoteAddr == nil {
+ return 0, proto.NewRequiredNotSetError("remoteAddr")
+ } else {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(len(*m.RemoteAddr)))
+ i += copy(dAtA[i:], *m.RemoteAddr)
+ }
+ if m.ListenAddr == nil {
+ return 0, proto.NewRequiredNotSetError("listenAddr")
+ } else {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(len(*m.ListenAddr)))
+ i += copy(dAtA[i:], *m.ListenAddr)
+ }
+ if m.Version == nil {
+ return 0, proto.NewRequiredNotSetError("version")
+ } else {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(len(*m.Version)))
+ i += copy(dAtA[i:], *m.Version)
+ }
if len(m.Other) > 0 {
for _, s := range m.Other {
- data[i] = 0x3a
+ dAtA[i] = 0x3a
i++
l = len(s)
for l >= 1<<7 {
- data[i] = uint8(uint64(l)&0x7f | 0x80)
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
- data[i] = uint8(l)
+ dAtA[i] = uint8(l)
i++
- i += copy(data[i:], s)
+ i += copy(dAtA[i:], s)
}
}
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
return i, nil
}
-func (m *PubKey) Marshal() (data []byte, err error) {
+func (m *ID) Marshal() (dAtA []byte, err error) {
size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
- return data[:n], nil
+ return dAtA[:n], nil
}
-func (m *PubKey) MarshalTo(data []byte) (int, error) {
+func (m *ID) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Id == nil {
+ return 0, proto.NewRequiredNotSetError("id")
+ } else {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintTest(dAtA, i, uint64(len(*m.Id)))
+ i += copy(dAtA[i:], *m.Id)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *PubKey) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PubKey) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Ed25519 != nil {
- data[i] = 0xa
+ dAtA[i] = 0xa
i++
- i = encodeVarintTest(data, i, uint64(m.Ed25519.Size()))
- n4, err := m.Ed25519.MarshalTo(data[i:])
+ i = encodeVarintTest(dAtA, i, uint64(m.Ed25519.Size()))
+ n4, err := m.Ed25519.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n4
}
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
return i, nil
}
-func (m *PubKeyEd25519) Marshal() (data []byte, err error) {
+func (m *PubKeyEd25519) Marshal() (dAtA []byte, err error) {
size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
- return data[:n], nil
+ return dAtA[:n], nil
}
-func (m *PubKeyEd25519) MarshalTo(data []byte) (int, error) {
+func (m *PubKeyEd25519) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
- if m.Bytes != nil {
- data[i] = 0xa
+ if m.Bytes == nil {
+ return 0, proto.NewRequiredNotSetError("bytes")
+ } else {
+ dAtA[i] = 0xa
i++
- i = encodeVarintTest(data, i, uint64(len(m.Bytes)))
- i += copy(data[i:], m.Bytes)
+ i = encodeVarintTest(dAtA, i, uint64(len(m.Bytes)))
+ i += copy(dAtA[i:], m.Bytes)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
-func encodeVarintTest(data []byte, offset int, v uint64) int {
+func encodeVarintTest(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
- data[offset] = uint8(v&0x7f | 0x80)
+ dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
- data[offset] = uint8(v)
+ dAtA[offset] = uint8(v)
return offset + 1
}
func (m *ResultStatus) Size() (n int) {
@@ -582,34 +471,67 @@ func (m *ResultStatus) Size() (n int) {
l = len(m.LatestBlockHash)
n += 1 + l + sovTest(uint64(l))
}
- n += 1 + sovTest(uint64(m.LatestBlockHeight))
- n += 1 + sovTest(uint64(m.LatestBlocktime))
+ if m.LatestBlockHeight != nil {
+ n += 1 + sovTest(uint64(*m.LatestBlockHeight))
+ }
+ if m.LatestBlocktime != nil {
+ n += 1 + sovTest(uint64(*m.LatestBlocktime))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
return n
}
func (m *NodeInfo) Size() (n int) {
var l int
_ = l
- if m.PubKey != nil {
- l = m.PubKey.Size()
+ if m.Id != nil {
+ l = m.Id.Size()
+ n += 1 + l + sovTest(uint64(l))
+ }
+ if m.Moniker != nil {
+ l = len(*m.Moniker)
+ n += 1 + l + sovTest(uint64(l))
+ }
+ if m.Network != nil {
+ l = len(*m.Network)
+ n += 1 + l + sovTest(uint64(l))
+ }
+ if m.RemoteAddr != nil {
+ l = len(*m.RemoteAddr)
+ n += 1 + l + sovTest(uint64(l))
+ }
+ if m.ListenAddr != nil {
+ l = len(*m.ListenAddr)
+ n += 1 + l + sovTest(uint64(l))
+ }
+ if m.Version != nil {
+ l = len(*m.Version)
n += 1 + l + sovTest(uint64(l))
}
- l = len(m.Moniker)
- n += 1 + l + sovTest(uint64(l))
- l = len(m.Network)
- n += 1 + l + sovTest(uint64(l))
- l = len(m.RemoteAddr)
- n += 1 + l + sovTest(uint64(l))
- l = len(m.ListenAddr)
- n += 1 + l + sovTest(uint64(l))
- l = len(m.Version)
- n += 1 + l + sovTest(uint64(l))
if len(m.Other) > 0 {
for _, s := range m.Other {
l = len(s)
n += 1 + l + sovTest(uint64(l))
}
}
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ID) Size() (n int) {
+ var l int
+ _ = l
+ if m.Id != nil {
+ l = len(*m.Id)
+ n += 1 + l + sovTest(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
return n
}
@@ -620,6 +542,9 @@ func (m *PubKey) Size() (n int) {
l = m.Ed25519.Size()
n += 1 + l + sovTest(uint64(l))
}
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
return n
}
@@ -630,6 +555,9 @@ func (m *PubKeyEd25519) Size() (n int) {
l = len(m.Bytes)
n += 1 + l + sovTest(uint64(l))
}
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
return n
}
@@ -643,59 +571,10 @@ func sovTest(x uint64) (n int) {
}
return n
}
-func (this *ResultStatus) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResultStatus{`,
- `NodeInfo:` + strings.Replace(fmt.Sprintf("%v", this.NodeInfo), "NodeInfo", "NodeInfo", 1) + `,`,
- `PubKey:` + strings.Replace(fmt.Sprintf("%v", this.PubKey), "PubKey", "PubKey", 1) + `,`,
- `LatestBlockHash:` + fmt.Sprintf("%v", this.LatestBlockHash) + `,`,
- `LatestBlockHeight:` + fmt.Sprintf("%v", this.LatestBlockHeight) + `,`,
- `LatestBlocktime:` + fmt.Sprintf("%v", this.LatestBlocktime) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *NodeInfo) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&NodeInfo{`,
- `PubKey:` + strings.Replace(fmt.Sprintf("%v", this.PubKey), "PubKey", "PubKey", 1) + `,`,
- `Moniker:` + fmt.Sprintf("%v", this.Moniker) + `,`,
- `Network:` + fmt.Sprintf("%v", this.Network) + `,`,
- `RemoteAddr:` + fmt.Sprintf("%v", this.RemoteAddr) + `,`,
- `ListenAddr:` + fmt.Sprintf("%v", this.ListenAddr) + `,`,
- `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
- `Other:` + fmt.Sprintf("%v", this.Other) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PubKey) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&PubKey{`,
- `Ed25519:` + strings.Replace(fmt.Sprintf("%v", this.Ed25519), "PubKeyEd25519", "PubKeyEd25519", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PubKeyEd25519) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&PubKeyEd25519{`,
- `Bytes:` + fmt.Sprintf("%v", this.Bytes) + `,`,
- `}`,
- }, "")
- return s
-}
-func (m *ResultStatus) Unmarshal(data []byte) error {
+
+func (m *ResultStatus) Unmarshal(dAtA []byte) error {
var hasFields [1]uint64
- l := len(data)
+ l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -707,7 +586,7 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -735,7 +614,7 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -752,7 +631,7 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if m.NodeInfo == nil {
m.NodeInfo = &NodeInfo{}
}
- if err := m.NodeInfo.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -768,7 +647,7 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -785,7 +664,7 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if m.PubKey == nil {
m.PubKey = &PubKey{}
}
- if err := m.PubKey.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -802,7 +681,7 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -816,14 +695,17 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.LatestBlockHash = append([]byte{}, data[iNdEx:postIndex]...)
+ m.LatestBlockHash = append(m.LatestBlockHash[:0], dAtA[iNdEx:postIndex]...)
+ if m.LatestBlockHash == nil {
+ m.LatestBlockHash = []byte{}
+ }
iNdEx = postIndex
hasFields[0] |= uint64(0x00000002)
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHeight", wireType)
}
- m.LatestBlockHeight = 0
+ var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTest
@@ -831,19 +713,20 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
- m.LatestBlockHeight |= (int64(b) & 0x7F) << shift
+ v |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
+ m.LatestBlockHeight = &v
hasFields[0] |= uint64(0x00000004)
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field LatestBlocktime", wireType)
}
- m.LatestBlocktime = 0
+ var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTest
@@ -851,17 +734,18 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
- m.LatestBlocktime |= (int64(b) & 0x7F) << shift
+ v |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
+ m.LatestBlocktime = &v
hasFields[0] |= uint64(0x00000008)
default:
iNdEx = preIndex
- skippy, err := skipTest(data[iNdEx:])
+ skippy, err := skipTest(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -871,20 +755,21 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("pubKey")
+ return proto.NewRequiredNotSetError("pubKey")
}
if hasFields[0]&uint64(0x00000002) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("latestBlockHash")
+ return proto.NewRequiredNotSetError("latestBlockHash")
}
if hasFields[0]&uint64(0x00000004) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("latestBlockHeight")
+ return proto.NewRequiredNotSetError("latestBlockHeight")
}
if hasFields[0]&uint64(0x00000008) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("latestBlocktime")
+ return proto.NewRequiredNotSetError("latestBlocktime")
}
if iNdEx > l {
@@ -892,9 +777,9 @@ func (m *ResultStatus) Unmarshal(data []byte) error {
}
return nil
}
-func (m *NodeInfo) Unmarshal(data []byte) error {
+func (m *NodeInfo) Unmarshal(dAtA []byte) error {
var hasFields [1]uint64
- l := len(data)
+ l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -906,7 +791,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -924,7 +809,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -934,7 +819,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -948,10 +833,10 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.PubKey == nil {
- m.PubKey = &PubKey{}
+ if m.Id == nil {
+ m.Id = &ID{}
}
- if err := m.PubKey.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -968,7 +853,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -983,7 +868,8 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Moniker = string(data[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.Moniker = &s
iNdEx = postIndex
hasFields[0] |= uint64(0x00000002)
case 3:
@@ -998,7 +884,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1013,7 +899,8 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Network = string(data[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.Network = &s
iNdEx = postIndex
hasFields[0] |= uint64(0x00000004)
case 4:
@@ -1028,7 +915,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1043,7 +930,8 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.RemoteAddr = string(data[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.RemoteAddr = &s
iNdEx = postIndex
hasFields[0] |= uint64(0x00000008)
case 5:
@@ -1058,7 +946,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1073,7 +961,8 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ListenAddr = string(data[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.ListenAddr = &s
iNdEx = postIndex
hasFields[0] |= uint64(0x00000010)
case 6:
@@ -1088,7 +977,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1103,7 +992,8 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Version = string(data[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.Version = &s
iNdEx = postIndex
hasFields[0] |= uint64(0x00000020)
case 7:
@@ -1118,7 +1008,7 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1133,11 +1023,11 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Other = append(m.Other, string(data[iNdEx:postIndex]))
+ m.Other = append(m.Other, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
- skippy, err := skipTest(data[iNdEx:])
+ skippy, err := skipTest(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -1147,26 +1037,27 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("pubKey")
+ return proto.NewRequiredNotSetError("id")
}
if hasFields[0]&uint64(0x00000002) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("moniker")
+ return proto.NewRequiredNotSetError("moniker")
}
if hasFields[0]&uint64(0x00000004) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("network")
+ return proto.NewRequiredNotSetError("network")
}
if hasFields[0]&uint64(0x00000008) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("remoteAddr")
+ return proto.NewRequiredNotSetError("remoteAddr")
}
if hasFields[0]&uint64(0x00000010) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("listenAddr")
+ return proto.NewRequiredNotSetError("listenAddr")
}
if hasFields[0]&uint64(0x00000020) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("version")
+ return proto.NewRequiredNotSetError("version")
}
if iNdEx > l {
@@ -1174,8 +1065,9 @@ func (m *NodeInfo) Unmarshal(data []byte) error {
}
return nil
}
-func (m *PubKey) Unmarshal(data []byte) error {
- l := len(data)
+func (m *ID) Unmarshal(dAtA []byte) error {
+ var hasFields [1]uint64
+ l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -1187,7 +1079,92 @@ func (m *PubKey) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ID: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTest
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTest
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Id = &s
+ iNdEx = postIndex
+ hasFields[0] |= uint64(0x00000001)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTest(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTest
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+ if hasFields[0]&uint64(0x00000001) == 0 {
+ return proto.NewRequiredNotSetError("id")
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PubKey) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTest
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1215,7 +1192,7 @@ func (m *PubKey) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -1232,13 +1209,13 @@ func (m *PubKey) Unmarshal(data []byte) error {
if m.Ed25519 == nil {
m.Ed25519 = &PubKeyEd25519{}
}
- if err := m.Ed25519.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ if err := m.Ed25519.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
- skippy, err := skipTest(data[iNdEx:])
+ skippy, err := skipTest(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -1248,6 +1225,7 @@ func (m *PubKey) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1257,9 +1235,9 @@ func (m *PubKey) Unmarshal(data []byte) error {
}
return nil
}
-func (m *PubKeyEd25519) Unmarshal(data []byte) error {
+func (m *PubKeyEd25519) Unmarshal(dAtA []byte) error {
var hasFields [1]uint64
- l := len(data)
+ l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@@ -1271,7 +1249,7 @@ func (m *PubKeyEd25519) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1299,7 +1277,7 @@ func (m *PubKeyEd25519) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -1313,12 +1291,15 @@ func (m *PubKeyEd25519) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Bytes = append([]byte{}, data[iNdEx:postIndex]...)
+ m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...)
+ if m.Bytes == nil {
+ m.Bytes = []byte{}
+ }
iNdEx = postIndex
hasFields[0] |= uint64(0x00000001)
default:
iNdEx = preIndex
- skippy, err := skipTest(data[iNdEx:])
+ skippy, err := skipTest(dAtA[iNdEx:])
if err != nil {
return err
}
@@ -1328,11 +1309,12 @@ func (m *PubKeyEd25519) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("bytes")
+ return proto.NewRequiredNotSetError("bytes")
}
if iNdEx > l {
@@ -1340,8 +1322,8 @@ func (m *PubKeyEd25519) Unmarshal(data []byte) error {
}
return nil
}
-func skipTest(data []byte) (n int, err error) {
- l := len(data)
+func skipTest(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
@@ -1352,7 +1334,7 @@ func skipTest(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1370,7 +1352,7 @@ func skipTest(data []byte) (n int, err error) {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
- if data[iNdEx-1] < 0x80 {
+ if dAtA[iNdEx-1] < 0x80 {
break
}
}
@@ -1387,7 +1369,7 @@ func skipTest(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
@@ -1410,7 +1392,7 @@ func skipTest(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
- b := data[iNdEx]
+ b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@@ -1421,7 +1403,7 @@ func skipTest(data []byte) (n int, err error) {
if innerWireType == 4 {
break
}
- next, err := skipTest(data[start:])
+ next, err := skipTest(dAtA[start:])
if err != nil {
return 0, err
}
@@ -1444,3 +1426,31 @@ var (
ErrInvalidLengthTest = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTest = fmt.Errorf("proto: integer overflow")
)
+
+func init() { proto.RegisterFile("test.proto", fileDescriptorTest) }
+
+var fileDescriptorTest = []byte{
+ // 342 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x41, 0x4b, 0xf3, 0x30,
+ 0x1c, 0xc6, 0x49, 0xfb, 0x6e, 0x5d, 0xff, 0xeb, 0x3b, 0x31, 0xee, 0x90, 0x53, 0x2d, 0x85, 0x41,
+ 0x0f, 0x52, 0xb0, 0xb0, 0x83, 0x47, 0xc7, 0x04, 0x87, 0x20, 0x23, 0x7e, 0x82, 0xcd, 0xfe, 0x75,
+ 0x65, 0x5d, 0x33, 0x92, 0x4c, 0xd9, 0xe7, 0xf3, 0xe2, 0xd1, 0xa3, 0x47, 0xd9, 0x27, 0x91, 0xa6,
+ 0xed, 0x9c, 0xf3, 0xf8, 0xfc, 0x7e, 0xe5, 0xc9, 0xd3, 0x04, 0x40, 0xa3, 0xd2, 0xf1, 0x5a, 0x0a,
+ 0x2d, 0xc2, 0x4f, 0x02, 0x1e, 0x47, 0xb5, 0xc9, 0xf5, 0x83, 0x9e, 0xe9, 0x8d, 0xa2, 0x03, 0xe8,
+ 0x14, 0x22, 0xc5, 0x49, 0xf1, 0x24, 0x18, 0x09, 0x48, 0xd4, 0x4d, 0xdc, 0xf8, 0xbe, 0x06, 0x7c,
+ 0xaf, 0xe8, 0x39, 0xb4, 0xd7, 0x9b, 0xf9, 0x1d, 0x6e, 0x99, 0x15, 0x58, 0x51, 0x37, 0x71, 0xe2,
+ 0xa9, 0x89, 0xbc, 0xc6, 0x34, 0x82, 0x93, 0x7c, 0x56, 0x1e, 0x34, 0xca, 0xc5, 0xe3, 0xf2, 0x76,
+ 0xa6, 0x16, 0xcc, 0x0e, 0xac, 0xc8, 0xe3, 0xc7, 0x98, 0x5e, 0xc0, 0xe9, 0x21, 0xc2, 0xec, 0x79,
+ 0xa1, 0xd9, 0xbf, 0xc0, 0x8a, 0x6c, 0xfe, 0x57, 0x1c, 0xf5, 0xea, 0x6c, 0x85, 0xac, 0x65, 0xbe,
+ 0x3d, 0xc6, 0xe1, 0x1b, 0x81, 0x4e, 0xb3, 0x9c, 0x9e, 0x81, 0x95, 0xa5, 0x8c, 0x98, 0xad, 0x76,
+ 0x3c, 0x19, 0x73, 0x2b, 0x4b, 0x29, 0x03, 0x67, 0x25, 0x8a, 0x6c, 0x89, 0xd2, 0xfc, 0x85, 0xcb,
+ 0x9b, 0x58, 0x9a, 0x02, 0xf5, 0xab, 0x90, 0x4b, 0xb3, 0xda, 0xe5, 0x4d, 0xa4, 0x3e, 0x80, 0xc4,
+ 0x95, 0xd0, 0x78, 0x9d, 0xa6, 0xd2, 0xcc, 0x74, 0xf9, 0x01, 0x29, 0x7d, 0x9e, 0x29, 0x8d, 0x85,
+ 0xf1, 0xad, 0xca, 0xff, 0x90, 0xb2, 0xf9, 0x05, 0xa5, 0xca, 0x44, 0xc1, 0xda, 0x55, 0x73, 0x1d,
+ 0x69, 0x1f, 0x5a, 0x42, 0x2f, 0x50, 0x32, 0x27, 0xb0, 0x23, 0x97, 0x57, 0x21, 0xec, 0x83, 0x35,
+ 0x19, 0xd3, 0xde, 0x7e, 0xbe, 0x5b, 0x2e, 0x0f, 0x13, 0x68, 0x4f, 0x9b, 0x7b, 0x76, 0x30, 0x4d,
+ 0x86, 0xc3, 0xcb, 0xab, 0xfa, 0xb9, 0x7a, 0xf5, 0x4b, 0xdc, 0x54, 0x94, 0x37, 0x3a, 0x1c, 0xc0,
+ 0xff, 0x5f, 0xa6, 0x3c, 0x70, 0xbe, 0xd5, 0xa8, 0x4c, 0xaf, 0xc7, 0xab, 0x30, 0xf2, 0xde, 0x77,
+ 0x3e, 0xf9, 0xd8, 0xf9, 0xe4, 0x6b, 0xe7, 0x93, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xee,
+ 0x6b, 0xdd, 0x2c, 0x02, 0x00, 0x00,
+}
diff --git a/benchmarks/proto/test.proto b/benchmarks/proto/test.proto
index d1a1c94d7..6d770d98a 100644
--- a/benchmarks/proto/test.proto
+++ b/benchmarks/proto/test.proto
@@ -7,7 +7,7 @@ message ResultStatus {
}
message NodeInfo {
- required PubKey pubKey = 1;
+ required ID id = 1;
required string moniker = 2;
required string network = 3;
required string remoteAddr = 4;
@@ -16,6 +16,10 @@ message NodeInfo {
repeated string other = 7;
}
+message ID {
+ required string id = 1;
+}
+
message PubKey {
optional PubKeyEd25519 ed25519 = 1;
}
diff --git a/blockchain/reactor.go b/blockchain/reactor.go
index 3c25eed2f..33dfdd288 100644
--- a/blockchain/reactor.go
+++ b/blockchain/reactor.go
@@ -1,21 +1,16 @@
package blockchain
import (
- "bytes"
- "errors"
"fmt"
"reflect"
- "sync"
"time"
- wire "github.com/tendermint/go-wire"
-
- cmn "github.com/tendermint/tmlibs/common"
- "github.com/tendermint/tmlibs/log"
-
+ "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
+ cmn "github.com/tendermint/tmlibs/common"
+ "github.com/tendermint/tmlibs/log"
)
const (
@@ -31,6 +26,13 @@ const (
statusUpdateIntervalSeconds = 10
// check if we should switch to consensus reactor
switchToConsensusIntervalSeconds = 1
+
+ // NOTE: keep up to date with bcBlockResponseMessage
+ bcBlockResponseMessagePrefixSize = 4
+ bcBlockResponseMessageFieldKeySize = 1
+ maxMsgSize = types.MaxBlockSizeBytes +
+ bcBlockResponseMessagePrefixSize +
+ bcBlockResponseMessageFieldKeySize
)
type consensusReactor interface {
@@ -52,9 +54,6 @@ func (e peerError) Error() string {
type BlockchainReactor struct {
p2p.BaseReactor
- mtx sync.Mutex
- params types.ConsensusParams
-
// immutable
initialState sm.State
@@ -87,7 +86,6 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
)
bcR := &BlockchainReactor{
- params: state.ConsensusParams,
initialState: state,
blockExec: blockExec,
store: store,
@@ -131,17 +129,19 @@ func (bcR *BlockchainReactor) OnStop() {
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
- ID: BlockchainChannel,
- Priority: 10,
- SendQueueCapacity: 1000,
+ ID: BlockchainChannel,
+ Priority: 10,
+ SendQueueCapacity: 1000,
+ RecvBufferCapacity: 50 * 4096,
+ RecvMessageCapacity: maxMsgSize,
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
- if !peer.Send(BlockchainChannel,
- struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
+ msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
+ if !peer.Send(BlockchainChannel, msgBytes) {
// doing nothing, will try later in `poolRoutine`
}
// peer is added to the pool once we receive the first
@@ -162,20 +162,19 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
- msg := &bcBlockResponseMessage{Block: block}
- return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
+ msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
+ return src.TrySend(BlockchainChannel, msgBytes)
}
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
- return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{
- &bcNoBlockResponseMessage{Height: msg.Height},
- })
+ msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
+ return src.TrySend(BlockchainChannel, msgBytes)
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
- _, msg, err := DecodeMessage(msgBytes, bcR.maxMsgSize())
+ msg, err := DecodeMessage(msgBytes)
if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
bcR.Switch.StopPeerForError(src, err)
@@ -194,8 +193,8 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
case *bcStatusRequestMessage:
// Send peer our state.
- queued := src.TrySend(BlockchainChannel,
- struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
+ msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
+ queued := src.TrySend(BlockchainChannel, msgBytes)
if !queued {
// sorry
}
@@ -207,21 +206,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
}
}
-// maxMsgSize returns the maximum allowable size of a
-// message on the blockchain reactor.
-func (bcR *BlockchainReactor) maxMsgSize() int {
- bcR.mtx.Lock()
- defer bcR.mtx.Unlock()
- return bcR.params.BlockSize.MaxBytes + 2
-}
-
-// updateConsensusParams updates the internal consensus params
-func (bcR *BlockchainReactor) updateConsensusParams(params types.ConsensusParams) {
- bcR.mtx.Lock()
- defer bcR.mtx.Unlock()
- bcR.params = params
-}
-
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
@@ -247,8 +231,8 @@ FOR_LOOP:
if peer == nil {
continue FOR_LOOP // Peer has since been disconnected.
}
- msg := &bcBlockRequestMessage{request.Height}
- queued := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
+ msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
+ queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
// We couldn't make the request, send-queue full.
// The pool handles timeouts, just let it go.
@@ -321,9 +305,6 @@ FOR_LOOP:
}
blocksSynced++
- // update the consensus params
- bcR.updateConsensusParams(state.ConsensusParams)
-
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
@@ -341,43 +322,36 @@ FOR_LOOP:
// BroadcastStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
- bcR.Switch.Broadcast(BlockchainChannel,
- struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}})
+ msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
+ bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
return nil
}
//-----------------------------------------------------------------------------
// Messages
-const (
- msgTypeBlockRequest = byte(0x10)
- msgTypeBlockResponse = byte(0x11)
- msgTypeNoBlockResponse = byte(0x12)
- msgTypeStatusResponse = byte(0x20)
- msgTypeStatusRequest = byte(0x21)
-)
-
// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface{}
-var _ = wire.RegisterInterface(
- struct{ BlockchainMessage }{},
- wire.ConcreteType{&bcBlockRequestMessage{}, msgTypeBlockRequest},
- wire.ConcreteType{&bcBlockResponseMessage{}, msgTypeBlockResponse},
- wire.ConcreteType{&bcNoBlockResponseMessage{}, msgTypeNoBlockResponse},
- wire.ConcreteType{&bcStatusResponseMessage{}, msgTypeStatusResponse},
- wire.ConcreteType{&bcStatusRequestMessage{}, msgTypeStatusRequest},
-)
+func RegisterBlockchainMessages(cdc *amino.Codec) {
+ cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
+ cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
+ cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
+ cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
+ cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
+ cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
+}
// DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read.
-func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage, err error) {
- msgType = bz[0]
- n := int(0)
- r := bytes.NewReader(bz)
- msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
- if err != nil && n != len(bz) {
- err = errors.New("DecodeMessage() had bytes left over")
+func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
+ if len(bz) > maxMsgSize {
+ return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+ len(bz), maxMsgSize)
+ }
+ err = cdc.UnmarshalBinaryBare(bz, &msg)
+ if err != nil {
+ err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
}
return
}
@@ -402,7 +376,6 @@ func (brm *bcNoBlockResponseMessage) String() string {
//-------------------------------------
-// NOTE: keep up-to-date with maxBlockchainResponseSize
type bcBlockResponseMessage struct {
Block *types.Block
}
diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go
index 263ca0f05..63e3c72bb 100644
--- a/blockchain/reactor_test.go
+++ b/blockchain/reactor_test.go
@@ -3,8 +3,6 @@ package blockchain
import (
"testing"
- wire "github.com/tendermint/go-wire"
-
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
@@ -18,8 +16,15 @@ import (
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
- blockStore := NewBlockStore(dbm.NewMemDB())
- state, _ := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile())
+ // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
+ // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
+ blockDB := dbm.NewMemDB()
+ stateDB := dbm.NewMemDB()
+ blockStore := NewBlockStore(blockDB)
+ state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
+ if err != nil {
+ panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
+ }
return state, blockStore
}
@@ -76,10 +81,9 @@ func TestNoBlockResponse(t *testing.T) {
// wait for our response to be received on the peer
for _, tt := range tests {
reqBlockMsg := &bcBlockRequestMessage{tt.height}
- reqBlockBytes := wire.BinaryBytes(struct{ BlockchainMessage }{reqBlockMsg})
+ reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg)
bcr.Receive(chID, peer, reqBlockBytes)
- value := peer.lastValue()
- msg := value.(struct{ BlockchainMessage }).BlockchainMessage
+ msg := peer.lastBlockchainMessage()
if tt.existent {
if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok {
@@ -173,26 +177,30 @@ func newbcrTestPeer(id p2p.ID) *bcrTestPeer {
return bcr
}
-func (tp *bcrTestPeer) lastValue() interface{} { return <-tp.ch }
+func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch }
-func (tp *bcrTestPeer) TrySend(chID byte, value interface{}) bool {
- if _, ok := value.(struct{ BlockchainMessage }).
- BlockchainMessage.(*bcStatusResponseMessage); ok {
+func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool {
+ var msg BlockchainMessage
+ err := cdc.UnmarshalBinaryBare(msgBytes, &msg)
+ if err != nil {
+ panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage"))
+ }
+ if _, ok := msg.(*bcStatusResponseMessage); ok {
// Discard status response messages since they skew our results
// We only want to deal with:
// + bcBlockResponseMessage
// + bcNoBlockResponseMessage
} else {
- tp.ch <- value
+ tp.ch <- msg
}
return true
}
-func (tp *bcrTestPeer) Send(chID byte, data interface{}) bool { return tp.TrySend(chID, data) }
-func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
-func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
-func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }
-func (tp *bcrTestPeer) IsOutbound() bool { return false }
-func (tp *bcrTestPeer) IsPersistent() bool { return true }
-func (tp *bcrTestPeer) Get(s string) interface{} { return s }
-func (tp *bcrTestPeer) Set(string, interface{}) {}
+func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) }
+func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
+func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
+func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }
+func (tp *bcrTestPeer) IsOutbound() bool { return false }
+func (tp *bcrTestPeer) IsPersistent() bool { return true }
+func (tp *bcrTestPeer) Get(s string) interface{} { return s }
+func (tp *bcrTestPeer) Set(string, interface{}) {}
diff --git a/blockchain/store.go b/blockchain/store.go
index b949bc904..e7608b2cc 100644
--- a/blockchain/store.go
+++ b/blockchain/store.go
@@ -1,14 +1,9 @@
package blockchain
import (
- "bytes"
- "encoding/json"
"fmt"
- "io"
"sync"
- wire "github.com/tendermint/go-wire"
-
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
@@ -54,38 +49,25 @@ func (bs *BlockStore) Height() int64 {
return bs.height
}
-// GetReader returns the value associated with the given key wrapped in an io.Reader.
-// If no value is found, it returns nil.
-// It's mainly for use with wire.ReadBinary.
-func (bs *BlockStore) GetReader(key []byte) io.Reader {
- bytez := bs.db.Get(key)
- if bytez == nil {
- return nil
- }
- return bytes.NewReader(bytez)
-}
-
// LoadBlock returns the block with the given height.
// If no block is found for that height, it returns nil.
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
- var n int
- var err error
- r := bs.GetReader(calcBlockMetaKey(height))
- if r == nil {
+ var blockMeta = bs.LoadBlockMeta(height)
+ if blockMeta == nil {
return nil
}
- blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
- if err != nil {
- panic(fmt.Sprintf("Error reading block meta: %v", err))
- }
- bytez := []byte{}
+
+ var block = new(types.Block)
+ buf := []byte{}
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
part := bs.LoadBlockPart(height, i)
- bytez = append(bytez, part.Bytes...)
+ buf = append(buf, part.Bytes...)
}
- block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block)
+ err := cdc.UnmarshalBinary(buf, block)
if err != nil {
- panic(fmt.Sprintf("Error reading block: %v", err))
+ // NOTE: The existence of meta should imply the existence of the
+ // block. So, make sure meta is only saved after blocks are saved.
+ panic(cmn.ErrorWrap(err, "Error reading block"))
}
return block
}
@@ -94,15 +76,14 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
// from the block at the given height.
// If no part is found for the given height and index, it returns nil.
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
- var n int
- var err error
- r := bs.GetReader(calcBlockPartKey(height, index))
- if r == nil {
+ var part = new(types.Part)
+ bz := bs.db.Get(calcBlockPartKey(height, index))
+ if len(bz) == 0 {
return nil
}
- part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part)
+ err := cdc.UnmarshalBinaryBare(bz, part)
if err != nil {
- panic(fmt.Sprintf("Error reading block part: %v", err))
+ panic(cmn.ErrorWrap(err, "Error reading block part"))
}
return part
}
@@ -110,15 +91,14 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
// LoadBlockMeta returns the BlockMeta for the given height.
// If no block is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
- var n int
- var err error
- r := bs.GetReader(calcBlockMetaKey(height))
- if r == nil {
+ var blockMeta = new(types.BlockMeta)
+ bz := bs.db.Get(calcBlockMetaKey(height))
+ if len(bz) == 0 {
return nil
}
- blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
+ err := cdc.UnmarshalBinaryBare(bz, blockMeta)
if err != nil {
- panic(fmt.Sprintf("Error reading block meta: %v", err))
+ panic(cmn.ErrorWrap(err, "Error reading block meta"))
}
return blockMeta
}
@@ -128,15 +108,14 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
// and it comes from the block.LastCommit for `height+1`.
// If no commit is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
- var n int
- var err error
- r := bs.GetReader(calcBlockCommitKey(height))
- if r == nil {
+ var commit = new(types.Commit)
+ bz := bs.db.Get(calcBlockCommitKey(height))
+ if len(bz) == 0 {
return nil
}
- commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
+ err := cdc.UnmarshalBinaryBare(bz, commit)
if err != nil {
- panic(fmt.Sprintf("Error reading commit: %v", err))
+ panic(cmn.ErrorWrap(err, "Error reading block commit"))
}
return commit
}
@@ -145,15 +124,14 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
// This is useful when we've seen a commit, but there has not yet been
// a new block at `height + 1` that includes this commit in its block.LastCommit.
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
- var n int
- var err error
- r := bs.GetReader(calcSeenCommitKey(height))
- if r == nil {
+ var commit = new(types.Commit)
+ bz := bs.db.Get(calcSeenCommitKey(height))
+ if len(bz) == 0 {
return nil
}
- commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
+ err := cdc.UnmarshalBinaryBare(bz, commit)
if err != nil {
- panic(fmt.Sprintf("Error reading commit: %v", err))
+ panic(cmn.ErrorWrap(err, "Error reading block seen commit"))
}
return commit
}
@@ -178,21 +156,22 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
// Save block meta
blockMeta := types.NewBlockMeta(block, blockParts)
- metaBytes := wire.BinaryBytes(blockMeta)
+ metaBytes := cdc.MustMarshalBinaryBare(blockMeta)
bs.db.Set(calcBlockMetaKey(height), metaBytes)
// Save block parts
for i := 0; i < blockParts.Total(); i++ {
- bs.saveBlockPart(height, i, blockParts.GetPart(i))
+ part := blockParts.GetPart(i)
+ bs.saveBlockPart(height, i, part)
}
// Save block commit (duplicate and separate from the Block)
- blockCommitBytes := wire.BinaryBytes(block.LastCommit)
+ blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit)
bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes)
// Save seen commit (seen +2/3 precommits for block)
// NOTE: we can delete this at a later height
- seenCommitBytes := wire.BinaryBytes(seenCommit)
+ seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit)
bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
// Save new BlockStoreStateJSON descriptor
@@ -211,7 +190,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
- partBytes := wire.BinaryBytes(part)
+ partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
}
@@ -238,12 +217,12 @@ func calcSeenCommitKey(height int64) []byte {
var blockStoreKey = []byte("blockStore")
type BlockStoreStateJSON struct {
- Height int64
+ Height int64 `json:"height"`
}
// Save persists the blockStore state to the database as JSON.
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
- bytes, err := json.Marshal(bsj)
+ bytes, err := cdc.MarshalJSON(bsj)
if err != nil {
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
}
@@ -260,7 +239,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
}
}
bsj := BlockStoreStateJSON{}
- err := json.Unmarshal(bytes, &bsj)
+ err := cdc.UnmarshalJSON(bytes, &bsj)
if err != nil {
panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
}
diff --git a/blockchain/store_test.go b/blockchain/store_test.go
index 16185ca0d..a0d53e0cf 100644
--- a/blockchain/store_test.go
+++ b/blockchain/store_test.go
@@ -3,7 +3,6 @@ package blockchain
import (
"bytes"
"fmt"
- "io/ioutil"
"runtime/debug"
"strings"
"testing"
@@ -11,9 +10,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- wire "github.com/tendermint/go-wire"
-
"github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
@@ -35,7 +31,7 @@ func TestNewBlockStore(t *testing.T) {
db := db.NewMemDB()
db.Set(blockStoreKey, []byte(`{"height": 10000}`))
bs := NewBlockStore(db)
- assert.Equal(t, bs.Height(), int64(10000), "failed to properly parse blockstore")
+ require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore")
panicCausers := []struct {
data []byte
@@ -61,38 +57,6 @@ func TestNewBlockStore(t *testing.T) {
assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright")
}
-func TestBlockStoreGetReader(t *testing.T) {
- db := db.NewMemDB()
- // Initial setup
- db.Set([]byte("Foo"), []byte("Bar"))
- db.Set([]byte("Foo1"), nil)
-
- bs := NewBlockStore(db)
-
- tests := [...]struct {
- key []byte
- want []byte
- }{
- 0: {key: []byte("Foo"), want: []byte("Bar")},
- 1: {key: []byte("KnoxNonExistent"), want: nil},
- 2: {key: []byte("Foo1"), want: []byte{}},
- }
-
- for i, tt := range tests {
- r := bs.GetReader(tt.key)
- if r == nil {
- assert.Nil(t, tt.want, "#%d: expected a non-nil reader", i)
- continue
- }
- slurp, err := ioutil.ReadAll(r)
- if err != nil {
- t.Errorf("#%d: unexpected Read err: %v", i, err)
- } else {
- assert.Equal(t, slurp, tt.want, "#%d: mismatch", i)
- }
- }
-}
-
func freshBlockStore() (*BlockStore, db.DB) {
db := db.NewMemDB()
return NewBlockStore(db), db
@@ -189,14 +153,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
parts: validPartSet,
seenCommit: seenCommit1,
corruptCommitInDB: true, // Corrupt the DB's commit entry
- wantPanic: "rror reading commit",
+ wantPanic: "Error reading block commit",
},
{
block: newBlock(&header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
- wantPanic: "rror reading block",
+ wantPanic: "Error reading block",
corruptBlockInDB: true, // Corrupt the DB's block entry
},
@@ -215,7 +179,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
seenCommit: seenCommit1,
corruptSeenCommitInDB: true,
- wantPanic: "rror reading commit",
+ wantPanic: "Error reading block seen commit",
},
{
@@ -305,14 +269,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
}
}
-func binarySerializeIt(v interface{}) []byte {
- var n int
- var err error
- buf := new(bytes.Buffer)
- wire.WriteBinary(v, buf, &n, &err)
- return buf.Bytes()
-}
-
func TestLoadBlockPart(t *testing.T) {
bs, db := freshBlockStore()
height, index := int64(10), 1
@@ -334,7 +290,7 @@ func TestLoadBlockPart(t *testing.T) {
require.Contains(t, panicErr.Error(), "Error reading block part")
// 3. A good block serialized and saved to the DB should be retrievable
- db.Set(calcBlockPartKey(height, index), binarySerializeIt(part1))
+ db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1))
gotPart, _, panicErr := doFn(loadPart)
require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved block should return a proper block")
@@ -364,11 +320,11 @@ func TestLoadBlockMeta(t *testing.T) {
// 3. A good blockMeta serialized and saved to the DB should be retrievable
meta := &types.BlockMeta{}
- db.Set(calcBlockMetaKey(height), binarySerializeIt(meta))
+ db.Set(calcBlockMetaKey(height), cdc.MustMarshalBinaryBare(meta))
gotMeta, _, panicErr := doFn(loadMeta)
require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ")
- require.Equal(t, binarySerializeIt(meta), binarySerializeIt(gotMeta),
+ require.Equal(t, cdc.MustMarshalBinaryBare(meta), cdc.MustMarshalBinaryBare(gotMeta),
"expecting successful retrieval of previously saved blockMeta")
}
@@ -385,6 +341,9 @@ func TestBlockFetchAtHeight(t *testing.T) {
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
blockAtHeight := bs.LoadBlock(bs.Height())
+ bz1 := cdc.MustMarshalBinaryBare(block)
+ bz2 := cdc.MustMarshalBinaryBare(blockAtHeight)
+ require.Equal(t, bz1, bz2)
require.Equal(t, block.Hash(), blockAtHeight.Hash(),
"expecting a successful load of the last saved block")
diff --git a/blockchain/wire.go b/blockchain/wire.go
new file mode 100644
index 000000000..55b4e60ae
--- /dev/null
+++ b/blockchain/wire.go
@@ -0,0 +1,13 @@
+package blockchain
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ RegisterBlockchainMessages(cdc)
+ crypto.RegisterAmino(cdc)
+}
diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go
index 9f3ec73ca..6b10b8178 100644
--- a/cmd/priv_val_server/main.go
+++ b/cmd/priv_val_server/main.go
@@ -30,7 +30,7 @@ func main() {
"privPath", *privValPath,
)
- privVal := priv_val.LoadPrivValidatorJSON(*privValPath)
+ privVal := priv_val.LoadFilePV(*privValPath)
rs := priv_val.NewRemoteSigner(
logger,
diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go
new file mode 100644
index 000000000..4990be477
--- /dev/null
+++ b/cmd/tendermint/commands/gen_node_key.go
@@ -0,0 +1,32 @@
+package commands
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/tendermint/tendermint/p2p"
+ cmn "github.com/tendermint/tmlibs/common"
+)
+
+// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
+// the standard output.
+var GenNodeKeyCmd = &cobra.Command{
+ Use: "gen_node_key",
+ Short: "Generate a node key for this node and print its ID",
+ RunE: genNodeKey,
+}
+
+func genNodeKey(cmd *cobra.Command, args []string) error {
+ nodeKeyFile := config.NodeKeyFile()
+ if cmn.FileExists(nodeKeyFile) {
+ return fmt.Errorf("node key at %s already exists", nodeKeyFile)
+ }
+
+ nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile)
+ if err != nil {
+ return err
+ }
+ fmt.Println(nodeKey.ID())
+ return nil
+}
diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go
index 59fe30128..ff0d97d76 100644
--- a/cmd/tendermint/commands/gen_validator.go
+++ b/cmd/tendermint/commands/gen_validator.go
@@ -1,12 +1,11 @@
package commands
import (
- "encoding/json"
"fmt"
"github.com/spf13/cobra"
- "github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
)
// GenValidatorCmd allows the generation of a keypair for a
@@ -18,11 +17,11 @@ var GenValidatorCmd = &cobra.Command{
}
func genValidator(cmd *cobra.Command, args []string) {
- privValidator := types.GenPrivValidatorFS("")
- privValidatorJSONBytes, err := json.MarshalIndent(privValidator, "", "\t")
+ pv := pvm.GenFilePV("")
+ jsbz, err := cdc.MarshalJSON(pv)
if err != nil {
panic(err)
}
fmt.Printf(`%v
-`, string(privValidatorJSONBytes))
+`, string(jsbz))
}
diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go
index 70648f8ff..32559fa5c 100644
--- a/cmd/tendermint/commands/init.go
+++ b/cmd/tendermint/commands/init.go
@@ -3,7 +3,10 @@ package commands
import (
"github.com/spf13/cobra"
+ cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -11,22 +14,36 @@ import (
var InitFilesCmd = &cobra.Command{
Use: "init",
Short: "Initialize Tendermint",
- Run: initFiles,
+ RunE: initFiles,
}
-func initFiles(cmd *cobra.Command, args []string) {
+func initFiles(cmd *cobra.Command, args []string) error {
+ return initFilesWithConfig(config)
+}
+
+func initFilesWithConfig(config *cfg.Config) error {
// private validator
privValFile := config.PrivValidatorFile()
- var privValidator *types.PrivValidatorFS
+ var pv *pvm.FilePV
if cmn.FileExists(privValFile) {
- privValidator = types.LoadPrivValidatorFS(privValFile)
+ pv = pvm.LoadFilePV(privValFile)
logger.Info("Found private validator", "path", privValFile)
} else {
- privValidator = types.GenPrivValidatorFS(privValFile)
- privValidator.Save()
+ pv = pvm.GenFilePV(privValFile)
+ pv.Save()
logger.Info("Generated private validator", "path", privValFile)
}
+ nodeKeyFile := config.NodeKeyFile()
+ if cmn.FileExists(nodeKeyFile) {
+ logger.Info("Found node key", "path", nodeKeyFile)
+ } else {
+ if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
+ return err
+ }
+ logger.Info("Generated node key", "path", nodeKeyFile)
+ }
+
// genesis file
genFile := config.GenesisFile()
if cmn.FileExists(genFile) {
@@ -36,13 +53,15 @@ func initFiles(cmd *cobra.Command, args []string) {
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
}
genDoc.Validators = []types.GenesisValidator{{
- PubKey: privValidator.GetPubKey(),
+ PubKey: pv.GetPubKey(),
Power: 10,
}}
if err := genDoc.SaveAs(genFile); err != nil {
- panic(err)
+ return err
}
logger.Info("Generated genesis file", "path", genFile)
}
+
+ return nil
}
diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go
index a37bc3602..35c3c354d 100644
--- a/cmd/tendermint/commands/probe_upnp.go
+++ b/cmd/tendermint/commands/probe_upnp.go
@@ -1,7 +1,6 @@
package commands
import (
- "encoding/json"
"fmt"
"github.com/spf13/cobra"
@@ -22,7 +21,7 @@ func probeUpnp(cmd *cobra.Command, args []string) error {
fmt.Println("Probe failed: ", err)
} else {
fmt.Println("Probe success!")
- jsonBytes, err := json.Marshal(capabilities)
+ jsonBytes, err := cdc.MarshalJSON(capabilities)
if err != nil {
return err
}
diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go
index 513365237..78db87ded 100644
--- a/cmd/tendermint/commands/reset_priv_validator.go
+++ b/cmd/tendermint/commands/reset_priv_validator.go
@@ -5,7 +5,7 @@ import (
"github.com/spf13/cobra"
- "github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tmlibs/log"
)
@@ -27,7 +27,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
// ResetAll removes the privValidator files.
// Exported so other CLI tools can use it.
func ResetAll(dbDir, privValFile string, logger log.Logger) {
- resetPrivValidatorFS(privValFile, logger)
+ resetFilePV(privValFile, logger)
if err := os.RemoveAll(dbDir); err != nil {
logger.Error("Error removing directory", "err", err)
return
@@ -44,18 +44,18 @@ func resetAll(cmd *cobra.Command, args []string) {
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
- resetPrivValidatorFS(config.PrivValidatorFile(), logger)
+ resetFilePV(config.PrivValidatorFile(), logger)
}
-func resetPrivValidatorFS(privValFile string, logger log.Logger) {
+func resetFilePV(privValFile string, logger log.Logger) {
// Get PrivValidator
if _, err := os.Stat(privValFile); err == nil {
- privValidator := types.LoadPrivValidatorFS(privValFile)
- privValidator.Reset()
+ pv := pvm.LoadFilePV(privValFile)
+ pv.Reset()
logger.Info("Reset PrivValidator", "file", privValFile)
} else {
- privValidator := types.GenPrivValidatorFS(privValFile)
- privValidator.Save()
+ pv := pvm.GenFilePV(privValFile)
+ pv.Save()
logger.Info("Generated PrivValidator", "file", privValFile)
}
}
diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go
index 92af49417..1d94933ef 100644
--- a/cmd/tendermint/commands/show_node_id.go
+++ b/cmd/tendermint/commands/show_node_id.go
@@ -6,6 +6,7 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p"
+
)
// ShowNodeIDCmd dumps node's ID to the standard output.
@@ -16,10 +17,12 @@ var ShowNodeIDCmd = &cobra.Command{
}
func showNodeID(cmd *cobra.Command, args []string) error {
- nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+
+ nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return err
}
fmt.Println(nodeKey.ID())
+
return nil
}
diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go
index 458f11c2d..6ead4bada 100644
--- a/cmd/tendermint/commands/show_validator.go
+++ b/cmd/tendermint/commands/show_validator.go
@@ -2,11 +2,9 @@ package commands
import (
"fmt"
-
"github.com/spf13/cobra"
- "github.com/tendermint/go-wire/data"
- "github.com/tendermint/tendermint/types"
+ privval "github.com/tendermint/tendermint/types/priv_validator"
)
// ShowValidatorCmd adds capabilities for showing the validator info.
@@ -17,7 +15,7 @@ var ShowValidatorCmd = &cobra.Command{
}
func showValidator(cmd *cobra.Command, args []string) {
- privValidator := types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile())
- pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey)
+ privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile())
+ pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
fmt.Println(string(pubKeyJSONBytes))
}
diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go
index 3864e7ad5..ef503db3f 100644
--- a/cmd/tendermint/commands/testnet.go
+++ b/cmd/tendermint/commands/testnet.go
@@ -2,59 +2,103 @@ package commands
import (
"fmt"
+ "net"
+ "os"
"path/filepath"
+ "strings"
"time"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common"
)
-//flags
var (
- nValidators int
- dataDir string
+ nValidators int
+ nNonValidators int
+ outputDir string
+ nodeDirPrefix string
+
+ populatePersistentPeers bool
+ hostnamePrefix string
+ startingIPAddress string
+ p2pPort int
+)
+
+const (
+ nodeDirPerm = 0755
)
func init() {
- TestnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
+ TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
"Number of validators to initialize the testnet with")
- TestnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
+ TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
+ "Number of non-validators to initialize the testnet with")
+ TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",
"Directory to store initialization data for the testnet")
+ TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node",
+ "Prefix the directory name for each node with (node results in node0, node1, ...)")
+
+ TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
+ "Update config of each node with the list of persistent peers built using either hostname-prefix or starting-ip-address")
+ TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
+ "Hostname prefix (node results in persistent peers list ID0@node0:46656, ID1@node1:46656, ...)")
+ TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
+ "Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:46656, ID1@192.168.0.2:46656, ...)")
+ TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 46656,
+ "P2P Port")
}
-// TestnetFilesCmd allows initialisation of files for a
-// Tendermint testnet.
+// TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
var TestnetFilesCmd = &cobra.Command{
Use: "testnet",
Short: "Initialize files for a Tendermint testnet",
- Run: testnetFiles,
+ RunE: testnetFiles,
}
-func testnetFiles(cmd *cobra.Command, args []string) {
-
+func testnetFiles(cmd *cobra.Command, args []string) error {
+ config := cfg.DefaultConfig()
genVals := make([]types.GenesisValidator, nValidators)
- defaultConfig := cfg.DefaultBaseConfig()
- // Initialize core dir and priv_validator.json's
for i := 0; i < nValidators; i++ {
- mach := cmn.Fmt("mach%d", i)
- err := initMachCoreDirectory(dataDir, mach)
+ nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
+ nodeDir := filepath.Join(outputDir, nodeDirName)
+ config.SetRoot(nodeDir)
+
+ err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
if err != nil {
- cmn.Exit(err.Error())
+ _ = os.RemoveAll(outputDir)
+ return err
}
- // Read priv_validator.json to populate vals
- privValFile := filepath.Join(dataDir, mach, defaultConfig.PrivValidator)
- privVal := types.LoadPrivValidatorFS(privValFile)
+
+ initFilesWithConfig(config)
+
+ pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
+ pv := pvm.LoadFilePV(pvFile)
genVals[i] = types.GenesisValidator{
- PubKey: privVal.GetPubKey(),
+ PubKey: pv.GetPubKey(),
Power: 1,
- Name: mach,
+ Name: nodeDirName,
}
}
+ for i := 0; i < nNonValidators; i++ {
+ nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
+ config.SetRoot(nodeDir)
+
+ err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
+ if err != nil {
+ _ = os.RemoveAll(outputDir)
+ return err
+ }
+
+ initFilesWithConfig(config)
+ }
+
// Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{
GenesisTime: time.Now(),
@@ -63,36 +107,65 @@ func testnetFiles(cmd *cobra.Command, args []string) {
}
// Write genesis file.
- for i := 0; i < nValidators; i++ {
- mach := cmn.Fmt("mach%d", i)
- if err := genDoc.SaveAs(filepath.Join(dataDir, mach, defaultConfig.Genesis)); err != nil {
- panic(err)
+ for i := 0; i < nValidators+nNonValidators; i++ {
+ nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
+ if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
+ _ = os.RemoveAll(outputDir)
+ return err
}
}
- fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators))
-}
-
-// Initialize per-machine core directory
-func initMachCoreDirectory(base, mach string) error {
- // Create priv_validator.json file if not present
- defaultConfig := cfg.DefaultBaseConfig()
- dir := filepath.Join(base, mach)
- privValPath := filepath.Join(dir, defaultConfig.PrivValidator)
- dir = filepath.Dir(privValPath)
- err := cmn.EnsureDir(dir, 0700)
- if err != nil {
- return err
+ if populatePersistentPeers {
+ err := populatePersistentPeersInConfigAndWriteIt(config)
+ if err != nil {
+ _ = os.RemoveAll(outputDir)
+ return err
+ }
}
- ensurePrivValidator(privValPath)
+
+ fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
return nil
-
}
-func ensurePrivValidator(file string) {
- if cmn.FileExists(file) {
- return
+func hostnameOrIP(i int) string {
+ if startingIPAddress != "" {
+ ip := net.ParseIP(startingIPAddress)
+ ip = ip.To4()
+ if ip == nil {
+ fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
+ os.Exit(1)
+ }
+
+ for j := 0; j < i; j++ {
+ ip[3]++
+ }
+ return ip.String()
}
- privValidator := types.GenPrivValidatorFS(file)
- privValidator.Save()
+
+ return fmt.Sprintf("%s%d", hostnamePrefix, i)
+}
+
+func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
+ persistentPeers := make([]string, nValidators+nNonValidators)
+ for i := 0; i < nValidators+nNonValidators; i++ {
+ nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
+ config.SetRoot(nodeDir)
+ nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
+ if err != nil {
+ return err
+ }
+ persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
+ }
+ persistentPeersList := strings.Join(persistentPeers, ",")
+
+ for i := 0; i < nValidators+nNonValidators; i++ {
+ nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
+ config.SetRoot(nodeDir)
+ config.P2P.PersistentPeers = persistentPeersList
+
+ // overwrite default config
+ cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
+ }
+
+ return nil
}
diff --git a/cmd/tendermint/commands/wire.go b/cmd/tendermint/commands/wire.go
new file mode 100644
index 000000000..4c133a8c5
--- /dev/null
+++ b/cmd/tendermint/commands/wire.go
@@ -0,0 +1,12 @@
+package commands
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+}
diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go
index fd6287115..8c7f0cd17 100644
--- a/cmd/tendermint/main.go
+++ b/cmd/tendermint/main.go
@@ -25,6 +25,7 @@ func main() {
cmd.ShowValidatorCmd,
cmd.TestnetFilesCmd,
cmd.ShowNodeIDCmd,
+ cmd.GenNodeKeyCmd,
cmd.VersionCmd)
// NOTE:
diff --git a/config/config.go b/config/config.go
index c87d56b3d..e83e9e51d 100644
--- a/config/config.go
+++ b/config/config.go
@@ -270,7 +270,7 @@ type P2PConfig struct {
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
// Maximum size of a message packet payload, in bytes
- MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"`
+ MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
// Rate at which packets can be sent, in bytes/second
SendRate int64 `mapstructure:"send_rate"`
@@ -302,7 +302,7 @@ func DefaultP2PConfig() *P2PConfig {
AddrBookStrict: true,
MaxNumPeers: 50,
FlushThrottleTimeout: 100,
- MaxMsgPacketPayloadSize: 1024, // 1 kB
+ MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 512000, // 500 kB/s
RecvRate: 512000, // 500 kB/s
PexReactor: true,
diff --git a/config/toml.go b/config/toml.go
index 13f71db86..71b8b2e81 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -37,16 +37,21 @@ func EnsureRoot(rootDir string) {
// Write default config file if missing.
if !cmn.FileExists(configFilePath) {
- writeConfigFile(configFilePath)
+ writeDefaultConfigFile(configFilePath)
}
}
// XXX: this func should probably be called by cmd/tendermint/commands/init.go
// alongside the writing of the genesis.json and priv_validator.json
-func writeConfigFile(configFilePath string) {
+func writeDefaultConfigFile(configFilePath string) {
+ WriteConfigFile(configFilePath, DefaultConfig())
+}
+
+// WriteConfigFile renders config using the template and writes it to configFilePath.
+func WriteConfigFile(configFilePath string, config *Config) {
var buffer bytes.Buffer
- if err := configTemplate.Execute(&buffer, DefaultConfig()); err != nil {
+ if err := configTemplate.Execute(&buffer, config); err != nil {
panic(err)
}
@@ -124,11 +129,11 @@ unsafe = {{ .RPC.Unsafe }}
laddr = "{{ .P2P.ListenAddress }}"
# Comma separated list of seed nodes to connect to
-seeds = ""
+seeds = "{{ .P2P.Seeds }}"
# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
-persistent_peers = ""
+persistent_peers = "{{ .P2P.PersistentPeers }}"
# Path to address book
addr_book_file = "{{ .P2P.AddrBook }}"
@@ -143,7 +148,7 @@ flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
max_num_peers = {{ .P2P.MaxNumPeers }}
# Maximum size of a message packet payload, in bytes
-max_msg_packet_payload_size = {{ .P2P.MaxMsgPacketPayloadSize }}
+max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
# Rate at which packets can be sent, in bytes/second
send_rate = {{ .P2P.SendRate }}
@@ -262,7 +267,7 @@ func ResetTestRoot(testName string) *Config {
// Write default config file if missing.
if !cmn.FileExists(configFilePath) {
- writeConfigFile(configFilePath)
+ writeDefaultConfigFile(configFilePath)
}
if !cmn.FileExists(genesisFilePath) {
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
@@ -280,8 +285,8 @@ var testGenesis = `{
"validators": [
{
"pub_key": {
- "type": "ed25519",
- "data":"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
+ "type": "AC26791624DE60",
+ "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
},
"power": 10,
"name": ""
@@ -291,14 +296,14 @@ var testGenesis = `{
}`
var testPrivValidator = `{
- "address": "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456",
+ "address": "849CB2C877F87A20925F35D00AE6688342D25B47",
"pub_key": {
- "type": "ed25519",
- "data": "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
+ "type": "AC26791624DE60",
+ "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
},
"priv_key": {
- "type": "ed25519",
- "data": "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
+ "type": "954568A3288910",
+ "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
},
"last_height": 0,
"last_round": 0,
diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go
index f8163c07d..5f04a3308 100644
--- a/consensus/byzantine_test.go
+++ b/consensus/byzantine_test.go
@@ -7,7 +7,6 @@ import (
"time"
"github.com/stretchr/testify/require"
- crypto "github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
@@ -48,7 +47,9 @@ func TestByzantine(t *testing.T) {
for i := 0; i < N; i++ {
// make first val byzantine
if i == 0 {
- css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator)
+ // NOTE: Now, test validators are MockPV, which by default doesn't
+ // do any safety checks.
+ css[i].privValidator.(*types.MockPV).DisableChecks()
css[i].decideProposal = func(j int) func(int64, int) {
return func(height int64, round int) {
byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
@@ -203,7 +204,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
- peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
+ peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
// parts
for i := 0; i < parts.Total(); i++ {
@@ -213,7 +214,7 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
Round: round, // This tells peer that this part applies to us.
Part: part,
}
- peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
+ peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
}
// votes
@@ -222,8 +223,8 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
cs.mtx.Unlock()
- peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}})
- peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}})
+ peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote}))
+ peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{precommit}))
}
//----------------------------------------
@@ -264,47 +265,3 @@ func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
}
-
-//----------------------------------------
-// byzantine privValidator
-
-type ByzantinePrivValidator struct {
- types.Signer
-
- pv types.PrivValidator
-}
-
-// Return a priv validator that will sign anything
-func NewByzantinePrivValidator(pv types.PrivValidator) *ByzantinePrivValidator {
- return &ByzantinePrivValidator{
- Signer: pv.(*types.PrivValidatorFS).Signer,
- pv: pv,
- }
-}
-
-func (privVal *ByzantinePrivValidator) GetAddress() types.Address {
- return privVal.pv.GetAddress()
-}
-
-func (privVal *ByzantinePrivValidator) GetPubKey() crypto.PubKey {
- return privVal.pv.GetPubKey()
-}
-
-func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) (err error) {
- vote.Signature, err = privVal.Sign(vote.SignBytes(chainID))
- return err
-}
-
-func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
- proposal.Signature, _ = privVal.Sign(proposal.SignBytes(chainID))
- return nil
-}
-
-func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
- heartbeat.Signature, _ = privVal.Sign(heartbeat.SignBytes(chainID))
- return nil
-}
-
-func (privVal *ByzantinePrivValidator) String() string {
- return cmn.Fmt("PrivValidator{%X}", privVal.GetAddress())
-}
diff --git a/consensus/common_test.go b/consensus/common_test.go
index 287852e04..4ddd6b8aa 100644
--- a/consensus/common_test.go
+++ b/consensus/common_test.go
@@ -21,6 +21,7 @@ import (
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
@@ -222,7 +223,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh := make(chan interface{})
go func() {
for v := range voteCh0 {
- vote := v.(types.TMEventData).Unwrap().(types.EventDataVote)
+ vote := v.(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v
@@ -277,10 +278,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
return cs
}
-func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS {
+func loadPrivValidator(config *cfg.Config) *pvm.FilePV {
privValidatorFile := config.PrivValidatorFile()
ensureDir(path.Dir(privValidatorFile), 0700)
- privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
+ privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
privValidator.Reset()
return privValidator
}
@@ -378,7 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
privVal = privVals[i]
} else {
_, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal = types.GenPrivValidatorFS(tempFilePath)
+ privVal = pvm.GenFilePV(tempFilePath)
}
app := appFunc()
@@ -394,7 +395,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
for i, s := range switches {
- if bytes.Equal(peer.NodeInfo().PubKey.Address(), s.NodeInfo().PubKey.Address()) {
+ if peer.NodeInfo().ID == s.NodeInfo().ID {
return i
}
}
@@ -405,9 +406,9 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
//-------------------------------------------------------------------------------
// genesis
-func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidatorFS) {
+func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
- privValidators := make([]*types.PrivValidatorFS, numValidators)
+ privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
@@ -425,7 +426,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
}, privValidators
}
-func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []*types.PrivValidatorFS) {
+func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
s0, _ := sm.MakeGenesisState(genDoc)
db := dbm.NewMemDB()
diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go
index 31e4e1e62..3c8180060 100644
--- a/consensus/mempool_test.go
+++ b/consensus/mempool_test.go
@@ -108,7 +108,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
ticker := time.NewTicker(time.Second * 30)
select {
case b := <-newBlockCh:
- evt := b.(types.TMEventData).Unwrap().(types.EventDataNewBlock)
+ evt := b.(types.EventDataNewBlock)
nTxs += int(evt.Block.Header.NumTxs)
case <-ticker.C:
panic("Timed out waiting to commit blocks with transactions")
diff --git a/consensus/reactor.go b/consensus/reactor.go
index 70a79d86a..385e8d867 100644
--- a/consensus/reactor.go
+++ b/consensus/reactor.go
@@ -1,7 +1,6 @@
package consensus
import (
- "bytes"
"context"
"fmt"
"reflect"
@@ -10,7 +9,7 @@ import (
"github.com/pkg/errors"
- wire "github.com/tendermint/go-wire"
+ amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -26,7 +25,7 @@ const (
VoteChannel = byte(0x22)
VoteSetBitsChannel = byte(0x23)
- maxConsensusMessageSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
+ maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
blocksToContributeToBecomeGoodPeer = 10000
)
@@ -110,27 +109,31 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
// TODO optimize
return []*p2p.ChannelDescriptor{
{
- ID: StateChannel,
- Priority: 5,
- SendQueueCapacity: 100,
+ ID: StateChannel,
+ Priority: 5,
+ SendQueueCapacity: 100,
+ RecvMessageCapacity: maxMsgSize,
},
{
- ID: DataChannel, // maybe split between gossiping current block and catchup stuff
- Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
- SendQueueCapacity: 100,
- RecvBufferCapacity: 50 * 4096,
+ ID: DataChannel, // maybe split between gossiping current block and catchup stuff
+ Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
+ SendQueueCapacity: 100,
+ RecvBufferCapacity: 50 * 4096,
+ RecvMessageCapacity: maxMsgSize,
},
{
- ID: VoteChannel,
- Priority: 5,
- SendQueueCapacity: 100,
- RecvBufferCapacity: 100 * 100,
+ ID: VoteChannel,
+ Priority: 5,
+ SendQueueCapacity: 100,
+ RecvBufferCapacity: 100 * 100,
+ RecvMessageCapacity: maxMsgSize,
},
{
- ID: VoteSetBitsChannel,
- Priority: 1,
- SendQueueCapacity: 2,
- RecvBufferCapacity: 1024,
+ ID: VoteSetBitsChannel,
+ Priority: 1,
+ SendQueueCapacity: 2,
+ RecvBufferCapacity: 1024,
+ RecvMessageCapacity: maxMsgSize,
},
}
}
@@ -178,7 +181,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
return
}
- _, msg, err := DecodeMessage(msgBytes)
+ msg, err := DecodeMessage(msgBytes)
if err != nil {
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
conR.Switch.StopPeerForError(src, err)
@@ -224,13 +227,13 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
conR.Logger.Error("Bad VoteSetBitsMessage field Type")
return
}
- src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{
+ src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: msg.BlockID,
Votes: ourVotes,
- }})
+ }))
case *ProposalHeartbeatMessage:
hb := msg.Heartbeat
conR.Logger.Debug("Received proposal heartbeat message",
@@ -377,17 +380,17 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
select {
case data, ok = <-stepsCh:
if ok { // a receive from a closed channel returns the zero value immediately
- edrs := data.(types.TMEventData).Unwrap().(types.EventDataRoundState)
+ edrs := data.(types.EventDataRoundState)
conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
}
case data, ok = <-votesCh:
if ok {
- edv := data.(types.TMEventData).Unwrap().(types.EventDataVote)
+ edv := data.(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote)
}
case data, ok = <-heartbeatsCh:
if ok {
- edph := data.(types.TMEventData).Unwrap().(types.EventDataProposalHeartbeat)
+ edph := data.(types.EventDataProposalHeartbeat)
conR.broadcastProposalHeartbeatMessage(edph)
}
case <-conR.Quit():
@@ -409,16 +412,16 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.
conR.Logger.Debug("Broadcasting proposal heartbeat message",
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence)
msg := &ProposalHeartbeatMessage{hb}
- conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg})
+ conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
}
func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
- conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{nrsMsg})
+ conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
if csMsg != nil {
- conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{csMsg})
+ conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
}
}
@@ -430,7 +433,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
Type: vote.Type,
Index: vote.ValidatorIndex,
}
- conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg})
+ conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
/*
// TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() {
@@ -470,10 +473,10 @@ func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
- peer.Send(StateChannel, struct{ ConsensusMessage }{nrsMsg})
+ peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
if csMsg != nil {
- peer.Send(StateChannel, struct{ ConsensusMessage }{csMsg})
+ peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
}
}
@@ -500,7 +503,7 @@ OUTER_LOOP:
Part: part,
}
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
- if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
+ if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
@@ -544,7 +547,7 @@ OUTER_LOOP:
{
msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
- if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
+ if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposal(rs.Proposal)
}
}
@@ -559,7 +562,7 @@ OUTER_LOOP:
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
}
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
- peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
+ peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
}
continue OUTER_LOOP
}
@@ -602,7 +605,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
Part: part,
}
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
- if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
+ if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else {
logger.Debug("Sending block part for catchup failed")
@@ -739,12 +742,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
- peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
+ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrevote,
BlockID: maj23,
- }})
+ }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
}
}
@@ -756,12 +759,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
- peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
+ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrecommit,
BlockID: maj23,
- }})
+ }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
}
}
@@ -773,12 +776,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
- peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
+ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: types.VoteTypePrevote,
BlockID: maj23,
- }})
+ }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
}
}
@@ -792,12 +795,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
commit := conR.conS.LoadCommit(prs.Height)
- peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
+ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round(),
Type: types.VoteTypePrecommit,
BlockID: commit.BlockID,
- }})
+ }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
}
}
@@ -835,8 +838,8 @@ var (
ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime")
)
-// PeerState contains the known state of a peer, including its connection
-// and threadsafe access to its PeerRoundState.
+// PeerState contains the known state of a peer, including its connection and
+// threadsafe access to its PeerRoundState.
type PeerState struct {
Peer p2p.Peer
logger log.Logger
@@ -875,12 +878,14 @@ func NewPeerState(peer p2p.Peer) *PeerState {
}
}
+// SetLogger allows to set a logger on the peer state. Returns the peer state
+// itself.
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
ps.logger = logger
return ps
}
-// GetRoundState returns an atomic snapshot of the PeerRoundState.
+// GetRoundState returns a shallow copy of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
ps.mtx.Lock()
@@ -890,6 +895,14 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
return &prs
}
+// GetRoundStateJSON returns a json of PeerRoundState, marshalled using go-amino.
+func (ps *PeerState) GetRoundStateJSON() ([]byte, error) {
+ ps.mtx.Lock()
+ defer ps.mtx.Unlock()
+
+ return cdc.MarshalJSON(ps.PeerRoundState)
+}
+
// GetHeight returns an atomic snapshot of the PeerRoundState's height
// used by the mempool to ensure peers are caught up before broadcasting new txs
func (ps *PeerState) GetHeight() int64 {
@@ -948,7 +961,7 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
- return ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
+ return ps.Peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
}
return false
}
@@ -1052,7 +1065,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
}
}
-// EnsureVoteVitArrays ensures the bit-arrays have been allocated for tracking
+// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
// what votes this peer has received.
// NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height.
@@ -1292,45 +1305,30 @@ func (ps *PeerState) StringIndented(indent string) string {
//-----------------------------------------------------------------------------
// Messages
-const (
- msgTypeNewRoundStep = byte(0x01)
- msgTypeCommitStep = byte(0x02)
- msgTypeProposal = byte(0x11)
- msgTypeProposalPOL = byte(0x12)
- msgTypeBlockPart = byte(0x13) // both block & POL
- msgTypeVote = byte(0x14)
- msgTypeHasVote = byte(0x15)
- msgTypeVoteSetMaj23 = byte(0x16)
- msgTypeVoteSetBits = byte(0x17)
-
- msgTypeProposalHeartbeat = byte(0x20)
-)
-
// ConsensusMessage is a message that can be sent and received on the ConsensusReactor
type ConsensusMessage interface{}
-var _ = wire.RegisterInterface(
- struct{ ConsensusMessage }{},
- wire.ConcreteType{&NewRoundStepMessage{}, msgTypeNewRoundStep},
- wire.ConcreteType{&CommitStepMessage{}, msgTypeCommitStep},
- wire.ConcreteType{&ProposalMessage{}, msgTypeProposal},
- wire.ConcreteType{&ProposalPOLMessage{}, msgTypeProposalPOL},
- wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart},
- wire.ConcreteType{&VoteMessage{}, msgTypeVote},
- wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote},
- wire.ConcreteType{&VoteSetMaj23Message{}, msgTypeVoteSetMaj23},
- wire.ConcreteType{&VoteSetBitsMessage{}, msgTypeVoteSetBits},
- wire.ConcreteType{&ProposalHeartbeatMessage{}, msgTypeProposalHeartbeat},
-)
+func RegisterConsensusMessages(cdc *amino.Codec) {
+ cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
+ cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
+ cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil)
+ cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
+ cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil)
+ cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil)
+ cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil)
+ cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil)
+ cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil)
+ cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil)
+ cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
+}
// DecodeMessage decodes the given bytes into a ConsensusMessage.
-// TODO: check for unnecessary extra bytes at the end.
-func DecodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) {
- msgType = bz[0]
- n := new(int)
- r := bytes.NewReader(bz)
- msgI := wire.ReadBinary(struct{ ConsensusMessage }{}, r, maxConsensusMessageSize, n, &err)
- msg = msgI.(struct{ ConsensusMessage }).ConsensusMessage
+func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
+ if len(bz) > maxMsgSize {
+ return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+ len(bz), maxMsgSize)
+ }
+ err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}
diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go
index 8448caf63..058bf1e71 100644
--- a/consensus/reactor_test.go
+++ b/consensus/reactor_test.go
@@ -11,7 +11,6 @@ import (
"time"
"github.com/tendermint/abci/example/kvstore"
- wire "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -149,30 +148,30 @@ func TestReactorRecordsBlockParts(t *testing.T) {
Round: 0,
Part: parts.GetPart(0),
}
- bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{msg})
+ bz, err := cdc.MarshalBinaryBare(msg)
require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz)
- assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
+ require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
// 2) block part with the same height, but different round
msg.Round = 1
- bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg})
+ bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz)
- assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
+ require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
// 3) block part from earlier height
msg.Height = 1
msg.Round = 0
- bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg})
+ bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz)
- assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
+ require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
}
// Test we record votes from other peers
@@ -204,7 +203,7 @@ func TestReactorRecordsVotes(t *testing.T) {
Type: types.VoteTypePrevote,
BlockID: types.BlockID{},
}
- bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}})
+ bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz)
@@ -213,7 +212,7 @@ func TestReactorRecordsVotes(t *testing.T) {
// 2) vote with the same height, but different round
vote.Round = 1
- bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}})
+ bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz)
@@ -223,7 +222,7 @@ func TestReactorRecordsVotes(t *testing.T) {
vote.Height = 1
vote.Round = 0
- bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}})
+ bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz)
@@ -410,7 +409,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
if !ok {
return
}
- newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
+ newBlock := newBlockI.(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals)
assert.Nil(t, err)
@@ -431,7 +430,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
if !ok {
return
}
- newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
+ newBlock := newBlockI.(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals)
assert.Nil(t, err)
@@ -463,7 +462,7 @@ func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals m
if !ok {
return
}
- newBlock = newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
+ newBlock = newBlockI.(types.EventDataNewBlock).Block
if newBlock.LastCommit.Size() == len(updatedVals) {
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
break LOOP
diff --git a/consensus/replay_test.go b/consensus/replay_test.go
index 606df8260..75f208bb6 100644
--- a/consensus/replay_test.go
+++ b/consensus/replay_test.go
@@ -17,8 +17,7 @@ import (
"github.com/tendermint/abci/example/kvstore"
abci "github.com/tendermint/abci/types"
- crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-crypto"
auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
@@ -27,6 +26,7 @@ import (
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tmlibs/log"
)
@@ -60,7 +60,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
// fmt.Printf("====== WAL: \n\r%s\n", bytes)
- t.Logf("====== WAL: \n\r%s\n", bytes)
+ t.Logf("====== WAL: \n\r%X\n", bytes)
err := cs.Start()
require.NoError(t, err)
@@ -325,7 +325,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)
- privVal := types.LoadPrivValidatorFS(config.PrivValidatorFile())
+ privVal := pvm.LoadFilePV(config.PrivValidatorFile())
wal, err := NewWAL(walFile, false)
if err != nil {
@@ -519,8 +519,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
case EndHeightMessage:
// if its not the first one, we have a full block
if thisBlockParts != nil {
- var n int
- block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block)
+ var block = new(types.Block)
+ _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil {
panic(err)
}
@@ -552,8 +552,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
}
}
// grab the last block too
- var n int
- block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block)
+ var block = new(types.Block)
+ _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil {
panic(err)
}
diff --git a/consensus/state.go b/consensus/state.go
index 57c7b32f0..2e97c4953 100644
--- a/consensus/state.go
+++ b/consensus/state.go
@@ -10,8 +10,6 @@ import (
"time"
fail "github.com/ebuchman/fail-test"
-
- wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -170,18 +168,23 @@ func (cs *ConsensusState) GetState() sm.State {
return cs.state.Copy()
}
-// GetRoundState returns a copy of the internal consensus state.
+// GetRoundState returns a shallow copy of the internal consensus state.
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
cs.mtx.Lock()
defer cs.mtx.Unlock()
- return cs.getRoundState()
-}
-func (cs *ConsensusState) getRoundState() *cstypes.RoundState {
rs := cs.RoundState // copy
return &rs
}
+// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino.
+func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+
+ return cdc.MarshalJSON(cs.RoundState)
+}
+
// GetValidators returns a copy of the current validators.
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
cs.mtx.Lock()
@@ -776,7 +779,7 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
// if not a validator, we're done
if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
- cs.Logger.Debug("This node is not a validator")
+ cs.Logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
return
}
cs.Logger.Debug("This node is a validator")
@@ -1297,10 +1300,10 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
}
if added && cs.ProposalBlockParts.IsComplete() {
// Added and completed!
- var n int
- var err error
- cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(),
- cs.state.ConsensusParams.BlockSize.MaxBytes, &n, &err).(*types.Block)
+ _, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes))
+ if err != nil {
+ return true, err
+ }
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() {
@@ -1310,7 +1313,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
// If we're waiting on the proposal block...
cs.tryFinalizeCommit(height)
}
- return true, err
+ return true, nil
}
return added, nil
}
diff --git a/consensus/state_test.go b/consensus/state_test.go
index e6b2a1354..0d7cad484 100644
--- a/consensus/state_test.go
+++ b/consensus/state_test.go
@@ -261,7 +261,7 @@ func TestStateFullRound1(t *testing.T) {
// grab proposal
re := <-propCh
- propBlockHash := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()
+ propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()
<-voteCh // wait for prevote
validatePrevote(t, cs, round, vss[0], propBlockHash)
@@ -356,7 +356,7 @@ func TestStateLockNoPOL(t *testing.T) {
cs1.startRoutines(0)
re := <-proposalCh
- rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote
@@ -396,7 +396,7 @@ func TestStateLockNoPOL(t *testing.T) {
// now we're on a new round and not the proposer, so wait for timeout
re = <-timeoutProposeCh
- rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.ProposalBlock != nil {
panic("Expected proposal block to be nil")
@@ -409,7 +409,7 @@ func TestStateLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())
// add a conflicting prevote from the other validator
- signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
+ signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// now we're going to enter prevote again, but with invalid args
@@ -424,7 +424,7 @@ func TestStateLockNoPOL(t *testing.T) {
// add conflicting precommit from vs2
// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
- signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
+ signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// (note we're entering precommit for a second time this round, but with invalid args
@@ -440,7 +440,7 @@ func TestStateLockNoPOL(t *testing.T) {
incrementRound(vs2)
re = <-proposalCh
- rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
// now we're on a new round and are the proposer
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
@@ -529,7 +529,7 @@ func TestStateLockPOLRelock(t *testing.T) {
<-newRoundCh
re := <-proposalCh
- rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote
@@ -605,9 +605,9 @@ func TestStateLockPOLRelock(t *testing.T) {
discardFromChan(voteCh, 2)
be := <-newBlockCh
- b := be.(types.TMEventData).Unwrap().(types.EventDataNewBlockHeader)
+ b := be.(types.EventDataNewBlockHeader)
re = <-newRoundCh
- rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.Height != 2 {
panic("Expected height to increment")
}
@@ -643,7 +643,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
- rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote
@@ -669,7 +669,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
// timeout to new round
re = <-timeoutWaitCh
- rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
lockedBlockHash := rs.LockedBlock.Hash()
//XXX: this isnt guaranteed to get there before the timeoutPropose ...
@@ -731,7 +731,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
- rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock
<-voteCh // prevote
@@ -781,7 +781,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
re = <-proposalCh
}
- rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.LockedBlock != nil {
panic("we should not be locked!")
@@ -1033,7 +1033,7 @@ func TestStateHalt1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
- rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock
propBlockParts := propBlock.MakePartSet(partSize)
@@ -1056,7 +1056,7 @@ func TestStateHalt1(t *testing.T) {
// timeout to new round
<-timeoutWaitCh
re = <-newRoundCh
- rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
t.Log("### ONTO ROUND 1")
/*Round2
@@ -1074,7 +1074,7 @@ func TestStateHalt1(t *testing.T) {
// receiving that precommit should take us straight to commit
<-newBlockCh
re = <-newRoundCh
- rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.Height != 2 {
panic("expected height to increment")
diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go
index 246c0b711..678d34759 100644
--- a/consensus/types/height_vote_set_test.go
+++ b/consensus/types/height_vote_set_test.go
@@ -48,7 +48,7 @@ func TestPeerCatchupRounds(t *testing.T) {
}
-func makeVoteHR(t *testing.T, height int64, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote {
+func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote {
privVal := privVals[valIndex]
vote := &types.Vote{
ValidatorAddress: privVal.GetAddress(),
diff --git a/consensus/types/state.go b/consensus/types/state.go
index 8e79f10d2..0c3626cc7 100644
--- a/consensus/types/state.go
+++ b/consensus/types/state.go
@@ -52,9 +52,6 @@ func (rs RoundStepType) String() string {
//-----------------------------------------------------------------------------
// RoundState defines the internal consensus state.
-// It is Immutable when returned from ConsensusState.GetRoundState()
-// TODO: Actually, only the top pointer is copied,
-// so access to field pointers is still racey
// NOTE: Not thread safe. Should only be manipulated by functions downstream
// of the cs.receiveRoutine
type RoundState struct {
diff --git a/consensus/wal.go b/consensus/wal.go
index 88218940d..694e2516e 100644
--- a/consensus/wal.go
+++ b/consensus/wal.go
@@ -1,7 +1,6 @@
package consensus
import (
- "bytes"
"encoding/binary"
"fmt"
"hash/crc32"
@@ -11,7 +10,7 @@ import (
"github.com/pkg/errors"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common"
@@ -38,13 +37,13 @@ type EndHeightMessage struct {
type WALMessage interface{}
-var _ = wire.RegisterInterface(
- struct{ WALMessage }{},
- wire.ConcreteType{types.EventDataRoundState{}, 0x01},
- wire.ConcreteType{msgInfo{}, 0x02},
- wire.ConcreteType{timeoutInfo{}, 0x03},
- wire.ConcreteType{EndHeightMessage{}, 0x04},
-)
+func RegisterWALMessages(cdc *amino.Codec) {
+ cdc.RegisterInterface((*WALMessage)(nil), nil)
+ cdc.RegisterConcrete(types.EventDataRoundState{}, "tendermint/wal/EventDataRoundState", nil)
+ cdc.RegisterConcrete(msgInfo{}, "tendermint/wal/MsgInfo", nil)
+ cdc.RegisterConcrete(timeoutInfo{}, "tendermint/wal/TimeoutInfo", nil)
+ cdc.RegisterConcrete(EndHeightMessage{}, "tendermint/wal/EndHeightMessage", nil)
+}
//--------------------------------------------------------
// Simple write-ahead logger
@@ -193,7 +192,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
// A WALEncoder writes custom-encoded WAL messages to an output stream.
//
-// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-wire encoded)
+// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-amino encoded)
type WALEncoder struct {
wr io.Writer
}
@@ -205,7 +204,7 @@ func NewWALEncoder(wr io.Writer) *WALEncoder {
// Encode writes the custom encoding of v to the stream.
func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
- data := wire.BinaryBytes(v)
+ data := cdc.MustMarshalBinaryBare(v)
crc := crc32.Checksum(data, crc32c)
length := uint32(len(data))
@@ -298,9 +297,8 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
}
- var nn int
- var res *TimedWALMessage // nolint: gosimple
- res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage)
+ var res = new(TimedWALMessage) // nolint: gosimple
+ err = cdc.UnmarshalBinaryBare(data, res)
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)}
}
diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go
index de41d4018..18ff614c5 100644
--- a/consensus/wal_generator.go
+++ b/consensus/wal_generator.go
@@ -4,7 +4,6 @@ import (
"bufio"
"bytes"
"fmt"
- "math/rand"
"os"
"path/filepath"
"strings"
@@ -17,6 +16,7 @@ import (
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/db"
@@ -40,7 +40,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
// NOTE: we can't import node package because of circular dependency
privValidatorFile := config.PrivValidatorFile()
- privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
+ privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return nil, errors.Wrap(err, "failed to read genesis file")
@@ -116,7 +116,7 @@ func makePathname() string {
func randPort() int {
// returns between base and base + spread
base, spread := 20000, 20000
- return base + rand.Intn(spread)
+ return base + cmn.RandIntn(spread)
}
func makeAddrs() (string, string, string) {
diff --git a/consensus/wal_test.go b/consensus/wal_test.go
index 3553591c9..45af36e12 100644
--- a/consensus/wal_test.go
+++ b/consensus/wal_test.go
@@ -3,11 +3,10 @@ package consensus
import (
"bytes"
"crypto/rand"
- "sync"
+ // "sync"
"testing"
"time"
- wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/consensus/types"
tmtypes "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
@@ -36,7 +35,7 @@ func TestWALEncoderDecoder(t *testing.T) {
decoded, err := dec.Decode()
require.NoError(t, err)
- assert.Equal(t, msg.Time.Truncate(time.Millisecond), decoded.Time)
+ assert.Equal(t, msg.Time.UTC(), decoded.Time)
assert.Equal(t, msg.Msg, decoded.Msg)
}
}
@@ -68,6 +67,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
}
+/*
var initOnce sync.Once
func registerInterfacesOnce() {
@@ -78,6 +78,7 @@ func registerInterfacesOnce() {
)
})
}
+*/
func nBytes(n int) []byte {
buf := make([]byte, n)
@@ -86,7 +87,7 @@ func nBytes(n int) []byte {
}
func benchmarkWalDecode(b *testing.B, n int) {
- registerInterfacesOnce()
+ // registerInterfacesOnce()
buf := new(bytes.Buffer)
enc := NewWALEncoder(buf)
diff --git a/consensus/wire.go b/consensus/wire.go
new file mode 100644
index 000000000..81223c689
--- /dev/null
+++ b/consensus/wire.go
@@ -0,0 +1,14 @@
+package consensus
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ RegisterConsensusMessages(cdc)
+ RegisterWALMessages(cdc)
+ crypto.RegisterAmino(cdc)
+}
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 000000000..b9a3fc684
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,68 @@
+version: '3'
+
+services:
+ node0:
+ container_name: node0
+ image: "tendermint/localnode"
+ ports:
+ - "46656-46657:46656-46657"
+ environment:
+ - ID=0
+ - LOG=${LOG:-tendermint.log}
+ volumes:
+ - ${FOLDER:-./build}:/tendermint:Z
+ networks:
+ localnet:
+ ipv4_address: 192.167.10.2
+
+ node1:
+ container_name: node1
+ image: "tendermint/localnode"
+ ports:
+ - "46659-46660:46656-46657"
+ environment:
+ - ID=1
+ - LOG=${LOG:-tendermint.log}
+ volumes:
+ - ${FOLDER:-./build}:/tendermint:Z
+ networks:
+ localnet:
+ ipv4_address: 192.167.10.3
+
+ node2:
+ container_name: node2
+ image: "tendermint/localnode"
+ environment:
+ - ID=2
+ - LOG=${LOG:-tendermint.log}
+ ports:
+ - "46661-46662:46656-46657"
+ volumes:
+ - ${FOLDER:-./build}:/tendermint:Z
+ networks:
+ localnet:
+ ipv4_address: 192.167.10.4
+
+ node3:
+ container_name: node3
+ image: "tendermint/localnode"
+ environment:
+ - ID=3
+ - LOG=${LOG:-tendermint.log}
+ ports:
+ - "46663-46664:46656-46657"
+ volumes:
+ - ${FOLDER:-./build}:/tendermint:Z
+ networks:
+ localnet:
+ ipv4_address: 192.167.10.5
+
+networks:
+ localnet:
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ -
+ subnet: 192.167.10.0/16
+
diff --git a/docker-compose/Makefile b/docker-compose/Makefile
new file mode 100644
index 000000000..98517851d
--- /dev/null
+++ b/docker-compose/Makefile
@@ -0,0 +1,7 @@
+# Makefile for the "localnode" docker image.
+
+all:
+ docker build --tag tendermint/localnode localnode
+
+.PHONY: all
+
diff --git a/docker-compose/README.rst b/docker-compose/README.rst
new file mode 100644
index 000000000..d22a24d9b
--- /dev/null
+++ b/docker-compose/README.rst
@@ -0,0 +1,40 @@
+localnode
+=========
+
+It is assumed that you have already `setup docker `__.
+
+Description
+-----------
+Image for local testnets.
+
+Add the tendermint binary to the image by attaching it in a folder to the `/tendermint` mount point.
+
+It assumes that the configuration was created by the `tendermint testnet` command and it is also attached to the `/tendermint` mount point.
+
+Example:
+This example builds a linux tendermint binary under the `build/` folder, creates tendermint configuration for a single-node validator and runs the node:
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+
+#Build binary
+make build-linux
+
+#Create configuration
+docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
+
+#Run the node
+docker run -v `pwd`/build:/tendermint tendermint/localnode
+```
+
+Logging
+-------
+Log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen.
+
+Special binaries
+----------------
+If you have multiple binaries with different names, you can specify which one to run with the BINARY environment variable. The path of the binary is relative to the attached volume.
+
+docker-compose.yml
+==================
+This file creates a 4-node network using the localnode image. The nodes of the network are exposed to the host machine on ports 46656-46657, 46659-46660, 46661-46662, 46663-46664 respectively.
+
diff --git a/docker-compose/localnode/Dockerfile b/docker-compose/localnode/Dockerfile
new file mode 100644
index 000000000..7eeba963d
--- /dev/null
+++ b/docker-compose/localnode/Dockerfile
@@ -0,0 +1,16 @@
+FROM alpine:3.7
+MAINTAINER Greg Szabo
+
+RUN apk update && \
+ apk upgrade && \
+ apk --no-cache add curl jq file
+
+VOLUME [ /tendermint ]
+WORKDIR /tendermint
+EXPOSE 46656 46657
+ENTRYPOINT ["/usr/bin/wrapper.sh"]
+CMD ["node", "--proxy_app dummy"]
+STOPSIGNAL SIGTERM
+
+COPY wrapper.sh /usr/bin/wrapper.sh
+
diff --git a/docker-compose/localnode/wrapper.sh b/docker-compose/localnode/wrapper.sh
new file mode 100755
index 000000000..8b55cbc10
--- /dev/null
+++ b/docker-compose/localnode/wrapper.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env sh
+
+##
+## Input parameters
+##
+BINARY=/tendermint/${BINARY:-tendermint}
+ID=${ID:-0}
+LOG=${LOG:-tendermint.log}
+
+##
+## Assert linux binary
+##
+if ! [ -f "${BINARY}" ]; then
+ echo "The binary `basename ${BINARY}` cannot be found. Please add the binary to the shared folder. Please use the BINARY environment variable if the name of the binary is not 'tendermint' E.g.: -e BINARY=tendermint_my_test_version"
+ exit 1
+fi
+BINARY_CHECK="`file $BINARY | grep 'ELF 64-bit LSB executable, x86-64'`"
+if [ -z "${BINARY_CHECK}" ]; then
+ echo "Binary needs to be OS linux, ARCH amd64"
+ exit 1
+fi
+
+##
+## Run binary with all parameters
+##
+export TMHOME="/tendermint/node${ID}"
+
+if [ -d "${TMHOME}/${LOG}" ]; then
+ "$BINARY" $@ | tee "${TMHOME}/${LOG}"
+else
+ "$BINARY" $@
+fi
+
diff --git a/docs/architecture/adr-008-priv-validator.md b/docs/architecture/adr-008-priv-validator.md
index 0861e9415..4c1d87bed 100644
--- a/docs/architecture/adr-008-priv-validator.md
+++ b/docs/architecture/adr-008-priv-validator.md
@@ -1,128 +1,29 @@
-# ADR 008: PrivValidator
+# ADR 008: SocketPV
-## Context
+Tendermint node's should support only two in-process PrivValidator
+implementations:
-The current PrivValidator is monolithic and isn't easily reuseable by alternative signers.
+- FilePV uses an unencrypted private key in a "priv_validator.json" file - no
+ configuration required (just `tendermint init`).
+- SocketPV uses a socket to send signing requests to another process - user is
+ responsible for starting that process themselves.
-For instance, see https://github.com/tendermint/tendermint/issues/673
+The SocketPV address can be provided via flags at the command line - doing so
+will cause Tendermint to ignore any "priv_validator.json" file and to listen on
+the given address for incoming connections from an external priv_validator
+process. It will halt any operation until at least one external process
+successfully connected.
-The goal is to have a clean PrivValidator interface like:
+The external priv_validator process will dial the address to connect to
+Tendermint, and then Tendermint will send requests on the ensuing connection to
+sign votes and proposals. Thus the external process initiates the connection,
+but the Tendermint process makes all requests. In a later stage we're going to
+support multiple validators for fault tolerance. To prevent double signing they
+need to be synced, which is deferred to an external solution (see #1185).
-```
-type PrivValidator interface {
- Address() data.Bytes
- PubKey() crypto.PubKey
-
- SignVote(chainID string, vote *types.Vote) error
- SignProposal(chainID string, proposal *types.Proposal) error
- SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error
-}
-```
-
-It should also be easy to re-use the LastSignedInfo logic to avoid double signing.
-
-## Decision
-
-Tendermint node's should support only two in-process PrivValidator implementations:
-
-- PrivValidatorUnencrypted uses an unencrypted private key in a "priv_validator.json" file - no configuration required (just `tendermint init`).
-- PrivValidatorSocket uses a socket to send signing requests to another process - user is responsible for starting that process themselves.
-
-The PrivValidatorSocket address can be provided via flags at the command line -
-doing so will cause Tendermint to ignore any "priv_validator.json" file and to listen
-on the given address for incoming connections from an external priv_validator process.
-It will halt any operation until at least one external process succesfully
-connected.
-
-The external priv_validator process will dial the address to connect to Tendermint,
-and then Tendermint will send requests on the ensuing connection to sign votes and proposals.
-Thus the external process initiates the connection, but the Tendermint process makes all requests.
-In a later stage we're going to support multiple validators for fault
-tolerance. To prevent double signing they need to be synced, which is deferred
-to an external solution (see #1185).
-
-In addition, Tendermint will provide implementations that can be run in that external process.
-These include:
-
-- PrivValidatorEncrypted uses an encrypted private key persisted to disk - user must enter password to decrypt key when process is started.
-- PrivValidatorLedger uses a Ledger Nano S to handle all signing.
-
-What follows are descriptions of useful types
-
-### Signer
-
-```
-type Signer interface {
- Sign(msg []byte) (crypto.Signature, error)
-}
-```
-
-Signer signs a message. It can also return an error.
-
-### ValidatorID
-
-
-ValidatorID is just the Address and PubKey
-
-```
-type ValidatorID struct {
- Address data.Bytes `json:"address"`
- PubKey crypto.PubKey `json:"pub_key"`
-}
-```
-
-### LastSignedInfo
-
-LastSignedInfo tracks the last thing we signed:
-
-```
-type LastSignedInfo struct {
- Height int64 `json:"height"`
- Round int `json:"round"`
- Step int8 `json:"step"`
- Signature crypto.Signature `json:"signature,omitempty"` // so we dont lose signatures
- SignBytes data.Bytes `json:"signbytes,omitempty"` // so we dont lose signatures
-}
-```
-
-It exposes methods for signing votes and proposals using a `Signer`.
-
-This allows it to easily be reused by developers implemented their own PrivValidator.
-
-### PrivValidatorUnencrypted
-
-```
-type PrivValidatorUnencrypted struct {
- ID types.ValidatorID `json:"id"`
- PrivKey PrivKey `json:"priv_key"`
- LastSignedInfo *LastSignedInfo `json:"last_signed_info"`
-}
-```
-
-Has the same structure as currently, but broken up into sub structs.
-
-Note the LastSignedInfo is mutated in place every time we sign.
-
-### PrivValidatorJSON
-
-The "priv_validator.json" file supports only the PrivValidatorUnencrypted type.
-
-It unmarshals into PrivValidatorJSON, which is used as the default PrivValidator type.
-It wraps the PrivValidatorUnencrypted and persists it to disk after every signature.
-
-## Status
-
-Accepted.
-
-## Consequences
-
-### Positive
-
-- Cleaner separation of components enabling re-use.
-
-### Negative
-
-- More files - led to creation of new directory.
-
-### Neutral
+In addition, Tendermint will provide implementations that can be run in that
+external process. These include:
+- FilePV will encrypt the private key, and the user must enter password to
+ decrypt key when process is started.
+- LedgerPV uses a Ledger Nano S to handle all signing.
diff --git a/docs/deploy-testnets.rst b/docs/deploy-testnets.rst
index 5740ca56f..32355e4ae 100644
--- a/docs/deploy-testnets.rst
+++ b/docs/deploy-testnets.rst
@@ -11,26 +11,26 @@ Manual Deployments
It's relatively easy to setup a Tendermint cluster manually. The only
requirements for a particular Tendermint node are a private key for the
-validator, stored as ``priv_validator.json``, and a list of the public
-keys of all validators, stored as ``genesis.json``. These files should
-be stored in ``~/.tendermint/config``, or wherever the ``$TMHOME`` variable
-might be set to.
+validator, stored as ``priv_validator.json``, a node key, stored as
+``node_key.json`` and a list of the public keys of all validators, stored as
+``genesis.json``. These files should be stored in ``~/.tendermint/config``, or
+wherever the ``$TMHOME`` variable might be set to.
Here are the steps to setting up a testnet manually:
1) Provision nodes on your cloud provider of choice
2) Install Tendermint and the application of interest on all nodes
-3) Generate a private key for each validator using
- ``tendermint gen_validator``
+3) Generate a private key and a node key for each validator using
+ ``tendermint init``
4) Compile a list of public keys for each validator into a
- ``genesis.json`` file.
+ ``genesis.json`` file and replace the existing file with it.
5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
where ``< peer addresses >`` is a comma separated list of the IP:PORT
combination for each node. The default port for Tendermint is
``46656``. Thus, if the IP addresses of your nodes were
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
would look like:
- ``tendermint node --p2p.persistent_peers=192.168.0.1:46656,192.168.0.2:46656,192.168.0.3:46656,192.168.0.4:46656``.
+ ``tendermint node --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656,429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656,f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``.
After a few seconds, all the nodes should connect to each other and start
making blocks! For more information, see the Tendermint Networks section
diff --git a/docs/examples/getting-started.md b/docs/examples/getting-started.md
index 1675b55b2..59015b79f 100644
--- a/docs/examples/getting-started.md
+++ b/docs/examples/getting-started.md
@@ -2,8 +2,9 @@
## Overview
-This is a quick start guide. If you have a vague idea about how Tendermint works
-and want to get started right away, continue. Otherwise, [review the documentation](http://tendermint.readthedocs.io/en/master/)
+This is a quick start guide. If you have a vague idea about how Tendermint
+works and want to get started right away, continue. Otherwise, [review the
+documentation](http://tendermint.readthedocs.io/en/master/).
## Install
@@ -42,7 +43,7 @@ Confirm installation:
```
$ tendermint version
-0.15.0-381fe19
+0.18.0-XXXXXXX
```
## Initialization
@@ -117,7 +118,9 @@ where the value is returned in hex.
## Cluster of Nodes
-First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4.
+First create four Ubuntu cloud machines. The following was tested on Digital
+Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP
+addresses below as IP1, IP2, IP3, IP4.
Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
@@ -131,12 +134,16 @@ This will install `go` and other dependencies, get the Tendermint source code, t
Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
```
-tendermint node --home ./node1 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
-tendermint node --home ./node2 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
-tendermint node --home ./node3 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
-tendermint node --home ./node4 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
+tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
+tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
+tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
+tendermint node --home ./node4 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
```
-Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. Seeds can also be specified in the `config.toml`. See [this PR](https://github.com/tendermint/tendermint/pull/792) for more information about configuration options.
+Note that after the third node is started, blocks will start to stream in
+because >2/3 of validators (defined in the `genesis.json`) have come online.
+Seeds can also be specified in the `config.toml`. See [this
+PR](https://github.com/tendermint/tendermint/pull/792) for more information
+about configuration options.
Transactions can then be sent as covered in the single, local node example above.
diff --git a/docs/examples/install_tendermint.sh b/docs/examples/install_tendermint.sh
index d58b84d04..aeb87db5f 100644
--- a/docs/examples/install_tendermint.sh
+++ b/docs/examples/install_tendermint.sh
@@ -26,7 +26,7 @@ go get $REPO
cd $GOPATH/src/$REPO
## build
-git checkout v0.17.0
+git checkout v0.18.0
make get_tools
make get_vendor_deps
make install
diff --git a/docs/examples/node1/node_key.json b/docs/examples/node1/node_key.json
new file mode 100644
index 000000000..de1c41718
--- /dev/null
+++ b/docs/examples/node1/node_key.json
@@ -0,0 +1,6 @@
+{
+ "priv_key" : {
+ "data" : "DA9BAABEA7211A6D93D9A1986B4279EAB3021FAA1653D459D53E6AB4D1CFB4C69BF7D52E48CF00AC5779AA0A6D3C368955D5636A677F72370B8ED19989714CFC",
+ "type" : "ed25519"
+ }
+}
diff --git a/docs/examples/node2/node_key.json b/docs/examples/node2/node_key.json
new file mode 100644
index 000000000..4e8b0b100
--- /dev/null
+++ b/docs/examples/node2/node_key.json
@@ -0,0 +1,6 @@
+{
+ "priv_key" : {
+ "data" : "F7BCABA165DFC0DDD50AE563EFB285BAA236EA805D35612504238A36EFA105958756442B1D9F942D7ABD259F2D59671657B6378E9C7194342A7AAA47A66D1E95",
+ "type" : "ed25519"
+ }
+}
diff --git a/docs/examples/node3/node_key.json b/docs/examples/node3/node_key.json
new file mode 100644
index 000000000..32fdeee9d
--- /dev/null
+++ b/docs/examples/node3/node_key.json
@@ -0,0 +1,6 @@
+{
+ "priv_key" : {
+ "data" : "95136FCC97E4446B3141EDF9841078107ECE755E99925D79CCBF91085492680B3CA1034D9917DF1DED4E4AB2D9BC225919F6CB2176F210D2368697CC339DF4E7",
+ "type" : "ed25519"
+ }
+}
diff --git a/docs/examples/node4/node_key.json b/docs/examples/node4/node_key.json
new file mode 100644
index 000000000..955fc989a
--- /dev/null
+++ b/docs/examples/node4/node_key.json
@@ -0,0 +1,6 @@
+{
+ "priv_key" : {
+ "data" : "8895D6C9A1B46AB83A8E2BAE2121B8C3E245B9E9126EBD797FEAC5058285F2F64FDE2E8182C88AD5185A49D837C581465D57BD478C41865A66D7D9742D8AEF57",
+ "type" : "ed25519"
+ }
+}
diff --git a/docs/getting-started.rst b/docs/getting-started.rst
index fe49e27b5..9c722f0f1 100644
--- a/docs/getting-started.rst
+++ b/docs/getting-started.rst
@@ -81,9 +81,8 @@ Tendermint node as follows:
curl -s localhost:46657/status
-The ``-s`` just silences ``curl``. For nicer output, pipe the result
-into a tool like `jq `__ or
-`jsonpp `__.
+The ``-s`` just silences ``curl``. For nicer output, pipe the result into a
+tool like `jq `__ or ``json_pp``.
Now let's send some transactions to the kvstore.
@@ -104,17 +103,23 @@ like:
"id": "",
"result": {
"check_tx": {
- "code": 0,
- "data": "",
- "log": ""
+ "fee": {}
},
"deliver_tx": {
- "code": 0,
- "data": "",
- "log": ""
+ "tags": [
+ {
+ "key": "YXBwLmNyZWF0b3I=",
+ "value": "amFl"
+ },
+ {
+ "key": "YXBwLmtleQ==",
+ "value": "YWJjZA=="
+ }
+ ],
+ "fee": {}
},
- "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
- "height": 154
+ "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39",
+ "height": 14
}
}
@@ -134,20 +139,17 @@ The result should look like:
"id": "",
"result": {
"response": {
- "code": 0,
- "index": 0,
- "key": "",
- "value": "61626364",
- "proof": "",
- "height": 0,
- "log": "exists"
+ "log": "exists",
+ "index": "-1",
+ "key": "YWJjZA==",
+ "value": "YWJjZA=="
}
}
}
-Note the ``value`` in the result (``61626364``); this is the
-hex-encoding of the ASCII of ``abcd``. You can verify this in
-a python 2 shell by running ``"61626364".decode('hex')`` or in python 3 shell by running ``import codecs; codecs.decode("61626364", 'hex').decode('ascii')``. Stay
+Note the ``value`` in the result (``YWJjZA==``); this is the
+base64-encoding of the ASCII of ``abcd``. You can verify this in
+a python 2 shell by running ``"YWJjZA==".decode('base64')`` or in python 3 shell by running ``import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')``. Stay
tuned for a future release that `makes this output more human-readable `__.
Now let's try setting a different key and value:
@@ -157,7 +159,7 @@ Now let's try setting a different key and value:
curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"'
Now if we query for ``name``, we should get ``satoshi``, or
-``7361746F736869`` in hex:
+``c2F0b3NoaQ==`` in base64:
::
@@ -226,17 +228,15 @@ the number ``1``. If instead, we try to send a ``5``, we get an error:
"id": "",
"result": {
"check_tx": {
- "code": 0,
- "data": "",
- "log": ""
+ "fee": {}
},
"deliver_tx": {
- "code": 3,
- "data": "",
- "log": "Invalid nonce. Expected 1, got 5"
+ "code": 2,
+ "log": "Invalid nonce. Expected 1, got 5",
+ "fee": {}
},
"hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
- "height": 38
+ "height": 34
}
}
@@ -250,17 +250,13 @@ But if we send a ``1``, it works again:
"id": "",
"result": {
"check_tx": {
- "code": 0,
- "data": "",
- "log": ""
+ "fee": {}
},
"deliver_tx": {
- "code": 0,
- "data": "",
- "log": ""
+ "fee": {}
},
"hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
- "height": 87
+ "height": 60
}
}
diff --git a/docs/how-to-read-logs.rst b/docs/how-to-read-logs.rst
index c5a352d72..e0c3f1b4f 100644
--- a/docs/how-to-read-logs.rst
+++ b/docs/how-to-read-logs.rst
@@ -59,7 +59,7 @@ Next we replay all the messages from the WAL.
::
I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:46657 module=rpc-server
- I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{pk: PubKeyEd25519{DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E177003C4D6FD66}, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:46656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:46657])}"
+ I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:46656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:46657])}"
Next follows a standard block creation cycle, where we enter a new round,
propose a block, receive more than 2/3 of prevotes, then precommits and finally
diff --git a/docs/install.rst b/docs/install.rst
index 4ee572604..b2aae6766 100644
--- a/docs/install.rst
+++ b/docs/install.rst
@@ -4,7 +4,7 @@ Install Tendermint
From Binary
-----------
-To download pre-built binaries, see the `Download page `__.
+To download pre-built binaries, see the `Download page <https://tendermint.com/downloads>`__.
From Source
-----------
@@ -37,13 +37,13 @@ First, install ``dep``:
::
+ cd $GOPATH/src/github.com/tendermint/tendermint
make get_tools
Now we can fetch the correct versions of each dependency by running:
::
- cd $GOPATH/src/github.com/tendermint/tendermint
make get_vendor_deps
make install
@@ -96,6 +96,7 @@ If ``go get`` failing bothers you, fetch the code using ``git``:
mkdir -p $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint
+ make get_tools
make get_vendor_deps
make install
diff --git a/docs/specification/new-spec/p2p/peer.md b/docs/specification/new-spec/p2p/peer.md
index 68615c498..6aa36af79 100644
--- a/docs/specification/new-spec/p2p/peer.md
+++ b/docs/specification/new-spec/p2p/peer.md
@@ -83,7 +83,7 @@ The Tendermint Version Handshake allows the peers to exchange their NodeInfo:
```golang
type NodeInfo struct {
- PubKey crypto.PubKey
+ ID p2p.ID
Moniker string
Network string
RemoteAddr string
@@ -95,7 +95,7 @@ type NodeInfo struct {
```
The connection is disconnected if:
-- `peer.NodeInfo.PubKey != peer.PubKey`
+- `peer.NodeInfo.ID` is not equal `peerConn.ID`
- `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision
- `peer.NodeInfo.Version` Major is not the same as ours
- `peer.NodeInfo.Version` Minor is not the same as ours
diff --git a/docs/specification/secure-p2p.rst b/docs/specification/secure-p2p.rst
index c48ddb590..2110c835b 100644
--- a/docs/specification/secure-p2p.rst
+++ b/docs/specification/secure-p2p.rst
@@ -62,6 +62,13 @@ such as the Web-of-Trust or Certificate Authorities. In our case, we can
use the blockchain itself as a certificate authority to ensure that we
are connected to at least one validator.
+Config
+------
+
+Authenticated encryption is enabled by default. If you wish to use another
+authentication scheme or your peers are connected via VPN, you can turn it off
+by setting ``auth_enc`` to ``false`` in the config file.
+
Additional Reading
------------------
diff --git a/docs/using-tendermint.rst b/docs/using-tendermint.rst
index e07534c96..bc5d73859 100644
--- a/docs/using-tendermint.rst
+++ b/docs/using-tendermint.rst
@@ -74,20 +74,17 @@ RPC server, for example:
curl http://localhost:46657/broadcast_tx_commit?tx=\"abcd\"
-For handling responses, we recommend you `install the jsonpp
-tool `__ to pretty print the JSON.
-
We can see the chain's status at the ``/status`` end-point:
::
- curl http://localhost:46657/status | jsonpp
+ curl http://localhost:46657/status | json_pp
and the ``latest_app_hash`` in particular:
::
- curl http://localhost:46657/status | jsonpp | grep app_hash
+ curl http://localhost:46657/status | json_pp | grep latest_app_hash
Visit http://localhost:46657 in your browser to see the list of other
endpoints. Some take no arguments (like ``/status``), while others
@@ -185,7 +182,7 @@ once per second, it is possible to disable empty blocks or set a block creation
interval. In the former case, blocks will be created when there are new
transactions or when the AppHash changes.
-To configure Tendermint to not produce empty blocks unless there are
+To configure Tendermint to not produce empty blocks unless there are
transactions or the app hash changes, run Tendermint with this additional flag:
::
@@ -260,19 +257,19 @@ When ``tendermint init`` is run, both a ``genesis.json`` and
::
{
- "app_hash": "",
- "chain_id": "test-chain-HZw6TB",
- "genesis_time": "0001-01-01T00:00:00.000Z",
- "validators": [
- {
- "power": 10,
- "name": "",
- "pub_key": [
- 1,
- "5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
- ]
- }
- ]
+ "validators" : [
+ {
+ "pub_key" : {
+ "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
+ "type" : "AC26791624DE60"
+ },
+ "power" : 10,
+ "name" : ""
+ }
+ ],
+ "app_hash" : "",
+ "chain_id" : "test-chain-rDlYSN",
+ "genesis_time" : "0001-01-01T00:00:00Z"
}
And the ``priv_validator.json``:
@@ -280,20 +277,18 @@ And the ``priv_validator.json``:
::
{
- "address": "4F4D895F882A18E1D1FC608D102601DA8D3570E5",
- "last_height": 0,
- "last_round": 0,
- "last_signature": null,
- "last_signbytes": "",
- "last_step": 0,
- "priv_key": [
- 1,
- "F9FA3CD435BDAE54D0BCA8F1BC289D718C23D855C6DB21E8543F5E4F457E62805770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
- ],
- "pub_key": [
- 1,
- "5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
- ]
+ "last_step" : 0,
+ "last_round" : 0,
+ "address" : "B788DEDE4F50AD8BC9462DE76741CCAFF87D51E2",
+ "pub_key" : {
+ "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
+ "type" : "AC26791624DE60"
+ },
+ "last_height" : 0,
+ "priv_key" : {
+ "value" : "JPivl82x+LfVkp8i3ztoTjY6c6GJ4pBxQexErOCyhwqHeGT5ATxzpAtPJKnxNx/NyUnD8Ebv3OIYH+kgD4N88Q==",
+ "type" : "954568A3288910"
+ }
}
The ``priv_validator.json`` actually contains a private key, and should
@@ -334,14 +329,14 @@ For instance,
::
- tendermint node --p2p.seeds "1.2.3.4:46656,5.6.7.8:46656"
+ tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:46656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:46656"
Alternatively, you can use the ``/dial_seeds`` endpoint of the RPC to
specify seeds for a running node to connect to:
::
- curl 'localhost:46657/dial_seeds?seeds=\["1.2.3.4:46656","5.6.7.8:46656"\]'
+ curl 'localhost:46657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:46656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:46656"\]'
Note, if the peer-exchange protocol (PEX) is enabled (default), you should not
normally need seeds after the first start. Peers will be gossipping about known
@@ -355,8 +350,8 @@ core instance.
::
- tendermint node --p2p.persistent_peers "10.11.12.13:46656,10.11.12.14:46656"
- curl 'localhost:46657/dial_peers?persistent=true&peers=\["1.2.3.4:46656","5.6.7.8:46656"\]'
+ tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:46656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:46656"
+ curl 'localhost:46657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:46656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:46656"\]'
Adding a Non-Validator
~~~~~~~~~~~~~~~~~~~~~~
@@ -387,20 +382,18 @@ Now we can update our genesis file. For instance, if the new
::
{
- "address": "AC379688105901436A34A65F185C115B8BB277A1",
- "last_height": 0,
- "last_round": 0,
- "last_signature": null,
- "last_signbytes": "",
- "last_step": 0,
- "priv_key": [
- 1,
- "0D2ED337D748ADF79BE28559B9E59EBE1ABBA0BAFE6D65FCB9797985329B950C8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
- ],
- "pub_key": [
- 1,
- "8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
- ]
+ "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902",
+ "pub_key" : {
+ "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
+ "type" : "AC26791624DE60"
+ },
+ "priv_key" : {
+ "value" : "EDJY9W6zlAw+su6ITgTKg2nTZcHAH1NMTW5iwlgmNDuX1f35+OR4HMN88ZtQzsAwhETq4k3vzM3n6WTk5ii16Q==",
+ "type" : "954568A3288910"
+ },
+ "last_step" : 0,
+ "last_round" : 0,
+ "last_height" : 0
}
then the new ``genesis.json`` will be:
@@ -408,27 +401,27 @@ then the new ``genesis.json`` will be:
::
{
- "app_hash": "",
- "chain_id": "test-chain-HZw6TB",
- "genesis_time": "0001-01-01T00:00:00.000Z",
- "validators": [
- {
- "power": 10,
- "name": "",
- "pub_key": [
- 1,
- "5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
- ]
- },
- {
- "power": 10,
- "name": "",
- "pub_key": [
- 1,
- "8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
- ]
- }
- ]
+ "validators" : [
+ {
+ "pub_key" : {
+ "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
+ "type" : "AC26791624DE60"
+ },
+ "power" : 10,
+ "name" : ""
+ },
+ {
+ "pub_key" : {
+ "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
+ "type" : "AC26791624DE60"
+ },
+ "power" : 10,
+ "name" : ""
+ }
+ ],
+ "app_hash" : "",
+ "chain_id" : "test-chain-rDlYSN",
+ "genesis_time" : "0001-01-01T00:00:00Z"
}
Update the ``genesis.json`` in ``~/.tendermint/config``. Copy the genesis file
diff --git a/evidence/reactor.go b/evidence/reactor.go
index 6647db969..a6aa66b1f 100644
--- a/evidence/reactor.go
+++ b/evidence/reactor.go
@@ -1,12 +1,11 @@
package evidence
import (
- "bytes"
"fmt"
"reflect"
"time"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/p2p"
@@ -16,7 +15,7 @@ import (
const (
EvidenceChannel = byte(0x38)
- maxEvidenceMessageSize = 1048576 // 1MB TODO make it configurable
+ maxMsgSize = 1048576 // 1MB TODO make it configurable
broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often
)
@@ -68,7 +67,7 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
// the rest will be sent by the broadcastRoutine
evidences := evR.evpool.PriorityEvidence()
msg := &EvidenceListMessage{evidences}
- success := peer.Send(EvidenceChannel, struct{ EvidenceMessage }{msg})
+ success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
if !success {
// TODO: remove peer ?
}
@@ -82,7 +81,7 @@ func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Receive implements Reactor.
// It adds any received evidence to the evpool.
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
- _, msg, err := DecodeMessage(msgBytes)
+ msg, err := DecodeMessage(msgBytes)
if err != nil {
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
evR.Switch.StopPeerForError(src, err)
@@ -119,7 +118,7 @@ func (evR *EvidenceReactor) broadcastRoutine() {
case evidence := <-evR.evpool.EvidenceChan():
// broadcast some new evidence
msg := &EvidenceListMessage{[]types.Evidence{evidence}}
- evR.Switch.Broadcast(EvidenceChannel, struct{ EvidenceMessage }{msg})
+ evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
// TODO: Broadcast runs asynchronously, so this should wait on the successChan
// in another routine before marking to be proper.
@@ -127,7 +126,7 @@ func (evR *EvidenceReactor) broadcastRoutine() {
case <-ticker.C:
// broadcast all pending evidence
msg := &EvidenceListMessage{evR.evpool.PendingEvidence()}
- evR.Switch.Broadcast(EvidenceChannel, struct{ EvidenceMessage }{msg})
+ evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
case <-evR.Quit():
return
}
@@ -137,24 +136,22 @@ func (evR *EvidenceReactor) broadcastRoutine() {
//-----------------------------------------------------------------------------
// Messages
-const (
- msgTypeEvidence = byte(0x01)
-)
-
// EvidenceMessage is a message sent or received by the EvidenceReactor.
type EvidenceMessage interface{}
-var _ = wire.RegisterInterface(
- struct{ EvidenceMessage }{},
- wire.ConcreteType{&EvidenceListMessage{}, msgTypeEvidence},
-)
+func RegisterEvidenceMessages(cdc *amino.Codec) {
+ cdc.RegisterInterface((*EvidenceMessage)(nil), nil)
+ cdc.RegisterConcrete(&EvidenceListMessage{},
+ "tendermint/evidence/EvidenceListMessage", nil)
+}
// DecodeMessage decodes a byte-array into a EvidenceMessage.
-func DecodeMessage(bz []byte) (msgType byte, msg EvidenceMessage, err error) {
- msgType = bz[0]
- n := new(int)
- r := bytes.NewReader(bz)
- msg = wire.ReadBinary(struct{ EvidenceMessage }{}, r, maxEvidenceMessageSize, n, &err).(struct{ EvidenceMessage }).EvidenceMessage
+func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) {
+ if len(bz) > maxMsgSize {
+ return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+ len(bz), maxMsgSize)
+ }
+ err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}
diff --git a/evidence/store.go b/evidence/store.go
index 7c8becd02..081715e3c 100644
--- a/evidence/store.go
+++ b/evidence/store.go
@@ -3,7 +3,6 @@ package evidence
import (
"fmt"
- wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tmlibs/db"
)
@@ -104,7 +103,10 @@ func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evi
val := iter.Value()
var ei EvidenceInfo
- wire.ReadBinaryBytes(val, &ei)
+ err := cdc.UnmarshalBinaryBare(val, &ei)
+ if err != nil {
+ panic(err)
+ }
evidence = append(evidence, ei.Evidence)
}
return evidence
@@ -119,7 +121,10 @@ func (store *EvidenceStore) GetEvidence(height int64, hash []byte) *EvidenceInfo
return nil
}
var ei EvidenceInfo
- wire.ReadBinaryBytes(val, &ei)
+ err := cdc.UnmarshalBinaryBare(val, &ei)
+ if err != nil {
+ panic(err)
+ }
return &ei
}
@@ -137,7 +142,7 @@ func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int
Priority: priority,
Evidence: evidence,
}
- eiBytes := wire.BinaryBytes(ei)
+ eiBytes := cdc.MustMarshalBinaryBare(ei)
// add it to the store
key := keyOutqueue(evidence, priority)
@@ -171,7 +176,7 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
ei.Committed = true
lookupKey := keyLookup(evidence)
- store.db.SetSync(lookupKey, wire.BinaryBytes(ei))
+ store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei))
}
//---------------------------------------------------
@@ -181,6 +186,9 @@ func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInf
key := keyLookup(evidence)
var ei EvidenceInfo
b := store.db.Get(key)
- wire.ReadBinaryBytes(b, &ei)
+ err := cdc.UnmarshalBinaryBare(b, &ei)
+ if err != nil {
+ panic(err)
+ }
return ei
}
diff --git a/evidence/store_test.go b/evidence/store_test.go
index 180bee58f..3fdb3ba6e 100644
--- a/evidence/store_test.go
+++ b/evidence/store_test.go
@@ -4,7 +4,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tmlibs/db"
)
@@ -108,15 +107,3 @@ func TestStorePriority(t *testing.T) {
assert.Equal(ev, cases[i].ev)
}
}
-
-//-------------------------------------------
-const (
- evidenceTypeMockGood = byte(0x01)
- evidenceTypeMockBad = byte(0x02)
-)
-
-var _ = wire.RegisterInterface(
- struct{ types.Evidence }{},
- wire.ConcreteType{types.MockGoodEvidence{}, evidenceTypeMockGood},
- wire.ConcreteType{types.MockBadEvidence{}, evidenceTypeMockBad},
-)
diff --git a/evidence/wire.go b/evidence/wire.go
new file mode 100644
index 000000000..842e0707a
--- /dev/null
+++ b/evidence/wire.go
@@ -0,0 +1,25 @@
+package evidence
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+ "github.com/tendermint/tendermint/types"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ RegisterEvidenceMessages(cdc)
+ crypto.RegisterAmino(cdc)
+ types.RegisterEvidences(cdc)
+ RegisterMockEvidences(cdc) // For testing
+}
+
+//-------------------------------------------
+
+func RegisterMockEvidences(cdc *amino.Codec) {
+ cdc.RegisterConcrete(types.MockGoodEvidence{},
+ "tendermint/MockGoodEvidence", nil)
+ cdc.RegisterConcrete(types.MockBadEvidence{},
+ "tendermint/MockBadEvidence", nil)
+}
diff --git a/lite/client/provider.go b/lite/client/provider.go
index c98297dec..5f3d72450 100644
--- a/lite/client/provider.go
+++ b/lite/client/provider.go
@@ -93,7 +93,7 @@ func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) {
if err != nil {
return nil, err
}
- return p.node.Commit(&status.LatestBlockHeight)
+ return p.node.Commit(&status.SyncInfo.LatestBlockHeight)
}
// CommitFromResult ...
diff --git a/lite/files/commit.go b/lite/files/commit.go
index 33f5bb677..8a7e4721e 100644
--- a/lite/files/commit.go
+++ b/lite/files/commit.go
@@ -1,13 +1,11 @@
package files
import (
- "encoding/json"
+ "io/ioutil"
"os"
"github.com/pkg/errors"
- wire "github.com/tendermint/go-wire"
-
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
@@ -19,7 +17,7 @@ const (
MaxFullCommitSize = 1024 * 1024
)
-// SaveFullCommit exports the seed in binary / go-wire style
+// SaveFullCommit exports the seed in binary / go-amino style
func SaveFullCommit(fc lite.FullCommit, path string) error {
f, err := os.Create(path)
if err != nil {
@@ -27,9 +25,11 @@ func SaveFullCommit(fc lite.FullCommit, path string) error {
}
defer f.Close()
- var n int
- wire.WriteBinary(fc, f, &n, &err)
- return errors.WithStack(err)
+ _, err = cdc.MarshalBinaryWriter(f, fc)
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ return nil
}
// SaveFullCommitJSON exports the seed in a json format
@@ -39,9 +39,15 @@ func SaveFullCommitJSON(fc lite.FullCommit, path string) error {
return errors.WithStack(err)
}
defer f.Close()
- stream := json.NewEncoder(f)
- err = stream.Encode(fc)
- return errors.WithStack(err)
+ bz, err := cdc.MarshalJSON(fc)
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ _, err = f.Write(bz)
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ return nil
}
// LoadFullCommit loads the full commit from the file system.
@@ -56,9 +62,11 @@ func LoadFullCommit(path string) (lite.FullCommit, error) {
}
defer f.Close()
- var n int
- wire.ReadBinaryPtr(&fc, f, MaxFullCommitSize, &n, &err)
- return fc, errors.WithStack(err)
+ _, err = cdc.UnmarshalBinaryReader(f, &fc, 0)
+ if err != nil {
+ return fc, errors.WithStack(err)
+ }
+ return fc, nil
}
// LoadFullCommitJSON loads the commit from the file system in JSON format.
@@ -73,7 +81,13 @@ func LoadFullCommitJSON(path string) (lite.FullCommit, error) {
}
defer f.Close()
- stream := json.NewDecoder(f)
- err = stream.Decode(&fc)
- return fc, errors.WithStack(err)
+ bz, err := ioutil.ReadAll(f)
+ if err != nil {
+ return fc, errors.WithStack(err)
+ }
+ err = cdc.UnmarshalJSON(bz, &fc)
+ if err != nil {
+ return fc, errors.WithStack(err)
+ }
+ return fc, nil
}
diff --git a/lite/files/wire.go b/lite/files/wire.go
new file mode 100644
index 000000000..99f98931e
--- /dev/null
+++ b/lite/files/wire.go
@@ -0,0 +1,12 @@
+package files
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+}
diff --git a/lite/helpers.go b/lite/helpers.go
index 7df77027c..714675afb 100644
--- a/lite/helpers.go
+++ b/lite/helpers.go
@@ -23,7 +23,7 @@ type ValKeys []crypto.PrivKey
func GenValKeys(n int) ValKeys {
res := make(ValKeys, n)
for i := range res {
- res[i] = crypto.GenPrivKeyEd25519().Wrap()
+ res[i] = crypto.GenPrivKeyEd25519()
}
return res
}
@@ -32,7 +32,7 @@ func GenValKeys(n int) ValKeys {
func (v ValKeys) Change(i int) ValKeys {
res := make(ValKeys, len(v))
copy(res, v)
- res[i] = crypto.GenPrivKeyEd25519().Wrap()
+ res[i] = crypto.GenPrivKeyEd25519()
return res
}
@@ -46,7 +46,7 @@ func (v ValKeys) Extend(n int) ValKeys {
func GenSecpValKeys(n int) ValKeys {
res := make(ValKeys, n)
for i := range res {
- res[i] = crypto.GenPrivKeySecp256k1().Wrap()
+ res[i] = crypto.GenPrivKeySecp256k1()
}
return res
}
diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go
index 34aa99fa0..fe10399dc 100644
--- a/lite/proxy/proxy.go
+++ b/lite/proxy/proxy.go
@@ -3,10 +3,12 @@ package proxy
import (
"net/http"
+ "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log"
rpcclient "github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/core"
+ ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpc "github.com/tendermint/tendermint/rpc/lib/server"
)
@@ -23,13 +25,15 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error
return err
}
+ cdc := amino.NewCodec()
+ ctypes.RegisterAmino(cdc)
r := RPCRoutes(c)
// build the handler...
mux := http.NewServeMux()
- rpc.RegisterRPCFuncs(mux, r, logger)
+ rpc.RegisterRPCFuncs(mux, r, cdc, logger)
- wm := rpc.NewWebsocketManager(r, rpc.EventSubscriber(c))
+ wm := rpc.NewWebsocketManager(r, cdc, rpc.EventSubscriber(c))
wm.SetLogger(logger)
core.SetLogger(logger)
mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)
diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go
index e8aa011e1..5fb12a40a 100644
--- a/lite/proxy/wrapper.go
+++ b/lite/proxy/wrapper.go
@@ -146,7 +146,7 @@ func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) {
// }
// // check to validate it if possible, and drop if not valid
-// switch t := tm.Unwrap().(type) {
+// switch t := tm.(type) {
// case types.EventDataNewBlockHeader:
// err := verifyHeader(s.client, t.Header)
// if err != nil {
diff --git a/mempool/reactor.go b/mempool/reactor.go
index 514347e94..54a3c32fe 100644
--- a/mempool/reactor.go
+++ b/mempool/reactor.go
@@ -1,13 +1,12 @@
package mempool
import (
- "bytes"
"fmt"
"reflect"
"time"
abci "github.com/tendermint/abci/types"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/clist"
"github.com/tendermint/tmlibs/log"
@@ -19,7 +18,7 @@ import (
const (
MempoolChannel = byte(0x30)
- maxMempoolMessageSize = 1048576 // 1MB TODO make it configurable
+ maxMsgSize = 1048576 // 1MB TODO make it configurable
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
)
@@ -71,7 +70,7 @@ func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
- _, msg, err := DecodeMessage(msgBytes)
+ msg, err := DecodeMessage(msgBytes)
if err != nil {
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
memR.Switch.StopPeerForError(src, err)
@@ -137,7 +136,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
}
// send memTx
msg := &TxMessage{Tx: memTx.tx}
- success := peer.Send(MempoolChannel, struct{ MempoolMessage }{msg})
+ success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg))
if !success {
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
continue
@@ -158,24 +157,21 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
//-----------------------------------------------------------------------------
// Messages
-const (
- msgTypeTx = byte(0x01)
-)
-
// MempoolMessage is a message sent or received by the MempoolReactor.
type MempoolMessage interface{}
-var _ = wire.RegisterInterface(
- struct{ MempoolMessage }{},
- wire.ConcreteType{&TxMessage{}, msgTypeTx},
-)
+func RegisterMempoolMessages(cdc *amino.Codec) {
+ cdc.RegisterInterface((*MempoolMessage)(nil), nil)
+ cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
+}
// DecodeMessage decodes a byte-array into a MempoolMessage.
-func DecodeMessage(bz []byte) (msgType byte, msg MempoolMessage, err error) {
- msgType = bz[0]
- n := new(int)
- r := bytes.NewReader(bz)
- msg = wire.ReadBinary(struct{ MempoolMessage }{}, r, maxMempoolMessageSize, n, &err).(struct{ MempoolMessage }).MempoolMessage
+func DecodeMessage(bz []byte) (msg MempoolMessage, err error) {
+ if len(bz) > maxMsgSize {
+ return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+ len(bz), maxMsgSize)
+ }
+ err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}
diff --git a/mempool/wire.go b/mempool/wire.go
new file mode 100644
index 000000000..ed0897268
--- /dev/null
+++ b/mempool/wire.go
@@ -0,0 +1,11 @@
+package mempool
+
+import (
+ "github.com/tendermint/go-amino"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ RegisterMempoolMessages(cdc)
+}
diff --git a/node/node.go b/node/node.go
index acde2e482..976066964 100644
--- a/node/node.go
+++ b/node/node.go
@@ -2,15 +2,14 @@ package node
import (
"bytes"
- "encoding/json"
"errors"
"fmt"
"net"
"net/http"
abci "github.com/tendermint/abci/types"
+ amino "github.com/tendermint/go-amino"
crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
@@ -25,6 +24,7 @@ import (
"github.com/tendermint/tendermint/p2p/trust"
"github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core"
+ ctypes "github.com/tendermint/tendermint/rpc/core/types"
grpccore "github.com/tendermint/tendermint/rpc/grpc"
rpc "github.com/tendermint/tendermint/rpc/lib"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
@@ -33,7 +33,7 @@ import (
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types"
- priv_val "github.com/tendermint/tendermint/types/priv_validator"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tendermint/version"
_ "net/http/pprof"
@@ -78,7 +78,7 @@ type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
return NewNode(config,
- types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()),
+ pvm.LoadOrGenFilePV(config.PrivValidatorFile()),
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
DefaultGenesisDocProviderFunc(config),
DefaultDBProvider,
@@ -179,8 +179,8 @@ func NewNode(config *cfg.Config,
// TODO: persist this key so external signer
// can actually authenticate us
privKey = crypto.GenPrivKeyEd25519()
- pvsc = priv_val.NewSocketClient(
- logger.With("module", "priv_val"),
+ pvsc = pvm.NewSocketPV(
+ logger.With("module", "pvm"),
config.PrivValidatorListenAddr,
privKey,
)
@@ -405,7 +405,7 @@ func (n *Node) OnStart() error {
}
n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile())
- nodeInfo := n.makeNodeInfo(nodeKey.PubKey())
+ nodeInfo := n.makeNodeInfo(nodeKey.ID())
n.sw.SetNodeInfo(nodeInfo)
n.sw.SetNodeKey(nodeKey)
@@ -448,7 +448,7 @@ func (n *Node) OnStop() {
n.eventBus.Stop()
n.indexerService.Stop()
- if pvsc, ok := n.privValidator.(*priv_val.SocketClient); ok {
+ if pvsc, ok := n.privValidator.(*pvm.SocketPV); ok {
if err := pvsc.Stop(); err != nil {
n.Logger.Error("Error stopping priv validator socket client", "err", err)
}
@@ -492,6 +492,8 @@ func (n *Node) ConfigureRPC() {
func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC()
listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")
+ coreCodec := amino.NewCodec()
+ ctypes.RegisterAmino(coreCodec)
if n.config.RPC.Unsafe {
rpccore.AddUnsafeRoutes()
@@ -502,10 +504,10 @@ func (n *Node) startRPC() ([]net.Listener, error) {
for i, listenAddr := range listenAddrs {
mux := http.NewServeMux()
rpcLogger := n.Logger.With("module", "rpc-server")
- wm := rpcserver.NewWebsocketManager(rpccore.Routes, rpcserver.EventSubscriber(n.eventBus))
+ wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
wm.SetLogger(rpcLogger.With("protocol", "websocket"))
mux.HandleFunc("/websocket", wm.WebsocketHandler)
- rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
+ rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
if err != nil {
return nil, err
@@ -577,13 +579,13 @@ func (n *Node) ProxyApp() proxy.AppConns {
return n.proxyApp
}
-func (n *Node) makeNodeInfo(pubKey crypto.PubKey) p2p.NodeInfo {
+func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
txIndexerStatus := "on"
if _, ok := n.txIndexer.(*null.TxIndex); ok {
txIndexerStatus = "off"
}
nodeInfo := p2p.NodeInfo{
- PubKey: pubKey,
+ ID: nodeID,
Network: n.genesisDoc.ChainID,
Version: version.Version,
Channels: []byte{
@@ -594,7 +596,7 @@ func (n *Node) makeNodeInfo(pubKey crypto.PubKey) p2p.NodeInfo {
},
Moniker: n.config.Moniker,
Other: []string{
- cmn.Fmt("wire_version=%v", wire.Version),
+ cmn.Fmt("amino_version=%v", amino.Version),
cmn.Fmt("p2p_version=%v", p2p.Version),
cmn.Fmt("consensus_version=%v", cs.Version),
cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
@@ -641,7 +643,7 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
return nil, errors.New("Genesis doc not found")
}
var genDoc *types.GenesisDoc
- err := json.Unmarshal(bytes, &genDoc)
+ err := cdc.UnmarshalJSON(bytes, &genDoc)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
}
@@ -650,7 +652,7 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
// panics if failed to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
- bytes, err := json.Marshal(genDoc)
+ bytes, err := cdc.MarshalJSON(genDoc)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
}
diff --git a/node/wire.go b/node/wire.go
new file mode 100644
index 000000000..a0d7677df
--- /dev/null
+++ b/node/wire.go
@@ -0,0 +1,12 @@
+package node
+
+import (
+ amino "github.com/tendermint/go-amino"
+ crypto "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+}
diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go
index 9a3360f2d..6684941ab 100644
--- a/p2p/conn/connection.go
+++ b/p2p/conn/connection.go
@@ -7,17 +7,21 @@ import (
"io"
"math"
"net"
+ "reflect"
"sync/atomic"
"time"
- wire "github.com/tendermint/go-wire"
+ amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate"
"github.com/tendermint/tmlibs/log"
)
const (
- numBatchMsgPackets = 10
+ maxPacketMsgPayloadSizeDefault = 1024 // NOTE: Must be below 16,384 bytes for 14 below.
+ maxPacketMsgOverheadSize = 14 // NOTE: See connection_test for derivation.
+
+ numBatchPacketMsgs = 10
minReadBufferSize = 1024
minWriteBufferSize = 65536
updateStats = 2 * time.Second
@@ -52,35 +56,34 @@ The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.
There are two methods for sending messages:
- func (m MConnection) Send(chID byte, msg interface{}) bool {}
- func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
+ func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
+ func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
-`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
-for the channel with the given id byte `chID`, or until the request times out.
-The message `msg` is serialized using the `tendermint/wire` submodule's
-`WriteBinary()` reflection routine.
+`Send(chID, msgBytes)` is a blocking call that waits until `msg` is
+successfully queued for the channel with the given id byte `chID`, or until the
+request times out. The message `msg` is serialized using Go-Amino.
-`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's
-queue is full.
+`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
+channel's queue is full.
Inbound message bytes are handled with an onReceive callback function.
*/
type MConnection struct {
cmn.BaseService
- conn net.Conn
- bufReader *bufio.Reader
- bufWriter *bufio.Writer
- sendMonitor *flow.Monitor
- recvMonitor *flow.Monitor
- send chan struct{}
- pong chan struct{}
- channels []*Channel
- channelsIdx map[byte]*Channel
- onReceive receiveCbFunc
- onError errorCbFunc
- errored uint32
- config *MConnConfig
+ conn net.Conn
+ bufConnReader *bufio.Reader
+ bufConnWriter *bufio.Writer
+ sendMonitor *flow.Monitor
+ recvMonitor *flow.Monitor
+ send chan struct{}
+ pong chan struct{}
+ channels []*Channel
+ channelsIdx map[byte]*Channel
+ onReceive receiveCbFunc
+ onError errorCbFunc
+ errored uint32
+ config *MConnConfig
quit chan struct{}
flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
@@ -101,7 +104,7 @@ type MConnConfig struct {
RecvRate int64 `mapstructure:"recv_rate"`
// Maximum payload size
- MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"`
+ MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
// Interval to flush writes (throttled)
FlushThrottle time.Duration `mapstructure:"flush_throttle"`
@@ -113,8 +116,8 @@ type MConnConfig struct {
PongTimeout time.Duration `mapstructure:"pong_timeout"`
}
-func (cfg *MConnConfig) maxMsgPacketTotalSize() int {
- return cfg.MaxMsgPacketPayloadSize + maxMsgPacketOverheadSize
+func (cfg *MConnConfig) maxPacketMsgTotalSize() int {
+ return cfg.MaxPacketMsgPayloadSize + maxPacketMsgOverheadSize
}
// DefaultMConnConfig returns the default config.
@@ -122,7 +125,7 @@ func DefaultMConnConfig() *MConnConfig {
return &MConnConfig{
SendRate: defaultSendRate,
RecvRate: defaultRecvRate,
- MaxMsgPacketPayloadSize: defaultMaxMsgPacketPayloadSize,
+ MaxPacketMsgPayloadSize: maxPacketMsgPayloadSizeDefault,
FlushThrottle: defaultFlushThrottle,
PingInterval: defaultPingInterval,
PongTimeout: defaultPongTimeout,
@@ -146,16 +149,16 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
}
mconn := &MConnection{
- conn: conn,
- bufReader: bufio.NewReaderSize(conn, minReadBufferSize),
- bufWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
- sendMonitor: flow.New(0, 0),
- recvMonitor: flow.New(0, 0),
- send: make(chan struct{}, 1),
- pong: make(chan struct{}, 1),
- onReceive: onReceive,
- onError: onError,
- config: config,
+ conn: conn,
+ bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
+ bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
+ sendMonitor: flow.New(0, 0),
+ recvMonitor: flow.New(0, 0),
+ send: make(chan struct{}, 1),
+ pong: make(chan struct{}, 1),
+ onReceive: onReceive,
+ onError: onError,
+ config: config,
}
// Create channels
@@ -220,7 +223,7 @@ func (c *MConnection) String() string {
func (c *MConnection) flush() {
c.Logger.Debug("Flush", "conn", c)
- err := c.bufWriter.Flush()
+ err := c.bufConnWriter.Flush()
if err != nil {
c.Logger.Error("MConnection flush failed", "err", err)
}
@@ -229,7 +232,7 @@ func (c *MConnection) flush() {
// Catch panics, usually caused by remote disconnects.
func (c *MConnection) _recover() {
if r := recover(); r != nil {
- err := cmn.ErrorWrap(r, "recovered from panic")
+ err := cmn.ErrorWrap(r, "recovered panic in MConnection")
c.stopForError(err)
}
}
@@ -244,12 +247,12 @@ func (c *MConnection) stopForError(r interface{}) {
}
// Queues a message to be sent to channel.
-func (c *MConnection) Send(chID byte, msg interface{}) bool {
+func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
if !c.IsRunning() {
return false
}
- c.Logger.Debug("Send", "channel", chID, "conn", c, "msg", msg) //, "bytes", wire.BinaryBytes(msg))
+ c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
// Send message to channel.
channel, ok := c.channelsIdx[chID]
@@ -258,7 +261,7 @@ func (c *MConnection) Send(chID byte, msg interface{}) bool {
return false
}
- success := channel.sendBytes(wire.BinaryBytes(msg))
+ success := channel.sendBytes(msgBytes)
if success {
// Wake up sendRoutine if necessary
select {
@@ -266,19 +269,19 @@ func (c *MConnection) Send(chID byte, msg interface{}) bool {
default:
}
} else {
- c.Logger.Error("Send failed", "channel", chID, "conn", c, "msg", msg)
+ c.Logger.Error("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
}
return success
}
// Queues a message to be sent to channel.
// Nonblocking, returns true if successful.
-func (c *MConnection) TrySend(chID byte, msg interface{}) bool {
+func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
if !c.IsRunning() {
return false
}
- c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msg", msg)
+ c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
// Send message to channel.
channel, ok := c.channelsIdx[chID]
@@ -287,7 +290,7 @@ func (c *MConnection) TrySend(chID byte, msg interface{}) bool {
return false
}
- ok = channel.trySendBytes(wire.BinaryBytes(msg))
+ ok = channel.trySendBytes(msgBytes)
if ok {
// Wake up sendRoutine if necessary
select {
@@ -320,12 +323,13 @@ func (c *MConnection) sendRoutine() {
FOR_LOOP:
for {
- var n int
+ var _n int64
var err error
+ SELECTION:
select {
case <-c.flushTimer.Ch:
// NOTE: flushTimer.Set() must be called every time
- // something is written to .bufWriter.
+ // something is written to .bufConnWriter.
c.flush()
case <-c.chStatsTimer.Chan():
for _, channel := range c.channels {
@@ -333,8 +337,11 @@ FOR_LOOP:
}
case <-c.pingTimer.Chan():
c.Logger.Debug("Send Ping")
- wire.WriteByte(packetTypePing, c.bufWriter, &n, &err)
- c.sendMonitor.Update(int(n))
+ _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPing{})
+ if err != nil {
+ break SELECTION
+ }
+ c.sendMonitor.Update(int(_n))
c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
select {
@@ -352,14 +359,17 @@ FOR_LOOP:
}
case <-c.pong:
c.Logger.Debug("Send Pong")
- wire.WriteByte(packetTypePong, c.bufWriter, &n, &err)
- c.sendMonitor.Update(int(n))
+ _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPong{})
+ if err != nil {
+ break SELECTION
+ }
+ c.sendMonitor.Update(int(_n))
c.flush()
case <-c.quit:
break FOR_LOOP
case <-c.send:
- // Send some msgPackets
- eof := c.sendSomeMsgPackets()
+ // Send some PacketMsgs
+ eof := c.sendSomePacketMsgs()
if !eof {
// Keep sendRoutine awake.
select {
@@ -385,15 +395,15 @@ FOR_LOOP:
// Returns true if messages from channels were exhausted.
// Blocks in accordance to .sendMonitor throttling.
-func (c *MConnection) sendSomeMsgPackets() bool {
+func (c *MConnection) sendSomePacketMsgs() bool {
// Block until .sendMonitor says we can write.
// Once we're ready we send more than we asked for,
// but amortized it should even out.
- c.sendMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.SendRate), true)
+ c.sendMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.SendRate), true)
- // Now send some msgPackets.
- for i := 0; i < numBatchMsgPackets; i++ {
- if c.sendMsgPacket() {
+ // Now send some PacketMsgs.
+ for i := 0; i < numBatchPacketMsgs; i++ {
+ if c.sendPacketMsg() {
return true
}
}
@@ -401,8 +411,8 @@ func (c *MConnection) sendSomeMsgPackets() bool {
}
// Returns true if messages from channels were exhausted.
-func (c *MConnection) sendMsgPacket() bool {
- // Choose a channel to create a msgPacket from.
+func (c *MConnection) sendPacketMsg() bool {
+ // Choose a channel to create a PacketMsg from.
// The chosen channel will be the one whose recentlySent/priority is the least.
var leastRatio float32 = math.MaxFloat32
var leastChannel *Channel
@@ -425,19 +435,19 @@ func (c *MConnection) sendMsgPacket() bool {
}
// c.Logger.Info("Found a msgPacket to send")
- // Make & send a msgPacket from this channel
- n, err := leastChannel.writeMsgPacketTo(c.bufWriter)
+ // Make & send a PacketMsg from this channel
+ _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
if err != nil {
- c.Logger.Error("Failed to write msgPacket", "err", err)
+ c.Logger.Error("Failed to write PacketMsg", "err", err)
c.stopForError(err)
return true
}
- c.sendMonitor.Update(int(n))
+ c.sendMonitor.Update(int(_n))
c.flushTimer.Set()
return false
}
-// recvRoutine reads msgPackets and reconstructs the message using the channels' "recving" buffer.
+// recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
// After a whole message has been assembled, it's pushed to onReceive().
// Blocks depending on how the connection is throttled.
// Otherwise, it never blocks.
@@ -447,28 +457,28 @@ func (c *MConnection) recvRoutine() {
FOR_LOOP:
for {
// Block until .recvMonitor says we can read.
- c.recvMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true)
+ c.recvMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true)
+ // Peek into bufConnReader for debugging
/*
- // Peek into bufReader for debugging
- if numBytes := c.bufReader.Buffered(); numBytes > 0 {
- log.Info("Peek connection buffer", "numBytes", numBytes, "bytes", log15.Lazy{func() []byte {
- bytes, err := c.bufReader.Peek(cmn.MinInt(numBytes, 100))
- if err == nil {
- return bytes
- } else {
- log.Warn("Error peeking connection buffer", "err", err)
- return nil
- }
- }})
+ if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
+ bz, err := c.bufConnReader.Peek(cmn.MinInt(numBytes, 100))
+ if err == nil {
+ // return
+ } else {
+ c.Logger.Debug("Error peeking connection buffer", "err", err)
+ // return nil
+ }
+ c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
}
*/
// Read packet type
- var n int
+ var packet Packet
+ var _n int64
var err error
- pktType := wire.ReadByte(c.bufReader, &n, &err)
- c.recvMonitor.Update(int(n))
+ _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.maxPacketMsgTotalSize()))
+ c.recvMonitor.Update(int(_n))
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
@@ -478,8 +488,8 @@ FOR_LOOP:
}
// Read more depending on packet type.
- switch pktType {
- case packetTypePing:
+ switch pkt := packet.(type) {
+ case PacketPing:
// TODO: prevent abuse, as they cause flush()'s.
// https://github.com/tendermint/tendermint/issues/1190
c.Logger.Debug("Receive Ping")
@@ -488,24 +498,14 @@ FOR_LOOP:
default:
// never block
}
- case packetTypePong:
+ case PacketPong:
c.Logger.Debug("Receive Pong")
select {
case c.pongTimeoutCh <- false:
default:
// never block
}
- case packetTypeMsg:
- pkt, n, err := msgPacket{}, int(0), error(nil)
- wire.ReadBinaryPtr(&pkt, c.bufReader, c.config.maxMsgPacketTotalSize(), &n, &err)
- c.recvMonitor.Update(int(n))
- if err != nil {
- if c.IsRunning() {
- c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
- c.stopForError(err)
- }
- break FOR_LOOP
- }
+ case PacketMsg:
channel, ok := c.channelsIdx[pkt.ChannelID]
if !ok || channel == nil {
err := fmt.Errorf("Unknown channel %X", pkt.ChannelID)
@@ -514,7 +514,7 @@ FOR_LOOP:
break FOR_LOOP
}
- msgBytes, err := channel.recvMsgPacket(pkt)
+ msgBytes, err := channel.recvPacketMsg(pkt)
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
@@ -528,7 +528,7 @@ FOR_LOOP:
c.onReceive(pkt.ChannelID, msgBytes)
}
default:
- err := fmt.Errorf("Unknown message type %X", pktType)
+ err := fmt.Errorf("Unknown message type %v", reflect.TypeOf(packet))
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err)
break FOR_LOOP
@@ -620,7 +620,7 @@ type Channel struct {
sending []byte
recentlySent int64 // exponential moving average
- maxMsgPacketPayloadSize int
+ maxPacketMsgPayloadSize int
Logger log.Logger
}
@@ -635,7 +635,7 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
desc: desc,
sendQueue: make(chan []byte, desc.SendQueueCapacity),
recving: make([]byte, 0, desc.RecvBufferCapacity),
- maxMsgPacketPayloadSize: conn.config.MaxMsgPacketPayloadSize,
+ maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
}
}
@@ -680,8 +680,8 @@ func (ch *Channel) canSend() bool {
return ch.loadSendQueueSize() < defaultSendQueueCapacity
}
-// Returns true if any msgPackets are pending to be sent.
-// Call before calling nextMsgPacket()
+// Returns true if any PacketMsgs are pending to be sent.
+// Call before calling nextPacketMsg()
// Goroutine-safe
func (ch *Channel) isSendPending() bool {
if len(ch.sending) == 0 {
@@ -693,12 +693,12 @@ func (ch *Channel) isSendPending() bool {
return true
}
-// Creates a new msgPacket to send.
+// Creates a new PacketMsg to send.
// Not goroutine-safe
-func (ch *Channel) nextMsgPacket() msgPacket {
- packet := msgPacket{}
+func (ch *Channel) nextPacketMsg() PacketMsg {
+ packet := PacketMsg{}
packet.ChannelID = byte(ch.desc.ID)
- maxSize := ch.maxMsgPacketPayloadSize
+ maxSize := ch.maxPacketMsgPayloadSize
packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))]
if len(ch.sending) <= maxSize {
packet.EOF = byte(0x01)
@@ -711,30 +711,23 @@ func (ch *Channel) nextMsgPacket() msgPacket {
return packet
}
-// Writes next msgPacket to w.
+// Writes next PacketMsg to w and updates c.recentlySent.
// Not goroutine-safe
-func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) {
- packet := ch.nextMsgPacket()
- ch.Logger.Debug("Write Msg Packet", "conn", ch.conn, "packet", packet)
- writeMsgPacketTo(packet, w, &n, &err)
- if err == nil {
- ch.recentlySent += int64(n)
- }
+func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
+ var packet = ch.nextPacketMsg()
+ n, err = cdc.MarshalBinaryWriter(w, packet)
+ ch.recentlySent += n
return
}
-func writeMsgPacketTo(packet msgPacket, w io.Writer, n *int, err *error) {
- wire.WriteByte(packetTypeMsg, w, n, err)
- wire.WriteBinary(packet, w, n, err)
-}
-
-// Handles incoming msgPackets. It returns a message bytes if message is
-// complete. NOTE message bytes may change on next call to recvMsgPacket.
+// Handles incoming PacketMsgs. It returns a message bytes if message is
+// complete. NOTE message bytes may change on next call to recvPacketMsg.
// Not goroutine-safe
-func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) {
- ch.Logger.Debug("Read Msg Packet", "conn", ch.conn, "packet", packet)
- if ch.desc.RecvMessageCapacity < len(ch.recving)+len(packet.Bytes) {
- return nil, wire.ErrBinaryReadOverflow
+func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
+ ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
+ var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes)
+ if recvCap < recvReceived {
+ return nil, fmt.Errorf("Received message exceeds available capacity: %v < %v", recvCap, recvReceived)
}
ch.recving = append(ch.recving, packet.Bytes...)
if packet.EOF == byte(0x01) {
@@ -758,24 +751,36 @@ func (ch *Channel) updateStats() {
ch.recentlySent = int64(float64(ch.recentlySent) * 0.8)
}
-//-----------------------------------------------------------------------------
+//----------------------------------------
+// Packet
-const (
- defaultMaxMsgPacketPayloadSize = 1024
+type Packet interface {
+ AssertIsPacket()
+}
- maxMsgPacketOverheadSize = 10 // It's actually lower but good enough
- packetTypePing = byte(0x01)
- packetTypePong = byte(0x02)
- packetTypeMsg = byte(0x03)
-)
+func RegisterPacket(cdc *amino.Codec) {
+ cdc.RegisterInterface((*Packet)(nil), nil)
+ cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil)
+ cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil)
+ cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil)
+}
-// Messages in channels are chopped into smaller msgPackets for multiplexing.
-type msgPacket struct {
+func (_ PacketPing) AssertIsPacket() {}
+func (_ PacketPong) AssertIsPacket() {}
+func (_ PacketMsg) AssertIsPacket() {}
+
+type PacketPing struct {
+}
+
+type PacketPong struct {
+}
+
+type PacketMsg struct {
ChannelID byte
EOF byte // 1 means message ends here.
Bytes []byte
}
-func (p msgPacket) String() string {
- return fmt.Sprintf("MsgPacket{%X:%X T:%X}", p.ChannelID, p.Bytes, p.EOF)
+func (mp PacketMsg) String() string {
+ return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
}
diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go
index d308ea61a..a927d6959 100644
--- a/p2p/conn/connection_test.go
+++ b/p2p/conn/connection_test.go
@@ -1,13 +1,14 @@
package conn
import (
+ "bytes"
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log"
)
@@ -32,41 +33,37 @@ func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msg
}
func TestMConnectionSend(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
server, client := NetPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client)
err := mconn.Start()
- require.Nil(err)
+ require.Nil(t, err)
defer mconn.Stop()
- msg := "Ant-Man"
- assert.True(mconn.Send(0x01, msg))
+ msg := []byte("Ant-Man")
+ assert.True(t, mconn.Send(0x01, msg))
// Note: subsequent Send/TrySend calls could pass because we are reading from
// the send queue in a separate goroutine.
_, err = server.Read(make([]byte, len(msg)))
if err != nil {
t.Error(err)
}
- assert.True(mconn.CanSend(0x01))
+ assert.True(t, mconn.CanSend(0x01))
- msg = "Spider-Man"
- assert.True(mconn.TrySend(0x01, msg))
+ msg = []byte("Spider-Man")
+ assert.True(t, mconn.TrySend(0x01, msg))
_, err = server.Read(make([]byte, len(msg)))
if err != nil {
t.Error(err)
}
- assert.False(mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
- assert.False(mconn.Send(0x05, "Absorbing Man"), "Send should return false because channel is unknown")
+ assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
+ assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown")
}
func TestMConnectionReceive(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
server, client := NetPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
@@ -81,20 +78,20 @@ func TestMConnectionReceive(t *testing.T) {
}
mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
err := mconn1.Start()
- require.Nil(err)
+ require.Nil(t, err)
defer mconn1.Stop()
mconn2 := createTestMConnection(server)
err = mconn2.Start()
- require.Nil(err)
+ require.Nil(t, err)
defer mconn2.Stop()
- msg := "Cyclops"
- assert.True(mconn2.Send(0x01, msg))
+ msg := []byte("Cyclops")
+ assert.True(t, mconn2.Send(0x01, msg))
select {
case receivedBytes := <-receivedCh:
- assert.Equal([]byte(msg), receivedBytes[2:]) // first 3 bytes are internal
+ assert.Equal(t, []byte(msg), receivedBytes)
case err := <-errorsCh:
t.Fatalf("Expected %s, got %+v", msg, err)
case <-time.After(500 * time.Millisecond):
@@ -103,20 +100,18 @@ func TestMConnectionReceive(t *testing.T) {
}
func TestMConnectionStatus(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
server, client := NetPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client)
err := mconn.Start()
- require.Nil(err)
+ require.Nil(t, err)
defer mconn.Stop()
status := mconn.Status()
- assert.NotNil(status)
- assert.Zero(status.Channels[0].SendQueueSize)
+ assert.NotNil(t, status)
+ assert.Zero(t, status.Channels[0].SendQueueSize)
}
func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
@@ -140,7 +135,10 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
serverGotPing := make(chan struct{})
go func() {
// read ping
- server.Read(make([]byte, 1))
+ var pkt PacketPing
+ const maxPacketPingSize = 1024
+ _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPacketPingSize)
+ assert.Nil(t, err)
serverGotPing <- struct{}{}
}()
<-serverGotPing
@@ -175,21 +173,22 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
defer mconn.Stop()
// sending 3 pongs in a row (abuse)
- _, err = server.Write([]byte{packetTypePong})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err)
- _, err = server.Write([]byte{packetTypePong})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err)
- _, err = server.Write([]byte{packetTypePong})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err)
serverGotPing := make(chan struct{})
go func() {
// read ping (one byte)
- _, err = server.Read(make([]byte, 1))
+ var packet, err = Packet(nil), error(nil)
+ _, err = cdc.UnmarshalBinaryReader(server, &packet, 1024)
require.Nil(t, err)
serverGotPing <- struct{}{}
// respond with pong
- _, err = server.Write([]byte{packetTypePong})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err)
}()
<-serverGotPing
@@ -225,17 +224,18 @@ func TestMConnectionMultiplePings(t *testing.T) {
// sending 3 pings in a row (abuse)
// see https://github.com/tendermint/tendermint/issues/1190
- _, err = server.Write([]byte{packetTypePing})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err)
- _, err = server.Read(make([]byte, 1))
+ var pkt PacketPong
+ _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err)
- _, err = server.Write([]byte{packetTypePing})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err)
- _, err = server.Read(make([]byte, 1))
+ _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err)
- _, err = server.Write([]byte{packetTypePing})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err)
- _, err = server.Read(make([]byte, 1))
+ _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err)
assert.True(t, mconn.IsRunning())
@@ -262,18 +262,21 @@ func TestMConnectionPingPongs(t *testing.T) {
serverGotPing := make(chan struct{})
go func() {
// read ping
- server.Read(make([]byte, 1))
+ var pkt PacketPing
+ _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
+ require.Nil(t, err)
serverGotPing <- struct{}{}
// respond with pong
- _, err = server.Write([]byte{packetTypePong})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err)
time.Sleep(mconn.config.PingInterval)
// read ping
- server.Read(make([]byte, 1))
+ _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
+ require.Nil(t, err)
// respond with pong
- _, err = server.Write([]byte{packetTypePong})
+ _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err)
}()
<-serverGotPing
@@ -290,8 +293,6 @@ func TestMConnectionPingPongs(t *testing.T) {
}
func TestMConnectionStopsAndReturnsError(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
server, client := NetPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
@@ -306,7 +307,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
}
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
err := mconn.Start()
- require.Nil(err)
+ require.Nil(t, err)
defer mconn.Stop()
if err := client.Close(); err != nil {
@@ -317,14 +318,14 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
case receivedBytes := <-receivedCh:
t.Fatalf("Expected error, got %v", receivedBytes)
case err := <-errorsCh:
- assert.NotNil(err)
- assert.False(mconn.IsRunning())
+ assert.NotNil(t, err)
+ assert.False(t, mconn.IsRunning())
case <-time.After(500 * time.Millisecond):
t.Fatal("Did not receive error in 500ms")
}
}
-func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr chan struct{}) (*MConnection, *MConnection) {
+func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) {
server, client := NetPipe()
onReceive := func(chID byte, msgBytes []byte) {}
@@ -338,7 +339,7 @@ func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr c
mconnClient := NewMConnection(client, chDescs, onReceive, onError)
mconnClient.SetLogger(log.TestingLogger().With("module", "client"))
err := mconnClient.Start()
- require.Nil(err)
+ require.Nil(t, err)
// create server conn with 1 channel
// it fires on chOnErr when there's an error
@@ -349,7 +350,7 @@ func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr c
mconnServer := createMConnectionWithCallbacks(server, onReceive, onError)
mconnServer.SetLogger(serverLogger)
err = mconnServer.Start()
- require.Nil(err)
+ require.Nil(t, err)
return mconnClient, mconnServer
}
@@ -364,50 +365,45 @@ func expectSend(ch chan struct{}) bool {
}
func TestMConnectionReadErrorBadEncoding(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
chOnErr := make(chan struct{})
- mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
+ mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
client := mconnClient.conn
- msg := "Ant-Man"
// send badly encoded msgPacket
- var n int
- var err error
- wire.WriteByte(packetTypeMsg, client, &n, &err)
- wire.WriteByteSlice([]byte(msg), client, &n, &err)
- assert.True(expectSend(chOnErr), "badly encoded msgPacket")
+ bz := cdc.MustMarshalBinary(PacketMsg{})
+ bz[4] += 0x01 // Invalid prefix bytes.
+
+ // Write it.
+ _, err := client.Write(bz)
+ assert.Nil(t, err)
+ assert.True(t, expectSend(chOnErr), "badly encoded msgPacket")
}
func TestMConnectionReadErrorUnknownChannel(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
chOnErr := make(chan struct{})
- mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
+ mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
- msg := "Ant-Man"
+ msg := []byte("Ant-Man")
// fail to send msg on channel unknown by client
- assert.False(mconnClient.Send(0x03, msg))
+ assert.False(t, mconnClient.Send(0x03, msg))
// send msg on channel unknown by the server.
// should cause an error
- assert.True(mconnClient.Send(0x02, msg))
- assert.True(expectSend(chOnErr), "unknown channel")
+ assert.True(t, mconnClient.Send(0x02, msg))
+ assert.True(t, expectSend(chOnErr), "unknown channel")
}
func TestMConnectionReadErrorLongMessage(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
chOnErr := make(chan struct{})
chOnRcv := make(chan struct{})
- mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
+ mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
@@ -418,65 +414,81 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
client := mconnClient.conn
// send msg thats just right
- var n int
var err error
- packet := msgPacket{
+ var buf = new(bytes.Buffer)
+ // - Uvarint length of MustMarshalBinary(packet) = 1 or 2 bytes
+ // (as long as it's less than 16,384 bytes)
+ // - Prefix bytes = 4 bytes
+ // - ChannelID field key + byte = 2 bytes
+ // - EOF field key + byte = 2 bytes
+ // - Bytes field key = 1 byte
+ // - Uvarint length of MustMarshalBinary(bytes) = 1 or 2 bytes
+ // - Struct terminator = 1 byte
+ // = up to 14 bytes overhead for the packet.
+ var packet = PacketMsg{
ChannelID: 0x01,
- Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-5),
EOF: 1,
+ Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize),
}
- writeMsgPacketTo(packet, client, &n, &err)
- assert.True(expectSend(chOnRcv), "msg just right")
+ _, err = cdc.MarshalBinaryWriter(buf, packet)
+ assert.Nil(t, err)
+ _, err = client.Write(buf.Bytes())
+ assert.Nil(t, err)
+ assert.True(t, expectSend(chOnRcv), "msg just right")
+ assert.False(t, expectSend(chOnErr), "msg just right")
// send msg thats too long
- packet = msgPacket{
+ buf = new(bytes.Buffer)
+ packet = PacketMsg{
ChannelID: 0x01,
- Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-4),
EOF: 1,
+ Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+1),
}
- writeMsgPacketTo(packet, client, &n, &err)
- assert.True(expectSend(chOnErr), "msg too long")
+ _, err = cdc.MarshalBinaryWriter(buf, packet)
+ assert.Nil(t, err)
+ _, err = client.Write(buf.Bytes())
+ assert.NotNil(t, err)
+ assert.False(t, expectSend(chOnRcv), "msg too long")
+ assert.True(t, expectSend(chOnErr), "msg too long")
}
func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
chOnErr := make(chan struct{})
- mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
+ mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
// send msg with unknown msg type
- var n int
- var err error
- wire.WriteByte(0x04, mconnClient.conn, &n, &err)
- assert.True(expectSend(chOnErr), "unknown msg type")
+ err := error(nil)
+ err = amino.EncodeUvarint(mconnClient.conn, 4)
+ assert.Nil(t, err)
+ _, err = mconnClient.conn.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF})
+ assert.Nil(t, err)
+ assert.True(t, expectSend(chOnErr), "unknown msg type")
}
func TestMConnectionTrySend(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
server, client := NetPipe()
defer server.Close()
defer client.Close()
mconn := createTestMConnection(client)
err := mconn.Start()
- require.Nil(err)
+ require.Nil(t, err)
defer mconn.Stop()
- msg := "Semicolon-Woman"
+ msg := []byte("Semicolon-Woman")
resultCh := make(chan string, 2)
- assert.True(mconn.TrySend(0x01, msg))
+ assert.True(t, mconn.TrySend(0x01, msg))
server.Read(make([]byte, len(msg)))
- assert.True(mconn.CanSend(0x01))
- assert.True(mconn.TrySend(0x01, msg))
- assert.False(mconn.CanSend(0x01))
+ assert.True(t, mconn.CanSend(0x01))
+ assert.True(t, mconn.TrySend(0x01, msg))
+ assert.False(t, mconn.CanSend(0x01))
go func() {
mconn.TrySend(0x01, msg)
resultCh <- "TrySend"
}()
- assert.False(mconn.CanSend(0x01))
- assert.False(mconn.TrySend(0x01, msg))
- assert.Equal("TrySend", <-resultCh)
+ assert.False(t, mconn.CanSend(0x01))
+ assert.False(t, mconn.TrySend(0x01, msg))
+ assert.Equal(t, "TrySend", <-resultCh)
}
diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go
index bc67abf3a..2a507f882 100644
--- a/p2p/conn/secret_connection.go
+++ b/p2p/conn/secret_connection.go
@@ -20,17 +20,15 @@ import (
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/ripemd160"
- crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
)
-// 2 + 1024 == 1026 total frame size
-const dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize
+// 4 + 1024 == 1028 total frame size
+const dataLenSize = 4 // uint32 to describe the length, is <= dataMaxSize
const dataMaxSize = 1024
const totalFrameSize = dataMaxSize + dataLenSize
const sealedFrameSize = totalFrameSize + secretbox.Overhead
-const authSigMsgSize = (32 + 1) + (64 + 1) // fixed size (length prefixed) byte arrays
// Implements net.Conn
type SecretConnection struct {
@@ -123,7 +121,7 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
data = nil
}
chunkLength := len(chunk)
- binary.BigEndian.PutUint16(frame, uint16(chunkLength))
+ binary.BigEndian.PutUint32(frame, uint32(chunkLength))
copy(frame[dataLenSize:], chunk)
// encrypt the frame
@@ -145,8 +143,8 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) {
- n_ := copy(data, sc.recvBuffer)
- sc.recvBuffer = sc.recvBuffer[n_:]
+ n = copy(data, sc.recvBuffer)
+ sc.recvBuffer = sc.recvBuffer[n:]
return
}
@@ -166,7 +164,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) {
incr2Nonce(sc.recvNonce)
// end decryption
- var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes
+ var chunkLength = binary.BigEndian.Uint32(frame) // read the first four bytes
if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize")
}
@@ -193,16 +191,17 @@ func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil {
- cmn.PanicCrisis("Could not generate ephemeral keypairs")
+ panic("Could not generate ephemeral keypairs")
}
return
}
func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
+
// Send our pubkey and receive theirs in tandem.
var trs, _ = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) {
- var _, err1 = conn.Write(locEphPub[:])
+ var _, err1 = cdc.MarshalBinaryWriter(conn, locEphPub)
if err1 != nil {
return nil, err1, true // abort
} else {
@@ -211,7 +210,7 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3
},
func(_ int) (val interface{}, err error, abort bool) {
var _remEphPub [32]byte
- var _, err2 = io.ReadFull(conn, _remEphPub[:])
+ var _, err2 = cdc.UnmarshalBinaryReader(conn, &_remEphPub, 1024*1024) // TODO
if err2 != nil {
return nil, err2, true // abort
} else {
@@ -277,12 +276,12 @@ type authSigMessage struct {
Sig crypto.Signature
}
-func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg *authSigMessage, err error) {
+func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg authSigMessage, err error) {
+
// Send our info and receive theirs in tandem.
var trs, _ = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) {
- msgBytes := wire.BinaryBytes(authSigMessage{pubKey.Wrap(), signature.Wrap()})
- var _, err1 = sc.Write(msgBytes)
+ var _, err1 = cdc.MarshalBinaryWriter(sc, authSigMessage{pubKey, signature})
if err1 != nil {
return nil, err1, true // abort
} else {
@@ -290,13 +289,8 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature cr
}
},
func(_ int) (val interface{}, err error, abort bool) {
- readBuffer := make([]byte, authSigMsgSize)
- var _, err2 = io.ReadFull(sc, readBuffer)
- if err2 != nil {
- return nil, err2, true // abort
- }
- n := int(0) // not used.
- var _recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
+ var _recvMsg authSigMessage
+ var _, err2 = cdc.UnmarshalBinaryReader(sc, &_recvMsg, 1024*1024) // TODO
if err2 != nil {
return nil, err2, true // abort
} else {
@@ -312,7 +306,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature cr
}
var _recvMsg = trs.FirstValue().(authSigMessage)
- return &_recvMsg, nil
+ return _recvMsg, nil
}
//--------------------------------------------------------------------------------
diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go
index edb0de04d..8dfc2bef0 100644
--- a/p2p/conn/secret_connection_test.go
+++ b/p2p/conn/secret_connection_test.go
@@ -33,12 +33,14 @@ func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) {
}
func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) {
- fooConn, barConn := makeKVStoreConnPair()
- fooPrvKey := crypto.GenPrivKeyEd25519().Wrap()
- fooPubKey := fooPrvKey.PubKey()
- barPrvKey := crypto.GenPrivKeyEd25519().Wrap()
- barPubKey := barPrvKey.PubKey()
+ var fooConn, barConn = makeKVStoreConnPair()
+ var fooPrvKey = crypto.GenPrivKeyEd25519()
+ var fooPubKey = fooPrvKey.PubKey()
+ var barPrvKey = crypto.GenPrivKeyEd25519()
+ var barPubKey = barPrvKey.PubKey()
+
+ // Make connections from both sides in parallel.
var trs, ok = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) {
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
@@ -100,10 +102,10 @@ func TestSecretConnectionReadWrite(t *testing.T) {
}
// A helper that will run with (fooConn, fooWrites, fooReads) and vice versa
- genNodeRunner := func(nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task {
+ genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task {
return func(_ int) (interface{}, error, bool) {
- // Node handskae
- nodePrvKey := crypto.GenPrivKeyEd25519().Wrap()
+ // Initialize the node's cryptographic private key and establish a secret connection through nodeConn.
+ nodePrvKey := crypto.GenPrivKeyEd25519()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil {
t.Errorf("Failed to establish SecretConnection for node: %v", err)
@@ -112,7 +114,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
// In parallel, handle some reads and writes.
var trs, ok = cmn.Parallel(
func(_ int) (interface{}, error, bool) {
- // Node writes
+ // Node writes:
for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil {
@@ -132,7 +134,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
return nil, nil, false
},
func(_ int) (interface{}, error, bool) {
- // Node reads
+ // Node reads:
readBuffer := make([]byte, dataMaxSize)
for {
n, err := nodeSecretConn.Read(readBuffer)
@@ -165,8 +167,8 @@ func TestSecretConnectionReadWrite(t *testing.T) {
// Run foo & bar in parallel
var trs, ok = cmn.Parallel(
- genNodeRunner(fooConn, fooWrites, &fooReads),
- genNodeRunner(barConn, barWrites, &barReads),
+ genNodeRunner("foo", fooConn, fooWrites, &fooReads),
+ genNodeRunner("bar", barConn, barWrites, &barReads),
)
require.Nil(t, trs.FirstError())
require.True(t, ok, "unexpected task abortion")
@@ -237,3 +239,12 @@ func BenchmarkSecretConnection(b *testing.B) {
}
//barSecConn.Close() race condition
}
+
+func fingerprint(bz []byte) []byte {
+ const fbsize = 40
+ if len(bz) < fbsize {
+ return bz
+ } else {
+ return bz[:fbsize]
+ }
+}
diff --git a/p2p/conn/wire.go b/p2p/conn/wire.go
new file mode 100644
index 000000000..02d67f6fb
--- /dev/null
+++ b/p2p/conn/wire.go
@@ -0,0 +1,13 @@
+package conn
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc *amino.Codec = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+ RegisterPacket(cdc)
+}
diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go
index 61c3a8ace..97fb7e2ef 100644
--- a/p2p/dummy/peer.go
+++ b/p2p/dummy/peer.go
@@ -48,12 +48,12 @@ func (p *peer) Status() tmconn.ConnectionStatus {
}
// Send does not do anything and just returns true.
-func (p *peer) Send(byte, interface{}) bool {
+func (p *peer) Send(byte, []byte) bool {
return true
}
// TrySend does not do anything and just returns true.
-func (p *peer) TrySend(byte, interface{}) bool {
+func (p *peer) TrySend(byte, []byte) bool {
return true
}
diff --git a/p2p/fuzz.go b/p2p/fuzz.go
index fa16e4a26..6bfadc29d 100644
--- a/p2p/fuzz.go
+++ b/p2p/fuzz.go
@@ -1,10 +1,11 @@
package p2p
import (
- "math/rand"
"net"
"sync"
"time"
+
+ cmn "github.com/tendermint/tmlibs/common"
)
const (
@@ -124,7 +125,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
func (fc *FuzzedConnection) randomDuration() time.Duration {
maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
- return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) // nolint: gas
+ return time.Millisecond * time.Duration(cmn.RandInt()%maxDelayMillis) // nolint: gas
}
// implements the fuzz (delay, kill conn)
@@ -137,7 +138,7 @@ func (fc *FuzzedConnection) fuzz() bool {
switch fc.config.Mode {
case FuzzModeDrop:
// randomly drop the r/w, drop the conn, or sleep
- r := rand.Float64()
+ r := cmn.RandFloat64()
if r <= fc.config.ProbDropRW {
return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
diff --git a/p2p/key.go b/p2p/key.go
index 6d0f28586..73103ebd4 100644
--- a/p2p/key.go
+++ b/p2p/key.go
@@ -3,7 +3,6 @@ package p2p
import (
"bytes"
"encoding/hex"
- "encoding/json"
"fmt"
"io/ioutil"
@@ -48,7 +47,7 @@ func PubKeyToID(pubKey crypto.PubKey) ID {
// If the file does not exist, it generates and saves a new NodeKey.
func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
if cmn.FileExists(filePath) {
- nodeKey, err := loadNodeKey(filePath)
+ nodeKey, err := LoadNodeKey(filePath)
if err != nil {
return nil, err
}
@@ -57,13 +56,13 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
return genNodeKey(filePath)
}
-func loadNodeKey(filePath string) (*NodeKey, error) {
+func LoadNodeKey(filePath string) (*NodeKey, error) {
jsonBytes, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, err
}
nodeKey := new(NodeKey)
- err = json.Unmarshal(jsonBytes, nodeKey)
+ err = cdc.UnmarshalJSON(jsonBytes, nodeKey)
if err != nil {
return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err)
}
@@ -71,12 +70,12 @@ func loadNodeKey(filePath string) (*NodeKey, error) {
}
func genNodeKey(filePath string) (*NodeKey, error) {
- privKey := crypto.GenPrivKeyEd25519().Wrap()
+ privKey := crypto.GenPrivKeyEd25519()
nodeKey := &NodeKey{
PrivKey: privKey,
}
- jsonBytes, err := json.Marshal(nodeKey)
+ jsonBytes, err := cdc.MarshalJSON(nodeKey)
if err != nil {
return nil, err
}
diff --git a/p2p/netaddress.go b/p2p/netaddress.go
index a77090a78..6b88f5670 100644
--- a/p2p/netaddress.go
+++ b/p2p/netaddress.go
@@ -13,7 +13,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -77,7 +76,7 @@ func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) {
idStr := spl[0]
idBytes, err := hex.DecodeString(idStr)
if err != nil {
- return nil, errors.Wrapf(err, "Address (%s) contains invalid ID", addrWithoutProtocol)
+ return nil, cmn.ErrorWrap(err, fmt.Sprintf("Address (%s) contains invalid ID", addrWithoutProtocol))
}
if len(idBytes) != IDByteLength {
return nil, fmt.Errorf("Address (%s) contains ID of invalid length (%d). Should be %d hex-encoded bytes",
diff --git a/p2p/node_info.go b/p2p/node_info.go
index 346de37d3..930f9bfea 100644
--- a/p2p/node_info.go
+++ b/p2p/node_info.go
@@ -3,8 +3,6 @@ package p2p
import (
"fmt"
"strings"
-
- crypto "github.com/tendermint/go-crypto"
)
const (
@@ -20,8 +18,8 @@ func MaxNodeInfoSize() int {
// between two peers during the Tendermint P2P handshake.
type NodeInfo struct {
// Authenticate
- PubKey crypto.PubKey `json:"pub_key"` // authenticated pubkey
- ListenAddr string `json:"listen_addr"` // accepting incoming
+ ID ID `json:"id"` // authenticated identifier
+ ListenAddr string `json:"listen_addr"` // accepting incoming
// Check compatibility
Network string `json:"network"` // network/chain ID
@@ -107,19 +105,12 @@ OUTER_LOOP:
return nil
}
-// ID returns node's ID.
-func (info NodeInfo) ID() ID {
- return PubKeyToID(info.PubKey)
-}
-
// NetAddress returns a NetAddress derived from the NodeInfo -
// it includes the authenticated peer ID and the self-reported
// ListenAddr. Note that the ListenAddr is not authenticated and
// may not match that address actually dialed if its an outbound peer.
func (info NodeInfo) NetAddress() *NetAddress {
- id := PubKeyToID(info.PubKey)
- addr := info.ListenAddr
- netAddr, err := NewNetAddressString(IDAddressString(id, addr))
+ netAddr, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr))
if err != nil {
panic(err) // everything should be well formed by now
}
@@ -127,7 +118,8 @@ func (info NodeInfo) NetAddress() *NetAddress {
}
func (info NodeInfo) String() string {
- return fmt.Sprintf("NodeInfo{pk: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}", info.PubKey, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other)
+ return fmt.Sprintf("NodeInfo{id: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}",
+ info.ID, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other)
}
func splitVersion(version string) (string, string, string, error) {
diff --git a/p2p/peer.go b/p2p/peer.go
index 0fa7ca034..9f3ccbabc 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -5,10 +5,7 @@ import (
"net"
"time"
- "github.com/pkg/errors"
-
- crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -25,8 +22,8 @@ type Peer interface {
NodeInfo() NodeInfo // peer's info
Status() tmconn.ConnectionStatus
- Send(byte, interface{}) bool
- TrySend(byte, interface{}) bool
+ Send(byte, []byte) bool
+ TrySend(byte, []byte) bool
Set(string, interface{})
Get(string) interface{}
@@ -114,13 +111,13 @@ func newOutboundPeerConn(addr *NetAddress, config *PeerConfig, persistent bool,
conn, err := dial(addr, config)
if err != nil {
- return pc, errors.Wrap(err, "Error creating peer")
+ return pc, cmn.ErrorWrap(err, "Error creating peer")
}
pc, err = newPeerConn(conn, config, true, persistent, ourNodePrivKey)
if err != nil {
if err2 := conn.Close(); err2 != nil {
- return pc, errors.Wrap(err, err2.Error())
+ return pc, cmn.ErrorWrap(err, err2.Error())
}
return pc, err
}
@@ -128,7 +125,7 @@ func newOutboundPeerConn(addr *NetAddress, config *PeerConfig, persistent bool,
// ensure dialed ID matches connection ID
if config.AuthEnc && addr.ID != pc.ID() {
if err2 := conn.Close(); err2 != nil {
- return pc, errors.Wrap(err, err2.Error())
+ return pc, cmn.ErrorWrap(err, err2.Error())
}
return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()}
}
@@ -157,13 +154,13 @@ func newPeerConn(rawConn net.Conn,
if config.AuthEnc {
// Set deadline for secret handshake
if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil {
- return pc, errors.Wrap(err, "Error setting deadline while encrypting connection")
+ return pc, cmn.ErrorWrap(err, "Error setting deadline while encrypting connection")
}
// Encrypt connection
conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey)
if err != nil {
- return pc, errors.Wrap(err, "Error creating peer")
+ return pc, cmn.ErrorWrap(err, "Error creating peer")
}
}
@@ -205,7 +202,7 @@ func (p *peer) OnStop() {
// ID returns the peer's ID - the hex encoded hash of its pubkey.
func (p *peer) ID() ID {
- return p.nodeInfo.ID()
+ return p.nodeInfo.ID
}
// IsOutbound returns true if the connection is outbound, false otherwise.
@@ -228,9 +225,9 @@ func (p *peer) Status() tmconn.ConnectionStatus {
return p.mconn.Status()
}
-// Send msg to the channel identified by chID byte. Returns false if the send
-// queue is full after timeout, specified by MConnection.
-func (p *peer) Send(chID byte, msg interface{}) bool {
+// Send msg bytes to the channel identified by chID byte. Returns false if the
+// send queue is full after timeout, specified by MConnection.
+func (p *peer) Send(chID byte, msgBytes []byte) bool {
if !p.IsRunning() {
// see Switch#Broadcast, where we fetch the list of peers and loop over
// them - while we're looping, one peer may be removed and stopped.
@@ -238,18 +235,18 @@ func (p *peer) Send(chID byte, msg interface{}) bool {
} else if !p.hasChannel(chID) {
return false
}
- return p.mconn.Send(chID, msg)
+ return p.mconn.Send(chID, msgBytes)
}
-// TrySend msg to the channel identified by chID byte. Immediately returns
+// TrySend msg bytes to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
-func (p *peer) TrySend(chID byte, msg interface{}) bool {
+func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
if !p.IsRunning() {
return false
} else if !p.hasChannel(chID) {
return false
}
- return p.mconn.TrySend(chID, msg)
+ return p.mconn.TrySend(chID, msgBytes)
}
// Get the data for a given key.
@@ -290,28 +287,26 @@ func (pc *peerConn) CloseConn() {
func (pc *peerConn) HandshakeTimeout(ourNodeInfo NodeInfo, timeout time.Duration) (peerNodeInfo NodeInfo, err error) {
// Set deadline for handshake so we don't block forever on conn.ReadFull
if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
- return peerNodeInfo, errors.Wrap(err, "Error setting deadline")
+ return peerNodeInfo, cmn.ErrorWrap(err, "Error setting deadline")
}
var trs, _ = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) {
- var n int
- wire.WriteBinary(&ourNodeInfo, pc.conn, &n, &err)
+ _, err = cdc.MarshalBinaryWriter(pc.conn, ourNodeInfo)
return
},
func(_ int) (val interface{}, err error, abort bool) {
- var n int
- wire.ReadBinary(&peerNodeInfo, pc.conn, MaxNodeInfoSize(), &n, &err)
+ _, err = cdc.UnmarshalBinaryReader(pc.conn, &peerNodeInfo, int64(MaxNodeInfoSize()))
return
},
)
if err := trs.FirstError(); err != nil {
- return peerNodeInfo, errors.Wrap(err, "Error during handshake")
+ return peerNodeInfo, cmn.ErrorWrap(err, "Error during handshake")
}
// Remove deadline
if err := pc.conn.SetDeadline(time.Time{}); err != nil {
- return peerNodeInfo, errors.Wrap(err, "Error removing deadline")
+ return peerNodeInfo, cmn.ErrorWrap(err, "Error removing deadline")
}
return peerNodeInfo, nil
diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go
index c8d1d64d2..872758355 100644
--- a/p2p/peer_set_test.go
+++ b/p2p/peer_set_test.go
@@ -13,11 +13,11 @@ import (
// Returns an empty kvstore peer
func randPeer() *peer {
- pubKey := crypto.GenPrivKeyEd25519().Wrap().PubKey()
+ nodeKey := NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
return &peer{
nodeInfo: NodeInfo{
+ ID: nodeKey.ID(),
ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
- PubKey: pubKey,
},
}
}
diff --git a/p2p/peer_test.go b/p2p/peer_test.go
index 0bf7076e1..24d750a9f 100644
--- a/p2p/peer_test.go
+++ b/p2p/peer_test.go
@@ -20,7 +20,7 @@ func TestPeerBasic(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// simulate remote peer
- rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: DefaultPeerConfig()}
+ rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start()
defer rp.Stop()
@@ -47,7 +47,7 @@ func TestPeerWithoutAuthEnc(t *testing.T) {
config.AuthEnc = false
// simulate remote peer
- rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: config}
+ rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
rp.Start()
defer rp.Stop()
@@ -68,7 +68,7 @@ func TestPeerSend(t *testing.T) {
config.AuthEnc = false
// simulate remote peer
- rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: config}
+ rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
rp.Start()
defer rp.Stop()
@@ -81,7 +81,7 @@ func TestPeerSend(t *testing.T) {
defer p.Stop()
assert.True(p.CanSend(testCh))
- assert.True(p.Send(testCh, "Asylum"))
+ assert.True(p.Send(testCh, []byte("Asylum")))
}
func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) {
@@ -89,13 +89,13 @@ func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig)
{ID: testCh, Priority: 1},
}
reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)}
- pk := crypto.GenPrivKeyEd25519().Wrap()
+ pk := crypto.GenPrivKeyEd25519()
pc, err := newOutboundPeerConn(addr, config, false, pk)
if err != nil {
return nil, err
}
nodeInfo, err := pc.HandshakeTimeout(NodeInfo{
- PubKey: pk.PubKey(),
+ ID: addr.ID,
Moniker: "host_peer",
Network: "testing",
Version: "123.123.123",
@@ -152,7 +152,7 @@ func (p *remotePeer) accept(l net.Listener) {
golog.Fatalf("Failed to create a peer: %+v", err)
}
_, err = pc.HandshakeTimeout(NodeInfo{
- PubKey: p.PrivKey.PubKey(),
+ ID: p.Addr().ID,
Moniker: "remote_peer",
Network: "testing",
Version: "123.123.123",
diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go
index a8462f375..56a4c5163 100644
--- a/p2p/pex/addrbook.go
+++ b/p2p/pex/addrbook.go
@@ -9,7 +9,6 @@ import (
"encoding/binary"
"fmt"
"math"
- "math/rand"
"net"
"sync"
"time"
@@ -82,7 +81,7 @@ type addrBook struct {
// accessed concurrently
mtx sync.Mutex
- rand *rand.Rand
+ rand *cmn.Rand
ourAddrs map[string]struct{}
addrLookup map[p2p.ID]*knownAddress // new & old
bucketsOld []map[string]*knownAddress
@@ -97,7 +96,7 @@ type addrBook struct {
// Use Start to begin processing asynchronous address updates.
func NewAddrBook(filePath string, routabilityStrict bool) *addrBook {
am := &addrBook{
- rand: rand.New(rand.NewSource(time.Now().UnixNano())), // TODO: seed from outside
+ rand: cmn.NewRand(),
ourAddrs: make(map[string]struct{}),
addrLookup: make(map[p2p.ID]*knownAddress),
filePath: filePath,
@@ -320,7 +319,7 @@ func (a *addrBook) GetSelection() []*p2p.NetAddress {
// XXX: What's the point of this if we already loop randomly through addrLookup ?
for i := 0; i < numAddresses; i++ {
// pick a number between current index and the end
- j := rand.Intn(len(allAddr)-i) + i
+ j := cmn.RandIntn(len(allAddr)-i) + i
allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
}
diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go
index 1bcc493dd..70ae7ed11 100644
--- a/p2p/pex/pex_reactor.go
+++ b/p2p/pex/pex_reactor.go
@@ -1,16 +1,13 @@
package pex
import (
- "bytes"
"fmt"
- "math/rand"
"reflect"
"sort"
"sync"
"time"
- "github.com/pkg/errors"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/p2p"
@@ -23,7 +20,7 @@ const (
// PexChannel is a channel for PEX messages
PexChannel = byte(0x00)
- maxPexMessageSize = 1048576 // 1MB
+ maxMsgSize = 1048576 // 1MB
// ensure we have enough peers
defaultEnsurePeersPeriod = 30 * time.Second
@@ -181,7 +178,7 @@ func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
// Receive implements Reactor by handling incoming PEX messages.
func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
- _, msg, err := DecodeMessage(msgBytes)
+ msg, err := DecodeMessage(msgBytes)
if err != nil {
r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
r.Switch.StopPeerForError(src, err)
@@ -250,7 +247,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) {
return
}
r.requestsSent.Set(id, struct{}{})
- p.Send(PexChannel, struct{ PexMessage }{&pexRequestMessage{}})
+ p.Send(PexChannel, cdc.MustMarshalBinary(&pexRequestMessage{}))
}
// ReceiveAddrs adds the given addrs to the addrbook if theres an open
@@ -260,7 +257,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
id := string(src.ID())
if !r.requestsSent.Has(id) {
- return errors.New("Received unsolicited pexAddrsMessage")
+ return cmn.NewError("Received unsolicited pexAddrsMessage")
}
r.requestsSent.Delete(id)
@@ -279,7 +276,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
// SendAddrs sends addrs to the peer.
func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
- p.Send(PexChannel, struct{ PexMessage }{&pexAddrsMessage{Addrs: netAddrs}})
+ p.Send(PexChannel, cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: netAddrs}))
}
// SetEnsurePeersPeriod sets period to ensure peers connected.
@@ -290,7 +287,7 @@ func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
// Ensures that sufficient peers are connected. (continuous)
func (r *PEXReactor) ensurePeersRoutine() {
var (
- seed = rand.New(rand.NewSource(time.Now().UnixNano()))
+ seed = cmn.NewRand()
jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
)
@@ -377,7 +374,7 @@ func (r *PEXReactor) ensurePeers() {
peers := r.Switch.Peers().List()
peersCount := len(peers)
if peersCount > 0 {
- peer := peers[rand.Int()%peersCount] // nolint: gas
+ peer := peers[cmn.RandInt()%peersCount] // nolint: gas
r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
r.RequestAddrs(peer)
}
@@ -406,7 +403,7 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
// exponential backoff if it's not our first attempt to dial given address
if attempts > 0 {
- jitterSeconds := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns)
+ jitterSeconds := time.Duration(cmn.RandFloat64() * float64(time.Second)) // 1s == (1e9 ns)
backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second)
sinceLastDialed := time.Since(lastDialed)
if sinceLastDialed < backoffDuration {
@@ -459,7 +456,7 @@ func (r *PEXReactor) dialSeeds() {
}
seedAddrs, _ := p2p.NewNetAddressStrings(r.config.Seeds)
- perm := rand.Perm(lSeeds)
+ perm := cmn.RandPerm(lSeeds)
// perm := r.Switch.rng.Perm(lSeeds)
for _, i := range perm {
// dial a random seed
@@ -606,27 +603,23 @@ func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool {
//-----------------------------------------------------------------------------
// Messages
-const (
- msgTypeRequest = byte(0x01)
- msgTypeAddrs = byte(0x02)
-)
-
// PexMessage is a primary type for PEX messages. Underneath, it could contain
// either pexRequestMessage, or pexAddrsMessage messages.
type PexMessage interface{}
-var _ = wire.RegisterInterface(
- struct{ PexMessage }{},
- wire.ConcreteType{&pexRequestMessage{}, msgTypeRequest},
- wire.ConcreteType{&pexAddrsMessage{}, msgTypeAddrs},
-)
+func RegisterPexMessage(cdc *amino.Codec) {
+ cdc.RegisterInterface((*PexMessage)(nil), nil)
+ cdc.RegisterConcrete(&pexRequestMessage{}, "tendermint/p2p/PexRequestMessage", nil)
+ cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil)
+}
// DecodeMessage implements interface registered above.
-func DecodeMessage(bz []byte) (msgType byte, msg PexMessage, err error) {
- msgType = bz[0]
- n := new(int)
- r := bytes.NewReader(bz)
- msg = wire.ReadBinary(struct{ PexMessage }{}, r, maxPexMessageSize, n, &err).(struct{ PexMessage }).PexMessage
+func DecodeMessage(bz []byte) (msg PexMessage, err error) {
+ if len(bz) > maxMsgSize {
+ return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+ len(bz), maxMsgSize)
+ }
+ err = cdc.UnmarshalBinary(bz, &msg)
return
}
diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go
index 329d17f37..f7297a343 100644
--- a/p2p/pex/pex_reactor_test.go
+++ b/p2p/pex/pex_reactor_test.go
@@ -12,7 +12,6 @@ import (
"github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/conn"
@@ -124,12 +123,12 @@ func TestPEXReactorReceive(t *testing.T) {
size := book.Size()
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
- msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
+ msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
r.Receive(PexChannel, peer, msg)
assert.Equal(t, size+1, book.Size())
- msg = wire.BinaryBytes(struct{ PexMessage }{&pexRequestMessage{}})
- r.Receive(PexChannel, peer, msg)
+ msg = cdc.MustMarshalBinary(&pexRequestMessage{})
+ r.Receive(PexChannel, peer, msg) // should not panic.
}
func TestPEXReactorRequestMessageAbuse(t *testing.T) {
@@ -144,7 +143,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
assert.True(t, sw.Peers().Has(peer.ID()))
id := string(peer.ID())
- msg := wire.BinaryBytes(struct{ PexMessage }{&pexRequestMessage{}})
+ msg := cdc.MustMarshalBinary(&pexRequestMessage{})
// first time creates the entry
r.Receive(PexChannel, peer, msg)
@@ -181,7 +180,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
assert.True(t, sw.Peers().Has(peer.ID()))
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
- msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
+ msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
// receive some addrs. should clear the request
r.Receive(PexChannel, peer, msg)
@@ -257,7 +256,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
defer peer.Stop()
// 3. check that the peer connects to seed immediately
- assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 1*time.Second, 1)
+ assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1)
}
func TestPEXReactorCrawlStatus(t *testing.T) {
@@ -290,7 +289,7 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
peer := p2p.CreateRandomPeer(false)
- pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID())}})
+ pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID)}})
defer teardownReactor(book)
// we have to send a request to receive responses
@@ -298,7 +297,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
size := book.Size()
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
- msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
+ msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
pexR.Receive(PexChannel, peer, msg)
assert.Equal(t, size, book.Size())
@@ -350,27 +349,27 @@ func newMockPeer() mockPeer {
_, netAddr := p2p.CreateRoutableAddr()
mp := mockPeer{
addr: netAddr,
- pubKey: crypto.GenPrivKeyEd25519().Wrap().PubKey(),
+ pubKey: crypto.GenPrivKeyEd25519().PubKey(),
}
mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp)
mp.Start()
return mp
}
-func (mp mockPeer) ID() p2p.ID { return p2p.PubKeyToID(mp.pubKey) }
+func (mp mockPeer) ID() p2p.ID { return mp.addr.ID }
func (mp mockPeer) IsOutbound() bool { return mp.outbound }
func (mp mockPeer) IsPersistent() bool { return mp.persistent }
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{
- PubKey: mp.pubKey,
+ ID: mp.addr.ID,
ListenAddr: mp.addr.DialString(),
}
}
-func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
-func (mp mockPeer) Send(byte, interface{}) bool { return false }
-func (mp mockPeer) TrySend(byte, interface{}) bool { return false }
-func (mp mockPeer) Set(string, interface{}) {}
-func (mp mockPeer) Get(string) interface{} { return nil }
+func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
+func (mp mockPeer) Send(byte, []byte) bool { return false }
+func (mp mockPeer) TrySend(byte, []byte) bool { return false }
+func (mp mockPeer) Set(string, interface{}) {}
+func (mp mockPeer) Get(string) interface{} { return nil }
func assertPeersWithTimeout(
t *testing.T,
diff --git a/p2p/pex/wire.go b/p2p/pex/wire.go
new file mode 100644
index 000000000..57fc93858
--- /dev/null
+++ b/p2p/pex/wire.go
@@ -0,0 +1,11 @@
+package pex
+
+import (
+ "github.com/tendermint/go-amino"
+)
+
+var cdc *amino.Codec = amino.NewCodec()
+
+func init() {
+ RegisterPexMessage(cdc)
+}
diff --git a/p2p/switch.go b/p2p/switch.go
index e412d3cc9..6afc30fe4 100644
--- a/p2p/switch.go
+++ b/p2p/switch.go
@@ -3,13 +3,10 @@ package p2p
import (
"fmt"
"math"
- "math/rand"
"net"
"sync"
"time"
- "github.com/pkg/errors"
-
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p/conn"
cmn "github.com/tendermint/tmlibs/common"
@@ -69,7 +66,7 @@ type Switch struct {
filterConnByAddr func(net.Addr) error
filterConnByID func(ID) error
- rng *rand.Rand // seed for randomizing dial times and orders
+ rng *cmn.Rand // seed for randomizing dial times and orders
}
// NewSwitch creates a new Switch with the given config.
@@ -84,15 +81,14 @@ func NewSwitch(config *cfg.P2PConfig) *Switch {
dialing: cmn.NewCMap(),
}
- // Ensure we have a completely undeterministic PRNG. cmd.RandInt64() draws
- // from a seed that's initialized with OS entropy on process start.
- sw.rng = rand.New(rand.NewSource(cmn.RandInt64()))
+ // Ensure we have a completely undeterministic PRNG.
+ sw.rng = cmn.NewRand()
// TODO: collapse the peerConfig into the config ?
sw.peerConfig.MConfig.FlushThrottle = time.Duration(config.FlushThrottleTimeout) * time.Millisecond
sw.peerConfig.MConfig.SendRate = config.SendRate
sw.peerConfig.MConfig.RecvRate = config.RecvRate
- sw.peerConfig.MConfig.MaxMsgPacketPayloadSize = config.MaxMsgPacketPayloadSize
+ sw.peerConfig.MConfig.MaxPacketMsgPayloadSize = config.MaxPacketMsgPayloadSize
sw.peerConfig.AuthEnc = config.AuthEnc
sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
@@ -178,7 +174,7 @@ func (sw *Switch) OnStart() error {
for _, reactor := range sw.reactors {
err := reactor.Start()
if err != nil {
- return errors.Wrapf(err, "failed to start %v", reactor)
+ return cmn.ErrorWrap(err, "failed to start %v", reactor)
}
}
// Start listeners
@@ -213,18 +209,18 @@ func (sw *Switch) OnStop() {
// Broadcast runs a go routine for each attempted send, which will block trying
// to send for defaultSendTimeoutSeconds. Returns a channel which receives
// success values for each attempted send (false if times out). Channel will be
-// closed once msg send to all peers.
+// closed once msg bytes are sent to all peers (or time out).
//
// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
-func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool {
+func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool {
successChan := make(chan bool, len(sw.peers.List()))
- sw.Logger.Debug("Broadcast", "channel", chID, "msg", msg)
+ sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", fmt.Sprintf("%X", msgBytes))
var wg sync.WaitGroup
for _, peer := range sw.peers.List() {
wg.Add(1)
go func(peer Peer) {
defer wg.Done()
- success := peer.Send(chID, msg)
+ success := peer.Send(chID, msgBytes)
successChan <- success
}(peer)
}
@@ -363,7 +359,9 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
for _, netAddr := range netAddrs {
// do not add our address or ID
if !netAddr.Same(ourAddr) {
- addrBook.AddAddress(netAddr, ourAddr)
+ if err := addrBook.AddAddress(netAddr, ourAddr); err != nil {
+ sw.Logger.Error("Can't add peer's address to addrbook", "err", err)
+ }
}
}
// Persist some peers to disk right away.
@@ -517,7 +515,7 @@ func (sw *Switch) addPeer(pc peerConn) error {
return err
}
- peerID := peerNodeInfo.ID()
+ peerID := peerNodeInfo.ID
// ensure connection key matches self reported key
if pc.config.AuthEnc {
diff --git a/p2p/switch_test.go b/p2p/switch_test.go
index c02eb26d3..b170ccdc8 100644
--- a/p2p/switch_test.go
+++ b/p2p/switch_test.go
@@ -12,7 +12,6 @@ import (
"github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
"github.com/tendermint/tmlibs/log"
cfg "github.com/tendermint/tendermint/config"
@@ -120,9 +119,9 @@ func TestSwitches(t *testing.T) {
}
// Lets send some messages
- ch0Msg := "channel zero"
- ch1Msg := "channel foo"
- ch2Msg := "channel bar"
+ ch0Msg := []byte("channel zero")
+ ch1Msg := []byte("channel foo")
+ ch2Msg := []byte("channel bar")
s1.Broadcast(byte(0x00), ch0Msg)
s1.Broadcast(byte(0x01), ch1Msg)
@@ -133,15 +132,15 @@ func TestSwitches(t *testing.T) {
assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
}
-func assertMsgReceivedWithTimeout(t *testing.T, msg string, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
+func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
ticker := time.NewTicker(checkPeriod)
for {
select {
case <-ticker.C:
msgs := reactor.getMsgs(channel)
if len(msgs) > 0 {
- if !bytes.Equal(msgs[0].Bytes, wire.BinaryBytes(msg)) {
- t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(msg), msgs[0].Bytes)
+ if !bytes.Equal(msgs[0].Bytes, msgBytes) {
+ t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
}
return
}
@@ -222,14 +221,14 @@ func TestConnIDFilter(t *testing.T) {
c1, c2 := conn.NetPipe()
s1.SetIDFilter(func(id ID) error {
- if id == PubKeyToID(s2.nodeInfo.PubKey) {
+ if id == s2.nodeInfo.ID {
return fmt.Errorf("Error: pipe is blacklisted")
}
return nil
})
s2.SetIDFilter(func(id ID) error {
- if id == PubKeyToID(s1.nodeInfo.PubKey) {
+ if id == s1.nodeInfo.ID {
return fmt.Errorf("Error: pipe is blacklisted")
}
return nil
@@ -259,7 +258,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
defer sw.Stop()
// simulate remote peer
- rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: DefaultPeerConfig()}
+ rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start()
defer rp.Stop()
@@ -289,7 +288,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
defer sw.Stop()
// simulate remote peer
- rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: DefaultPeerConfig()}
+ rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start()
defer rp.Stop()
@@ -359,7 +358,7 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
// Send random message from foo channel to another
for i := 0; i < b.N; i++ {
chID := byte(i % 4)
- successChan := s1.Broadcast(chID, "test data")
+ successChan := s1.Broadcast(chID, []byte("test data"))
for s := range successChan {
if s {
numSuccess++
diff --git a/p2p/test_util.go b/p2p/test_util.go
index 535b0bd0b..fe5282a90 100644
--- a/p2p/test_util.go
+++ b/p2p/test_util.go
@@ -1,7 +1,6 @@
package p2p
import (
- "math/rand"
"net"
crypto "github.com/tendermint/go-crypto"
@@ -23,8 +22,8 @@ func CreateRandomPeer(outbound bool) *peer {
outbound: outbound,
},
nodeInfo: NodeInfo{
+ ID: netAddr.ID,
ListenAddr: netAddr.DialString(),
- PubKey: crypto.GenPrivKeyEd25519().Wrap().PubKey(),
},
mconn: &conn.MConnection{},
}
@@ -35,7 +34,7 @@ func CreateRandomPeer(outbound bool) *peer {
func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
for {
var err error
- addr = cmn.Fmt("%X@%v.%v.%v.%v:46656", cmn.RandBytes(20), rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256)
+ addr = cmn.Fmt("%X@%v.%v.%v.%v:46656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256)
netAddr, err = NewNetAddressString(addr)
if err != nil {
panic(err)
@@ -131,17 +130,17 @@ func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
// new switch, add reactors
// TODO: let the config be passed in?
nodeKey := &NodeKey{
- PrivKey: crypto.GenPrivKeyEd25519().Wrap(),
+ PrivKey: crypto.GenPrivKeyEd25519(),
}
sw := NewSwitch(cfg)
sw.SetLogger(log.TestingLogger())
sw = initSwitch(i, sw)
ni := NodeInfo{
- PubKey: nodeKey.PubKey(),
+ ID: nodeKey.ID(),
Moniker: cmn.Fmt("switch%d", i),
Network: network,
Version: version,
- ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
+ ListenAddr: cmn.Fmt("%v:%v", network, cmn.RandIntn(64512)+1023),
}
for ch := range sw.reactorsByCh {
ni.Channels = append(ni.Channels, ch)
diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go
index d2338b95e..55479415f 100644
--- a/p2p/upnp/probe.go
+++ b/p2p/upnp/probe.go
@@ -5,8 +5,6 @@ import (
"net"
"time"
- "github.com/pkg/errors"
-
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
@@ -19,26 +17,26 @@ type UPNPCapabilities struct {
func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) {
nat, err := Discover()
if err != nil {
- return nil, nil, nil, errors.Errorf("NAT upnp could not be discovered: %v", err)
+ return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %v", err)
}
logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP))
ext, err := nat.GetExternalAddress()
if err != nil {
- return nat, nil, nil, errors.Errorf("External address error: %v", err)
+ return nat, nil, nil, fmt.Errorf("External address error: %v", err)
}
logger.Info(cmn.Fmt("External address: %v", ext))
port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
if err != nil {
- return nat, nil, ext, errors.Errorf("Port mapping error: %v", err)
+ return nat, nil, ext, fmt.Errorf("Port mapping error: %v", err)
}
logger.Info(cmn.Fmt("Port mapping mapped: %v", port))
// also run the listener, open for all remote addresses.
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
if err != nil {
- return nat, nil, ext, errors.Errorf("Error establishing listener: %v", err)
+ return nat, nil, ext, fmt.Errorf("Error establishing listener: %v", err)
}
return nat, listener, ext, nil
}
diff --git a/p2p/wire.go b/p2p/wire.go
new file mode 100644
index 000000000..a90ac851c
--- /dev/null
+++ b/p2p/wire.go
@@ -0,0 +1,12 @@
+package p2p
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+}
diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go
index 40a42c186..2254c1d1e 100644
--- a/rpc/client/event_test.go
+++ b/rpc/client/event_test.go
@@ -1,16 +1,16 @@
package client_test
import (
+ "reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
- cmn "github.com/tendermint/tmlibs/common"
-
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
+ cmn "github.com/tendermint/tmlibs/common"
)
var waitForEventTimeout = 5 * time.Second
@@ -23,116 +23,127 @@ func MakeTxKV() ([]byte, []byte, []byte) {
}
func TestHeaderEvents(t *testing.T) {
- require := require.New(t)
for i, c := range GetClients() {
- // start for this test it if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(err, "%d: %+v", i, err)
- defer c.Stop()
- }
+ i, c := i, c // capture params
+ t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
+ // start for this test it if it wasn't already running
+ if !c.IsRunning() {
+ // if so, then we start it, listen, and stop it.
+ err := c.Start()
+ require.Nil(t, err, "%d: %+v", i, err)
+ defer c.Stop()
+ }
- evtTyp := types.EventNewBlockHeader
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(err, "%d: %+v", i, err)
- _, ok := evt.Unwrap().(types.EventDataNewBlockHeader)
- require.True(ok, "%d: %#v", i, evt)
- // TODO: more checks...
+ evtTyp := types.EventNewBlockHeader
+ evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+ require.Nil(t, err, "%d: %+v", i, err)
+ _, ok := evt.(types.EventDataNewBlockHeader)
+ require.True(t, ok, "%d: %#v", i, evt)
+ // TODO: more checks...
+ })
}
}
func TestBlockEvents(t *testing.T) {
- require := require.New(t)
for i, c := range GetClients() {
- // start for this test it if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(err, "%d: %+v", i, err)
- defer c.Stop()
- }
+ i, c := i, c // capture params
+ t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
- // listen for a new block; ensure height increases by 1
- var firstBlockHeight int64
- for j := 0; j < 3; j++ {
- evtTyp := types.EventNewBlock
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(err, "%d: %+v", j, err)
- blockEvent, ok := evt.Unwrap().(types.EventDataNewBlock)
- require.True(ok, "%d: %#v", j, evt)
-
- block := blockEvent.Block
- if j == 0 {
- firstBlockHeight = block.Header.Height
- continue
+ // start for this test it if it wasn't already running
+ if !c.IsRunning() {
+ // if so, then we start it, listen, and stop it.
+ err := c.Start()
+ require.Nil(t, err, "%d: %+v", i, err)
+ defer c.Stop()
}
- require.Equal(block.Header.Height, firstBlockHeight+int64(j))
- }
+ // listen for a new block; ensure height increases by 1
+ var firstBlockHeight int64
+ for j := 0; j < 3; j++ {
+ evtTyp := types.EventNewBlock
+ evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+ require.Nil(t, err, "%d: %+v", j, err)
+ blockEvent, ok := evt.(types.EventDataNewBlock)
+ require.True(t, ok, "%d: %#v", j, evt)
+
+ block := blockEvent.Block
+ if j == 0 {
+ firstBlockHeight = block.Header.Height
+ continue
+ }
+
+ require.Equal(t, block.Header.Height, firstBlockHeight+int64(j))
+ }
+ })
}
}
func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) {
- require := require.New(t)
for i, c := range GetClients() {
- // start for this test it if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(err, "%d: %+v", i, err)
- defer c.Stop()
- }
+ i, c := i, c // capture params
+ t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
- // make the tx
- _, _, tx := MakeTxKV()
- evtTyp := types.EventTx
+ // start for this test it if it wasn't already running
+ if !c.IsRunning() {
+ // if so, then we start it, listen, and stop it.
+ err := c.Start()
+ require.Nil(t, err, "%d: %+v", i, err)
+ defer c.Stop()
+ }
- // send async
- txres, err := c.BroadcastTxAsync(tx)
- require.Nil(err, "%+v", err)
- require.Equal(txres.Code, abci.CodeTypeOK) // FIXME
+ // make the tx
+ _, _, tx := MakeTxKV()
+ evtTyp := types.EventTx
- // and wait for confirmation
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(err, "%d: %+v", i, err)
- // and make sure it has the proper info
- txe, ok := evt.Unwrap().(types.EventDataTx)
- require.True(ok, "%d: %#v", i, evt)
- // make sure this is the proper tx
- require.EqualValues(tx, txe.Tx)
- require.True(txe.Result.IsOK())
+ // send async
+ txres, err := c.BroadcastTxAsync(tx)
+ require.Nil(t, err, "%+v", err)
+ require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
+
+ // and wait for confirmation
+ evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+ require.Nil(t, err, "%d: %+v", i, err)
+ // and make sure it has the proper info
+ txe, ok := evt.(types.EventDataTx)
+ require.True(t, ok, "%d: %#v", i, evt)
+ // make sure this is the proper tx
+ require.EqualValues(t, tx, txe.Tx)
+ require.True(t, txe.Result.IsOK())
+ })
}
}
func TestTxEventsSentWithBroadcastTxSync(t *testing.T) {
- require := require.New(t)
for i, c := range GetClients() {
- // start for this test it if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(err, "%d: %+v", i, err)
- defer c.Stop()
- }
+ i, c := i, c // capture params
+ t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
- // make the tx
- _, _, tx := MakeTxKV()
- evtTyp := types.EventTx
+ // start for this test it if it wasn't already running
+ if !c.IsRunning() {
+ // if so, then we start it, listen, and stop it.
+ err := c.Start()
+ require.Nil(t, err, "%d: %+v", i, err)
+ defer c.Stop()
+ }
- // send sync
- txres, err := c.BroadcastTxSync(tx)
- require.Nil(err, "%+v", err)
- require.Equal(txres.Code, abci.CodeTypeOK) // FIXME
+ // make the tx
+ _, _, tx := MakeTxKV()
+ evtTyp := types.EventTx
- // and wait for confirmation
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(err, "%d: %+v", i, err)
- // and make sure it has the proper info
- txe, ok := evt.Unwrap().(types.EventDataTx)
- require.True(ok, "%d: %#v", i, evt)
- // make sure this is the proper tx
- require.EqualValues(tx, txe.Tx)
- require.True(txe.Result.IsOK())
+ // send sync
+ txres, err := c.BroadcastTxSync(tx)
+ require.Nil(t, err, "%+v", err)
+ require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
+
+ // and wait for confirmation
+ evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+ require.Nil(t, err, "%d: %+v", i, err)
+ // and make sure it has the proper info
+ txe, ok := evt.(types.EventDataTx)
+ require.True(t, ok, "%d: %#v", i, evt)
+ // make sure this is the proper tx
+ require.EqualValues(t, tx, txe.Tx)
+ require.True(t, txe.Result.IsOK())
+ })
}
}
diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go
index e7a84b6b4..7e64d1164 100644
--- a/rpc/client/helpers.go
+++ b/rpc/client/helpers.go
@@ -41,7 +41,7 @@ func WaitForHeight(c StatusClient, h int64, waiter Waiter) error {
if err != nil {
return err
}
- delta = h - s.LatestBlockHeight
+ delta = h - s.SyncInfo.LatestBlockHeight
// wait for the time, or abort early
if err := waiter(delta); err != nil {
return err
@@ -65,7 +65,7 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type
query := types.QueryForEvent(evtTyp)
err := c.Subscribe(ctx, subscriber, query, evts)
if err != nil {
- return types.TMEventData{}, errors.Wrap(err, "failed to subscribe")
+ return nil, errors.Wrap(err, "failed to subscribe")
}
// make sure to unregister after the test is over
@@ -75,6 +75,6 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type
case evt := <-evts:
return evt.(types.TMEventData), nil
case <-ctx.Done():
- return types.TMEventData{}, errors.New("timed out waiting for event")
+ return nil, errors.New("timed out waiting for event")
}
}
diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go
index cef462475..d5c31ddaf 100644
--- a/rpc/client/helpers_test.go
+++ b/rpc/client/helpers_test.go
@@ -32,7 +32,7 @@ func TestWaitForHeight(t *testing.T) {
// now set current block height to 10
m.Call = mock.Call{
- Response: &ctypes.ResultStatus{LatestBlockHeight: 10},
+ Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10} },
}
// we will not wait for more than 10 blocks
@@ -52,7 +52,7 @@ func TestWaitForHeight(t *testing.T) {
// we use the callback to update the status height
myWaiter := func(delta int64) error {
// update the height for the next call
- m.Call.Response = &ctypes.ResultStatus{LatestBlockHeight: 15}
+ m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}}
return client.DefaultWaitStrategy(delta)
}
@@ -66,11 +66,11 @@ func TestWaitForHeight(t *testing.T) {
require.Nil(pre.Error)
prer, ok := pre.Response.(*ctypes.ResultStatus)
require.True(ok)
- assert.Equal(int64(10), prer.LatestBlockHeight)
+ assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight)
post := r.Calls[4]
require.Nil(post.Error)
postr, ok := post.Response.(*ctypes.ResultStatus)
require.True(ok)
- assert.Equal(int64(15), postr.LatestBlockHeight)
+ assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight)
}
diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go
index 83d77f317..83207eb7b 100644
--- a/rpc/client/httpclient.go
+++ b/rpc/client/httpclient.go
@@ -2,11 +2,11 @@ package client
import (
"context"
- "encoding/json"
"sync"
"github.com/pkg/errors"
+ amino "github.com/tendermint/go-amino"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
"github.com/tendermint/tendermint/types"
@@ -32,10 +32,14 @@ type HTTP struct {
// New takes a remote endpoint in the form tcp://:
// and the websocket path (which always seems to be "/websocket")
func NewHTTP(remote, wsEndpoint string) *HTTP {
+ rc := rpcclient.NewJSONRPCClient(remote)
+ cdc := rc.Codec()
+ ctypes.RegisterAmino(cdc)
+
return &HTTP{
- rpc: rpcclient.NewJSONRPCClient(remote),
+ rpc: rc,
remote: remote,
- WSEvents: newWSEvents(remote, wsEndpoint),
+ WSEvents: newWSEvents(cdc, remote, wsEndpoint),
}
}
@@ -217,6 +221,7 @@ func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) {
type WSEvents struct {
cmn.BaseService
+ cdc *amino.Codec
remote string
endpoint string
ws *rpcclient.WSClient
@@ -225,8 +230,9 @@ type WSEvents struct {
subscriptions map[string]chan<- interface{}
}
-func newWSEvents(remote, endpoint string) *WSEvents {
+func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents {
wsEvents := &WSEvents{
+ cdc: cdc,
endpoint: endpoint,
remote: remote,
subscriptions: make(map[string]chan<- interface{}),
@@ -240,6 +246,8 @@ func (w *WSEvents) OnStart() error {
w.ws = rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() {
w.redoSubscriptions()
}))
+ w.ws.SetCodec(w.cdc)
+
err := w.ws.Start()
if err != nil {
return err
@@ -335,7 +343,7 @@ func (w *WSEvents) eventListener() {
continue
}
result := new(ctypes.ResultEvent)
- err := json.Unmarshal(resp.Result, result)
+ err := w.cdc.UnmarshalJSON(resp.Result, result)
if err != nil {
w.Logger.Error("failed to unmarshal response", "err", err)
continue
diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go
index b3827fb13..dafd35080 100644
--- a/rpc/client/mock/status_test.go
+++ b/rpc/client/mock/status_test.go
@@ -17,9 +17,11 @@ func TestStatus(t *testing.T) {
m := &mock.StatusMock{
Call: mock.Call{
Response: &ctypes.ResultStatus{
- LatestBlockHash: cmn.HexBytes("block"),
- LatestAppHash: cmn.HexBytes("app"),
- LatestBlockHeight: 10,
+ SyncInfo: ctypes.SyncInfo{
+ LatestBlockHash: cmn.HexBytes("block"),
+ LatestAppHash: cmn.HexBytes("app"),
+ LatestBlockHeight: 10,
+ },
}},
}
@@ -29,8 +31,8 @@ func TestStatus(t *testing.T) {
// make sure response works proper
status, err := r.Status()
require.Nil(err, "%+v", err)
- assert.EqualValues("block", status.LatestBlockHash)
- assert.EqualValues(10, status.LatestBlockHeight)
+ assert.EqualValues("block", status.SyncInfo.LatestBlockHash)
+ assert.EqualValues(10, status.SyncInfo.LatestBlockHeight)
// make sure recorder works properly
require.Equal(1, len(r.Calls))
@@ -41,6 +43,6 @@ func TestStatus(t *testing.T) {
require.NotNil(rs.Response)
st, ok := rs.Response.(*ctypes.ResultStatus)
require.True(ok)
- assert.EqualValues("block", st.LatestBlockHash)
- assert.EqualValues(10, st.LatestBlockHeight)
+ assert.EqualValues("block", st.SyncInfo.LatestBlockHash)
+ assert.EqualValues(10, st.SyncInfo.LatestBlockHeight)
}
diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go
index c5c5822f9..bfdc707f4 100644
--- a/rpc/client/rpc_test.go
+++ b/rpc/client/rpc_test.go
@@ -50,7 +50,7 @@ func TestInfo(t *testing.T) {
info, err := c.ABCIInfo()
require.Nil(t, err, "%d: %+v", i, err)
// TODO: this is not correct - fix merkleeyes!
- // assert.EqualValues(t, status.LatestBlockHeight, info.Response.LastBlockHeight)
+ // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight)
assert.True(t, strings.Contains(info.Response.Data, "size"))
}
}
@@ -136,7 +136,7 @@ func TestAppCalls(t *testing.T) {
s, err := c.Status()
require.Nil(err, "%d: %+v", i, err)
// sh is start height or status height
- sh := s.LatestBlockHeight
+ sh := s.SyncInfo.LatestBlockHeight
// look for the future
h := sh + 2
diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go
index 25b67925c..7647aef7d 100644
--- a/rpc/core/consensus.go
+++ b/rpc/core/consensus.go
@@ -1,8 +1,9 @@
package core
import (
+ "encoding/json"
+
cm "github.com/tendermint/tendermint/consensus"
- cstypes "github.com/tendermint/tendermint/consensus/types"
p2p "github.com/tendermint/tendermint/p2p"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
sm "github.com/tendermint/tendermint/state"
@@ -58,7 +59,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
return &ctypes.ResultValidators{height, validators.Validators}, nil
}
-// Dump consensus state.
+// DumpConsensusState dumps consensus state.
//
// ```shell
// curl 'localhost:46657/dump_consensus_state'
@@ -83,11 +84,18 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
// }
// ```
func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
- peerRoundStates := make(map[p2p.ID]*cstypes.PeerRoundState)
+ peerRoundStates := make(map[p2p.ID]json.RawMessage)
for _, peer := range p2pSwitch.Peers().List() {
peerState := peer.Get(types.PeerStateKey).(*cm.PeerState)
- peerRoundState := peerState.GetRoundState()
+ peerRoundState, err := peerState.GetRoundStateJSON()
+ if err != nil {
+ return nil, err
+ }
peerRoundStates[peer.ID()] = peerRoundState
}
- return &ctypes.ResultDumpConsensusState{consensusState.GetRoundState(), peerRoundStates}, nil
+ roundState, err := consensusState.GetRoundStateJSON()
+ if err != nil {
+ return nil, err
+ }
+ return &ctypes.ResultDumpConsensusState{roundState, peerRoundStates}, nil
}
diff --git a/rpc/core/events.go b/rpc/core/events.go
index 9353ace6a..a46e0947c 100644
--- a/rpc/core/events.go
+++ b/rpc/core/events.go
@@ -61,7 +61,7 @@ import (
//
// go func() {
// for e := range txs {
-// fmt.Println("got ", e.(types.TMEventData).Unwrap().(types.EventDataTx))
+// fmt.Println("got ", e.(types.EventDataTx))
// }
// }()
// ```
@@ -104,7 +104,7 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri
go func() {
for event := range ch {
tmResult := &ctypes.ResultEvent{query, event.(tmtypes.TMEventData)}
- wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Request.ID+"#event", tmResult))
+ wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Codec(), wsCtx.Request.ID+"#event", tmResult))
}
}()
diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go
index 1dbdd8012..77c8c8446 100644
--- a/rpc/core/mempool.go
+++ b/rpc/core/mempool.go
@@ -189,7 +189,7 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
timer := time.NewTimer(60 * 2 * time.Second)
select {
case deliverTxResMsg := <-deliverTxResCh:
- deliverTxRes := deliverTxResMsg.(types.TMEventData).Unwrap().(types.EventDataTx)
+ deliverTxRes := deliverTxResMsg.(types.EventDataTx)
// The tx was included in a block.
deliverTxR := deliverTxRes.Result
logger.Info("DeliverTx passed ", "tx", cmn.HexBytes(tx), "response", deliverTxR)
diff --git a/rpc/core/net.go b/rpc/core/net.go
index 1918abf11..9b04926ab 100644
--- a/rpc/core/net.go
+++ b/rpc/core/net.go
@@ -43,7 +43,6 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
for _, peer := range p2pSwitch.Peers().List() {
peers = append(peers, ctypes.Peer{
NodeInfo: peer.NodeInfo(),
- ID: peer.ID(),
IsOutbound: peer.IsOutbound(),
ConnectionStatus: peer.Status(),
})
diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go
index 1eb00ceeb..0bbead943 100644
--- a/rpc/core/pipe.go
+++ b/rpc/core/pipe.go
@@ -3,9 +3,8 @@ package core
import (
"time"
- "github.com/tendermint/go-crypto"
+ crypto "github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/consensus"
- cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
@@ -23,7 +22,7 @@ var subscribeTimeout = 5 * time.Second
type Consensus interface {
GetState() sm.State
GetValidators() (int64, []*types.Validator)
- GetRoundState() *cstypes.RoundState
+ GetRoundStateJSON() ([]byte, error)
}
type P2P interface {
diff --git a/rpc/core/routes.go b/rpc/core/routes.go
index 0e10cefee..f1150f03b 100644
--- a/rpc/core/routes.go
+++ b/rpc/core/routes.go
@@ -5,6 +5,7 @@ import (
)
// TODO: better system than "unsafe" prefix
+// NOTE: Amino is registered in rpc/core/types/wire.go.
var Routes = map[string]*rpc.RPCFunc{
// subscribe/unsubscribe are reserved for websocket events.
"subscribe": rpc.NewWSRPCFunc(Subscribe, "query"),
diff --git a/rpc/core/status.go b/rpc/core/status.go
index b4543a61c..9bdf9dd64 100644
--- a/rpc/core/status.go
+++ b/rpc/core/status.go
@@ -27,15 +27,20 @@ import (
// ```json
// {
// "result": {
-// "syncing": false,
-// "latest_block_time": "2017-12-07T18:19:47.617Z",
-// "latest_block_height": 6,
-// "latest_app_hash": "",
-// "latest_block_hash": "A63D0C3307DEDCCFCC82ED411AE9108B70B29E02",
-// "pub_key": {
-// "data": "8C9A68070CBE33F9C445862BA1E9D96A75CEB68C0CF6ADD3652D07DCAC5D0380",
-// "type": "ed25519"
-// },
+// "sync_info": {
+// "syncing": false,
+// "latest_block_time": "2017-12-07T18:19:47.617Z",
+// "latest_block_height": 6,
+// "latest_app_hash": "",
+// "latest_block_hash": "A63D0C3307DEDCCFCC82ED411AE9108B70B29E02",
+// }
+// "validator_info": {
+// "pub_key": {
+// "data": "8C9A68070CBE33F9C445862BA1E9D96A75CEB68C0CF6ADD3652D07DCAC5D0380",
+// "type": "ed25519"
+// },
+// "voting_power": 10
+// }
// "node_info": {
// "other": [
// "wire_version=0.7.2",
@@ -51,9 +56,6 @@ import (
// "network": "test-chain-qhVCa2",
// "moniker": "vagrant-ubuntu-trusty-64",
// "pub_key": "844981FE99ABB19F7816F2D5E94E8A74276AB1153760A7799E925C75401856C6",
-// "validator_status": {
-// "voting_power": 10
-// }
// }
// },
// "id": "",
@@ -78,20 +80,20 @@ func Status() (*ctypes.ResultStatus, error) {
latestBlockTime := time.Unix(0, latestBlockTimeNano)
result := &ctypes.ResultStatus{
- NodeInfo: p2pSwitch.NodeInfo(),
- PubKey: pubKey,
- LatestBlockHash: latestBlockHash,
- LatestAppHash: latestAppHash,
- LatestBlockHeight: latestHeight,
- LatestBlockTime: latestBlockTime,
- Syncing: consensusReactor.FastSync(),
+ NodeInfo: p2pSwitch.NodeInfo(),
+ SyncInfo: ctypes.SyncInfo{
+ LatestBlockHash: latestBlockHash,
+ LatestAppHash: latestAppHash,
+ LatestBlockHeight: latestHeight,
+ LatestBlockTime: latestBlockTime,
+ Syncing: consensusReactor.FastSync(),
+ },
+ ValidatorInfo: ctypes.ValidatorInfo{PubKey: pubKey},
}
// add ValidatorStatus if node is a validator
if val := validatorAtHeight(latestHeight); val != nil {
- result.ValidatorStatus = ctypes.ValidatorStatus{
- VotingPower: val.VotingPower,
- }
+ result.ValidatorInfo.VotingPower = val.VotingPower
}
return result, nil
diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go
index 8a6fff63c..82bfceaa7 100644
--- a/rpc/core/types/responses.go
+++ b/rpc/core/types/responses.go
@@ -1,6 +1,7 @@
package core_types
import (
+ "encoding/json"
"strings"
"time"
@@ -8,7 +9,6 @@ import (
crypto "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
- cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@@ -54,19 +54,23 @@ func NewResultCommit(header *types.Header, commit *types.Commit,
}
}
-type ValidatorStatus struct {
- VotingPower int64 `json:"voting_power"`
+type SyncInfo struct {
+ LatestBlockHash cmn.HexBytes `json:"latest_block_hash"`
+ LatestAppHash cmn.HexBytes `json:"latest_app_hash"`
+ LatestBlockHeight int64 `json:"latest_block_height"`
+ LatestBlockTime time.Time `json:"latest_block_time"`
+ Syncing bool `json:"syncing"`
+}
+
+type ValidatorInfo struct {
+ PubKey crypto.PubKey `json:"pub_key"`
+ VotingPower int64 `json:"voting_power"`
}
type ResultStatus struct {
- NodeInfo p2p.NodeInfo `json:"node_info"`
- PubKey crypto.PubKey `json:"pub_key"`
- LatestBlockHash cmn.HexBytes `json:"latest_block_hash"`
- LatestAppHash cmn.HexBytes `json:"latest_app_hash"`
- LatestBlockHeight int64 `json:"latest_block_height"`
- LatestBlockTime time.Time `json:"latest_block_time"`
- Syncing bool `json:"syncing"`
- ValidatorStatus ValidatorStatus `json:"validator_status,omitempty"`
+ NodeInfo p2p.NodeInfo `json:"node_info"`
+ SyncInfo SyncInfo `json:"sync_info"`
+ ValidatorInfo ValidatorInfo `json:"validator_info"`
}
func (s *ResultStatus) TxIndexEnabled() bool {
@@ -98,7 +102,6 @@ type ResultDialPeers struct {
type Peer struct {
p2p.NodeInfo `json:"node_info"`
- p2p.ID `json:"node_id"`
IsOutbound bool `json:"is_outbound"`
ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
}
@@ -109,8 +112,8 @@ type ResultValidators struct {
}
type ResultDumpConsensusState struct {
- RoundState *cstypes.RoundState `json:"round_state"`
- PeerRoundStates map[p2p.ID]*cstypes.PeerRoundState `json:"peer_round_states"`
+ RoundState json.RawMessage `json:"round_state"`
+ PeerRoundStates map[p2p.ID]json.RawMessage `json:"peer_round_states"`
}
type ResultBroadcastTx struct {
diff --git a/rpc/core/types/wire.go b/rpc/core/types/wire.go
new file mode 100644
index 000000000..6648364b1
--- /dev/null
+++ b/rpc/core/types/wire.go
@@ -0,0 +1,13 @@
+package core_types
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+ "github.com/tendermint/tendermint/types"
+)
+
+func RegisterAmino(cdc *amino.Codec) {
+ types.RegisterEventDatas(cdc)
+ types.RegisterEvidences(cdc)
+ crypto.RegisterAmino(cdc)
+}
diff --git a/rpc/lib/client/args_test.go b/rpc/lib/client/args_test.go
index ccabd0d2c..4442ac2bc 100644
--- a/rpc/lib/client/args_test.go
+++ b/rpc/lib/client/args_test.go
@@ -5,6 +5,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/tendermint/go-amino"
)
type Tx []byte
@@ -27,9 +29,11 @@ func TestArgToJSON(t *testing.T) {
{Foo{7, "hello"}, `{"Bar":7,"Baz":"hello"}`},
}
+ cdc := amino.NewCodec()
+
for i, tc := range cases {
args := map[string]interface{}{"data": tc.input}
- err := argsToJson(args)
+ err := argsToJSON(cdc, args)
require.Nil(err, "%d: %+v", i, err)
require.Equal(1, len(args), "%d", i)
data, ok := args["data"].(string)
diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go
index 902b7eebc..e26d8f274 100644
--- a/rpc/lib/client/http_client.go
+++ b/rpc/lib/client/http_client.go
@@ -12,6 +12,7 @@ import (
"strings"
"github.com/pkg/errors"
+ "github.com/tendermint/go-amino"
types "github.com/tendermint/tendermint/rpc/lib/types"
)
@@ -19,6 +20,8 @@ import (
// HTTPClient is a common interface for JSONRPCClient and URIClient.
type HTTPClient interface {
Call(method string, params map[string]interface{}, result interface{}) (interface{}, error)
+ Codec() *amino.Codec
+ SetCodec(*amino.Codec)
}
// TODO: Deprecate support for IP:PORT or /path/to/socket
@@ -66,6 +69,7 @@ func makeHTTPClient(remoteAddr string) (string, *http.Client) {
type JSONRPCClient struct {
address string
client *http.Client
+ cdc *amino.Codec
}
// NewJSONRPCClient returns a JSONRPCClient pointed at the given address.
@@ -74,11 +78,12 @@ func NewJSONRPCClient(remote string) *JSONRPCClient {
return &JSONRPCClient{
address: address,
client: client,
+ cdc: amino.NewCodec(),
}
}
func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) {
- request, err := types.MapToRequest("jsonrpc-client", method, params)
+ request, err := types.MapToRequest(c.cdc, "jsonrpc-client", method, params)
if err != nil {
return nil, err
}
@@ -100,7 +105,15 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul
return nil, err
}
// log.Info(Fmt("RPC response: %v", string(responseBytes)))
- return unmarshalResponseBytes(responseBytes, result)
+ return unmarshalResponseBytes(c.cdc, responseBytes, result)
+}
+
+func (c *JSONRPCClient) Codec() *amino.Codec {
+ return c.cdc
+}
+
+func (c *JSONRPCClient) SetCodec(cdc *amino.Codec) {
+ c.cdc = cdc
}
//-------------------------------------------------------------
@@ -109,6 +122,7 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul
type URIClient struct {
address string
client *http.Client
+ cdc *amino.Codec
}
func NewURIClient(remote string) *URIClient {
@@ -116,11 +130,12 @@ func NewURIClient(remote string) *URIClient {
return &URIClient{
address: address,
client: client,
+ cdc: amino.NewCodec(),
}
}
func (c *URIClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) {
- values, err := argsToURLValues(params)
+ values, err := argsToURLValues(c.cdc, params)
if err != nil {
return nil, err
}
@@ -135,15 +150,22 @@ func (c *URIClient) Call(method string, params map[string]interface{}, result in
if err != nil {
return nil, err
}
- return unmarshalResponseBytes(responseBytes, result)
+ return unmarshalResponseBytes(c.cdc, responseBytes, result)
+}
+
+func (c *URIClient) Codec() *amino.Codec {
+ return c.cdc
+}
+
+func (c *URIClient) SetCodec(cdc *amino.Codec) {
+ c.cdc = cdc
}
//------------------------------------------------
-func unmarshalResponseBytes(responseBytes []byte, result interface{}) (interface{}, error) {
- // read response
- // if rpc/core/types is imported, the result will unmarshal
- // into the correct type
+func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, result interface{}) (interface{}, error) {
+ // Read response. If rpc/core/types is imported, the result will unmarshal
+ // into the correct type.
// log.Notice("response", "response", string(responseBytes))
var err error
response := &types.RPCResponse{}
@@ -154,20 +176,20 @@ func unmarshalResponseBytes(responseBytes []byte, result interface{}) (interface
if response.Error != nil {
return nil, errors.Errorf("Response error: %v", response.Error)
}
- // unmarshal the RawMessage into the result
- err = json.Unmarshal(response.Result, result)
+ // Unmarshal the RawMessage into the result.
+ err = cdc.UnmarshalJSON(response.Result, result)
if err != nil {
return nil, errors.Errorf("Error unmarshalling rpc response result: %v", err)
}
return result, nil
}
-func argsToURLValues(args map[string]interface{}) (url.Values, error) {
+func argsToURLValues(cdc *amino.Codec, args map[string]interface{}) (url.Values, error) {
values := make(url.Values)
if len(args) == 0 {
return values, nil
}
- err := argsToJson(args)
+ err := argsToJSON(cdc, args)
if err != nil {
return nil, err
}
@@ -177,7 +199,7 @@ func argsToURLValues(args map[string]interface{}) (url.Values, error) {
return values, nil
}
-func argsToJson(args map[string]interface{}) error {
+func argsToJSON(cdc *amino.Codec, args map[string]interface{}) error {
for k, v := range args {
rt := reflect.TypeOf(v)
isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8
@@ -187,7 +209,7 @@ func argsToJson(args map[string]interface{}) error {
continue
}
- data, err := json.Marshal(v)
+ data, err := cdc.MarshalJSON(v)
if err != nil {
return err
}
diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go
index ab2e94d0b..a95ce17d2 100644
--- a/rpc/lib/client/ws_client.go
+++ b/rpc/lib/client/ws_client.go
@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
- "math/rand"
"net"
"net/http"
"sync"
@@ -14,6 +13,7 @@ import (
"github.com/pkg/errors"
metrics "github.com/rcrowley/go-metrics"
+ "github.com/tendermint/go-amino"
types "github.com/tendermint/tendermint/rpc/lib/types"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -31,6 +31,7 @@ type WSClient struct {
cmn.BaseService
conn *websocket.Conn
+ cdc *amino.Codec
Address string // IP:PORT or /path/to/socket
Endpoint string // /websocket/url/endpoint
@@ -77,6 +78,7 @@ type WSClient struct {
func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSClient {
addr, dialer := makeHTTPDialer(remoteAddr)
c := &WSClient{
+ cdc: amino.NewCodec(),
Address: addr,
Dialer: dialer,
Endpoint: endpoint,
@@ -206,7 +208,7 @@ func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error {
// Call the given method. See Send description.
func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error {
- request, err := types.MapToRequest("ws-client", method, params)
+ request, err := types.MapToRequest(c.cdc, "ws-client", method, params)
if err != nil {
return err
}
@@ -216,13 +218,21 @@ func (c *WSClient) Call(ctx context.Context, method string, params map[string]in
// CallWithArrayParams the given method with params in a form of array. See
// Send description.
func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error {
- request, err := types.ArrayToRequest("ws-client", method, params)
+ request, err := types.ArrayToRequest(c.cdc, "ws-client", method, params)
if err != nil {
return err
}
return c.Send(ctx, request)
}
+func (c *WSClient) Codec() *amino.Codec {
+ return c.cdc
+}
+
+func (c *WSClient) SetCodec(cdc *amino.Codec) {
+ c.cdc = cdc
+}
+
///////////////////////////////////////////////////////////////////////////////
// Private methods
@@ -255,7 +265,7 @@ func (c *WSClient) reconnect() error {
}()
for {
- jitterSeconds := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns)
+ jitterSeconds := time.Duration(cmn.RandFloat64() * float64(time.Second)) // 1s == (1e9 ns)
backoffDuration := jitterSeconds + ((1 << uint(attempt)) * time.Second)
c.Logger.Info("reconnecting", "attempt", attempt+1, "backoff_duration", backoffDuration)
diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go
index cadc68f8e..f34b09f68 100644
--- a/rpc/lib/rpc_test.go
+++ b/rpc/lib/rpc_test.go
@@ -17,6 +17,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -60,6 +61,9 @@ var Routes = map[string]*server.RPCFunc{
"echo_int": server.NewRPCFunc(EchoIntResult, "arg"),
}
+// Amino codec required to encode/decode everything above.
+var RoutesCdc = amino.NewCodec()
+
func EchoResult(v string) (*ResultEcho, error) {
return &ResultEcho{v}, nil
}
@@ -114,8 +118,8 @@ func setup() {
tcpLogger := logger.With("socket", "tcp")
mux := http.NewServeMux()
- server.RegisterRPCFuncs(mux, Routes, tcpLogger)
- wm := server.NewWebsocketManager(Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second))
+ server.RegisterRPCFuncs(mux, Routes, RoutesCdc, tcpLogger)
+ wm := server.NewWebsocketManager(Routes, RoutesCdc, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second))
wm.SetLogger(tcpLogger)
mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler)
go func() {
@@ -127,8 +131,8 @@ func setup() {
unixLogger := logger.With("socket", "unix")
mux2 := http.NewServeMux()
- server.RegisterRPCFuncs(mux2, Routes, unixLogger)
- wm = server.NewWebsocketManager(Routes)
+ server.RegisterRPCFuncs(mux2, Routes, RoutesCdc, unixLogger)
+ wm = server.NewWebsocketManager(Routes, RoutesCdc)
wm.SetLogger(unixLogger)
mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler)
go func() {
diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go
index 19fc0f6e4..07ccfb6f3 100644
--- a/rpc/lib/server/handlers.go
+++ b/rpc/lib/server/handlers.go
@@ -17,6 +17,7 @@ import (
"github.com/gorilla/websocket"
"github.com/pkg/errors"
+ "github.com/tendermint/go-amino"
types "github.com/tendermint/tendermint/rpc/lib/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -24,14 +25,14 @@ import (
// RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions.
// "result" is the interface on which the result objects are registered, and is popualted with every RPCResponse
-func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) {
+func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) {
// HTTP endpoints
for funcName, rpcFunc := range funcMap {
- mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, logger))
+ mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, cdc, logger))
}
// JSONRPC endpoints
- mux.HandleFunc("/", makeJSONRPCHandler(funcMap, logger))
+ mux.HandleFunc("/", makeJSONRPCHandler(funcMap, cdc, logger))
}
//-------------------------------------
@@ -98,7 +99,7 @@ func funcReturnTypes(f interface{}) []reflect.Type {
// rpc.json
// jsonrpc calls grab the given method's function info and runs reflect.Call
-func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc {
+func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
@@ -135,7 +136,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
}
var args []reflect.Value
if len(request.Params) > 0 {
- args, err = jsonParamsToArgsRPC(rpcFunc, request.Params)
+ args, err = jsonParamsToArgsRPC(rpcFunc, cdc, request.Params)
if err != nil {
WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "Error converting json params to arguments")))
return
@@ -148,18 +149,18 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
WriteRPCResponseHTTP(w, types.RPCInternalError(request.ID, err))
return
}
- WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(request.ID, result))
+ WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, request.ID, result))
}
}
-func mapParamsToArgs(rpcFunc *RPCFunc, params map[string]*json.RawMessage, argsOffset int) ([]reflect.Value, error) {
+func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json.RawMessage, argsOffset int) ([]reflect.Value, error) {
values := make([]reflect.Value, len(rpcFunc.argNames))
for i, argName := range rpcFunc.argNames {
argType := rpcFunc.args[i+argsOffset]
- if p, ok := params[argName]; ok && p != nil && len(*p) > 0 {
+ if p, ok := params[argName]; ok && p != nil && len(p) > 0 {
val := reflect.New(argType)
- err := json.Unmarshal(*p, val.Interface())
+ err := cdc.UnmarshalJSON(p, val.Interface())
if err != nil {
return nil, err
}
@@ -172,7 +173,7 @@ func mapParamsToArgs(rpcFunc *RPCFunc, params map[string]*json.RawMessage, argsO
return values, nil
}
-func arrayParamsToArgs(rpcFunc *RPCFunc, params []*json.RawMessage, argsOffset int) ([]reflect.Value, error) {
+func arrayParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params []json.RawMessage, argsOffset int) ([]reflect.Value, error) {
if len(rpcFunc.argNames) != len(params) {
return nil, errors.Errorf("Expected %v parameters (%v), got %v (%v)",
len(rpcFunc.argNames), rpcFunc.argNames, len(params), params)
@@ -182,7 +183,7 @@ func arrayParamsToArgs(rpcFunc *RPCFunc, params []*json.RawMessage, argsOffset i
for i, p := range params {
argType := rpcFunc.args[i+argsOffset]
val := reflect.New(argType)
- err := json.Unmarshal(*p, val.Interface())
+ err := cdc.UnmarshalJSON(p, val.Interface())
if err != nil {
return nil, err
}
@@ -191,39 +192,41 @@ func arrayParamsToArgs(rpcFunc *RPCFunc, params []*json.RawMessage, argsOffset i
return values, nil
}
-// raw is unparsed json (from json.RawMessage) encoding either a map or an array.
+// `raw` is unparsed json (from json.RawMessage) encoding either a map or an array.
+// `argsOffset` should be 0 for RPC calls, and 1 for WS requests, where len(rpcFunc.args) != len(rpcFunc.argNames).
//
-// argsOffset should be 0 for RPC calls, and 1 for WS requests, where len(rpcFunc.args) != len(rpcFunc.argNames).
// Example:
// rpcFunc.args = [rpctypes.WSRPCContext string]
// rpcFunc.argNames = ["arg"]
-func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte, argsOffset int) ([]reflect.Value, error) {
- // first, try to get the map..
- var m map[string]*json.RawMessage
+func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte, argsOffset int) ([]reflect.Value, error) {
+
+ // TODO: Make more efficient, perhaps by checking the first character for '{' or '['?
+ // First, try to get the map.
+ var m map[string]json.RawMessage
err := json.Unmarshal(raw, &m)
if err == nil {
- return mapParamsToArgs(rpcFunc, m, argsOffset)
+ return mapParamsToArgs(rpcFunc, cdc, m, argsOffset)
}
- // otherwise, try an array
- var a []*json.RawMessage
+ // Otherwise, try an array.
+ var a []json.RawMessage
err = json.Unmarshal(raw, &a)
if err == nil {
- return arrayParamsToArgs(rpcFunc, a, argsOffset)
+ return arrayParamsToArgs(rpcFunc, cdc, a, argsOffset)
}
- // otherwise, bad format, we cannot parse
+ // Otherwise, bad format, we cannot parse.
return nil, errors.Errorf("Unknown type for JSON params: %v. Expected map or array", err)
}
// Convert a []interface{} OR a map[string]interface{} to properly typed values
-func jsonParamsToArgsRPC(rpcFunc *RPCFunc, params json.RawMessage) ([]reflect.Value, error) {
- return jsonParamsToArgs(rpcFunc, params, 0)
+func jsonParamsToArgsRPC(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage) ([]reflect.Value, error) {
+ return jsonParamsToArgs(rpcFunc, cdc, params, 0)
}
// Same as above, but with the first param the websocket connection
-func jsonParamsToArgsWS(rpcFunc *RPCFunc, params json.RawMessage, wsCtx types.WSRPCContext) ([]reflect.Value, error) {
- values, err := jsonParamsToArgs(rpcFunc, params, 1)
+func jsonParamsToArgsWS(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage, wsCtx types.WSRPCContext) ([]reflect.Value, error) {
+ values, err := jsonParamsToArgs(rpcFunc, cdc, params, 1)
if err != nil {
return nil, err
}
@@ -235,7 +238,7 @@ func jsonParamsToArgsWS(rpcFunc *RPCFunc, params json.RawMessage, wsCtx types.WS
// rpc.http
// convert from a function name to the http handler
-func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) {
+func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func(http.ResponseWriter, *http.Request) {
// Exception for websocket endpoints
if rpcFunc.ws {
return func(w http.ResponseWriter, r *http.Request) {
@@ -245,7 +248,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit
// All other endpoints
return func(w http.ResponseWriter, r *http.Request) {
logger.Debug("HTTP HANDLER", "req", r)
- args, err := httpParamsToArgs(rpcFunc, r)
+ args, err := httpParamsToArgs(rpcFunc, cdc, r)
if err != nil {
WriteRPCResponseHTTP(w, types.RPCInvalidParamsError("", errors.Wrap(err, "Error converting http params to arguments")))
return
@@ -257,13 +260,13 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit
WriteRPCResponseHTTP(w, types.RPCInternalError("", err))
return
}
- WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse("", result))
+ WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, "", result))
}
}
// Covert an http query to a list of properly typed values.
// To be properly decoded the arg must be a concrete type from tendermint (if its an interface).
-func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) {
+func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]reflect.Value, error) {
values := make([]reflect.Value, len(rpcFunc.args))
for i, name := range rpcFunc.argNames {
@@ -278,7 +281,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error
continue
}
- v, err, ok := nonJsonToArg(argType, arg)
+ v, err, ok := nonJSONToArg(cdc, argType, arg)
if err != nil {
return nil, err
}
@@ -287,7 +290,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error
continue
}
- values[i], err = _jsonStringToArg(argType, arg)
+ values[i], err = _jsonStringToArg(cdc, argType, arg)
if err != nil {
return nil, err
}
@@ -296,9 +299,9 @@ func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error
return values, nil
}
-func _jsonStringToArg(ty reflect.Type, arg string) (reflect.Value, error) {
+func _jsonStringToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error) {
v := reflect.New(ty)
- err := json.Unmarshal([]byte(arg), v.Interface())
+ err := cdc.UnmarshalJSON([]byte(arg), v.Interface())
if err != nil {
return v, err
}
@@ -306,7 +309,7 @@ func _jsonStringToArg(ty reflect.Type, arg string) (reflect.Value, error) {
return v, nil
}
-func nonJsonToArg(ty reflect.Type, arg string) (reflect.Value, error, bool) {
+func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error, bool) {
isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`)
isHexString := strings.HasPrefix(strings.ToLower(arg), "0x")
expectingString := ty.Kind() == reflect.String
@@ -332,7 +335,7 @@ func nonJsonToArg(ty reflect.Type, arg string) (reflect.Value, error, bool) {
if isQuotedString && expectingByteSlice {
v := reflect.New(reflect.TypeOf(""))
- err := json.Unmarshal([]byte(arg), v.Interface())
+ err := cdc.UnmarshalJSON([]byte(arg), v.Interface())
if err != nil {
return reflect.ValueOf(nil), err, false
}
@@ -354,7 +357,7 @@ const (
defaultWSPingPeriod = (defaultWSReadWait * 9) / 10
)
-// a single websocket connection contains listener id, underlying ws
+// A single websocket connection contains listener id, underlying ws
// connection, and the event switch for subscribing to events.
//
// In case of an error, the connection is stopped.
@@ -366,6 +369,7 @@ type wsConnection struct {
writeChan chan types.RPCResponse
funcMap map[string]*RPCFunc
+ cdc *amino.Codec
// write channel capacity
writeChanCapacity int
@@ -389,11 +393,12 @@ type wsConnection struct {
// description of how to configure ping period and pong wait time. NOTE: if the
// write buffer is full, pongs may be dropped, which may cause clients to
// disconnect. see https://github.com/gorilla/websocket/issues/97
-func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, options ...func(*wsConnection)) *wsConnection {
+func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, cdc *amino.Codec, options ...func(*wsConnection)) *wsConnection {
wsc := &wsConnection{
remoteAddr: baseConn.RemoteAddr().String(),
baseConn: baseConn,
funcMap: funcMap,
+ cdc: cdc,
writeWait: defaultWSWriteWait,
writeChanCapacity: defaultWSWriteChanCapacity,
readWait: defaultWSReadWait,
@@ -503,6 +508,12 @@ func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool {
}
}
+// Codec returns an amino codec used to decode parameters and encode results.
+// It implements WSRPCConnection.
+func (wsc *wsConnection) Codec() *amino.Codec {
+ return wsc.cdc
+}
+
// Read from the socket and subscribe to or unsubscribe from events
func (wsc *wsConnection) readRoutine() {
defer func() {
@@ -569,11 +580,11 @@ func (wsc *wsConnection) readRoutine() {
if rpcFunc.ws {
wsCtx := types.WSRPCContext{Request: request, WSRPCConnection: wsc}
if len(request.Params) > 0 {
- args, err = jsonParamsToArgsWS(rpcFunc, request.Params, wsCtx)
+ args, err = jsonParamsToArgsWS(rpcFunc, wsc.cdc, request.Params, wsCtx)
}
} else {
if len(request.Params) > 0 {
- args, err = jsonParamsToArgsRPC(rpcFunc, request.Params)
+ args, err = jsonParamsToArgsRPC(rpcFunc, wsc.cdc, request.Params)
}
}
if err != nil {
@@ -590,7 +601,7 @@ func (wsc *wsConnection) readRoutine() {
wsc.WriteRPCResponse(types.RPCInternalError(request.ID, err))
continue
} else {
- wsc.WriteRPCResponse(types.NewRPCSuccessResponse(request.ID, result))
+ wsc.WriteRPCResponse(types.NewRPCSuccessResponse(wsc.cdc, request.ID, result))
continue
}
@@ -666,6 +677,7 @@ func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error
type WebsocketManager struct {
websocket.Upgrader
funcMap map[string]*RPCFunc
+ cdc *amino.Codec
logger log.Logger
wsConnOptions []func(*wsConnection)
}
@@ -673,9 +685,10 @@ type WebsocketManager struct {
// NewWebsocketManager returns a new WebsocketManager that routes according to
// the given funcMap and connects to the server with the given connection
// options.
-func NewWebsocketManager(funcMap map[string]*RPCFunc, wsConnOptions ...func(*wsConnection)) *WebsocketManager {
+func NewWebsocketManager(funcMap map[string]*RPCFunc, cdc *amino.Codec, wsConnOptions ...func(*wsConnection)) *WebsocketManager {
return &WebsocketManager{
funcMap: funcMap,
+ cdc: cdc,
Upgrader: websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
// TODO ???
@@ -702,7 +715,7 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ
}
// register connection
- con := NewWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...)
+ con := NewWSConnection(wsConn, wm.funcMap, wm.cdc, wm.wsConnOptions...)
con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr()))
wm.logger.Info("New websocket connection", "remote", con.remoteAddr)
err = con.Start() // Blocking
diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go
index 664bbd917..92a2d9909 100644
--- a/rpc/lib/server/handlers_test.go
+++ b/rpc/lib/server/handlers_test.go
@@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/tendermint/go-amino"
rs "github.com/tendermint/tendermint/rpc/lib/server"
types "github.com/tendermint/tendermint/rpc/lib/types"
"github.com/tendermint/tmlibs/log"
@@ -21,10 +22,11 @@ func testMux() *http.ServeMux {
funcMap := map[string]*rs.RPCFunc{
"c": rs.NewRPCFunc(func(s string, i int) (string, error) { return "foo", nil }, "s,i"),
}
+ cdc := amino.NewCodec()
mux := http.NewServeMux()
buf := new(bytes.Buffer)
logger := log.NewTMLogger(buf)
- rs.RegisterRPCFuncs(mux, funcMap, logger)
+ rs.RegisterRPCFuncs(mux, funcMap, cdc, logger)
return mux
}
diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go
index 0703d50ed..f4323ef5d 100644
--- a/rpc/lib/server/parse_test.go
+++ b/rpc/lib/server/parse_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -138,6 +139,7 @@ func TestParseRPC(t *testing.T) {
demo := func(height int, name string) {}
call := NewRPCFunc(demo, "height,name")
+ cdc := amino.NewCodec()
cases := []struct {
raw string
@@ -158,7 +160,7 @@ func TestParseRPC(t *testing.T) {
for idx, tc := range cases {
i := strconv.Itoa(idx)
data := []byte(tc.raw)
- vals, err := jsonParamsToArgs(call, data, 0)
+ vals, err := jsonParamsToArgs(call, cdc, data, 0)
if tc.fail {
assert.NotNil(err, i)
} else {
diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go
index 702ed9f73..604cbd3d8 100644
--- a/rpc/lib/test/main.go
+++ b/rpc/lib/test/main.go
@@ -5,6 +5,7 @@ import (
"net/http"
"os"
+ "github.com/tendermint/go-amino"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -24,8 +25,9 @@ type Result struct {
func main() {
mux := http.NewServeMux()
+ cdc := amino.NewCodec()
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
- rpcserver.RegisterRPCFuncs(mux, routes, logger)
+ rpcserver.RegisterRPCFuncs(mux, routes, cdc, logger)
_, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger)
if err != nil {
cmn.Exit(err.Error())
diff --git a/rpc/lib/types/types.go b/rpc/lib/types/types.go
index e4b02c58f..5fa723bb4 100644
--- a/rpc/lib/types/types.go
+++ b/rpc/lib/types/types.go
@@ -7,6 +7,7 @@ import (
"strings"
"github.com/pkg/errors"
+ "github.com/tendermint/go-amino"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
)
@@ -33,8 +34,16 @@ func (req RPCRequest) String() string {
return fmt.Sprintf("[%s %s]", req.ID, req.Method)
}
-func MapToRequest(id string, method string, params map[string]interface{}) (RPCRequest, error) {
- payload, err := json.Marshal(params)
+func MapToRequest(cdc *amino.Codec, id string, method string, params map[string]interface{}) (RPCRequest, error) {
+ var params_ = make(map[string]json.RawMessage, len(params))
+ for name, value := range params {
+ valueJSON, err := cdc.MarshalJSON(value)
+ if err != nil {
+ return RPCRequest{}, err
+ }
+ params_[name] = valueJSON
+ }
+ payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet.
if err != nil {
return RPCRequest{}, err
}
@@ -42,8 +51,16 @@ func MapToRequest(id string, method string, params map[string]interface{}) (RPCR
return request, nil
}
-func ArrayToRequest(id string, method string, params []interface{}) (RPCRequest, error) {
- payload, err := json.Marshal(params)
+func ArrayToRequest(cdc *amino.Codec, id string, method string, params []interface{}) (RPCRequest, error) {
+ var params_ = make([]json.RawMessage, len(params))
+ for i, value := range params {
+ valueJSON, err := cdc.MarshalJSON(value)
+ if err != nil {
+ return RPCRequest{}, err
+ }
+ params_[i] = valueJSON
+ }
+ payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle arrays yet.
if err != nil {
return RPCRequest{}, err
}
@@ -75,12 +92,12 @@ type RPCResponse struct {
Error *RPCError `json:"error,omitempty"`
}
-func NewRPCSuccessResponse(id string, res interface{}) RPCResponse {
+func NewRPCSuccessResponse(cdc *amino.Codec, id string, res interface{}) RPCResponse {
var rawMsg json.RawMessage
if res != nil {
var js []byte
- js, err := json.Marshal(res)
+ js, err := cdc.MarshalJSON(res)
if err != nil {
return RPCInternalError(id, errors.Wrap(err, "Error marshalling response"))
}
@@ -137,6 +154,7 @@ type WSRPCConnection interface {
WriteRPCResponse(resp RPCResponse)
TryWriteRPCResponse(resp RPCResponse) bool
GetEventSubscriber() EventSubscriber
+ Codec() *amino.Codec
}
// EventSubscriber mirros tendermint/tendermint/types.EventBusSubscriber
diff --git a/rpc/lib/types/types_test.go b/rpc/lib/types/types_test.go
index 60f7b2213..9dd1b7a18 100644
--- a/rpc/lib/types/types_test.go
+++ b/rpc/lib/types/types_test.go
@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
+ "github.com/tendermint/go-amino"
)
type SampleResult struct {
@@ -16,8 +17,9 @@ type SampleResult struct {
func TestResponses(t *testing.T) {
assert := assert.New(t)
+ cdc := amino.NewCodec()
- a := NewRPCSuccessResponse("1", &SampleResult{"hello"})
+ a := NewRPCSuccessResponse(cdc, "1", &SampleResult{"hello"})
b, _ := json.Marshal(a)
s := `{"jsonrpc":"2.0","id":"1","result":{"Value":"hello"}}`
assert.Equal(string(s), string(b))
diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go
index b67d76349..11cafa0cd 100644
--- a/rpc/test/helpers.go
+++ b/rpc/test/helpers.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"strings"
+ "time"
"github.com/tendermint/tmlibs/log"
@@ -18,7 +19,7 @@ import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
core_grpc "github.com/tendermint/tendermint/rpc/grpc"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
- "github.com/tendermint/tendermint/types"
+ pvm "github.com/tendermint/tendermint/types/priv_validator"
)
var globalConfig *cfg.Config
@@ -26,11 +27,15 @@ var globalConfig *cfg.Config
func waitForRPC() {
laddr := GetConfig().RPC.ListenAddress
client := rpcclient.NewJSONRPCClient(laddr)
+ ctypes.RegisterAmino(client.Codec())
result := new(ctypes.ResultStatus)
for {
_, err := client.Call("status", map[string]interface{}{}, result)
if err == nil {
return
+ } else {
+ fmt.Println("error", err)
+ time.Sleep(time.Millisecond)
}
}
}
@@ -112,10 +117,10 @@ func NewTendermint(app abci.Application) *nm.Node {
config := GetConfig()
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
logger = log.NewFilter(logger, log.AllowError())
- privValidatorFile := config.PrivValidatorFile()
- privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
+ pvFile := config.PrivValidatorFile()
+ pv := pvm.LoadOrGenFilePV(pvFile)
papp := proxy.NewLocalClientCreator(app)
- node, err := nm.NewNode(config, privValidator, papp,
+ node, err := nm.NewNode(config, pv, papp,
nm.DefaultGenesisDocProviderFunc(config),
nm.DefaultDBProvider, logger)
if err != nil {
diff --git a/scripts/wire2amino.go b/scripts/wire2amino.go
new file mode 100644
index 000000000..949082877
--- /dev/null
+++ b/scripts/wire2amino.go
@@ -0,0 +1,181 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/tendermint/go-amino"
+ crypto "github.com/tendermint/go-crypto"
+ cmn "github.com/tendermint/tmlibs/common"
+
+ "github.com/tendermint/tendermint/p2p"
+ "github.com/tendermint/tendermint/types"
+ priv_val "github.com/tendermint/tendermint/types/priv_validator"
+)
+
+type GenesisValidator struct {
+ PubKey Data `json:"pub_key"`
+ Power int64 `json:"power"`
+ Name string `json:"name"`
+}
+
+type Genesis struct {
+ GenesisTime time.Time `json:"genesis_time"`
+ ChainID string `json:"chain_id"`
+ ConsensusParams *types.ConsensusParams `json:"consensus_params,omitempty"`
+ Validators []GenesisValidator `json:"validators"`
+ AppHash cmn.HexBytes `json:"app_hash"`
+ AppStateJSON json.RawMessage `json:"app_state,omitempty"`
+ AppOptions json.RawMessage `json:"app_options,omitempty"` // DEPRECATED
+
+}
+
+type NodeKey struct {
+ PrivKey Data `json:"priv_key"`
+}
+
+type PrivVal struct {
+ Address cmn.HexBytes `json:"address"`
+ LastHeight int64 `json:"last_height"`
+ LastRound int `json:"last_round"`
+ LastStep int8 `json:"last_step"`
+ PubKey Data `json:"pub_key"`
+ PrivKey Data `json:"priv_key"`
+}
+
+type Data struct {
+ Type string `json:"type"`
+ Data cmn.HexBytes `json:"data"`
+}
+
+func convertNodeKey(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
+ var nodeKey NodeKey
+ err := json.Unmarshal(jsonBytes, &nodeKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var privKey crypto.PrivKeyEd25519
+ copy(privKey[:], nodeKey.PrivKey.Data)
+
+ nodeKeyNew := p2p.NodeKey{privKey}
+
+ bz, err := cdc.MarshalJSON(nodeKeyNew)
+ if err != nil {
+ return nil, err
+ }
+ return bz, nil
+}
+
+func convertPrivVal(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
+ var privVal PrivVal
+ err := json.Unmarshal(jsonBytes, &privVal)
+ if err != nil {
+ return nil, err
+ }
+
+ var privKey crypto.PrivKeyEd25519
+ copy(privKey[:], privVal.PrivKey.Data)
+
+ var pubKey crypto.PubKeyEd25519
+ copy(pubKey[:], privVal.PubKey.Data)
+
+ privValNew := priv_val.FilePV{
+ Address: pubKey.Address(),
+ PubKey: pubKey,
+ LastHeight: privVal.LastHeight,
+ LastRound: privVal.LastRound,
+ LastStep: privVal.LastStep,
+ PrivKey: privKey,
+ }
+
+ bz, err := cdc.MarshalJSON(privValNew)
+ if err != nil {
+ return nil, err
+ }
+ return bz, nil
+}
+
+func convertGenesis(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
+ var genesis Genesis
+ err := json.Unmarshal(jsonBytes, &genesis)
+ if err != nil {
+ return nil, err
+ }
+
+ genesisNew := types.GenesisDoc{
+ GenesisTime: genesis.GenesisTime,
+ ChainID: genesis.ChainID,
+ ConsensusParams: genesis.ConsensusParams,
+ // Validators
+ AppHash: genesis.AppHash,
+ AppStateJSON: genesis.AppStateJSON,
+ }
+
+ if genesis.AppOptions != nil {
+ genesisNew.AppStateJSON = genesis.AppOptions
+ }
+
+ for _, v := range genesis.Validators {
+ var pubKey crypto.PubKeyEd25519
+ copy(pubKey[:], v.PubKey.Data)
+ genesisNew.Validators = append(
+ genesisNew.Validators,
+ types.GenesisValidator{
+ PubKey: pubKey,
+ Power: v.Power,
+ Name: v.Name,
+ },
+ )
+
+ }
+
+ bz, err := cdc.MarshalJSON(genesisNew)
+ if err != nil {
+ return nil, err
+ }
+ return bz, nil
+}
+
+func main() {
+ cdc := amino.NewCodec()
+ crypto.RegisterAmino(cdc)
+
+ args := os.Args[1:]
+ if len(args) != 1 {
+ fmt.Println("Please specify a file to convert")
+ os.Exit(1)
+ }
+
+ filePath := args[0]
+ fileName := filepath.Base(filePath)
+
+ fileBytes, err := ioutil.ReadFile(filePath)
+ if err != nil {
+ panic(err)
+ }
+
+ var bz []byte
+
+ switch fileName {
+ case "node_key.json":
+ bz, err = convertNodeKey(cdc, fileBytes)
+ case "priv_validator.json":
+ bz, err = convertPrivVal(cdc, fileBytes)
+ case "genesis.json":
+ bz, err = convertGenesis(cdc, fileBytes)
+ default:
+ fmt.Println("Expected file name to be in (node_key.json, priv_validator.json, genesis.json)")
+ os.Exit(1)
+ }
+
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(string(bz))
+
+}
diff --git a/state/execution.go b/state/execution.go
index 64db9f31a..0ce5e44f1 100644
--- a/state/execution.go
+++ b/state/execution.go
@@ -245,7 +245,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
// ./lite/doc.go for details on how a light client tracks validators.
func updateValidators(currentSet *types.ValidatorSet, updates []abci.Validator) error {
for _, v := range updates {
- pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-wire encoded pubkey
+ pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-amino encoded pubkey
if err != nil {
return err
}
diff --git a/state/execution_test.go b/state/execution_test.go
index e508c0fa8..09c40b5a0 100644
--- a/state/execution_test.go
+++ b/state/execution_test.go
@@ -66,8 +66,8 @@ func TestBeginBlockAbsentValidators(t *testing.T) {
lastCommitPrecommits []*types.Vote
expectedAbsentValidators []int32
}{
- {"none absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now}, {ValidatorIndex: 1, Timestamp: now}}, []int32{}},
- {"one absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now}, nil}, []int32{1}},
+ {"none absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, {ValidatorIndex: 1, Timestamp: now}}, []int32{}},
+ {"one absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, nil}, []int32{1}},
{"multiple absent", []*types.Vote{nil, nil}, []int32{0, 1}},
}
diff --git a/state/state.go b/state/state.go
index fb5d78c47..aa6e04b66 100644
--- a/state/state.go
+++ b/state/state.go
@@ -6,8 +6,6 @@ import (
"io/ioutil"
"time"
- wire "github.com/tendermint/go-wire"
-
"github.com/tendermint/tendermint/types"
)
@@ -81,16 +79,13 @@ func (s State) Copy() State {
// Equals returns true if the States are identical.
func (s State) Equals(s2 State) bool {
- return bytes.Equal(s.Bytes(), s2.Bytes())
+ sbz, s2bz := s.Bytes(), s2.Bytes()
+ return bytes.Equal(sbz, s2bz)
}
-// Bytes serializes the State using go-wire.
+// Bytes serializes the State using go-amino.
func (s State) Bytes() []byte {
- bz, err := wire.MarshalBinary(s)
- if err != nil {
- panic(err)
- }
- return bz
+ return cdc.MustMarshalBinaryBare(s)
}
// IsEmpty returns true if the State is equal to the empty State.
diff --git a/state/state_test.go b/state/state_test.go
index 31f2065cf..ba995cc00 100644
--- a/state/state_test.go
+++ b/state/state_test.go
@@ -7,11 +7,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
abci "github.com/tendermint/abci/types"
-
- crypto "github.com/tendermint/go-crypto"
-
+ "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
@@ -42,7 +39,7 @@ func TestStateCopy(t *testing.T) {
stateCopy := state.Copy()
assert.True(state.Equals(stateCopy),
- cmn.Fmt(`expected state and its copy to be identical. got %v\n expected %v\n`,
+ cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n",
stateCopy, state))
stateCopy.LastBlockHeight++
@@ -62,7 +59,7 @@ func TestStateSaveLoad(t *testing.T) {
loadedState := LoadState(stateDB)
assert.True(state.Equals(loadedState),
- cmn.Fmt(`expected state and its copy to be identical. got %v\n expected %v\n`,
+ cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n",
loadedState, state))
}
@@ -78,8 +75,8 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
// build mock responses
block := makeBlock(state, 2)
abciResponses := NewABCIResponses(block)
- abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}
- abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}
+ abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil}
+ abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil}
abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{
{
PubKey: crypto.GenPrivKeyEd25519().PubKey().Bytes(),
@@ -88,11 +85,11 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
}}
saveABCIResponses(stateDB, block.Height, abciResponses)
- loadedAbciResponses, err := LoadABCIResponses(stateDB, block.Height)
+ loadedABCIResponses, err := LoadABCIResponses(stateDB, block.Height)
assert.Nil(err)
- assert.Equal(abciResponses, loadedAbciResponses,
- cmn.Fmt(`ABCIResponses don't match: Got %v, Expected %v`, loadedAbciResponses,
- abciResponses))
+ assert.Equal(abciResponses, loadedABCIResponses,
+ cmn.Fmt("ABCIResponses don't match:\ngot: %v\nexpected: %v\n",
+ loadedABCIResponses, abciResponses))
}
// TestResultsSaveLoad tests saving and loading abci results.
@@ -109,8 +106,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
expected types.ABCIResults
}{
0: {
- []*abci.ResponseDeliverTx{},
- types.ABCIResults{},
+ nil,
+ nil,
},
1: {
[]*abci.ResponseDeliverTx{
@@ -129,12 +126,12 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
}},
},
types.ABCIResults{
- {383, []byte{}},
+ {383, nil},
{0, []byte("Gotcha!")},
}},
3: {
nil,
- types.ABCIResults{},
+ nil,
},
}
@@ -430,7 +427,7 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64,
block := makeBlock(state, height)
abciResponses := &ABCIResponses{
- EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{}},
+ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
// if the pubkey is new, remove the old and add the new
@@ -452,7 +449,7 @@ func makeHeaderPartsResponsesValPowerChange(state State, height int64,
block := makeBlock(state, height)
abciResponses := &ABCIResponses{
- EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{}},
+ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
// if the pubkey is new, remove the old and add the new
diff --git a/state/store.go b/state/store.go
index df07ec540..ee0619d34 100644
--- a/state/store.go
+++ b/state/store.go
@@ -4,7 +4,6 @@ import (
"fmt"
abci "github.com/tendermint/abci/types"
- wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
@@ -69,7 +68,7 @@ func loadState(db dbm.DB, key []byte) (state State) {
return state
}
- err := wire.UnmarshalBinary(buf, &state)
+ err := cdc.UnmarshalBinaryBare(buf, &state)
if err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed:
@@ -104,22 +103,23 @@ type ABCIResponses struct {
// NewABCIResponses returns a new ABCIResponses
func NewABCIResponses(block *types.Block) *ABCIResponses {
+ resDeliverTxs := make([]*abci.ResponseDeliverTx, block.NumTxs)
+ if block.NumTxs == 0 {
+ // This makes Amino encoding/decoding consistent.
+ resDeliverTxs = nil
+ }
return &ABCIResponses{
- DeliverTx: make([]*abci.ResponseDeliverTx, block.NumTxs),
+ DeliverTx: resDeliverTxs,
}
}
-// Bytes serializes the ABCIResponse using go-wire
-func (a *ABCIResponses) Bytes() []byte {
- bz, err := wire.MarshalBinary(*a)
- if err != nil {
- panic(err)
- }
- return bz
+// Bytes serializes the ABCIResponse using go-amino.
+func (arz *ABCIResponses) Bytes() []byte {
+ return cdc.MustMarshalBinaryBare(arz)
}
-func (a *ABCIResponses) ResultsHash() []byte {
- results := types.NewResults(a.DeliverTx)
+func (arz *ABCIResponses) ResultsHash() []byte {
+ results := types.NewResults(arz.DeliverTx)
return results.Hash()
}
@@ -133,7 +133,7 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) {
}
abciResponses := new(ABCIResponses)
- err := wire.UnmarshalBinary(buf, abciResponses)
+ err := cdc.UnmarshalBinaryBare(buf, abciResponses)
if err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has
@@ -159,13 +159,9 @@ type ValidatorsInfo struct {
LastHeightChanged int64
}
-// Bytes serializes the ValidatorsInfo using go-wire
+// Bytes serializes the ValidatorsInfo using go-amino.
func (valInfo *ValidatorsInfo) Bytes() []byte {
- bz, err := wire.MarshalBinary(*valInfo)
- if err != nil {
- panic(err)
- }
- return bz
+ return cdc.MustMarshalBinaryBare(valInfo)
}
// LoadValidators loads the ValidatorSet for a given height.
@@ -194,7 +190,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo {
}
v := new(ValidatorsInfo)
- err := wire.UnmarshalBinary(buf, v)
+ err := cdc.UnmarshalBinaryBare(buf, v)
if err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed:
@@ -227,13 +223,9 @@ type ConsensusParamsInfo struct {
LastHeightChanged int64
}
-// Bytes serializes the ConsensusParamsInfo using go-wire
+// Bytes serializes the ConsensusParamsInfo using go-amino.
func (params ConsensusParamsInfo) Bytes() []byte {
- bz, err := wire.MarshalBinary(params)
- if err != nil {
- panic(err)
- }
- return bz
+ return cdc.MustMarshalBinaryBare(params)
}
// LoadConsensusParams loads the ConsensusParams for a given height.
@@ -263,7 +255,7 @@ func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo {
}
paramsInfo := new(ConsensusParamsInfo)
- err := wire.UnmarshalBinary(buf, paramsInfo)
+ err := cdc.UnmarshalBinaryBare(buf, paramsInfo)
if err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been corrupted or its spec has changed:
diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go
index 3e5fab127..f5420f631 100644
--- a/state/txindex/indexer_service.go
+++ b/state/txindex/indexer_service.go
@@ -34,7 +34,7 @@ func (is *IndexerService) OnStart() error {
go func() {
for event := range ch {
// TODO: may be not perfomant to write one event at a time
- txResult := event.(types.TMEventData).Unwrap().(types.EventDataTx).TxResult
+ txResult := event.(types.EventDataTx).TxResult
is.idr.Index(&txResult)
}
}()
diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go
index 74bf4843b..87861f050 100644
--- a/state/txindex/kv/kv.go
+++ b/state/txindex/kv/kv.go
@@ -10,8 +10,6 @@ import (
"time"
"github.com/pkg/errors"
-
- wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/pubsub/query"
@@ -69,7 +67,7 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) {
}
txResult := new(types.TxResult)
- err := wire.UnmarshalBinary(rawBytes, &txResult)
+ err := cdc.UnmarshalBinaryBare(rawBytes, &txResult)
if err != nil {
return nil, fmt.Errorf("Error reading TxResult: %v", err)
}
@@ -92,7 +90,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error {
}
// index tx by hash
- rawBytes, err := wire.MarshalBinary(result)
+ rawBytes, err := cdc.MarshalBinaryBare(result)
if err != nil {
return err
}
@@ -117,7 +115,7 @@ func (txi *TxIndex) Index(result *types.TxResult) error {
}
// index tx by hash
- rawBytes, err := wire.MarshalBinary(result)
+ rawBytes, err := cdc.MarshalBinaryBare(result)
if err != nil {
return err
}
diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go
index 97024b30b..74a2dd7cb 100644
--- a/state/txindex/kv/kv_test.go
+++ b/state/txindex/kv/kv_test.go
@@ -9,18 +9,19 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
- "github.com/tendermint/tendermint/state/txindex"
- "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
db "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/pubsub/query"
+
+ "github.com/tendermint/tendermint/state/txindex"
+ "github.com/tendermint/tendermint/types"
)
func TestTxIndex(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
tx := types.Tx("HELLO WORLD")
- txResult := &types.TxResult{Height: 1, Index: 0, Tx: tx, Result: abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}}
+ txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}}
hash := tx.Hash()
batch := txindex.NewBatch(1)
@@ -35,7 +36,7 @@ func TestTxIndex(t *testing.T) {
assert.Equal(t, txResult, loadedTxResult)
tx2 := types.Tx("BYE BYE WORLD")
- txResult2 := &types.TxResult{Height: 1, Index: 0, Tx: tx2, Result: abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}}
+ txResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}}
hash2 := tx2.Hash()
err = indexer.Index(txResult2)
@@ -175,12 +176,34 @@ func TestIndexAllTags(t *testing.T) {
func txResultWithTags(tags []cmn.KVPair) *types.TxResult {
tx := types.Tx("HELLO WORLD")
- return &types.TxResult{Height: 1, Index: 0, Tx: tx, Result: abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: tags, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}}
+ return &types.TxResult{
+ Height: 1,
+ Index: 0,
+ Tx: tx,
+ Result: abci.ResponseDeliverTx{
+ Data: []byte{0},
+ Code: abci.CodeTypeOK,
+ Log: "",
+ Tags: tags,
+ Fee: cmn.KI64Pair{Key: nil, Value: 0},
+ },
+ }
}
func benchmarkTxIndex(txsCount int, b *testing.B) {
tx := types.Tx("HELLO WORLD")
- txResult := &types.TxResult{Height: 1, Index: 0, Tx: tx, Result: abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}}
+ txResult := &types.TxResult{
+ Height: 1,
+ Index: 0,
+ Tx: tx,
+ Result: abci.ResponseDeliverTx{
+ Data: []byte{0},
+ Code: abci.CodeTypeOK,
+ Log: "",
+ Tags: []cmn.KVPair{},
+ Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0},
+ },
+ }
dir, err := ioutil.TempDir("", "tx_index_db")
if err != nil {
diff --git a/state/txindex/kv/wire.go b/state/txindex/kv/wire.go
new file mode 100644
index 000000000..ccca75254
--- /dev/null
+++ b/state/txindex/kv/wire.go
@@ -0,0 +1,10 @@
+package kv
+
+import (
+ "github.com/tendermint/go-amino"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+}
diff --git a/state/wire.go b/state/wire.go
new file mode 100644
index 000000000..3e8b544d9
--- /dev/null
+++ b/state/wire.go
@@ -0,0 +1,12 @@
+package state
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+}
diff --git a/test/p2p/atomic_broadcast/test.sh b/test/p2p/atomic_broadcast/test.sh
index 3224c0427..267330729 100644
--- a/test/p2p/atomic_broadcast/test.sh
+++ b/test/p2p/atomic_broadcast/test.sh
@@ -17,7 +17,7 @@ for i in $(seq 1 "$N"); do
addr=$(test/p2p/ip.sh "$i"):46657
# current state
- HASH1=$(curl -s "$addr/status" | jq .result.latest_app_hash)
+ HASH1=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash)
# - send a tx
TX=aadeadbeefbeefbeef0$i
@@ -26,11 +26,11 @@ for i in $(seq 1 "$N"); do
echo ""
# we need to wait another block to get the new app_hash
- h1=$(curl -s "$addr/status" | jq .result.latest_block_height)
+ h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height)
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
- h2=$(curl -s "$addr/status" | jq .result.latest_block_height)
+ h2=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height)
done
# wait for all other peers to get to this height
@@ -39,16 +39,16 @@ for i in $(seq 1 "$N"); do
if [[ "$i" != "$j" ]]; then
addrJ=$(test/p2p/ip.sh "$j"):46657
- h=$(curl -s "$addrJ/status" | jq .result.latest_block_height)
+ h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height)
while [ "$h" -lt "$minHeight" ]; do
sleep 1
- h=$(curl -s "$addrJ/status" | jq .result.latest_block_height)
+ h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height)
done
fi
done
# check that hash was updated
- HASH2=$(curl -s "$addr/status" | jq .result.latest_app_hash)
+ HASH2=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash)
if [[ "$HASH1" == "$HASH2" ]]; then
echo "Expected state hash to update from $HASH1. Got $HASH2"
exit 1
@@ -58,7 +58,7 @@ for i in $(seq 1 "$N"); do
for j in $(seq 1 "$N"); do
if [[ "$i" != "$j" ]]; then
addrJ=$(test/p2p/ip.sh "$j"):46657
- HASH3=$(curl -s "$addrJ/status" | jq .result.latest_app_hash)
+ HASH3=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_app_hash)
if [[ "$HASH2" != "$HASH3" ]]; then
echo "App hash for node $j doesn't match. Got $HASH3, expected $HASH2"
diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh
index 63df62be2..4876c8c54 100755
--- a/test/p2p/basic/test.sh
+++ b/test/p2p/basic/test.sh
@@ -54,12 +54,12 @@ for i in `seq 1 $N`; do
done
# - assert block height is greater than 1
- BLOCK_HEIGHT=`curl -s $addr/status | jq .result.latest_block_height`
+ BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height`
COUNT=0
while [ "$BLOCK_HEIGHT" -le 1 ]; do
echo "Waiting for node $i to commit a block ..."
sleep 1
- BLOCK_HEIGHT=`curl -s $addr/status | jq .result.latest_block_height`
+ BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height`
COUNT=$((COUNT+1))
if [ "$COUNT" -gt "$MAX_SLEEP" ]; then
echo "Waited too long for node $i to commit a block"
diff --git a/test/p2p/data/app/init.sh b/test/p2p/data/app/init.sh
deleted file mode 100755
index 24f017630..000000000
--- a/test/p2p/data/app/init.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/bash
-# This is a sample bash script for a ABCI application
-
-cd app/
-git clone https://github.com/tendermint/nomnomcoin.git
-cd nomnomcoin
-npm install .
-
-node app.js --eyes="unix:///data/tendermint/data/data.sock"
\ No newline at end of file
diff --git a/test/p2p/data/chain_config.json b/test/p2p/data/chain_config.json
deleted file mode 100644
index 4221ba8da..000000000
--- a/test/p2p/data/chain_config.json
+++ /dev/null
@@ -1,53 +0,0 @@
-{
- "id": "",
- "val_set_id": "anon",
- "validators": [
- {
- "validator": {
- "id": "mach1",
- "pub_key": {
- "type": "ed25519",
- "data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
- }
- },
- "p2p_addr": "",
- "rpc_addr": ""
- },
- {
- "validator": {
- "id": "mach2",
- "pub_key": {
- "type": "ed25519",
- "data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
- }
- },
- "p2p_addr": "",
- "rpc_addr": "",
- "index": 1
- },
- {
- "validator": {
- "id": "mach3",
- "pub_key": {
- "type": "ed25519",
- "data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
- }
- },
- "p2p_addr": "",
- "rpc_addr": "",
- "index": 2
- },
- {
- "validator": {
- "id": "mach4",
- "pub_key": {
- "type": "ed25519",
- "data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
- }
- },
- "p2p_addr": "",
- "rpc_addr": "",
- "index": 3
- }
- ]
-}
diff --git a/test/p2p/data/mach1/core/config/genesis.json b/test/p2p/data/mach1/core/config/genesis.json
index 8d5c6c7b9..19577db00 100644
--- a/test/p2p/data/mach1/core/config/genesis.json
+++ b/test/p2p/data/mach1/core/config/genesis.json
@@ -1,39 +1,39 @@
{
- "app_hash": "",
- "chain_id": "chain-9ujDWI",
- "genesis_time": "2016-06-24T20:01:19.322Z",
- "validators": [
- {
- "power": 1,
- "name": "mach1",
- "pub_key": {
- "type": "ed25519",
- "data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
- }
- },
- {
- "power": 1,
- "name": "mach2",
- "pub_key": {
- "type": "ed25519",
- "data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
- }
- },
- {
- "power": 1,
- "name": "mach3",
- "pub_key": {
- "type": "ed25519",
- "data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
- }
- },
- {
- "power": 1,
- "name": "mach4",
- "pub_key": {
- "type": "ed25519",
- "data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
- }
- }
- ]
+ "genesis_time": "2016-06-24T20:01:19.322Z",
+ "chain_id": "chain-9ujDWI",
+ "validators": [
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
+ },
+ "power": 1,
+ "name": "mach1"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
+ },
+ "power": 1,
+ "name": "mach2"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
+ },
+ "power": 1,
+ "name": "mach3"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
+ },
+ "power": 1,
+ "name": "mach4"
+ }
+ ],
+ "app_hash": ""
}
diff --git a/test/p2p/data/mach1/core/config/node_key.json b/test/p2p/data/mach1/core/config/node_key.json
index 7540cb5f4..c6d65008f 100644
--- a/test/p2p/data/mach1/core/config/node_key.json
+++ b/test/p2p/data/mach1/core/config/node_key.json
@@ -1 +1,6 @@
-{"priv_key":{"type":"ed25519","data":"06962D169F314ABB9D05AE5A04B46E48F0FBD8F1830149B47493910CBDCA7796096E5B94CD179F545AE3C281D9BF5C9E0E3D8FF719048B62F7849094CFFA8591"}}
\ No newline at end of file
+{
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "BpYtFp8xSrudBa5aBLRuSPD72PGDAUm0dJORDL3Kd5YJbluUzRefVFrjwoHZv1yeDj2P9xkEi2L3hJCUz/qFkQ=="
+ }
+}
diff --git a/test/p2p/data/mach1/core/config/priv_validator.json b/test/p2p/data/mach1/core/config/priv_validator.json
index 6538281b4..08c7c503b 100644
--- a/test/p2p/data/mach1/core/config/priv_validator.json
+++ b/test/p2p/data/mach1/core/config/priv_validator.json
@@ -1,14 +1,14 @@
{
- "address": "0E6925C3EE4C599DFF1536A5071AF4A26DF33635",
- "last_height": 0,
- "last_round": 0,
- "last_step": 0,
- "priv_key": {
- "type": "ed25519",
- "data": "547AA07C7A8CE16C5CB2A40C6C26D15B0A32960410A9F1EA6E50B636F1AB389ABE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
+ "address": "7E9D1FB08EDBAFCF116638D4C8FAFAEE2ABE1AAA",
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
},
- "pub_key": {
- "type": "ed25519",
- "data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
+ "last_height": 0,
+ "last_round": 0,
+ "last_step": 0,
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "VHqgfHqM4WxcsqQMbCbRWwoylgQQqfHqblC2NvGrOJq+iTPf8WAMAm40cY8XhaTN6rkMNWmLOU44tpR66R3hFg=="
}
}
diff --git a/test/p2p/data/mach2/core/config/genesis.json b/test/p2p/data/mach2/core/config/genesis.json
index 8d5c6c7b9..19577db00 100644
--- a/test/p2p/data/mach2/core/config/genesis.json
+++ b/test/p2p/data/mach2/core/config/genesis.json
@@ -1,39 +1,39 @@
{
- "app_hash": "",
- "chain_id": "chain-9ujDWI",
- "genesis_time": "2016-06-24T20:01:19.322Z",
- "validators": [
- {
- "power": 1,
- "name": "mach1",
- "pub_key": {
- "type": "ed25519",
- "data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
- }
- },
- {
- "power": 1,
- "name": "mach2",
- "pub_key": {
- "type": "ed25519",
- "data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
- }
- },
- {
- "power": 1,
- "name": "mach3",
- "pub_key": {
- "type": "ed25519",
- "data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
- }
- },
- {
- "power": 1,
- "name": "mach4",
- "pub_key": {
- "type": "ed25519",
- "data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
- }
- }
- ]
+ "genesis_time": "2016-06-24T20:01:19.322Z",
+ "chain_id": "chain-9ujDWI",
+ "validators": [
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
+ },
+ "power": 1,
+ "name": "mach1"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
+ },
+ "power": 1,
+ "name": "mach2"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
+ },
+ "power": 1,
+ "name": "mach3"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
+ },
+ "power": 1,
+ "name": "mach4"
+ }
+ ],
+ "app_hash": ""
}
diff --git a/test/p2p/data/mach2/core/config/node_key.json b/test/p2p/data/mach2/core/config/node_key.json
index efad2f0b3..146a1328d 100644
--- a/test/p2p/data/mach2/core/config/node_key.json
+++ b/test/p2p/data/mach2/core/config/node_key.json
@@ -1 +1,6 @@
-{"priv_key":{"type":"ed25519","data":"B8CE8B0D5138C10208526ABDADCE91C735FCCC4186E06E0972EC35E64973428A45EBC61F24CE1B91B3D26AFBAB11C2789EF04CBAC28183619C01116B66A9C528"}}
\ No newline at end of file
+{
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "uM6LDVE4wQIIUmq9rc6RxzX8zEGG4G4Jcuw15klzQopF68YfJM4bkbPSavurEcJ4nvBMusKBg2GcARFrZqnFKA=="
+ }
+}
diff --git a/test/p2p/data/mach2/core/config/priv_validator.json b/test/p2p/data/mach2/core/config/priv_validator.json
index 3602454f5..8e813dff4 100644
--- a/test/p2p/data/mach2/core/config/priv_validator.json
+++ b/test/p2p/data/mach2/core/config/priv_validator.json
@@ -1,14 +1,14 @@
{
- "address": "99DBBD2AFC28FB5BAC5574AFAF0D9C806CED3B55",
- "last_height": 0,
- "last_round": 0,
- "last_step": 0,
- "priv_key": {
- "type": "ed25519",
- "data": "D047889E60502FC3129D0AB7F334B1838ED9ED1ECD99CBB96B71AD5ABF5A81436DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
+ "address": "8893D14FE09F1157E39CD34B98036048D51B4985",
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
},
- "pub_key": {
- "type": "ed25519",
- "data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
+ "last_height": 0,
+ "last_round": 0,
+ "last_step": 0,
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "0EeInmBQL8MSnQq38zSxg47Z7R7Nmcu5a3GtWr9agUNtxTRGUyMSZYfSoqk7WdaJtxcHOx3paKJabvE9WVMYrQ=="
}
}
diff --git a/test/p2p/data/mach3/core/config/genesis.json b/test/p2p/data/mach3/core/config/genesis.json
index 8d5c6c7b9..19577db00 100644
--- a/test/p2p/data/mach3/core/config/genesis.json
+++ b/test/p2p/data/mach3/core/config/genesis.json
@@ -1,39 +1,39 @@
{
- "app_hash": "",
- "chain_id": "chain-9ujDWI",
- "genesis_time": "2016-06-24T20:01:19.322Z",
- "validators": [
- {
- "power": 1,
- "name": "mach1",
- "pub_key": {
- "type": "ed25519",
- "data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
- }
- },
- {
- "power": 1,
- "name": "mach2",
- "pub_key": {
- "type": "ed25519",
- "data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
- }
- },
- {
- "power": 1,
- "name": "mach3",
- "pub_key": {
- "type": "ed25519",
- "data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
- }
- },
- {
- "power": 1,
- "name": "mach4",
- "pub_key": {
- "type": "ed25519",
- "data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
- }
- }
- ]
+ "genesis_time": "2016-06-24T20:01:19.322Z",
+ "chain_id": "chain-9ujDWI",
+ "validators": [
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
+ },
+ "power": 1,
+ "name": "mach1"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
+ },
+ "power": 1,
+ "name": "mach2"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
+ },
+ "power": 1,
+ "name": "mach3"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
+ },
+ "power": 1,
+ "name": "mach4"
+ }
+ ],
+ "app_hash": ""
}
diff --git a/test/p2p/data/mach3/core/config/node_key.json b/test/p2p/data/mach3/core/config/node_key.json
index 58880f6f3..82689b8e0 100644
--- a/test/p2p/data/mach3/core/config/node_key.json
+++ b/test/p2p/data/mach3/core/config/node_key.json
@@ -1 +1,6 @@
-{"priv_key":{"type":"ed25519","data":"913DE8AC6D18922A53F6B0196EF023B4693FECFBB565E084F0B4941768F3DAE892B35ADD954562FE071C465BC244B2AFAED4A270EC849269341473CE192DE682"}}
\ No newline at end of file
+{
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "kT3orG0YkipT9rAZbvAjtGk/7Pu1ZeCE8LSUF2jz2uiSs1rdlUVi/gccRlvCRLKvrtSicOyEkmk0FHPOGS3mgg=="
+ }
+}
diff --git a/test/p2p/data/mach3/core/config/priv_validator.json b/test/p2p/data/mach3/core/config/priv_validator.json
index 743a931f5..84c98b98f 100644
--- a/test/p2p/data/mach3/core/config/priv_validator.json
+++ b/test/p2p/data/mach3/core/config/priv_validator.json
@@ -1,14 +1,14 @@
{
- "address": "4C5F061DAC28660853904A66705B12CA2B317572",
- "last_height": 0,
- "last_round": 0,
- "last_step": 0,
- "priv_key": {
- "type": "ed25519",
- "data": "C1A4E47F349FC5F556F4A9A27BA776B94424C312BAA6CF6EE44B867348D7C3F2AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
+ "address": "7C747D7E002932B3864E3FBE9AC04287043F66A0",
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
},
- "pub_key": {
- "type": "ed25519",
- "data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
+ "last_height": 0,
+ "last_round": 0,
+ "last_step": 0,
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "waTkfzSfxfVW9Kmie6d2uUQkwxK6ps9u5EuGc0jXw/KuZ6xpfRNaoLRgHqV+qrP+v0uqTyKcRaWYwphbEvzRoQ=="
}
}
diff --git a/test/p2p/data/mach4/core/config/genesis.json b/test/p2p/data/mach4/core/config/genesis.json
index 8d5c6c7b9..19577db00 100644
--- a/test/p2p/data/mach4/core/config/genesis.json
+++ b/test/p2p/data/mach4/core/config/genesis.json
@@ -1,39 +1,39 @@
{
- "app_hash": "",
- "chain_id": "chain-9ujDWI",
- "genesis_time": "2016-06-24T20:01:19.322Z",
- "validators": [
- {
- "power": 1,
- "name": "mach1",
- "pub_key": {
- "type": "ed25519",
- "data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
- }
- },
- {
- "power": 1,
- "name": "mach2",
- "pub_key": {
- "type": "ed25519",
- "data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
- }
- },
- {
- "power": 1,
- "name": "mach3",
- "pub_key": {
- "type": "ed25519",
- "data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
- }
- },
- {
- "power": 1,
- "name": "mach4",
- "pub_key": {
- "type": "ed25519",
- "data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
- }
- }
- ]
+ "genesis_time": "2016-06-24T20:01:19.322Z",
+ "chain_id": "chain-9ujDWI",
+ "validators": [
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
+ },
+ "power": 1,
+ "name": "mach1"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
+ },
+ "power": 1,
+ "name": "mach2"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
+ },
+ "power": 1,
+ "name": "mach3"
+ },
+ {
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
+ },
+ "power": 1,
+ "name": "mach4"
+ }
+ ],
+ "app_hash": ""
}
diff --git a/test/p2p/data/mach4/core/config/node_key.json b/test/p2p/data/mach4/core/config/node_key.json
index 72e7b25df..a0c8d3917 100644
--- a/test/p2p/data/mach4/core/config/node_key.json
+++ b/test/p2p/data/mach4/core/config/node_key.json
@@ -1 +1,6 @@
-{"priv_key":{"type":"ed25519","data":"408226F3F40411AC22262DD9A33BFE27D6FED42B9F084906B3797118C951CB82F81552170A85C94F0608AE8B59B70A0CA8B604A9057585B28A266140DC615E97"}}
\ No newline at end of file
+{
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "QIIm8/QEEawiJi3Zozv+J9b+1CufCEkGs3lxGMlRy4L4FVIXCoXJTwYIrotZtwoMqLYEqQV1hbKKJmFA3GFelw=="
+ }
+}
diff --git a/test/p2p/data/mach4/core/config/priv_validator.json b/test/p2p/data/mach4/core/config/priv_validator.json
index 1c10eb8a6..4f88045b5 100644
--- a/test/p2p/data/mach4/core/config/priv_validator.json
+++ b/test/p2p/data/mach4/core/config/priv_validator.json
@@ -1,14 +1,14 @@
{
- "address": "86F6DA4B34F16D743D2D992B5ACB12F5E724CC2D",
- "last_height": 0,
- "last_round": 0,
- "last_step": 0,
- "priv_key": {
- "type": "ed25519",
- "data": "C4CC3ED28F020C2DBDA98BCDBF08C3CED370470E74F25E938D5D295E8E3D2B0C9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
+ "address": "CEBEFE3CA1363D425643EF63FC179E77A50A1E9A",
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
},
- "pub_key": {
- "type": "ed25519",
- "data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
+ "last_height": 0,
+ "last_round": 0,
+ "last_step": 0,
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "xMw+0o8CDC29qYvNvwjDztNwRw508l6TjV0pXo49KwyevI9YztS0bc1auKulkd0lPNfLUDcnP9oyvAtkYcTv2Q=="
}
}
diff --git a/test/p2p/fast_sync/check_peer.sh b/test/p2p/fast_sync/check_peer.sh
index b10c3efc5..537a5cfe9 100644
--- a/test/p2p/fast_sync/check_peer.sh
+++ b/test/p2p/fast_sync/check_peer.sh
@@ -15,10 +15,10 @@ peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1
peer_addr=$(test/p2p/ip.sh $peerID):46657
# get another peer's height
-h1=`curl -s $peer_addr/status | jq .result.latest_block_height`
+h1=`curl -s $peer_addr/status | jq .result.sync_info.latest_block_height`
# get another peer's state
-root1=`curl -s $peer_addr/status | jq .result.latest_app_hash`
+root1=`curl -s $peer_addr/status | jq .result.sync_info.latest_app_hash`
echo "Other peer is on height $h1 with state $root1"
echo "Waiting for peer $ID to catch up"
@@ -29,12 +29,12 @@ set +o pipefail
h2="0"
while [[ "$h2" -lt "$(($h1+3))" ]]; do
sleep 1
- h2=`curl -s $addr/status | jq .result.latest_block_height`
+ h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height`
echo "... $h2"
done
# check the app hash
-root2=`curl -s $addr/status | jq .result.latest_app_hash`
+root2=`curl -s $addr/status | jq .result.sync_info.latest_app_hash`
if [[ "$root1" != "$root2" ]]; then
echo "App hash after fast sync does not match. Got $root2; expected $root1"
diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh
index 37cd13a42..40d9813bf 100644
--- a/test/p2p/kill_all/check_peers.sh
+++ b/test/p2p/kill_all/check_peers.sh
@@ -23,7 +23,7 @@ set -e
# get the first peer's height
addr=$(test/p2p/ip.sh 1):46657
-h1=$(curl -s "$addr/status" | jq .result.latest_block_height)
+h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height)
echo "1st peer is on height $h1"
echo "Waiting until other peers reporting a height higher than the 1st one"
@@ -33,7 +33,7 @@ for i in $(seq 2 "$NUM_OF_PEERS"); do
while [[ $hi -le $h1 ]] ; do
addr=$(test/p2p/ip.sh "$i"):46657
- hi=$(curl -s "$addr/status" | jq .result.latest_block_height)
+ hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height)
echo "... peer $i is on height $hi"
diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh
index d5ede231a..15d44ff33 100644
--- a/test/p2p/peer.sh
+++ b/test/p2p/peer.sh
@@ -10,13 +10,10 @@ set +u
NODE_FLAGS=$5
set -u
-set +eu
-
echo "starting tendermint peer ID=$ID"
# start tendermint container on the network
# NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be
# treated as one flag.
-set -u
docker run -d \
--net="$NETWORK_NAME" \
--ip=$(test/p2p/ip.sh "$ID") \
@@ -27,4 +24,4 @@ docker run -d \
--log-opt syslog-address=udp://127.0.0.1:5514 \
--log-opt syslog-facility=daemon \
--log-opt tag="{{.Name}}" \
- "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY"
+ "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY"
diff --git a/test/persist/test_failure_indices.sh b/test/persist/test_failure_indices.sh
index 42739107f..dd630e1b7 100644
--- a/test/persist/test_failure_indices.sh
+++ b/test/persist/test_failure_indices.sh
@@ -31,7 +31,7 @@ function start_procs(){
if [[ "$CIRCLECI" == true ]]; then
$TM_CMD &
else
- $TM_CMD &> "tendermint_${name}.log" &
+ $TM_CMD &> "tendermint_${name}.log" &
fi
PID_TENDERMINT=$!
else
@@ -60,8 +60,8 @@ function wait_for_port() {
i=0
while [ "$ERR" == 0 ]; do
echo "... port $port is still bound. waiting ..."
- sleep 1
- nc -z 127.0.0.1 $port
+ sleep 1
+ nc -z 127.0.0.1 $port
ERR=$?
i=$((i + 1))
if [[ $i == 10 ]]; then
@@ -97,7 +97,7 @@ for failIndex in $(seq $failsStart $failsEnd); do
ERR=$?
i=0
while [ "$ERR" != 0 ]; do
- sleep 1
+ sleep 1
curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null
ERR=$?
i=$((i + 1))
@@ -108,11 +108,11 @@ for failIndex in $(seq $failsStart $failsEnd); do
done
# wait for a new block
- h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.latest_block_height)
+ h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height)
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
- h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.latest_block_height)
+ h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height)
done
kill_procs
diff --git a/test/persist/test_simple.sh b/test/persist/test_simple.sh
index be7852598..171ab747d 100644
--- a/test/persist/test_simple.sh
+++ b/test/persist/test_simple.sh
@@ -46,7 +46,7 @@ curl -s $addr/status > /dev/null
ERR=$?
i=0
while [ "$ERR" != 0 ]; do
- sleep 1
+ sleep 1
curl -s $addr/status > /dev/null
ERR=$?
i=$(($i + 1))
@@ -57,11 +57,11 @@ while [ "$ERR" != 0 ]; do
done
# wait for a new block
-h1=`curl -s $addr/status | jq .result.latest_block_height`
+h1=`curl -s $addr/status | jq .result.sync_info.latest_block_height`
h2=$h1
while [ "$h2" == "$h1" ]; do
sleep 1
- h2=`curl -s $addr/status | jq .result.latest_block_height`
+ h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height`
done
kill_procs
diff --git a/types/block.go b/types/block.go
index 22d605d66..34d2cb85d 100644
--- a/types/block.go
+++ b/types/block.go
@@ -5,9 +5,9 @@ import (
"errors"
"fmt"
"strings"
+ "sync"
"time"
- wire "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/merkle"
"golang.org/x/crypto/ripemd160"
@@ -16,6 +16,7 @@ import (
// Block defines the atomic unit of a Tendermint blockchain.
// TODO: add Version byte
type Block struct {
+ mtx sync.Mutex
*Header `json:"header"`
*Data `json:"data"`
Evidence EvidenceData `json:"evidence"`
@@ -36,7 +37,7 @@ func MakeBlock(height int64, txs []Tx, commit *Commit) *Block {
Txs: txs,
},
}
- block.FillHeader()
+ block.fillHeader()
return block
}
@@ -48,6 +49,12 @@ func (b *Block) AddEvidence(evidence []Evidence) {
// ValidateBasic performs basic validation that doesn't involve state data.
// It checks the internal consistency of the block.
func (b *Block) ValidateBasic() error {
+ if b == nil {
+ return errors.New("Nil blocks are invalid")
+ }
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+
newTxs := int64(len(b.Data.Txs))
if b.NumTxs != newTxs {
return fmt.Errorf("Wrong Block.Header.NumTxs. Expected %v, got %v", newTxs, b.NumTxs)
@@ -69,8 +76,8 @@ func (b *Block) ValidateBasic() error {
return nil
}
-// FillHeader fills in any remaining header fields that are a function of the block data
-func (b *Block) FillHeader() {
+// fillHeader fills in any remaining header fields that are a function of the block data
+func (b *Block) fillHeader() {
if b.LastCommitHash == nil {
b.LastCommitHash = b.LastCommit.Hash()
}
@@ -85,17 +92,31 @@ func (b *Block) FillHeader() {
// Hash computes and returns the block hash.
// If the block is incomplete, block hash is nil for safety.
func (b *Block) Hash() cmn.HexBytes {
+ if b == nil {
+ return nil
+ }
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+
if b == nil || b.Header == nil || b.Data == nil || b.LastCommit == nil {
return nil
}
- b.FillHeader()
+ b.fillHeader()
return b.Header.Hash()
}
// MakePartSet returns a PartSet containing parts of a serialized block.
// This is the form in which the block is gossipped to peers.
func (b *Block) MakePartSet(partSize int) *PartSet {
- bz, err := wire.MarshalBinary(b)
+ if b == nil {
+ return nil
+ }
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+
+ // We prefix the byte length, so that unmarshaling
+ // can easily happen via a reader.
+ bz, err := cdc.MarshalBinary(b)
if err != nil {
panic(err)
}
@@ -184,19 +205,19 @@ func (h *Header) Hash() cmn.HexBytes {
return nil
}
return merkle.SimpleHashFromMap(map[string]merkle.Hasher{
- "ChainID": wireHasher(h.ChainID),
- "Height": wireHasher(h.Height),
- "Time": wireHasher(h.Time),
- "NumTxs": wireHasher(h.NumTxs),
- "TotalTxs": wireHasher(h.TotalTxs),
- "LastBlockID": wireHasher(h.LastBlockID),
- "LastCommit": wireHasher(h.LastCommitHash),
- "Data": wireHasher(h.DataHash),
- "Validators": wireHasher(h.ValidatorsHash),
- "App": wireHasher(h.AppHash),
- "Consensus": wireHasher(h.ConsensusHash),
- "Results": wireHasher(h.LastResultsHash),
- "Evidence": wireHasher(h.EvidenceHash),
+ "ChainID": aminoHasher(h.ChainID),
+ "Height": aminoHasher(h.Height),
+ "Time": aminoHasher(h.Time),
+ "NumTxs": aminoHasher(h.NumTxs),
+ "TotalTxs": aminoHasher(h.TotalTxs),
+ "LastBlockID": aminoHasher(h.LastBlockID),
+ "LastCommit": aminoHasher(h.LastCommitHash),
+ "Data": aminoHasher(h.DataHash),
+ "Validators": aminoHasher(h.ValidatorsHash),
+ "App": aminoHasher(h.AppHash),
+ "Consensus": aminoHasher(h.ConsensusHash),
+ "Results": aminoHasher(h.LastResultsHash),
+ "Evidence": aminoHasher(h.EvidenceHash),
})
}
@@ -365,7 +386,7 @@ func (commit *Commit) Hash() cmn.HexBytes {
if commit.hash == nil {
bs := make([]merkle.Hasher, len(commit.Precommits))
for i, precommit := range commit.Precommits {
- bs[i] = wireHasher(precommit)
+ bs[i] = aminoHasher(precommit)
}
commit.hash = merkle.SimpleHashFromHashers(bs)
}
@@ -503,7 +524,7 @@ func (blockID BlockID) Equals(other BlockID) bool {
// Key returns a machine-readable string representation of the BlockID
func (blockID BlockID) Key() string {
- bz, err := wire.MarshalBinary(blockID.PartsHeader)
+ bz, err := cdc.MarshalBinaryBare(blockID.PartsHeader)
if err != nil {
panic(err)
}
@@ -523,23 +544,25 @@ type hasher struct {
func (h hasher) Hash() []byte {
hasher := ripemd160.New()
- bz, err := wire.MarshalBinary(h.item)
- if err != nil {
- panic(err)
- }
- _, err = hasher.Write(bz)
- if err != nil {
- panic(err)
+ if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) {
+ bz, err := cdc.MarshalBinaryBare(h.item)
+ if err != nil {
+ panic(err)
+ }
+ _, err = hasher.Write(bz)
+ if err != nil {
+ panic(err)
+ }
}
return hasher.Sum(nil)
}
-func tmHash(item interface{}) []byte {
+func aminoHash(item interface{}) []byte {
h := hasher{item}
return h.Hash()
}
-func wireHasher(item interface{}) merkle.Hasher {
+func aminoHasher(item interface{}) merkle.Hasher {
return hasher{item}
}
diff --git a/types/block_test.go b/types/block_test.go
index 4aaa16a97..3c2942cd5 100644
--- a/types/block_test.go
+++ b/types/block_test.go
@@ -15,8 +15,7 @@ func TestValidateBlock(t *testing.T) {
lastID := makeBlockIDRandom()
h := int64(3)
- voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit,
- 10, 1)
+ voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1)
commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals)
require.NoError(t, err)
diff --git a/types/canonical_json.go b/types/canonical_json.go
index 4eeeb2064..95ade9c67 100644
--- a/types/canonical_json.go
+++ b/types/canonical_json.go
@@ -3,14 +3,14 @@ package types
import (
"time"
- wire "github.com/tendermint/tendermint/wire"
+ "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
)
-// canonical json is wire's json for structs with fields in alphabetical order
+// Canonical json is amino's json for structs with fields in alphabetical order
// TimeFormat is used for generating the sigs
-const TimeFormat = wire.RFC3339Millis
+const TimeFormat = amino.RFC3339Millis
type CanonicalJSONBlockID struct {
Hash cmn.HexBytes `json:"hash,omitempty"`
@@ -18,11 +18,13 @@ type CanonicalJSONBlockID struct {
}
type CanonicalJSONPartSetHeader struct {
- Hash cmn.HexBytes `json:"hash"`
- Total int `json:"total"`
+ Hash cmn.HexBytes `json:"hash,omitempty"`
+ Total int `json:"total,omitempty"`
}
type CanonicalJSONProposal struct {
+ ChainID string `json:"@chain_id"`
+ Type string `json:"@type"`
BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"`
Height int64 `json:"height"`
POLBlockID CanonicalJSONBlockID `json:"pol_block_id"`
@@ -32,14 +34,18 @@ type CanonicalJSONProposal struct {
}
type CanonicalJSONVote struct {
+ ChainID string `json:"@chain_id"`
+ Type string `json:"@type"`
BlockID CanonicalJSONBlockID `json:"block_id"`
Height int64 `json:"height"`
Round int `json:"round"`
Timestamp string `json:"timestamp"`
- Type byte `json:"type"`
+ VoteType byte `json:"type"`
}
type CanonicalJSONHeartbeat struct {
+ ChainID string `json:"@chain_id"`
+ Type string `json:"@type"`
Height int64 `json:"height"`
Round int `json:"round"`
Sequence int `json:"sequence"`
@@ -47,24 +53,6 @@ type CanonicalJSONHeartbeat struct {
ValidatorIndex int `json:"validator_index"`
}
-//------------------------------------
-// Messages including a "chain id" can only be applied to one chain, hence "Once"
-
-type CanonicalJSONOnceProposal struct {
- ChainID string `json:"chain_id"`
- Proposal CanonicalJSONProposal `json:"proposal"`
-}
-
-type CanonicalJSONOnceVote struct {
- ChainID string `json:"chain_id"`
- Vote CanonicalJSONVote `json:"vote"`
-}
-
-type CanonicalJSONOnceHeartbeat struct {
- ChainID string `json:"chain_id"`
- Heartbeat CanonicalJSONHeartbeat `json:"heartbeat"`
-}
-
//-----------------------------------
// Canonicalize the structs
@@ -82,8 +70,10 @@ func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader {
}
}
-func CanonicalProposal(proposal *Proposal) CanonicalJSONProposal {
+func CanonicalProposal(chainID string, proposal *Proposal) CanonicalJSONProposal {
return CanonicalJSONProposal{
+ ChainID: chainID,
+ Type: "proposal",
BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader),
Height: proposal.Height,
Timestamp: CanonicalTime(proposal.Timestamp),
@@ -93,28 +83,32 @@ func CanonicalProposal(proposal *Proposal) CanonicalJSONProposal {
}
}
-func CanonicalVote(vote *Vote) CanonicalJSONVote {
+func CanonicalVote(chainID string, vote *Vote) CanonicalJSONVote {
return CanonicalJSONVote{
+ ChainID: chainID,
+ Type: "vote",
BlockID: CanonicalBlockID(vote.BlockID),
Height: vote.Height,
Round: vote.Round,
Timestamp: CanonicalTime(vote.Timestamp),
- Type: vote.Type,
+ VoteType: vote.Type,
}
}
-func CanonicalHeartbeat(heartbeat *Heartbeat) CanonicalJSONHeartbeat {
+func CanonicalHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalJSONHeartbeat {
return CanonicalJSONHeartbeat{
- heartbeat.Height,
- heartbeat.Round,
- heartbeat.Sequence,
- heartbeat.ValidatorAddress,
- heartbeat.ValidatorIndex,
+ ChainID: chainID,
+ Type: "heartbeat",
+ Height: heartbeat.Height,
+ Round: heartbeat.Round,
+ Sequence: heartbeat.Sequence,
+ ValidatorAddress: heartbeat.ValidatorAddress,
+ ValidatorIndex: heartbeat.ValidatorIndex,
}
}
func CanonicalTime(t time.Time) string {
- // note that sending time over wire resets it to
+ // Note that sending time over amino resets it to
// local time, we need to force UTC here, so the
// signatures match
return t.UTC().Format(TimeFormat)
diff --git a/types/event_bus.go b/types/event_bus.go
index 37bd5619d..460a3e294 100644
--- a/types/event_bus.go
+++ b/types/event_bus.go
@@ -67,22 +67,22 @@ func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error
func (b *EventBus) Publish(eventType string, eventData TMEventData) error {
// no explicit deadline for publishing events
ctx := context.Background()
- b.pubsub.PublishWithTags(ctx, eventData, map[string]interface{}{EventTypeKey: eventType})
+ b.pubsub.PublishWithTags(ctx, eventData, tmpubsub.NewTagMap(map[string]interface{}{EventTypeKey: eventType}))
return nil
}
//--- block, tx, and vote events
func (b *EventBus) PublishEventNewBlock(event EventDataNewBlock) error {
- return b.Publish(EventNewBlock, TMEventData{event})
+ return b.Publish(EventNewBlock, event)
}
func (b *EventBus) PublishEventNewBlockHeader(event EventDataNewBlockHeader) error {
- return b.Publish(EventNewBlockHeader, TMEventData{event})
+ return b.Publish(EventNewBlockHeader, event)
}
func (b *EventBus) PublishEventVote(event EventDataVote) error {
- return b.Publish(EventVote, TMEventData{event})
+ return b.Publish(EventVote, event)
}
// PublishEventTx publishes tx event with tags from Result. Note it will add
@@ -114,50 +114,50 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error {
logIfTagExists(TxHeightKey, tags, b.Logger)
tags[TxHeightKey] = event.Height
- b.pubsub.PublishWithTags(ctx, TMEventData{event}, tags)
+ b.pubsub.PublishWithTags(ctx, event, tmpubsub.NewTagMap(tags))
return nil
}
func (b *EventBus) PublishEventProposalHeartbeat(event EventDataProposalHeartbeat) error {
- return b.Publish(EventProposalHeartbeat, TMEventData{event})
+ return b.Publish(EventProposalHeartbeat, event)
}
//--- EventDataRoundState events
func (b *EventBus) PublishEventNewRoundStep(event EventDataRoundState) error {
- return b.Publish(EventNewRoundStep, TMEventData{event})
+ return b.Publish(EventNewRoundStep, event)
}
func (b *EventBus) PublishEventTimeoutPropose(event EventDataRoundState) error {
- return b.Publish(EventTimeoutPropose, TMEventData{event})
+ return b.Publish(EventTimeoutPropose, event)
}
func (b *EventBus) PublishEventTimeoutWait(event EventDataRoundState) error {
- return b.Publish(EventTimeoutWait, TMEventData{event})
+ return b.Publish(EventTimeoutWait, event)
}
func (b *EventBus) PublishEventNewRound(event EventDataRoundState) error {
- return b.Publish(EventNewRound, TMEventData{event})
+ return b.Publish(EventNewRound, event)
}
func (b *EventBus) PublishEventCompleteProposal(event EventDataRoundState) error {
- return b.Publish(EventCompleteProposal, TMEventData{event})
+ return b.Publish(EventCompleteProposal, event)
}
func (b *EventBus) PublishEventPolka(event EventDataRoundState) error {
- return b.Publish(EventPolka, TMEventData{event})
+ return b.Publish(EventPolka, event)
}
func (b *EventBus) PublishEventUnlock(event EventDataRoundState) error {
- return b.Publish(EventUnlock, TMEventData{event})
+ return b.Publish(EventUnlock, event)
}
func (b *EventBus) PublishEventRelock(event EventDataRoundState) error {
- return b.Publish(EventRelock, TMEventData{event})
+ return b.Publish(EventRelock, event)
}
func (b *EventBus) PublishEventLock(event EventDataRoundState) error {
- return b.Publish(EventLock, TMEventData{event})
+ return b.Publish(EventLock, event)
}
func logIfTagExists(tag string, tags map[string]interface{}, logger log.Logger) {
diff --git a/types/event_bus_test.go b/types/event_bus_test.go
index c1b4c1b0f..70a537745 100644
--- a/types/event_bus_test.go
+++ b/types/event_bus_test.go
@@ -35,7 +35,7 @@ func TestEventBusPublishEventTx(t *testing.T) {
done := make(chan struct{})
go func() {
for e := range txEventsCh {
- edt := e.(TMEventData).Unwrap().(EventDataTx)
+ edt := e.(EventDataTx)
assert.Equal(t, int64(1), edt.Height)
assert.Equal(t, uint32(0), edt.Index)
assert.Equal(t, tx, edt.Tx)
@@ -122,7 +122,7 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes
eventType = randEvent()
}
- eventBus.Publish(eventType, TMEventData{"Gamora"})
+ eventBus.Publish(eventType, EventDataString("Gamora"))
}
}
diff --git a/types/events.go b/types/events.go
index d6f7b012c..342d4bc20 100644
--- a/types/events.go
+++ b/types/events.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- "github.com/tendermint/go-wire/data"
+ "github.com/tendermint/go-amino"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmquery "github.com/tendermint/tmlibs/pubsub/query"
)
@@ -35,66 +35,31 @@ const (
// ENCODING / DECODING
///////////////////////////////////////////////////////////////////////////////
-var (
- EventDataNameNewBlock = "new_block"
- EventDataNameNewBlockHeader = "new_block_header"
- EventDataNameTx = "tx"
- EventDataNameRoundState = "round_state"
- EventDataNameVote = "vote"
- EventDataNameProposalHeartbeat = "proposal_heartbeat"
-)
-
// implements events.EventData
-type TMEventDataInner interface {
+type TMEventData interface {
+ AssertIsTMEventData()
// empty interface
}
-type TMEventData struct {
- TMEventDataInner `json:"unwrap"`
+func (_ EventDataNewBlock) AssertIsTMEventData() {}
+func (_ EventDataNewBlockHeader) AssertIsTMEventData() {}
+func (_ EventDataTx) AssertIsTMEventData() {}
+func (_ EventDataRoundState) AssertIsTMEventData() {}
+func (_ EventDataVote) AssertIsTMEventData() {}
+func (_ EventDataProposalHeartbeat) AssertIsTMEventData() {}
+func (_ EventDataString) AssertIsTMEventData() {}
+
+func RegisterEventDatas(cdc *amino.Codec) {
+ cdc.RegisterInterface((*TMEventData)(nil), nil)
+ cdc.RegisterConcrete(EventDataNewBlock{}, "tendermint/event/NewBlock", nil)
+ cdc.RegisterConcrete(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader", nil)
+ cdc.RegisterConcrete(EventDataTx{}, "tendermint/event/Tx", nil)
+ cdc.RegisterConcrete(EventDataRoundState{}, "tendermint/event/RoundState", nil)
+ cdc.RegisterConcrete(EventDataVote{}, "tendermint/event/Vote", nil)
+ cdc.RegisterConcrete(EventDataProposalHeartbeat{}, "tendermint/event/ProposalHeartbeat", nil)
+ cdc.RegisterConcrete(EventDataString(""), "tendermint/event/ProposalString", nil)
}
-func (tmr TMEventData) MarshalJSON() ([]byte, error) {
- return tmEventDataMapper.ToJSON(tmr.TMEventDataInner)
-}
-
-func (tmr *TMEventData) UnmarshalJSON(data []byte) (err error) {
- parsed, err := tmEventDataMapper.FromJSON(data)
- if err == nil && parsed != nil {
- tmr.TMEventDataInner = parsed.(TMEventDataInner)
- }
- return
-}
-
-func (tmr TMEventData) Unwrap() TMEventDataInner {
- tmrI := tmr.TMEventDataInner
- for wrap, ok := tmrI.(TMEventData); ok; wrap, ok = tmrI.(TMEventData) {
- tmrI = wrap.TMEventDataInner
- }
- return tmrI
-}
-
-func (tmr TMEventData) Empty() bool {
- return tmr.TMEventDataInner == nil
-}
-
-const (
- EventDataTypeNewBlock = byte(0x01)
- EventDataTypeFork = byte(0x02)
- EventDataTypeTx = byte(0x03)
- EventDataTypeNewBlockHeader = byte(0x04)
- EventDataTypeRoundState = byte(0x11)
- EventDataTypeVote = byte(0x12)
- EventDataTypeProposalHeartbeat = byte(0x20)
-)
-
-var tmEventDataMapper = data.NewMapper(TMEventData{}).
- RegisterImplementation(EventDataNewBlock{}, EventDataNameNewBlock, EventDataTypeNewBlock).
- RegisterImplementation(EventDataNewBlockHeader{}, EventDataNameNewBlockHeader, EventDataTypeNewBlockHeader).
- RegisterImplementation(EventDataTx{}, EventDataNameTx, EventDataTypeTx).
- RegisterImplementation(EventDataRoundState{}, EventDataNameRoundState, EventDataTypeRoundState).
- RegisterImplementation(EventDataVote{}, EventDataNameVote, EventDataTypeVote).
- RegisterImplementation(EventDataProposalHeartbeat{}, EventDataNameProposalHeartbeat, EventDataTypeProposalHeartbeat)
-
// Most event messages are basic types (a block, a transaction)
// but some (an input to a call tx or a receive) are more exotic
@@ -130,6 +95,8 @@ type EventDataVote struct {
Vote *Vote
}
+type EventDataString string
+
///////////////////////////////////////////////////////////////////////////////
// PUBSUB
///////////////////////////////////////////////////////////////////////////////
diff --git a/types/evidence.go b/types/evidence.go
index 0b3496045..ee7f44a58 100644
--- a/types/evidence.go
+++ b/types/evidence.go
@@ -4,8 +4,8 @@ import (
"bytes"
"fmt"
+ "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
- wire "github.com/tendermint/tendermint/wire"
"github.com/tendermint/tmlibs/merkle"
)
@@ -38,56 +38,11 @@ type Evidence interface {
String() string
}
-//-------------------------------------------
-
-// EvidenceList is a list of Evidence. Evidences is not a word.
-type EvidenceList []Evidence
-
-// Hash returns the simple merkle root hash of the EvidenceList.
-func (evl EvidenceList) Hash() []byte {
- // Recursive impl.
- // Copied from tmlibs/merkle to avoid allocations
- switch len(evl) {
- case 0:
- return nil
- case 1:
- return evl[0].Hash()
- default:
- left := EvidenceList(evl[:(len(evl)+1)/2]).Hash()
- right := EvidenceList(evl[(len(evl)+1)/2:]).Hash()
- return merkle.SimpleHashFromTwoHashes(left, right)
- }
+func RegisterEvidences(cdc *amino.Codec) {
+ cdc.RegisterInterface((*Evidence)(nil), nil)
+ cdc.RegisterConcrete(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence", nil)
}
-func (evl EvidenceList) String() string {
- s := ""
- for _, e := range evl {
- s += fmt.Sprintf("%s\t\t", e)
- }
- return s
-}
-
-// Has returns true if the evidence is in the EvidenceList.
-func (evl EvidenceList) Has(evidence Evidence) bool {
- for _, ev := range evl {
- if ev.Equal(evidence) {
- return true
- }
- }
- return false
-}
-
-//-------------------------------------------
-
-const (
- evidenceTypeDuplicateVote = byte(0x01)
-)
-
-var _ = wire.RegisterInterface(
- struct{ Evidence }{},
- wire.ConcreteType{&DuplicateVoteEvidence{}, evidenceTypeDuplicateVote},
-)
-
//-------------------------------------------
// DuplicateVoteEvidence contains evidence a validator signed two conflicting votes.
@@ -120,7 +75,7 @@ func (dve *DuplicateVoteEvidence) Index() int {
// Hash returns the hash of the evidence.
func (dve *DuplicateVoteEvidence) Hash() []byte {
- return wireHasher(dve).Hash()
+ return aminoHasher(dve).Hash()
}
// Verify returns an error if the two votes aren't conflicting.
@@ -165,8 +120,8 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool {
}
// just check their hashes
- dveHash := wireHasher(dve).Hash()
- evHash := wireHasher(ev).Hash()
+ dveHash := aminoHasher(dve).Hash()
+ evHash := aminoHasher(ev).Hash()
return bytes.Equal(dveHash, evHash)
}
@@ -216,3 +171,42 @@ func (e MockBadEvidence) Equal(ev Evidence) bool {
func (e MockBadEvidence) String() string {
return fmt.Sprintf("BadEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_)
}
+
+//-------------------------------------------
+
+// EvidenceList is a list of Evidence. Evidences is not a word.
+type EvidenceList []Evidence
+
+// Hash returns the simple merkle root hash of the EvidenceList.
+func (evl EvidenceList) Hash() []byte {
+ // Recursive impl.
+ // Copied from tmlibs/merkle to avoid allocations
+ switch len(evl) {
+ case 0:
+ return nil
+ case 1:
+ return evl[0].Hash()
+ default:
+ left := EvidenceList(evl[:(len(evl)+1)/2]).Hash()
+ right := EvidenceList(evl[(len(evl)+1)/2:]).Hash()
+ return merkle.SimpleHashFromTwoHashes(left, right)
+ }
+}
+
+func (evl EvidenceList) String() string {
+ s := ""
+ for _, e := range evl {
+ s += fmt.Sprintf("%s\t\t", e)
+ }
+ return s
+}
+
+// Has returns true if the evidence is in the EvidenceList.
+func (evl EvidenceList) Has(evidence Evidence) bool {
+ for _, ev := range evl {
+ if ev.Equal(evidence) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/types/evidence_test.go b/types/evidence_test.go
index 84811514a..f2b1f91bb 100644
--- a/types/evidence_test.go
+++ b/types/evidence_test.go
@@ -4,7 +4,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- cmn "github.com/tendermint/tmlibs/common"
)
type voteData struct {
@@ -13,25 +12,25 @@ type voteData struct {
valid bool
}
-func makeVote(val *PrivValidatorFS, chainID string, valIndex int, height int64, round, step int, blockID BlockID) *Vote {
+func makeVote(val PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID) *Vote {
v := &Vote{
- ValidatorAddress: val.PubKey.Address(),
+ ValidatorAddress: val.GetAddress(),
ValidatorIndex: valIndex,
Height: height,
Round: round,
Type: byte(step),
BlockID: blockID,
}
- sig := val.PrivKey.Sign(v.SignBytes(chainID))
- v.Signature = sig
+ err := val.SignVote(chainID, v)
+ if err != nil {
+ panic(err)
+ }
return v
-
}
func TestEvidence(t *testing.T) {
- _, tmpFilePath := cmn.Tempfile("priv_validator_")
- val := GenPrivValidatorFS(tmpFilePath)
- val2 := GenPrivValidatorFS(tmpFilePath)
+ val := NewMockPV()
+ val2 := NewMockPV()
blockID := makeBlockID("blockhash", 1000, "partshash")
blockID2 := makeBlockID("blockhash2", 1000, "partshash")
blockID3 := makeBlockID("blockhash", 10000, "partshash")
@@ -41,7 +40,10 @@ func TestEvidence(t *testing.T) {
vote1 := makeVote(val, chainID, 0, 10, 2, 1, blockID)
badVote := makeVote(val, chainID, 0, 10, 2, 1, blockID)
- badVote.Signature = val2.PrivKey.Sign(badVote.SignBytes(chainID))
+ err := val2.SignVote(chainID, badVote)
+ if err != nil {
+ panic(err)
+ }
cases := []voteData{
{vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID2), true}, // different block ids
@@ -59,7 +61,7 @@ func TestEvidence(t *testing.T) {
for _, c := range cases {
ev := &DuplicateVoteEvidence{
- PubKey: val.PubKey,
+ PubKey: val.GetPubKey(),
VoteA: c.vote1,
VoteB: c.vote2,
}
diff --git a/types/genesis.go b/types/genesis.go
index 21c61806a..faf511196 100644
--- a/types/genesis.go
+++ b/types/genesis.go
@@ -5,9 +5,7 @@ import (
"io/ioutil"
"time"
- "github.com/pkg/errors"
-
- crypto "github.com/tendermint/go-crypto"
+ "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -43,7 +41,7 @@ func (genDoc *GenesisDoc) AppState() json.RawMessage {
// SaveAs is a utility method for saving GenensisDoc as a JSON file.
func (genDoc *GenesisDoc) SaveAs(file string) error {
- genDocBytes, err := json.Marshal(genDoc)
+ genDocBytes, err := cdc.MarshalJSON(genDoc)
if err != nil {
return err
}
@@ -65,7 +63,7 @@ func (genDoc *GenesisDoc) ValidatorHash() []byte {
func (genDoc *GenesisDoc) ValidateAndComplete() error {
if genDoc.ChainID == "" {
- return errors.Errorf("Genesis doc must include non-empty chain_id")
+ return cmn.NewError("Genesis doc must include non-empty chain_id")
}
if genDoc.ConsensusParams == nil {
@@ -77,12 +75,12 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error {
}
if len(genDoc.Validators) == 0 {
- return errors.Errorf("The genesis file must have at least one validator")
+ return cmn.NewError("The genesis file must have at least one validator")
}
for _, v := range genDoc.Validators {
if v.Power == 0 {
- return errors.Errorf("The genesis file cannot contain validators with no voting power: %v", v)
+ return cmn.NewError("The genesis file cannot contain validators with no voting power: %v", v)
}
}
@@ -99,7 +97,7 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error {
// GenesisDocFromJSON unmarshalls JSON data into a GenesisDoc.
func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
genDoc := GenesisDoc{}
- err := json.Unmarshal(jsonBlob, &genDoc)
+ err := cdc.UnmarshalJSON(jsonBlob, &genDoc)
if err != nil {
return nil, err
}
@@ -115,11 +113,11 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
- return nil, errors.Wrap(err, "Couldn't read GenesisDoc file")
+ return nil, cmn.ErrorWrap(err, "Couldn't read GenesisDoc file")
}
genDoc, err := GenesisDocFromJSON(jsonBlob)
if err != nil {
- return nil, errors.Wrap(err, cmn.Fmt("Error reading GenesisDoc at %v", genDocFile))
+ return nil, cmn.ErrorWrap(err, cmn.Fmt("Error reading GenesisDoc at %v", genDocFile))
}
return genDoc, nil
}
diff --git a/types/genesis_test.go b/types/genesis_test.go
index aa713289f..17ebf1cfc 100644
--- a/types/genesis_test.go
+++ b/types/genesis_test.go
@@ -1,35 +1,34 @@
package types
import (
- "encoding/json"
- "testing"
-
"github.com/stretchr/testify/assert"
-
- crypto "github.com/tendermint/go-crypto"
+ "github.com/tendermint/go-crypto"
+ "testing"
)
-func TestGenesis(t *testing.T) {
+func TestGenesisBad(t *testing.T) {
// test some bad ones from raw json
testCases := [][]byte{
- []byte{}, // empty
- []byte{1, 1, 1, 1, 1}, // junk
- []byte(`{}`), // empty
- []byte(`{"chain_id": "mychain"}`), // missing validators
- []byte(`{"chain_id": "mychain", "validators": []`), // missing validators
- []byte(`{"chain_id": "mychain", "validators": [{}]`), // missing validators
- []byte(`{"validators":[{"pub_key":
- {"type":"ed25519","data":"961EAB8752E51A03618502F55C2B6E09C38C65635C64CCF3173ED452CF86C957"},
- "power":10,"name":""}]}`), // missing chain_id
+ []byte{}, // empty
+ []byte{1, 1, 1, 1, 1}, // junk
+ []byte(`{}`), // empty
+ []byte(`{"chain_id":"mychain"}`), // missing validators
+ []byte(`{"chain_id":"mychain","validators":[]}`), // empty validators
+ []byte(`{"chain_id":"mychain","validators":[{}]}`), // invalid (empty) validator
+ []byte(`{"chain_id":"mychain","validators":null}`), // null validators
+ []byte(`{"chain_id":"mychain"}`), // missing validators (duplicate case)
+ []byte(`{"validators":[{"pub_key":{"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":10,"name":""}]}`), // missing chain_id
}
for _, testCase := range testCases {
_, err := GenesisDocFromJSON(testCase)
assert.Error(t, err, "expected error for empty genDoc json")
}
+}
+func TestGenesisGood(t *testing.T) {
// test a good one by raw json
- genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"ed25519","data":"961EAB8752E51A03618502F55C2B6E09C38C65635C64CCF3173ED452CF86C957"},"power":10,"name":""}],"app_hash":"","app_state":{"account_owner": "Bob"}}`)
+ genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":10,"name":""}],"app_hash":"","app_state":{"account_owner": "Bob"}}`)
_, err := GenesisDocFromJSON(genDocBytes)
assert.NoError(t, err, "expected no error for good genDoc json")
@@ -38,7 +37,7 @@ func TestGenesis(t *testing.T) {
ChainID: "abc",
Validators: []GenesisValidator{{crypto.GenPrivKeyEd25519().PubKey(), 10, "myval"}},
}
- genDocBytes, err = json.Marshal(baseGenDoc)
+ genDocBytes, err = cdc.MarshalJSON(baseGenDoc)
assert.NoError(t, err, "error marshalling genDoc")
// test base gendoc and check consensus params were filled
@@ -47,14 +46,14 @@ func TestGenesis(t *testing.T) {
assert.NotNil(t, genDoc.ConsensusParams, "expected consensus params to be filled in")
// create json with consensus params filled
- genDocBytes, err = json.Marshal(genDoc)
+ genDocBytes, err = cdc.MarshalJSON(genDoc)
assert.NoError(t, err, "error marshalling genDoc")
genDoc, err = GenesisDocFromJSON(genDocBytes)
assert.NoError(t, err, "expected no error for valid genDoc json")
// test with invalid consensus params
genDoc.ConsensusParams.BlockSize.MaxBytes = 0
- genDocBytes, err = json.Marshal(genDoc)
+ genDocBytes, err = cdc.MarshalJSON(genDoc)
assert.NoError(t, err, "error marshalling genDoc")
genDoc, err = GenesisDocFromJSON(genDocBytes)
assert.Error(t, err, "expected error for genDoc json with block size of 0")
diff --git a/types/heartbeat.go b/types/heartbeat.go
index 8b86a15ba..097dd22db 100644
--- a/types/heartbeat.go
+++ b/types/heartbeat.go
@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/tendermint/go-crypto"
- "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -25,10 +24,7 @@ type Heartbeat struct {
// SignBytes returns the Heartbeat bytes for signing.
// It panics if the Heartbeat is nil.
func (heartbeat *Heartbeat) SignBytes(chainID string) []byte {
- bz, err := wire.MarshalJSON(CanonicalJSONOnceHeartbeat{
- chainID,
- CanonicalHeartbeat(heartbeat),
- })
+ bz, err := cdc.MarshalJSON(CanonicalHeartbeat(chainID, heartbeat))
if err != nil {
panic(err)
}
diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go
index 206636166..3c4536028 100644
--- a/types/heartbeat_test.go
+++ b/types/heartbeat_test.go
@@ -25,22 +25,23 @@ func TestHeartbeatString(t *testing.T) {
require.Contains(t, nilHb.String(), "nil", "expecting a string and no panic")
hb := &Heartbeat{ValidatorIndex: 1, Height: 11, Round: 2}
- require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) {}}")
+ require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) }")
var key crypto.PrivKeyEd25519
hb.Signature = key.Sign([]byte("Tendermint"))
- require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) {/FF41E371B9BF.../}}")
+ require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) /FF41E371B9BF.../}")
}
func TestHeartbeatWriteSignBytes(t *testing.T) {
hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1}
bz := hb.SignBytes("0xdeadbeef")
- require.Equal(t, string(bz), `{"chain_id":"0xdeadbeef","heartbeat":{"height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}}`)
+ // NOTE: amino JSON sign bytes embed @chain_id and @type alongside the heartbeat fields.
+ require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}`)
plainHb := &Heartbeat{}
bz = plainHb.SignBytes("0xdeadbeef")
- require.Equal(t, string(bz), `{"chain_id":"0xdeadbeef","heartbeat":{"height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}}`)
+ require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}`)
require.Panics(t, func() {
var nilHb *Heartbeat
diff --git a/types/params.go b/types/params.go
index 0e8ac577f..2df092d62 100644
--- a/types/params.go
+++ b/types/params.go
@@ -1,14 +1,13 @@
package types
import (
- "github.com/pkg/errors"
-
abci "github.com/tendermint/abci/types"
+ cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/merkle"
)
const (
- maxBlockSizeBytes = 104857600 // 100MB
+ MaxBlockSizeBytes = 104857600 // 100MB
)
// ConsensusParams contains consensus critical parameters
@@ -89,16 +88,16 @@ func DefaultEvidenceParams() EvidenceParams {
func (params *ConsensusParams) Validate() error {
// ensure some values are greater than 0
if params.BlockSize.MaxBytes <= 0 {
- return errors.Errorf("BlockSize.MaxBytes must be greater than 0. Got %d", params.BlockSize.MaxBytes)
+ return cmn.NewError("BlockSize.MaxBytes must be greater than 0. Got %d", params.BlockSize.MaxBytes)
}
if params.BlockGossip.BlockPartSizeBytes <= 0 {
- return errors.Errorf("BlockGossip.BlockPartSizeBytes must be greater than 0. Got %d", params.BlockGossip.BlockPartSizeBytes)
+ return cmn.NewError("BlockGossip.BlockPartSizeBytes must be greater than 0. Got %d", params.BlockGossip.BlockPartSizeBytes)
}
// ensure blocks aren't too big
- if params.BlockSize.MaxBytes > maxBlockSizeBytes {
- return errors.Errorf("BlockSize.MaxBytes is too big. %d > %d",
- params.BlockSize.MaxBytes, maxBlockSizeBytes)
+ if params.BlockSize.MaxBytes > MaxBlockSizeBytes {
+ return cmn.NewError("BlockSize.MaxBytes is too big. %d > %d",
+ params.BlockSize.MaxBytes, MaxBlockSizeBytes)
}
return nil
}
@@ -107,12 +106,12 @@ func (params *ConsensusParams) Validate() error {
// in the block header
func (params *ConsensusParams) Hash() []byte {
return merkle.SimpleHashFromMap(map[string]merkle.Hasher{
- "block_gossip_part_size_bytes": wireHasher(params.BlockGossip.BlockPartSizeBytes),
- "block_size_max_bytes": wireHasher(params.BlockSize.MaxBytes),
- "block_size_max_gas": wireHasher(params.BlockSize.MaxGas),
- "block_size_max_txs": wireHasher(params.BlockSize.MaxTxs),
- "tx_size_max_bytes": wireHasher(params.TxSize.MaxBytes),
- "tx_size_max_gas": wireHasher(params.TxSize.MaxGas),
+ "block_gossip_part_size_bytes": aminoHasher(params.BlockGossip.BlockPartSizeBytes),
+ "block_size_max_bytes": aminoHasher(params.BlockSize.MaxBytes),
+ "block_size_max_gas": aminoHasher(params.BlockSize.MaxGas),
+ "block_size_max_txs": aminoHasher(params.BlockSize.MaxTxs),
+ "tx_size_max_bytes": aminoHasher(params.TxSize.MaxBytes),
+ "tx_size_max_gas": aminoHasher(params.TxSize.MaxGas),
})
}
diff --git a/types/priv_validator.go b/types/priv_validator.go
index 052ace978..8759d3f99 100644
--- a/types/priv_validator.go
+++ b/types/priv_validator.go
@@ -2,88 +2,11 @@ package types
import (
"bytes"
- "encoding/json"
- "errors"
"fmt"
- "io/ioutil"
- "sync"
- "time"
- crypto "github.com/tendermint/go-crypto"
- cmn "github.com/tendermint/tmlibs/common"
+ "github.com/tendermint/go-crypto"
)
-// TODO: type ?
-const (
- stepNone int8 = 0 // Used to distinguish the initial state
- stepPropose int8 = 1
- stepPrevote int8 = 2
- stepPrecommit int8 = 3
-)
-
-func voteToStep(vote *Vote) int8 {
- switch vote.Type {
- case VoteTypePrevote:
- return stepPrevote
- case VoteTypePrecommit:
- return stepPrecommit
- default:
- cmn.PanicSanity("Unknown vote type")
- return 0
- }
-}
-
-//--------------------------------------------------------------
-// PrivValidator is being upgraded! See types/priv_validator/
-
-// ValidatorID contains the identity of the validator.
-type ValidatorID struct {
- Address cmn.HexBytes `json:"address"`
- PubKey crypto.PubKey `json:"pub_key"`
-}
-
-// PrivValidator defines the functionality of a local Tendermint validator
-// that signs votes, proposals, and heartbeats, and never double signs.
-type PrivValidator2 interface {
- Address() (Address, error) // redundant since .PubKey().Address()
- PubKey() (crypto.PubKey, error)
-
- SignVote(chainID string, vote *Vote) error
- SignProposal(chainID string, proposal *Proposal) error
- SignHeartbeat(chainID string, heartbeat *Heartbeat) error
-}
-
-type TestSigner interface {
- Address() cmn.HexBytes
- PubKey() crypto.PubKey
- Sign([]byte) (crypto.Signature, error)
-}
-
-func GenSigner() TestSigner {
- return &DefaultTestSigner{
- crypto.GenPrivKeyEd25519().Wrap(),
- }
-}
-
-type DefaultTestSigner struct {
- crypto.PrivKey
-}
-
-func (ds *DefaultTestSigner) Address() cmn.HexBytes {
- return ds.PubKey().Address()
-}
-
-func (ds *DefaultTestSigner) PubKey() crypto.PubKey {
- return ds.PrivKey.PubKey()
-}
-
-func (ds *DefaultTestSigner) Sign(msg []byte) (crypto.Signature, error) {
- return ds.PrivKey.Sign(msg), nil
-}
-
-//--------------------------------------------------------------
-// TODO: Deprecate!
-
// PrivValidator defines the functionality of a local Tendermint validator
// that signs votes, proposals, and heartbeats, and never double signs.
type PrivValidator interface {
@@ -95,313 +18,10 @@ type PrivValidator interface {
SignHeartbeat(chainID string, heartbeat *Heartbeat) error
}
-// PrivValidatorFS implements PrivValidator using data persisted to disk
-// to prevent double signing. The Signer itself can be mutated to use
-// something besides the default, for instance a hardware signer.
-// NOTE: the directory containing the privVal.filePath must already exist.
-type PrivValidatorFS struct {
- Address Address `json:"address"`
- PubKey crypto.PubKey `json:"pub_key"`
- LastHeight int64 `json:"last_height"`
- LastRound int `json:"last_round"`
- LastStep int8 `json:"last_step"`
- LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we dont lose signatures
- LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we dont lose signatures
+//----------------------------------------
+// Misc.
- // PrivKey should be empty if a Signer other than the default is being used.
- PrivKey crypto.PrivKey `json:"priv_key"`
- Signer `json:"-"`
-
- // For persistence.
- // Overloaded for testing.
- filePath string
- mtx sync.Mutex
-}
-
-// Signer is an interface that defines how to sign messages.
-// It is the caller's duty to verify the msg before calling Sign,
-// eg. to avoid double signing.
-// Currently, the only callers are SignVote, SignProposal, and SignHeartbeat.
-type Signer interface {
- Sign(msg []byte) (crypto.Signature, error)
-}
-
-// DefaultSigner implements Signer.
-// It uses a standard, unencrypted crypto.PrivKey.
-type DefaultSigner struct {
- PrivKey crypto.PrivKey `json:"priv_key"`
-}
-
-// NewDefaultSigner returns an instance of DefaultSigner.
-func NewDefaultSigner(priv crypto.PrivKey) *DefaultSigner {
- return &DefaultSigner{
- PrivKey: priv,
- }
-}
-
-// Sign implements Signer. It signs the byte slice with a private key.
-func (ds *DefaultSigner) Sign(msg []byte) (crypto.Signature, error) {
- return ds.PrivKey.Sign(msg), nil
-}
-
-// GetAddress returns the address of the validator.
-// Implements PrivValidator.
-func (pv *PrivValidatorFS) GetAddress() Address {
- return pv.Address
-}
-
-// GetPubKey returns the public key of the validator.
-// Implements PrivValidator.
-func (pv *PrivValidatorFS) GetPubKey() crypto.PubKey {
- return pv.PubKey
-}
-
-// GenPrivValidatorFS generates a new validator with randomly generated private key
-// and sets the filePath, but does not call Save().
-func GenPrivValidatorFS(filePath string) *PrivValidatorFS {
- privKey := crypto.GenPrivKeyEd25519().Wrap()
- return &PrivValidatorFS{
- Address: privKey.PubKey().Address(),
- PubKey: privKey.PubKey(),
- PrivKey: privKey,
- LastStep: stepNone,
- Signer: NewDefaultSigner(privKey),
- filePath: filePath,
- }
-}
-
-// LoadPrivValidatorFS loads a PrivValidatorFS from the filePath.
-func LoadPrivValidatorFS(filePath string) *PrivValidatorFS {
- return LoadPrivValidatorFSWithSigner(filePath, func(privVal PrivValidator) Signer {
- return NewDefaultSigner(privVal.(*PrivValidatorFS).PrivKey)
- })
-}
-
-// LoadOrGenPrivValidatorFS loads a PrivValidatorFS from the given filePath
-// or else generates a new one and saves it to the filePath.
-func LoadOrGenPrivValidatorFS(filePath string) *PrivValidatorFS {
- var privVal *PrivValidatorFS
- if cmn.FileExists(filePath) {
- privVal = LoadPrivValidatorFS(filePath)
- } else {
- privVal = GenPrivValidatorFS(filePath)
- privVal.Save()
- }
- return privVal
-}
-
-// LoadPrivValidatorWithSigner loads a PrivValidatorFS with a custom
-// signer object. The PrivValidatorFS handles double signing prevention by persisting
-// data to the filePath, while the Signer handles the signing.
-// If the filePath does not exist, the PrivValidatorFS must be created manually and saved.
-func LoadPrivValidatorFSWithSigner(filePath string, signerFunc func(PrivValidator) Signer) *PrivValidatorFS {
- privValJSONBytes, err := ioutil.ReadFile(filePath)
- if err != nil {
- cmn.Exit(err.Error())
- }
- privVal := &PrivValidatorFS{}
- err = json.Unmarshal(privValJSONBytes, &privVal)
- if err != nil {
- cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err))
- }
-
- privVal.filePath = filePath
- privVal.Signer = signerFunc(privVal)
- return privVal
-}
-
-// Save persists the PrivValidatorFS to disk.
-func (pv *PrivValidatorFS) Save() {
- pv.mtx.Lock()
- defer pv.mtx.Unlock()
- pv.save()
-}
-
-func (pv *PrivValidatorFS) save() {
- outFile := pv.filePath
- if outFile == "" {
- panic("Cannot save PrivValidator: filePath not set")
- }
- jsonBytes, err := json.Marshal(pv)
- if err != nil {
- panic(err)
- }
- err = cmn.WriteFileAtomic(outFile, jsonBytes, 0600)
- if err != nil {
- panic(err)
- }
-}
-
-// Reset resets all fields in the PrivValidatorFS.
-// NOTE: Unsafe!
-func (pv *PrivValidatorFS) Reset() {
- var sig crypto.Signature
- pv.LastHeight = 0
- pv.LastRound = 0
- pv.LastStep = 0
- pv.LastSignature = sig
- pv.LastSignBytes = nil
- pv.Save()
-}
-
-// SignVote signs a canonical representation of the vote, along with the
-// chainID. Implements PrivValidator.
-func (pv *PrivValidatorFS) SignVote(chainID string, vote *Vote) error {
- pv.mtx.Lock()
- defer pv.mtx.Unlock()
- if err := pv.signVote(chainID, vote); err != nil {
- return errors.New(cmn.Fmt("Error signing vote: %v", err))
- }
- return nil
-}
-
-// SignProposal signs a canonical representation of the proposal, along with
-// the chainID. Implements PrivValidator.
-func (pv *PrivValidatorFS) SignProposal(chainID string, proposal *Proposal) error {
- pv.mtx.Lock()
- defer pv.mtx.Unlock()
- if err := pv.signProposal(chainID, proposal); err != nil {
- return fmt.Errorf("Error signing proposal: %v", err)
- }
- return nil
-}
-
-// returns error if HRS regression or no LastSignBytes. returns true if HRS is unchanged
-func (pv *PrivValidatorFS) checkHRS(height int64, round int, step int8) (bool, error) {
- if pv.LastHeight > height {
- return false, errors.New("Height regression")
- }
-
- if pv.LastHeight == height {
- if pv.LastRound > round {
- return false, errors.New("Round regression")
- }
-
- if pv.LastRound == round {
- if pv.LastStep > step {
- return false, errors.New("Step regression")
- } else if pv.LastStep == step {
- if pv.LastSignBytes != nil {
- if pv.LastSignature.Empty() {
- panic("privVal: LastSignature is nil but LastSignBytes is not!")
- }
- return true, nil
- }
- return false, errors.New("No LastSignature found")
- }
- }
- }
- return false, nil
-}
-
-// signVote checks if the vote is good to sign and sets the vote signature.
-// It may need to set the timestamp as well if the vote is otherwise the same as
-// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL).
-func (pv *PrivValidatorFS) signVote(chainID string, vote *Vote) error {
- height, round, step := vote.Height, vote.Round, voteToStep(vote)
- signBytes := vote.SignBytes(chainID)
-
- sameHRS, err := pv.checkHRS(height, round, step)
- if err != nil {
- return err
- }
-
- // We might crash before writing to the wal,
- // causing us to try to re-sign for the same HRS.
- // If signbytes are the same, use the last signature.
- // If they only differ by timestamp, use last timestamp and signature
- // Otherwise, return error
- if sameHRS {
- if bytes.Equal(signBytes, pv.LastSignBytes) {
- vote.Signature = pv.LastSignature
- } else if timestamp, ok := checkVotesOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
- vote.Timestamp = timestamp
- vote.Signature = pv.LastSignature
- } else {
- err = fmt.Errorf("Conflicting data")
- }
- return err
- }
-
- // It passed the checks. Sign the vote
- sig, err := pv.Sign(signBytes)
- if err != nil {
- return err
- }
- pv.saveSigned(height, round, step, signBytes, sig)
- vote.Signature = sig
- return nil
-}
-
-// signProposal checks if the proposal is good to sign and sets the proposal signature.
-// It may need to set the timestamp as well if the proposal is otherwise the same as
-// a previously signed proposal ie. we crashed after signing but before the proposal hit the WAL).
-func (pv *PrivValidatorFS) signProposal(chainID string, proposal *Proposal) error {
- height, round, step := proposal.Height, proposal.Round, stepPropose
- signBytes := proposal.SignBytes(chainID)
-
- sameHRS, err := pv.checkHRS(height, round, step)
- if err != nil {
- return err
- }
-
- // We might crash before writing to the wal,
- // causing us to try to re-sign for the same HRS.
- // If signbytes are the same, use the last signature.
- // If they only differ by timestamp, use last timestamp and signature
- // Otherwise, return error
- if sameHRS {
- if bytes.Equal(signBytes, pv.LastSignBytes) {
- proposal.Signature = pv.LastSignature
- } else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
- proposal.Timestamp = timestamp
- proposal.Signature = pv.LastSignature
- } else {
- err = fmt.Errorf("Conflicting data")
- }
- return err
- }
-
- // It passed the checks. Sign the proposal
- sig, err := pv.Sign(signBytes)
- if err != nil {
- return err
- }
- pv.saveSigned(height, round, step, signBytes, sig)
- proposal.Signature = sig
- return nil
-}
-
-// Persist height/round/step and signature
-func (pv *PrivValidatorFS) saveSigned(height int64, round int, step int8,
- signBytes []byte, sig crypto.Signature) {
-
- pv.LastHeight = height
- pv.LastRound = round
- pv.LastStep = step
- pv.LastSignature = sig
- pv.LastSignBytes = signBytes
- pv.save()
-}
-
-// SignHeartbeat signs a canonical representation of the heartbeat, along with the chainID.
-// Implements PrivValidator.
-func (pv *PrivValidatorFS) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
- pv.mtx.Lock()
- defer pv.mtx.Unlock()
- var err error
- heartbeat.Signature, err = pv.Sign(heartbeat.SignBytes(chainID))
- return err
-}
-
-// String returns a string representation of the PrivValidatorFS.
-func (pv *PrivValidatorFS) String() string {
- return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", pv.GetAddress(), pv.LastHeight, pv.LastRound, pv.LastStep)
-}
-
-//-------------------------------------
-
-type PrivValidatorsByAddress []*PrivValidatorFS
+type PrivValidatorsByAddress []PrivValidator
func (pvs PrivValidatorsByAddress) Len() int {
return len(pvs)
@@ -417,56 +37,59 @@ func (pvs PrivValidatorsByAddress) Swap(i, j int) {
pvs[j] = it
}
-//-------------------------------------
+//----------------------------------------
+// MockPV
-// returns the timestamp from the lastSignBytes.
-// returns true if the only difference in the votes is their timestamp.
-func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
- var lastVote, newVote CanonicalJSONOnceVote
- if err := json.Unmarshal(lastSignBytes, &lastVote); err != nil {
- panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
- }
- if err := json.Unmarshal(newSignBytes, &newVote); err != nil {
- panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
- }
-
- lastTime, err := time.Parse(TimeFormat, lastVote.Vote.Timestamp)
- if err != nil {
- panic(err)
- }
-
- // set the times to the same value and check equality
- now := CanonicalTime(time.Now())
- lastVote.Vote.Timestamp = now
- newVote.Vote.Timestamp = now
- lastVoteBytes, _ := json.Marshal(lastVote)
- newVoteBytes, _ := json.Marshal(newVote)
-
- return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
+// MockPV implements PrivValidator without any safety or persistence.
+// Only use it for testing.
+type MockPV struct {
+ privKey crypto.PrivKey
}
-// returns the timestamp from the lastSignBytes.
-// returns true if the only difference in the proposals is their timestamp
-func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
- var lastProposal, newProposal CanonicalJSONOnceProposal
- if err := json.Unmarshal(lastSignBytes, &lastProposal); err != nil {
- panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
- }
- if err := json.Unmarshal(newSignBytes, &newProposal); err != nil {
- panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
- }
-
- lastTime, err := time.Parse(TimeFormat, lastProposal.Proposal.Timestamp)
- if err != nil {
- panic(err)
- }
-
- // set the times to the same value and check equality
- now := CanonicalTime(time.Now())
- lastProposal.Proposal.Timestamp = now
- newProposal.Proposal.Timestamp = now
- lastProposalBytes, _ := json.Marshal(lastProposal)
- newProposalBytes, _ := json.Marshal(newProposal)
-
- return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
+func NewMockPV() *MockPV {
+ return &MockPV{crypto.GenPrivKeyEd25519()}
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) GetAddress() Address {
+ return pv.privKey.PubKey().Address()
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) GetPubKey() crypto.PubKey {
+ return pv.privKey.PubKey()
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) SignVote(chainID string, vote *Vote) error {
+ signBytes := vote.SignBytes(chainID)
+ sig := pv.privKey.Sign(signBytes)
+ vote.Signature = sig
+ return nil
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) SignProposal(chainID string, proposal *Proposal) error {
+ signBytes := proposal.SignBytes(chainID)
+ sig := pv.privKey.Sign(signBytes)
+ proposal.Signature = sig
+ return nil
+}
+
+// SignHeartbeat implements PrivValidator. It signs the heartbeat without any checking.
+func (pv *MockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
+ sig := pv.privKey.Sign(heartbeat.SignBytes(chainID))
+ heartbeat.Signature = sig
+ return nil
+}
+
+// String returns a string representation of the MockPV.
+func (pv *MockPV) String() string {
+ return fmt.Sprintf("MockPV{%v}", pv.GetAddress())
+}
+
+// DisableChecks is a no-op for MockPV. XXX: implement if safety checks are added.
+func (pv *MockPV) DisableChecks() {
+ // Currently this does nothing,
+ // as MockPV has no safety checks at all.
}
diff --git a/types/priv_validator/json.go b/types/priv_validator/json.go
deleted file mode 100644
index 5c0849ebd..000000000
--- a/types/priv_validator/json.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package types
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
-
- crypto "github.com/tendermint/go-crypto"
- "github.com/tendermint/tendermint/types"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-// PrivValidator aliases types.PrivValidator
-type PrivValidator = types.PrivValidator2
-
-//-----------------------------------------------------
-
-// PrivKey implements Signer
-type PrivKey crypto.PrivKey
-
-// Sign - Implements Signer
-func (pk PrivKey) Sign(msg []byte) (crypto.Signature, error) {
- return crypto.PrivKey(pk).Sign(msg), nil
-}
-
-// MarshalJSON satisfies json.Marshaler.
-func (pk PrivKey) MarshalJSON() ([]byte, error) {
- return crypto.PrivKey(pk).MarshalJSON()
-}
-
-// UnmarshalJSON satisfies json.Unmarshaler.
-func (pk *PrivKey) UnmarshalJSON(b []byte) error {
- cpk := new(crypto.PrivKey)
- if err := cpk.UnmarshalJSON(b); err != nil {
- return err
- }
- *pk = (PrivKey)(*cpk)
- return nil
-}
-
-//-----------------------------------------------------
-
-var _ types.PrivValidator2 = (*PrivValidatorJSON)(nil)
-
-// PrivValidatorJSON wraps PrivValidatorUnencrypted
-// and persists it to disk after every SignVote and SignProposal.
-type PrivValidatorJSON struct {
- *PrivValidatorUnencrypted
-
- filePath string
-}
-
-// SignVote implements PrivValidator. It persists to disk.
-func (pvj *PrivValidatorJSON) SignVote(chainID string, vote *types.Vote) error {
- err := pvj.PrivValidatorUnencrypted.SignVote(chainID, vote)
- if err != nil {
- return err
- }
- pvj.Save()
- return nil
-}
-
-// SignProposal implements PrivValidator. It persists to disk.
-func (pvj *PrivValidatorJSON) SignProposal(chainID string, proposal *types.Proposal) error {
- err := pvj.PrivValidatorUnencrypted.SignProposal(chainID, proposal)
- if err != nil {
- return err
- }
- pvj.Save()
- return nil
-}
-
-//-------------------------------------------------------
-
-// String returns a string representation of the PrivValidatorJSON.
-func (pvj *PrivValidatorJSON) String() string {
- addr, err := pvj.Address()
- if err != nil {
- panic(err)
- }
-
- return fmt.Sprintf("PrivValidator{%v %v}", addr, pvj.PrivValidatorUnencrypted.String())
-}
-
-// Save persists the PrivValidatorJSON to disk.
-func (pvj *PrivValidatorJSON) Save() {
- pvj.save()
-}
-
-func (pvj *PrivValidatorJSON) save() {
- if pvj.filePath == "" {
- panic("Cannot save PrivValidator: filePath not set")
- }
- jsonBytes, err := json.Marshal(pvj)
- if err != nil {
- // ; BOOM!!!
- panic(err)
- }
- err = cmn.WriteFileAtomic(pvj.filePath, jsonBytes, 0600)
- if err != nil {
- // ; BOOM!!!
- panic(err)
- }
-}
-
-// Reset resets the PrivValidatorUnencrypted. Panics if the Signer is the wrong type.
-// NOTE: Unsafe!
-func (pvj *PrivValidatorJSON) Reset() {
- pvj.PrivValidatorUnencrypted.LastSignedInfo.Reset()
- pvj.Save()
-}
-
-//----------------------------------------------------------------
-
-// GenPrivValidatorJSON generates a new validator with randomly generated private key
-// and the given filePath. It does not persist to file.
-func GenPrivValidatorJSON(filePath string) *PrivValidatorJSON {
- privKey := crypto.GenPrivKeyEd25519().Wrap()
- return &PrivValidatorJSON{
- PrivValidatorUnencrypted: NewPrivValidatorUnencrypted(privKey),
- filePath: filePath,
- }
-}
-
-// LoadPrivValidatorJSON loads a PrivValidatorJSON from the filePath.
-func LoadPrivValidatorJSON(filePath string) *PrivValidatorJSON {
- pvJSONBytes, err := ioutil.ReadFile(filePath)
- if err != nil {
- cmn.Exit(err.Error())
- }
- pvj := PrivValidatorJSON{}
- err = json.Unmarshal(pvJSONBytes, &pvj)
- if err != nil {
- cmn.Exit(cmn.Fmt("Error reading PrivValidatorJSON from %v: %v\n", filePath, err))
- }
-
- // enable persistence
- pvj.filePath = filePath
- return &pvj
-}
-
-// LoadOrGenPrivValidatorJSON loads a PrivValidatorJSON from the given filePath
-// or else generates a new one and saves it to the filePath.
-func LoadOrGenPrivValidatorJSON(filePath string) *PrivValidatorJSON {
- var pvj *PrivValidatorJSON
- if cmn.FileExists(filePath) {
- pvj = LoadPrivValidatorJSON(filePath)
- } else {
- pvj = GenPrivValidatorJSON(filePath)
- pvj.Save()
- }
- return pvj
-}
-
-//--------------------------------------------------------------
-
-// NewTestPrivValidator returns a PrivValidatorJSON with a tempfile
-// for the file path.
-func NewTestPrivValidator(signer types.TestSigner) *PrivValidatorJSON {
- _, tempFilePath := cmn.Tempfile("priv_validator_")
- pv := &PrivValidatorJSON{
- PrivValidatorUnencrypted: NewPrivValidatorUnencrypted(signer.(*types.DefaultTestSigner).PrivKey),
- filePath: tempFilePath,
- }
- return pv
-}
-
-//------------------------------------------------------
-
-// PrivValidatorsByAddress is a list of PrivValidatorJSON ordered by their
-// addresses.
-type PrivValidatorsByAddress []*PrivValidatorJSON
-
-func (pvs PrivValidatorsByAddress) Len() int {
- return len(pvs)
-}
-
-func (pvs PrivValidatorsByAddress) Less(i, j int) bool {
- iaddr, err := pvs[j].Address()
- if err != nil {
- panic(err)
- }
-
- jaddr, err := pvs[i].Address()
- if err != nil {
- panic(err)
- }
-
- return bytes.Compare(iaddr, jaddr) == -1
-}
-
-func (pvs PrivValidatorsByAddress) Swap(i, j int) {
- it := pvs[i]
- pvs[i] = pvs[j]
- pvs[j] = it
-}
diff --git a/types/priv_validator/priv_validator.go b/types/priv_validator/priv_validator.go
new file mode 100644
index 000000000..baff28f64
--- /dev/null
+++ b/types/priv_validator/priv_validator.go
@@ -0,0 +1,345 @@
+package privval
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "sync"
+ "time"
+
+ "github.com/tendermint/go-crypto"
+ "github.com/tendermint/tendermint/types"
+ cmn "github.com/tendermint/tmlibs/common"
+)
+
+// TODO: type ?
+const (
+ stepNone int8 = 0 // Used to distinguish the initial state
+ stepPropose int8 = 1
+ stepPrevote int8 = 2
+ stepPrecommit int8 = 3
+)
+
+func voteToStep(vote *types.Vote) int8 {
+ switch vote.Type {
+ case types.VoteTypePrevote:
+ return stepPrevote
+ case types.VoteTypePrecommit:
+ return stepPrecommit
+ default:
+ cmn.PanicSanity("Unknown vote type")
+ return 0
+ }
+}
+
+// FilePV implements PrivValidator using data persisted to disk
+// to prevent double signing.
+// NOTE: the directory containing the pv.filePath must already exist.
+type FilePV struct {
+ Address types.Address `json:"address"`
+ PubKey crypto.PubKey `json:"pub_key"`
+ LastHeight int64 `json:"last_height"`
+ LastRound int `json:"last_round"`
+ LastStep int8 `json:"last_step"`
+ LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we don't lose signatures XXX Why would we lose signatures?
+ LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we don't lose signatures XXX Why would we lose signatures?
+ PrivKey crypto.PrivKey `json:"priv_key"`
+
+ // For persistence.
+ // Overloaded for testing.
+ filePath string
+ mtx sync.Mutex
+}
+
+// GetAddress returns the address of the validator.
+// Implements PrivValidator.
+func (pv *FilePV) GetAddress() types.Address {
+ return pv.Address
+}
+
+// GetPubKey returns the public key of the validator.
+// Implements PrivValidator.
+func (pv *FilePV) GetPubKey() crypto.PubKey {
+ return pv.PubKey
+}
+
+// GenFilePV generates a new validator with randomly generated private key
+// and sets the filePath, but does not call Save().
+func GenFilePV(filePath string) *FilePV {
+ privKey := crypto.GenPrivKeyEd25519()
+ return &FilePV{
+ Address: privKey.PubKey().Address(),
+ PubKey: privKey.PubKey(),
+ PrivKey: privKey,
+ LastStep: stepNone,
+ filePath: filePath,
+ }
+}
+
+// LoadFilePV loads a FilePV from the filePath. The FilePV handles double
+// signing prevention by persisting data to the filePath. If the filePath does
+// not exist, the FilePV must be created manually and saved.
+func LoadFilePV(filePath string) *FilePV {
+ pvJSONBytes, err := ioutil.ReadFile(filePath)
+ if err != nil {
+ cmn.Exit(err.Error())
+ }
+ pv := &FilePV{}
+ err = cdc.UnmarshalJSON(pvJSONBytes, &pv)
+ if err != nil {
+ cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err))
+ }
+
+ pv.filePath = filePath
+ return pv
+}
+
+// LoadOrGenFilePV loads a FilePV from the given filePath
+// or else generates a new one and saves it to the filePath.
+func LoadOrGenFilePV(filePath string) *FilePV {
+ var pv *FilePV
+ if cmn.FileExists(filePath) {
+ pv = LoadFilePV(filePath)
+ } else {
+ pv = GenFilePV(filePath)
+ pv.Save()
+ }
+ return pv
+}
+
+// Save persists the FilePV to disk.
+func (pv *FilePV) Save() {
+ pv.mtx.Lock()
+ defer pv.mtx.Unlock()
+ pv.save()
+}
+
+func (pv *FilePV) save() {
+ outFile := pv.filePath
+ if outFile == "" {
+ panic("Cannot save PrivValidator: filePath not set")
+ }
+ jsonBytes, err := cdc.MarshalJSON(pv)
+ if err != nil {
+ panic(err)
+ }
+ err = cmn.WriteFileAtomic(outFile, jsonBytes, 0600)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// Reset resets all fields in the FilePV.
+// NOTE: Unsafe!
+func (pv *FilePV) Reset() {
+ var sig crypto.Signature
+ pv.LastHeight = 0
+ pv.LastRound = 0
+ pv.LastStep = 0
+ pv.LastSignature = sig
+ pv.LastSignBytes = nil
+ pv.Save()
+}
+
+// SignVote signs a canonical representation of the vote, along with the
+// chainID. Implements PrivValidator.
+func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error {
+ pv.mtx.Lock()
+ defer pv.mtx.Unlock()
+ if err := pv.signVote(chainID, vote); err != nil {
+ return errors.New(cmn.Fmt("Error signing vote: %v", err))
+ }
+ return nil
+}
+
+// SignProposal signs a canonical representation of the proposal, along with
+// the chainID. Implements PrivValidator.
+func (pv *FilePV) SignProposal(chainID string, proposal *types.Proposal) error {
+ pv.mtx.Lock()
+ defer pv.mtx.Unlock()
+ if err := pv.signProposal(chainID, proposal); err != nil {
+ return fmt.Errorf("Error signing proposal: %v", err)
+ }
+ return nil
+}
+
+// checkHRS returns an error on a height/round/step (HRS) regression, or if the HRS matches but there are no LastSignBytes. It returns true if the HRS is unchanged.
+func (pv *FilePV) checkHRS(height int64, round int, step int8) (bool, error) {
+ if pv.LastHeight > height {
+ return false, errors.New("Height regression")
+ }
+
+ if pv.LastHeight == height {
+ if pv.LastRound > round {
+ return false, errors.New("Round regression")
+ }
+
+ if pv.LastRound == round {
+ if pv.LastStep > step {
+ return false, errors.New("Step regression")
+ } else if pv.LastStep == step {
+ if pv.LastSignBytes != nil {
+ if pv.LastSignature == nil {
+ panic("pv: LastSignature is nil but LastSignBytes is not!")
+ }
+ return true, nil
+ }
+ return false, errors.New("No LastSignature found")
+ }
+ }
+ }
+ return false, nil
+}
+
+// signVote checks if the vote is good to sign and sets the vote signature.
+// It may need to set the timestamp as well if the vote is otherwise the same as
+// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL).
+func (pv *FilePV) signVote(chainID string, vote *types.Vote) error {
+ height, round, step := vote.Height, vote.Round, voteToStep(vote)
+ signBytes := vote.SignBytes(chainID)
+
+ sameHRS, err := pv.checkHRS(height, round, step)
+ if err != nil {
+ return err
+ }
+
+ // We might crash before writing to the wal,
+ // causing us to try to re-sign for the same HRS.
+ // If signbytes are the same, use the last signature.
+ // If they only differ by timestamp, use last timestamp and signature
+ // Otherwise, return error
+ if sameHRS {
+ if bytes.Equal(signBytes, pv.LastSignBytes) {
+ vote.Signature = pv.LastSignature
+ } else if timestamp, ok := checkVotesOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
+ vote.Timestamp = timestamp
+ vote.Signature = pv.LastSignature
+ } else {
+ err = fmt.Errorf("Conflicting data")
+ }
+ return err
+ }
+
+ // It passed the checks. Sign the vote
+ sig := pv.PrivKey.Sign(signBytes)
+ pv.saveSigned(height, round, step, signBytes, sig)
+ vote.Signature = sig
+ return nil
+}
+
+// signProposal checks if the proposal is good to sign and sets the proposal signature.
+// It may need to set the timestamp as well if the proposal is otherwise the same as
+// a previously signed proposal (ie. we crashed after signing but before the proposal hit the WAL).
+func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error {
+ height, round, step := proposal.Height, proposal.Round, stepPropose
+ signBytes := proposal.SignBytes(chainID)
+
+ sameHRS, err := pv.checkHRS(height, round, step)
+ if err != nil {
+ return err
+ }
+
+ // We might crash before writing to the wal,
+ // causing us to try to re-sign for the same HRS.
+ // If signbytes are the same, use the last signature.
+ // If they only differ by timestamp, use last timestamp and signature
+ // Otherwise, return error
+ if sameHRS {
+ if bytes.Equal(signBytes, pv.LastSignBytes) {
+ proposal.Signature = pv.LastSignature
+ } else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
+ proposal.Timestamp = timestamp
+ proposal.Signature = pv.LastSignature
+ } else {
+ err = fmt.Errorf("Conflicting data")
+ }
+ return err
+ }
+
+ // It passed the checks. Sign the proposal
+ sig := pv.PrivKey.Sign(signBytes)
+ pv.saveSigned(height, round, step, signBytes, sig)
+ proposal.Signature = sig
+ return nil
+}
+
+// Persist height/round/step and signature
+func (pv *FilePV) saveSigned(height int64, round int, step int8,
+ signBytes []byte, sig crypto.Signature) {
+
+ pv.LastHeight = height
+ pv.LastRound = round
+ pv.LastStep = step
+ pv.LastSignature = sig
+ pv.LastSignBytes = signBytes
+ pv.save()
+}
+
+// SignHeartbeat signs a canonical representation of the heartbeat, along with the chainID.
+// Implements PrivValidator.
+func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {
+ pv.mtx.Lock()
+ defer pv.mtx.Unlock()
+ heartbeat.Signature = pv.PrivKey.Sign(heartbeat.SignBytes(chainID))
+ return nil
+}
+
+// String returns a string representation of the FilePV.
+func (pv *FilePV) String() string {
+ return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", pv.GetAddress(), pv.LastHeight, pv.LastRound, pv.LastStep)
+}
+
+//-------------------------------------
+
+// checkVotesOnlyDifferByTimestamp returns the timestamp from the lastSignBytes,
+// and true if the only difference between the two votes is their timestamp.
+func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
+ var lastVote, newVote types.CanonicalJSONVote
+ if err := cdc.UnmarshalJSON(lastSignBytes, &lastVote); err != nil {
+ panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
+ }
+ if err := cdc.UnmarshalJSON(newSignBytes, &newVote); err != nil {
+ panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
+ }
+
+ lastTime, err := time.Parse(types.TimeFormat, lastVote.Timestamp)
+ if err != nil {
+ panic(err)
+ }
+
+ // set the times to the same value and check equality
+ now := types.CanonicalTime(time.Now())
+ lastVote.Timestamp = now
+ newVote.Timestamp = now
+ lastVoteBytes, _ := cdc.MarshalJSON(lastVote)
+ newVoteBytes, _ := cdc.MarshalJSON(newVote)
+
+ return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
+}
+
+// checkProposalsOnlyDifferByTimestamp returns the timestamp from the lastSignBytes,
+// and true if the only difference between the two proposals is their timestamp.
+func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
+ var lastProposal, newProposal types.CanonicalJSONProposal
+ if err := cdc.UnmarshalJSON(lastSignBytes, &lastProposal); err != nil {
+ panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
+ }
+ if err := cdc.UnmarshalJSON(newSignBytes, &newProposal); err != nil {
+ panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
+ }
+
+ lastTime, err := time.Parse(types.TimeFormat, lastProposal.Timestamp)
+ if err != nil {
+ panic(err)
+ }
+
+ // set the times to the same value and check equality
+ now := types.CanonicalTime(time.Now())
+ lastProposal.Timestamp = now
+ newProposal.Timestamp = now
+ lastProposalBytes, _ := cdc.MarshalJSON(lastProposal)
+ newProposalBytes, _ := cdc.MarshalJSON(newProposal)
+
+ return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
+}
diff --git a/types/priv_validator/priv_validator_test.go b/types/priv_validator/priv_validator_test.go
index 120a0c86e..c7212d594 100644
--- a/types/priv_validator/priv_validator_test.go
+++ b/types/priv_validator/priv_validator_test.go
@@ -1,8 +1,7 @@
-package types
+package privval
import (
- "encoding/hex"
- "encoding/json"
+ "encoding/base64"
"fmt"
"os"
"testing"
@@ -10,113 +9,89 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- crypto "github.com/tendermint/go-crypto"
- cmn "github.com/tendermint/tmlibs/common"
-
+ "github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/types"
+ cmn "github.com/tendermint/tmlibs/common"
)
func TestGenLoadValidator(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
+ assert := assert.New(t)
_, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorJSON(tempFilePath)
+ privVal := GenFilePV(tempFilePath)
height := int64(100)
- privVal.LastSignedInfo.Height = height
+ privVal.LastHeight = height
privVal.Save()
- addr, err := privVal.Address()
- require.Nil(err)
+ addr := privVal.GetAddress()
- privVal = LoadPrivValidatorJSON(tempFilePath)
- pAddr, err := privVal.Address()
- require.Nil(err)
-
- assert.Equal(addr, pAddr, "expected privval addr to be the same")
- assert.Equal(height, privVal.LastSignedInfo.Height, "expected privval.LastHeight to have been saved")
+ privVal = LoadFilePV(tempFilePath)
+ assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same")
+ assert.Equal(height, privVal.LastHeight, "expected privval.LastHeight to have been saved")
}
func TestLoadOrGenValidator(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
+ assert := assert.New(t)
_, tempFilePath := cmn.Tempfile("priv_validator_")
if err := os.Remove(tempFilePath); err != nil {
t.Error(err)
}
- privVal := LoadOrGenPrivValidatorJSON(tempFilePath)
- addr, err := privVal.Address()
- require.Nil(err)
-
- privVal = LoadOrGenPrivValidatorJSON(tempFilePath)
- pAddr, err := privVal.Address()
- require.Nil(err)
-
- assert.Equal(addr, pAddr, "expected privval addr to be the same")
+ privVal := LoadOrGenFilePV(tempFilePath)
+ addr := privVal.GetAddress()
+ privVal = LoadOrGenFilePV(tempFilePath)
+ assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same")
}
func TestUnmarshalValidator(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// create some fixed values
- addrStr := "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456"
- pubStr := "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
- privStr := "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
- addrBytes, _ := hex.DecodeString(addrStr)
- pubBytes, _ := hex.DecodeString(pubStr)
- privBytes, _ := hex.DecodeString(privStr)
-
- // prepend type byte
- pubKey, err := crypto.PubKeyFromBytes(append([]byte{1}, pubBytes...))
- require.Nil(err, "%+v", err)
- privKey, err := crypto.PrivKeyFromBytes(append([]byte{1}, privBytes...))
- require.Nil(err, "%+v", err)
+ privKey := crypto.GenPrivKeyEd25519()
+ pubKey := privKey.PubKey()
+ addr := pubKey.Address()
+ pubArray := [32]byte(pubKey.(crypto.PubKeyEd25519))
+ pubBytes := pubArray[:]
+ privArray := [64]byte(privKey)
+ privBytes := privArray[:]
+ pubB64 := base64.StdEncoding.EncodeToString(pubBytes)
+ privB64 := base64.StdEncoding.EncodeToString(privBytes)
serialized := fmt.Sprintf(`{
- "id": {
- "address": "%s",
- "pub_key": {
- "type": "ed25519",
- "data": "%s"
- }
- },
- "priv_key": {
- "type": "ed25519",
- "data": "%s"
- },
- "last_signed_info": {
- "height": 0,
- "round": 0,
- "step": 0,
- "signature": null
- }
-}`, addrStr, pubStr, privStr)
+ "address": "%s",
+ "pub_key": {
+ "type": "AC26791624DE60",
+ "value": "%s"
+ },
+ "last_height": 0,
+ "last_round": 0,
+ "last_step": 0,
+ "priv_key": {
+ "type": "954568A3288910",
+ "value": "%s"
+ }
+}`, addr, pubB64, privB64)
- val := PrivValidatorJSON{}
- err = json.Unmarshal([]byte(serialized), &val)
+ val := FilePV{}
+ err := cdc.UnmarshalJSON([]byte(serialized), &val)
require.Nil(err, "%+v", err)
// make sure the values match
- vAddr, err := val.Address()
- require.Nil(err)
-
- pKey, err := val.PubKey()
- require.Nil(err)
-
- assert.EqualValues(addrBytes, vAddr)
- assert.EqualValues(pubKey, pKey)
+ assert.EqualValues(addr, val.GetAddress())
+ assert.EqualValues(pubKey, val.GetPubKey())
assert.EqualValues(privKey, val.PrivKey)
// export it and make sure it is the same
- out, err := json.Marshal(val)
+ out, err := cdc.MarshalJSON(val)
require.Nil(err, "%+v", err)
assert.JSONEq(serialized, string(out))
}
func TestSignVote(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
+ assert := assert.New(t)
_, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorJSON(tempFilePath)
+ privVal := GenFilePV(tempFilePath)
block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{}}
@@ -124,11 +99,8 @@ func TestSignVote(t *testing.T) {
voteType := types.VoteTypePrevote
// sign a vote for first time
- addr, err := privVal.Address()
- require.Nil(err)
-
- vote := newVote(addr, 0, height, round, voteType, block1)
- err = privVal.SignVote("mychainid", vote)
+ vote := newVote(privVal.Address, 0, height, round, voteType, block1)
+ err := privVal.SignVote("mychainid", vote)
assert.NoError(err, "expected no error signing vote")
// try to sign the same vote again; should be fine
@@ -137,10 +109,10 @@ func TestSignVote(t *testing.T) {
// now try some bad votes
cases := []*types.Vote{
- newVote(addr, 0, height, round-1, voteType, block1), // round regression
- newVote(addr, 0, height-1, round, voteType, block1), // height regression
- newVote(addr, 0, height-2, round+4, voteType, block1), // height regression and different round
- newVote(addr, 0, height, round, voteType, block2), // different block
+ newVote(privVal.Address, 0, height, round-1, voteType, block1), // round regression
+ newVote(privVal.Address, 0, height-1, round, voteType, block1), // height regression
+ newVote(privVal.Address, 0, height-2, round+4, voteType, block1), // height regression and different round
+ newVote(privVal.Address, 0, height, round, voteType, block2), // different block
}
for _, c := range cases {
@@ -160,7 +132,7 @@ func TestSignProposal(t *testing.T) {
assert := assert.New(t)
_, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorJSON(tempFilePath)
+ privVal := GenFilePV(tempFilePath)
block1 := types.PartSetHeader{5, []byte{1, 2, 3}}
block2 := types.PartSetHeader{10, []byte{3, 2, 1}}
@@ -197,10 +169,8 @@ func TestSignProposal(t *testing.T) {
}
func TestDifferByTimestamp(t *testing.T) {
- require := require.New(t)
-
_, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorJSON(tempFilePath)
+ privVal := GenFilePV(tempFilePath)
block1 := types.PartSetHeader{5, []byte{1, 2, 3}}
height, round := int64(10), 1
@@ -217,7 +187,8 @@ func TestDifferByTimestamp(t *testing.T) {
// manipulate the timestamp. should get changed back
proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond)
- proposal.Signature = crypto.Signature{}
+ var emptySig crypto.Signature
+ proposal.Signature = emptySig
err = privVal.SignProposal("mychainid", proposal)
assert.NoError(t, err, "expected no error on signing same proposal")
@@ -228,13 +199,10 @@ func TestDifferByTimestamp(t *testing.T) {
// test vote
{
- addr, err := privVal.Address()
- require.Nil(err)
-
voteType := types.VoteTypePrevote
blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
- vote := newVote(addr, 0, height, round, voteType, blockID)
- err = privVal.SignVote("mychainid", vote)
+ vote := newVote(privVal.Address, 0, height, round, voteType, blockID)
+ err := privVal.SignVote("mychainid", vote)
assert.NoError(t, err, "expected no error signing vote")
signBytes := vote.SignBytes(chainID)
@@ -243,7 +211,8 @@ func TestDifferByTimestamp(t *testing.T) {
// manipulate the timestamp. should get changed back
vote.Timestamp = vote.Timestamp.Add(time.Millisecond)
- vote.Signature = crypto.Signature{}
+ var emptySig crypto.Signature
+ vote.Signature = emptySig
err = privVal.SignVote("mychainid", vote)
assert.NoError(t, err, "expected no error on signing same vote")
@@ -253,7 +222,7 @@ func TestDifferByTimestamp(t *testing.T) {
}
}
-func newVote(addr cmn.HexBytes, idx int, height int64, round int, typ byte, blockID types.BlockID) *types.Vote {
+func newVote(addr types.Address, idx int, height int64, round int, typ byte, blockID types.BlockID) *types.Vote {
return &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: idx,
diff --git a/types/priv_validator/sign_info.go b/types/priv_validator/sign_info.go
deleted file mode 100644
index 8b135df63..000000000
--- a/types/priv_validator/sign_info.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package types
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "time"
-
- crypto "github.com/tendermint/go-crypto"
- "github.com/tendermint/tendermint/types"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-// TODO: type ?
-const (
- stepNone int8 = 0 // Used to distinguish the initial state
- stepPropose int8 = 1
- stepPrevote int8 = 2
- stepPrecommit int8 = 3
-)
-
-func voteToStep(vote *types.Vote) int8 {
- switch vote.Type {
- case types.VoteTypePrevote:
- return stepPrevote
- case types.VoteTypePrecommit:
- return stepPrecommit
- default:
- panic("Unknown vote type")
- }
-}
-
-//-------------------------------------
-
-// LastSignedInfo contains information about the latest
-// data signed by a validator to help prevent double signing.
-type LastSignedInfo struct {
- Height int64 `json:"height"`
- Round int `json:"round"`
- Step int8 `json:"step"`
- Signature crypto.Signature `json:"signature,omitempty"` // so we dont lose signatures
- SignBytes cmn.HexBytes `json:"signbytes,omitempty"` // so we dont lose signatures
-}
-
-func NewLastSignedInfo() *LastSignedInfo {
- return &LastSignedInfo{
- Step: stepNone,
- }
-}
-
-func (lsi *LastSignedInfo) String() string {
- return fmt.Sprintf("LH:%v, LR:%v, LS:%v", lsi.Height, lsi.Round, lsi.Step)
-}
-
-// Verify returns an error if there is a height/round/step regression
-// or if the HRS matches but there are no LastSignBytes.
-// It returns true if HRS matches exactly and the LastSignature exists.
-// It panics if the HRS matches, the LastSignBytes are not empty, but the LastSignature is empty.
-func (lsi LastSignedInfo) Verify(height int64, round int, step int8) (bool, error) {
- if lsi.Height > height {
- return false, errors.New("Height regression")
- }
-
- if lsi.Height == height {
- if lsi.Round > round {
- return false, errors.New("Round regression")
- }
-
- if lsi.Round == round {
- if lsi.Step > step {
- return false, errors.New("Step regression")
- } else if lsi.Step == step {
- if lsi.SignBytes != nil {
- if lsi.Signature.Empty() {
- panic("info: LastSignature is nil but LastSignBytes is not!")
- }
- return true, nil
- }
- return false, errors.New("No LastSignature found")
- }
- }
- }
- return false, nil
-}
-
-// Set height/round/step and signature on the info
-func (lsi *LastSignedInfo) Set(height int64, round int, step int8,
- signBytes []byte, sig crypto.Signature) {
-
- lsi.Height = height
- lsi.Round = round
- lsi.Step = step
- lsi.Signature = sig
- lsi.SignBytes = signBytes
-}
-
-// Reset resets all the values.
-// XXX: Unsafe.
-func (lsi *LastSignedInfo) Reset() {
- lsi.Height = 0
- lsi.Round = 0
- lsi.Step = 0
- lsi.Signature = crypto.Signature{}
- lsi.SignBytes = nil
-}
-
-// SignVote checks the height/round/step (HRS) are greater than the latest state of the LastSignedInfo.
-// If so, it signs the vote, updates the LastSignedInfo, and sets the signature on the vote.
-// If the HRS are equal and the only thing changed is the timestamp, it sets the vote.Timestamp to the previous
-// value and the Signature to the LastSignedInfo.Signature.
-// Else it returns an error.
-func (lsi *LastSignedInfo) SignVote(signer types.Signer, chainID string, vote *types.Vote) error {
- height, round, step := vote.Height, vote.Round, voteToStep(vote)
- signBytes := vote.SignBytes(chainID)
-
- sameHRS, err := lsi.Verify(height, round, step)
- if err != nil {
- return err
- }
-
- // We might crash before writing to the wal,
- // causing us to try to re-sign for the same HRS.
- // If signbytes are the same, use the last signature.
- // If they only differ by timestamp, use last timestamp and signature
- // Otherwise, return error
- if sameHRS {
- if bytes.Equal(signBytes, lsi.SignBytes) {
- vote.Signature = lsi.Signature
- } else if timestamp, ok := checkVotesOnlyDifferByTimestamp(lsi.SignBytes, signBytes); ok {
- vote.Timestamp = timestamp
- vote.Signature = lsi.Signature
- } else {
- err = fmt.Errorf("Conflicting data")
- }
- return err
- }
- sig, err := signer.Sign(signBytes)
- if err != nil {
- return err
- }
- lsi.Set(height, round, step, signBytes, sig)
- vote.Signature = sig
- return nil
-}
-
-// SignProposal checks if the height/round/step (HRS) are greater than the latest state of the LastSignedInfo.
-// If so, it signs the proposal, updates the LastSignedInfo, and sets the signature on the proposal.
-// If the HRS are equal and the only thing changed is the timestamp, it sets the timestamp to the previous
-// value and the Signature to the LastSignedInfo.Signature.
-// Else it returns an error.
-func (lsi *LastSignedInfo) SignProposal(signer types.Signer, chainID string, proposal *types.Proposal) error {
- height, round, step := proposal.Height, proposal.Round, stepPropose
- signBytes := proposal.SignBytes(chainID)
-
- sameHRS, err := lsi.Verify(height, round, step)
- if err != nil {
- return err
- }
-
- // We might crash before writing to the wal,
- // causing us to try to re-sign for the same HRS.
- // If signbytes are the same, use the last signature.
- // If they only differ by timestamp, use last timestamp and signature
- // Otherwise, return error
- if sameHRS {
- if bytes.Equal(signBytes, lsi.SignBytes) {
- proposal.Signature = lsi.Signature
- } else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(lsi.SignBytes, signBytes); ok {
- proposal.Timestamp = timestamp
- proposal.Signature = lsi.Signature
- } else {
- err = fmt.Errorf("Conflicting data")
- }
- return err
- }
- sig, err := signer.Sign(signBytes)
- if err != nil {
- return err
- }
- lsi.Set(height, round, step, signBytes, sig)
- proposal.Signature = sig
- return nil
-}
-
-//-------------------------------------
-
-// returns the timestamp from the lastSignBytes.
-// returns true if the only difference in the votes is their timestamp.
-func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
- var lastVote, newVote types.CanonicalJSONOnceVote
- if err := json.Unmarshal(lastSignBytes, &lastVote); err != nil {
- panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
- }
- if err := json.Unmarshal(newSignBytes, &newVote); err != nil {
- panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
- }
-
- lastTime, err := time.Parse(types.TimeFormat, lastVote.Vote.Timestamp)
- if err != nil {
- panic(err)
- }
-
- // set the times to the same value and check equality
- now := types.CanonicalTime(time.Now())
- lastVote.Vote.Timestamp = now
- newVote.Vote.Timestamp = now
- lastVoteBytes, _ := json.Marshal(lastVote)
- newVoteBytes, _ := json.Marshal(newVote)
-
- return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
-}
-
-// returns the timestamp from the lastSignBytes.
-// returns true if the only difference in the proposals is their timestamp
-func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
- var lastProposal, newProposal types.CanonicalJSONOnceProposal
- if err := json.Unmarshal(lastSignBytes, &lastProposal); err != nil {
- panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
- }
- if err := json.Unmarshal(newSignBytes, &newProposal); err != nil {
- panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
- }
-
- lastTime, err := time.Parse(types.TimeFormat, lastProposal.Proposal.Timestamp)
- if err != nil {
- panic(err)
- }
-
- // set the times to the same value and check equality
- now := types.CanonicalTime(time.Now())
- lastProposal.Proposal.Timestamp = now
- newProposal.Proposal.Timestamp = now
- lastProposalBytes, _ := json.Marshal(lastProposal)
- newProposalBytes, _ := json.Marshal(newProposal)
-
- return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
-}
diff --git a/types/priv_validator/socket.go b/types/priv_validator/socket.go
index 26cab72b9..9f59a8152 100644
--- a/types/priv_validator/socket.go
+++ b/types/priv_validator/socket.go
@@ -1,14 +1,14 @@
-package types
+package privval
import (
+ "errors"
"fmt"
"io"
"net"
"time"
- "github.com/pkg/errors"
- crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/go-wire"
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -37,36 +37,36 @@ var (
connHeartbeat = time.Second * defaultConnHeartBeatSeconds
)
-// SocketClientOption sets an optional parameter on the SocketClient.
-type SocketClientOption func(*SocketClient)
+// SocketPVOption sets an optional parameter on the SocketPV.
+type SocketPVOption func(*SocketPV)
-// SocketClientAcceptDeadline sets the deadline for the SocketClient listener.
+// SocketPVAcceptDeadline sets the deadline for the SocketPV listener.
// A zero time value disables the deadline.
-func SocketClientAcceptDeadline(deadline time.Duration) SocketClientOption {
- return func(sc *SocketClient) { sc.acceptDeadline = deadline }
+func SocketPVAcceptDeadline(deadline time.Duration) SocketPVOption {
+ return func(sc *SocketPV) { sc.acceptDeadline = deadline }
}
-// SocketClientConnDeadline sets the read and write deadline for connections
+// SocketPVConnDeadline sets the read and write deadline for connections
// from external signing processes.
-func SocketClientConnDeadline(deadline time.Duration) SocketClientOption {
- return func(sc *SocketClient) { sc.connDeadline = deadline }
+func SocketPVConnDeadline(deadline time.Duration) SocketPVOption {
+ return func(sc *SocketPV) { sc.connDeadline = deadline }
}
-// SocketClientHeartbeat sets the period on which to check the liveness of the
+// SocketPVHeartbeat sets the period on which to check the liveness of the
// connected Signer connections.
-func SocketClientHeartbeat(period time.Duration) SocketClientOption {
- return func(sc *SocketClient) { sc.connHeartbeat = period }
+func SocketPVHeartbeat(period time.Duration) SocketPVOption {
+ return func(sc *SocketPV) { sc.connHeartbeat = period }
}
-// SocketClientConnWait sets the timeout duration before connection of external
+// SocketPVConnWait sets the timeout duration before connection of external
// signing processes are considered to be unsuccessful.
-func SocketClientConnWait(timeout time.Duration) SocketClientOption {
- return func(sc *SocketClient) { sc.connWaitTimeout = timeout }
+func SocketPVConnWait(timeout time.Duration) SocketPVOption {
+ return func(sc *SocketPV) { sc.connWaitTimeout = timeout }
}
-// SocketClient implements PrivValidator, it uses a socket to request signatures
+// SocketPV implements PrivValidator, it uses a socket to request signatures
// from an external process.
-type SocketClient struct {
+type SocketPV struct {
cmn.BaseService
addr string
@@ -80,16 +80,16 @@ type SocketClient struct {
listener net.Listener
}
-// Check that SocketClient implements PrivValidator2.
-var _ types.PrivValidator2 = (*SocketClient)(nil)
+// Check that SocketPV implements PrivValidator.
+var _ types.PrivValidator = (*SocketPV)(nil)
-// NewSocketClient returns an instance of SocketClient.
-func NewSocketClient(
+// NewSocketPV returns an instance of SocketPV.
+func NewSocketPV(
logger log.Logger,
socketAddr string,
privKey crypto.PrivKeyEd25519,
-) *SocketClient {
- sc := &SocketClient{
+) *SocketPV {
+ sc := &SocketPV{
addr: socketAddr,
acceptDeadline: acceptDeadline,
connDeadline: connDeadline,
@@ -98,15 +98,14 @@ func NewSocketClient(
privKey: privKey,
}
- sc.BaseService = *cmn.NewBaseService(logger, "SocketClient", sc)
+ sc.BaseService = *cmn.NewBaseService(logger, "SocketPV", sc)
return sc
}
// GetAddress implements PrivValidator.
-// TODO(xla): Remove when PrivValidator2 replaced PrivValidator.
-func (sc *SocketClient) GetAddress() types.Address {
- addr, err := sc.Address()
+func (sc *SocketPV) GetAddress() types.Address {
+ addr, err := sc.getAddress()
if err != nil {
panic(err)
}
@@ -115,8 +114,8 @@ func (sc *SocketClient) GetAddress() types.Address {
}
// Address is an alias for PubKey().Address().
-func (sc *SocketClient) Address() (cmn.HexBytes, error) {
- p, err := sc.PubKey()
+func (sc *SocketPV) getAddress() (cmn.HexBytes, error) {
+ p, err := sc.getPubKey()
if err != nil {
return nil, err
}
@@ -125,9 +124,8 @@ func (sc *SocketClient) Address() (cmn.HexBytes, error) {
}
// GetPubKey implements PrivValidator.
-// TODO(xla): Remove when PrivValidator2 replaced PrivValidator.
-func (sc *SocketClient) GetPubKey() crypto.PubKey {
- pubKey, err := sc.PubKey()
+func (sc *SocketPV) GetPubKey() crypto.PubKey {
+ pubKey, err := sc.getPubKey()
if err != nil {
panic(err)
}
@@ -135,23 +133,22 @@ func (sc *SocketClient) GetPubKey() crypto.PubKey {
return pubKey
}
-// PubKey implements PrivValidator2.
-func (sc *SocketClient) PubKey() (crypto.PubKey, error) {
+func (sc *SocketPV) getPubKey() (crypto.PubKey, error) {
err := writeMsg(sc.conn, &PubKeyMsg{})
if err != nil {
- return crypto.PubKey{}, err
+ return nil, err
}
res, err := readMsg(sc.conn)
if err != nil {
- return crypto.PubKey{}, err
+ return nil, err
}
return res.(*PubKeyMsg).PubKey, nil
}
-// SignVote implements PrivValidator2.
-func (sc *SocketClient) SignVote(chainID string, vote *types.Vote) error {
+// SignVote implements PrivValidator.
+func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error {
err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote})
if err != nil {
return err
@@ -167,8 +164,8 @@ func (sc *SocketClient) SignVote(chainID string, vote *types.Vote) error {
return nil
}
-// SignProposal implements PrivValidator2.
-func (sc *SocketClient) SignProposal(
+// SignProposal implements PrivValidator.
+func (sc *SocketPV) SignProposal(
chainID string,
proposal *types.Proposal,
) error {
@@ -187,8 +184,8 @@ func (sc *SocketClient) SignProposal(
return nil
}
-// SignHeartbeat implements PrivValidator2.
-func (sc *SocketClient) SignHeartbeat(
+// SignHeartbeat implements PrivValidator.
+func (sc *SocketPV) SignHeartbeat(
chainID string,
heartbeat *types.Heartbeat,
) error {
@@ -208,21 +205,22 @@ func (sc *SocketClient) SignHeartbeat(
}
// OnStart implements cmn.Service.
-func (sc *SocketClient) OnStart() error {
+func (sc *SocketPV) OnStart() error {
if err := sc.listen(); err != nil {
+ err = cmn.ErrorWrap(err, "failed to listen")
sc.Logger.Error(
"OnStart",
- "err", errors.Wrap(err, "failed to listen"),
+ "err", err,
)
-
return err
}
conn, err := sc.waitConnection()
if err != nil {
+ err = cmn.ErrorWrap(err, "failed to accept connection")
sc.Logger.Error(
"OnStart",
- "err", errors.Wrap(err, "failed to accept connection"),
+ "err", err,
)
return err
@@ -234,27 +232,29 @@ func (sc *SocketClient) OnStart() error {
}
// OnStop implements cmn.Service.
-func (sc *SocketClient) OnStop() {
+func (sc *SocketPV) OnStop() {
if sc.conn != nil {
if err := sc.conn.Close(); err != nil {
+ err = cmn.ErrorWrap(err, "failed to close connection")
sc.Logger.Error(
"OnStop",
- "err", errors.Wrap(err, "failed to close connection"),
+ "err", err,
)
}
}
if sc.listener != nil {
if err := sc.listener.Close(); err != nil {
+ err = cmn.ErrorWrap(err, "failed to close listener")
sc.Logger.Error(
"OnStop",
- "err", errors.Wrap(err, "failed to close listener"),
+ "err", err,
)
}
}
}
-func (sc *SocketClient) acceptConnection() (net.Conn, error) {
+func (sc *SocketPV) acceptConnection() (net.Conn, error) {
conn, err := sc.listener.Accept()
if err != nil {
if !sc.IsRunning() {
@@ -264,7 +264,7 @@ func (sc *SocketClient) acceptConnection() (net.Conn, error) {
}
- conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey.Wrap())
+ conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey)
if err != nil {
return nil, err
}
@@ -272,7 +272,7 @@ func (sc *SocketClient) acceptConnection() (net.Conn, error) {
return conn, nil
}
-func (sc *SocketClient) listen() error {
+func (sc *SocketPV) listen() error {
ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr))
if err != nil {
return err
@@ -290,7 +290,7 @@ func (sc *SocketClient) listen() error {
// waitConnection uses the configured wait timeout to error if no external
// process connects in the time period.
-func (sc *SocketClient) waitConnection() (net.Conn, error) {
+func (sc *SocketPV) waitConnection() (net.Conn, error) {
var (
connc = make(chan net.Conn, 1)
errc = make(chan error, 1)
@@ -311,7 +311,7 @@ func (sc *SocketClient) waitConnection() (net.Conn, error) {
return conn, nil
case err := <-errc:
if _, ok := err.(timeoutError); ok {
- return nil, errors.Wrap(ErrConnWaitTimeout, err.Error())
+ return nil, cmn.ErrorWrap(ErrConnWaitTimeout, err.Error())
}
return nil, err
case <-time.After(sc.connWaitTimeout):
@@ -344,7 +344,7 @@ type RemoteSigner struct {
connDeadline time.Duration
connRetries int
privKey crypto.PrivKeyEd25519
- privVal PrivValidator
+ privVal types.PrivValidator
conn net.Conn
}
@@ -353,7 +353,7 @@ type RemoteSigner struct {
func NewRemoteSigner(
logger log.Logger,
chainID, socketAddr string,
- privVal PrivValidator,
+ privVal types.PrivValidator,
privKey crypto.PrivKeyEd25519,
) *RemoteSigner {
rs := &RemoteSigner{
@@ -374,8 +374,8 @@ func NewRemoteSigner(
func (rs *RemoteSigner) OnStart() error {
conn, err := rs.connect()
if err != nil {
- rs.Logger.Error("OnStart", "err", errors.Wrap(err, "connect"))
-
+ err = cmn.ErrorWrap(err, "connect")
+ rs.Logger.Error("OnStart", "err", err)
return err
}
@@ -391,7 +391,7 @@ func (rs *RemoteSigner) OnStop() {
}
if err := rs.conn.Close(); err != nil {
- rs.Logger.Error("OnStop", "err", errors.Wrap(err, "closing listener failed"))
+ rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed"))
}
}
@@ -404,28 +404,31 @@ func (rs *RemoteSigner) connect() (net.Conn, error) {
conn, err := cmn.Connect(rs.addr)
if err != nil {
+ err = cmn.ErrorWrap(err, "connection failed")
rs.Logger.Error(
"connect",
"addr", rs.addr,
- "err", errors.Wrap(err, "connection failed"),
+ "err", err,
)
continue
}
if err := conn.SetDeadline(time.Now().Add(connDeadline)); err != nil {
+ err = cmn.ErrorWrap(err, "setting connection timeout failed")
rs.Logger.Error(
"connect",
- "err", errors.Wrap(err, "setting connection timeout failed"),
+ "err", err,
)
continue
}
- conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey.Wrap())
+ conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey)
if err != nil {
+ err = cmn.ErrorWrap(err, "encrypting connection failed")
rs.Logger.Error(
"connect",
- "err", errors.Wrap(err, "encrypting connection failed"),
+ "err", err,
)
continue
@@ -451,13 +454,12 @@ func (rs *RemoteSigner) handleConnection(conn net.Conn) {
return
}
- var res PrivValMsg
+ var res SocketPVMsg
switch r := req.(type) {
case *PubKeyMsg:
var p crypto.PubKey
-
- p, err = rs.privVal.PubKey()
+ p = rs.privVal.GetPubKey()
res = &PubKeyMsg{p}
case *SignVoteMsg:
err = rs.privVal.SignVote(rs.chainID, r.Vote)
@@ -487,23 +489,16 @@ func (rs *RemoteSigner) handleConnection(conn net.Conn) {
//---------------------------------------------------------
-const (
- msgTypePubKey = byte(0x01)
- msgTypeSignVote = byte(0x10)
- msgTypeSignProposal = byte(0x11)
- msgTypeSignHeartbeat = byte(0x12)
-)
+// SocketPVMsg is sent between RemoteSigner and SocketPV.
+type SocketPVMsg interface{}
-// PrivValMsg is sent between RemoteSigner and SocketClient.
-type PrivValMsg interface{}
-
-var _ = wire.RegisterInterface(
- struct{ PrivValMsg }{},
- wire.ConcreteType{&PubKeyMsg{}, msgTypePubKey},
- wire.ConcreteType{&SignVoteMsg{}, msgTypeSignVote},
- wire.ConcreteType{&SignProposalMsg{}, msgTypeSignProposal},
- wire.ConcreteType{&SignHeartbeatMsg{}, msgTypeSignHeartbeat},
-)
+func RegisterSocketPVMsg(cdc *amino.Codec) {
+ cdc.RegisterInterface((*SocketPVMsg)(nil), nil)
+ cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil)
+ cdc.RegisterConcrete(&SignVoteMsg{}, "tendermint/socketpv/SignVoteMsg", nil)
+ cdc.RegisterConcrete(&SignProposalMsg{}, "tendermint/socketpv/SignProposalMsg", nil)
+ cdc.RegisterConcrete(&SignHeartbeatMsg{}, "tendermint/socketpv/SignHeartbeatMsg", nil)
+}
// PubKeyMsg is a PrivValidatorSocket message containing the public key.
type PubKeyMsg struct {
@@ -525,40 +520,19 @@ type SignHeartbeatMsg struct {
Heartbeat *types.Heartbeat
}
-func readMsg(r io.Reader) (PrivValMsg, error) {
- var (
- n int
- err error
- )
-
- read := wire.ReadBinary(struct{ PrivValMsg }{}, r, 0, &n, &err)
- if err != nil {
- if _, ok := err.(timeoutError); ok {
- return nil, errors.Wrap(ErrConnTimeout, err.Error())
- }
-
- return nil, err
- }
-
- w, ok := read.(struct{ PrivValMsg })
- if !ok {
- return nil, errors.New("unknown type")
- }
-
- return w.PrivValMsg, nil
-}
-
-func writeMsg(w io.Writer, msg interface{}) error {
- var (
- err error
- n int
- )
-
- // TODO(xla): This extra wrap should be gone with the sdk-2 update.
- wire.WriteBinary(struct{ PrivValMsg }{msg}, w, &n, &err)
+func readMsg(r io.Reader) (msg SocketPVMsg, err error) {
+ const maxSocketPVMsgSize = 1024 * 10
+ _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize)
if _, ok := err.(timeoutError); ok {
- return errors.Wrap(ErrConnTimeout, err.Error())
+ err = cmn.ErrorWrap(ErrConnTimeout, err.Error())
}
-
- return err
+ return
+}
+
+func writeMsg(w io.Writer, msg interface{}) (err error) {
+ _, err = cdc.MarshalBinaryWriter(w, msg)
+ if _, ok := err.(timeoutError); ok {
+ err = cmn.ErrorWrap(ErrConnTimeout, err.Error())
+ }
+ return
}
diff --git a/types/priv_validator/socket_tcp.go b/types/priv_validator/socket_tcp.go
index 2421eb9f4..b26db00c2 100644
--- a/types/priv_validator/socket_tcp.go
+++ b/types/priv_validator/socket_tcp.go
@@ -1,4 +1,4 @@
-package types
+package privval
import (
"net"
diff --git a/types/priv_validator/socket_tcp_test.go b/types/priv_validator/socket_tcp_test.go
index cd95ab0b9..44a673c0c 100644
--- a/types/priv_validator/socket_tcp_test.go
+++ b/types/priv_validator/socket_tcp_test.go
@@ -1,4 +1,4 @@
-package types
+package privval
import (
"net"
diff --git a/types/priv_validator/socket_test.go b/types/priv_validator/socket_test.go
index 2859c9452..197c2508e 100644
--- a/types/priv_validator/socket_test.go
+++ b/types/priv_validator/socket_test.go
@@ -1,4 +1,4 @@
-package types
+package privval
import (
"fmt"
@@ -6,11 +6,10 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- crypto "github.com/tendermint/go-crypto"
+ "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@@ -18,7 +17,7 @@ import (
"github.com/tendermint/tendermint/types"
)
-func TestSocketClientAddress(t *testing.T) {
+func TestSocketPVAddress(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID)
@@ -26,10 +25,9 @@ func TestSocketClientAddress(t *testing.T) {
defer sc.Stop()
defer rs.Stop()
- serverAddr, err := rs.privVal.Address()
- require.NoError(t, err)
+ serverAddr := rs.privVal.GetAddress()
- clientAddr, err := sc.Address()
+ clientAddr, err := sc.getAddress()
require.NoError(t, err)
assert.Equal(t, serverAddr, clientAddr)
@@ -39,7 +37,7 @@ func TestSocketClientAddress(t *testing.T) {
}
-func TestSocketClientPubKey(t *testing.T) {
+func TestSocketPVPubKey(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID)
@@ -47,11 +45,10 @@ func TestSocketClientPubKey(t *testing.T) {
defer sc.Stop()
defer rs.Stop()
- clientKey, err := sc.PubKey()
+ clientKey, err := sc.getPubKey()
require.NoError(t, err)
- privKey, err := rs.privVal.PubKey()
- require.NoError(t, err)
+ privKey := rs.privVal.GetPubKey()
assert.Equal(t, privKey, clientKey)
@@ -59,7 +56,7 @@ func TestSocketClientPubKey(t *testing.T) {
assert.Equal(t, privKey, sc.GetPubKey())
}
-func TestSocketClientProposal(t *testing.T) {
+func TestSocketPVProposal(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID)
@@ -76,7 +73,7 @@ func TestSocketClientProposal(t *testing.T) {
assert.Equal(t, privProposal.Signature, clientProposal.Signature)
}
-func TestSocketClientVote(t *testing.T) {
+func TestSocketPVVote(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID)
@@ -94,7 +91,7 @@ func TestSocketClientVote(t *testing.T) {
assert.Equal(t, want.Signature, have.Signature)
}
-func TestSocketClientHeartbeat(t *testing.T) {
+func TestSocketPVHeartbeat(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID)
@@ -110,9 +107,9 @@ func TestSocketClientHeartbeat(t *testing.T) {
assert.Equal(t, want.Signature, have.Signature)
}
-func TestSocketClientAcceptDeadline(t *testing.T) {
+func TestSocketPVAcceptDeadline(t *testing.T) {
var (
- sc = NewSocketClient(
+ sc = NewSocketPV(
log.TestingLogger(),
"127.0.0.1:0",
crypto.GenPrivKeyEd25519(),
@@ -120,26 +117,26 @@ func TestSocketClientAcceptDeadline(t *testing.T) {
)
defer sc.Stop()
- SocketClientAcceptDeadline(time.Millisecond)(sc)
+ SocketPVAcceptDeadline(time.Millisecond)(sc)
- assert.Equal(t, errors.Cause(sc.Start()), ErrConnWaitTimeout)
+ assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout)
}
-func TestSocketClientDeadline(t *testing.T) {
+func TestSocketPVDeadline(t *testing.T) {
var (
addr = testFreeAddr(t)
listenc = make(chan struct{})
- sc = NewSocketClient(
+ sc = NewSocketPV(
log.TestingLogger(),
addr,
crypto.GenPrivKeyEd25519(),
)
)
- SocketClientConnDeadline(10 * time.Millisecond)(sc)
- SocketClientConnWait(500 * time.Millisecond)(sc)
+ SocketPVConnDeadline(100 * time.Millisecond)(sc)
+ SocketPVConnWait(500 * time.Millisecond)(sc)
- go func(sc *SocketClient) {
+ go func(sc *SocketPV) {
defer close(listenc)
require.NoError(t, sc.Start())
@@ -155,7 +152,7 @@ func TestSocketClientDeadline(t *testing.T) {
_, err = p2pconn.MakeSecretConnection(
conn,
- crypto.GenPrivKeyEd25519().Wrap(),
+ crypto.GenPrivKeyEd25519(),
)
if err == nil {
break
@@ -167,21 +164,21 @@ func TestSocketClientDeadline(t *testing.T) {
// Sleep to guarantee deadline has been hit.
time.Sleep(20 * time.Microsecond)
- _, err := sc.PubKey()
- assert.Equal(t, errors.Cause(err), ErrConnTimeout)
+ _, err := sc.getPubKey()
+ assert.Equal(t, err.(cmn.Error).Cause(), ErrConnTimeout)
}
-func TestSocketClientWait(t *testing.T) {
- sc := NewSocketClient(
+func TestSocketPVWait(t *testing.T) {
+ sc := NewSocketPV(
log.TestingLogger(),
"127.0.0.1:0",
crypto.GenPrivKeyEd25519(),
)
defer sc.Stop()
- SocketClientConnWait(time.Millisecond)(sc)
+ SocketPVConnWait(time.Millisecond)(sc)
- assert.Equal(t, errors.Cause(sc.Start()), ErrConnWaitTimeout)
+ assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout)
}
func TestRemoteSignerRetry(t *testing.T) {
@@ -216,7 +213,7 @@ func TestRemoteSignerRetry(t *testing.T) {
log.TestingLogger(),
cmn.RandStr(12),
ln.Addr().String(),
- NewTestPrivValidator(types.GenSigner()),
+ types.NewMockPV(),
crypto.GenPrivKeyEd25519(),
)
defer rs.Stop()
@@ -224,7 +221,7 @@ func TestRemoteSignerRetry(t *testing.T) {
RemoteSignerConnDeadline(time.Millisecond)(rs)
RemoteSignerConnRetries(retries)(rs)
- assert.Equal(t, errors.Cause(rs.Start()), ErrDialRetryMax)
+ assert.Equal(t, rs.Start().(cmn.Error).Cause(), ErrDialRetryMax)
select {
case attempts := <-attemptc:
@@ -237,12 +234,11 @@ func TestRemoteSignerRetry(t *testing.T) {
func testSetupSocketPair(
t *testing.T,
chainID string,
-) (*SocketClient, *RemoteSigner) {
+) (*SocketPV, *RemoteSigner) {
var (
addr = testFreeAddr(t)
logger = log.TestingLogger()
- signer = types.GenSigner()
- privVal = NewTestPrivValidator(signer)
+ privVal = types.NewMockPV()
readyc = make(chan struct{})
rs = NewRemoteSigner(
logger,
@@ -251,14 +247,14 @@ func testSetupSocketPair(
privVal,
crypto.GenPrivKeyEd25519(),
)
- sc = NewSocketClient(
+ sc = NewSocketPV(
logger,
addr,
crypto.GenPrivKeyEd25519(),
)
)
- go func(sc *SocketClient) {
+ go func(sc *SocketPV) {
require.NoError(t, sc.Start())
assert.True(t, sc.IsRunning())
diff --git a/types/priv_validator/unencrypted.go b/types/priv_validator/unencrypted.go
deleted file mode 100644
index 10a304d9e..000000000
--- a/types/priv_validator/unencrypted.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package types
-
-import (
- "fmt"
-
- crypto "github.com/tendermint/go-crypto"
- "github.com/tendermint/tendermint/types"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-//-----------------------------------------------------------------
-
-var _ types.PrivValidator2 = (*PrivValidatorUnencrypted)(nil)
-
-// PrivValidatorUnencrypted implements PrivValidator.
-// It uses an in-memory crypto.PrivKey that is
-// persisted to disk unencrypted.
-type PrivValidatorUnencrypted struct {
- ID types.ValidatorID `json:"id"`
- PrivKey PrivKey `json:"priv_key"`
- LastSignedInfo *LastSignedInfo `json:"last_signed_info"`
-}
-
-// NewPrivValidatorUnencrypted returns an instance of PrivValidatorUnencrypted.
-func NewPrivValidatorUnencrypted(priv crypto.PrivKey) *PrivValidatorUnencrypted {
- return &PrivValidatorUnencrypted{
- ID: types.ValidatorID{
- Address: priv.PubKey().Address(),
- PubKey: priv.PubKey(),
- },
- PrivKey: PrivKey(priv),
- LastSignedInfo: NewLastSignedInfo(),
- }
-}
-
-// String returns a string representation of the PrivValidatorUnencrypted
-func (upv *PrivValidatorUnencrypted) String() string {
- addr, err := upv.Address()
- if err != nil {
- panic(err)
- }
-
- return fmt.Sprintf("PrivValidator{%v %v}", addr, upv.LastSignedInfo.String())
-}
-
-func (upv *PrivValidatorUnencrypted) Address() (cmn.HexBytes, error) {
- return upv.PrivKey.PubKey().Address(), nil
-}
-
-func (upv *PrivValidatorUnencrypted) PubKey() (crypto.PubKey, error) {
- return upv.PrivKey.PubKey(), nil
-}
-
-func (upv *PrivValidatorUnencrypted) SignVote(chainID string, vote *types.Vote) error {
- return upv.LastSignedInfo.SignVote(upv.PrivKey, chainID, vote)
-}
-
-func (upv *PrivValidatorUnencrypted) SignProposal(chainID string, proposal *types.Proposal) error {
- return upv.LastSignedInfo.SignProposal(upv.PrivKey, chainID, proposal)
-}
-
-func (upv *PrivValidatorUnencrypted) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {
- var err error
- heartbeat.Signature, err = upv.PrivKey.Sign(heartbeat.SignBytes(chainID))
- return err
-}
diff --git a/types/priv_validator/upgrade.go b/types/priv_validator/upgrade.go
deleted file mode 100644
index 063655421..000000000
--- a/types/priv_validator/upgrade.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package types
-
-import (
- "encoding/json"
- "io/ioutil"
-
- crypto "github.com/tendermint/go-crypto"
- "github.com/tendermint/tendermint/types"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-type PrivValidatorV1 struct {
- Address cmn.HexBytes `json:"address"`
- PubKey crypto.PubKey `json:"pub_key"`
- LastHeight int64 `json:"last_height"`
- LastRound int `json:"last_round"`
- LastStep int8 `json:"last_step"`
- LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we dont lose signatures
- LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we dont lose signatures
- PrivKey crypto.PrivKey `json:"priv_key"`
-}
-
-func UpgradePrivValidator(filePath string) (*PrivValidatorJSON, error) {
- b, err := ioutil.ReadFile(filePath)
- if err != nil {
- return nil, err
- }
-
- pv := new(PrivValidatorV1)
- err = json.Unmarshal(b, pv)
- if err != nil {
- return nil, err
- }
-
- pvNew := &PrivValidatorJSON{
- PrivValidatorUnencrypted: &PrivValidatorUnencrypted{
- ID: types.ValidatorID{
- Address: pv.Address,
- PubKey: pv.PubKey,
- },
- PrivKey: PrivKey(pv.PrivKey),
- LastSignedInfo: &LastSignedInfo{
- Height: pv.LastHeight,
- Round: pv.LastRound,
- Step: pv.LastStep,
- SignBytes: pv.LastSignBytes,
- Signature: pv.LastSignature,
- },
- },
- }
-
- b, err = json.MarshalIndent(pvNew, "", " ")
- if err != nil {
- return nil, err
- }
-
- err = ioutil.WriteFile(filePath, b, 0600)
- return pvNew, err
-}
diff --git a/types/priv_validator/upgrade_pv/main.go b/types/priv_validator/upgrade_pv/main.go
deleted file mode 100644
index 5a0d4f263..000000000
--- a/types/priv_validator/upgrade_pv/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "fmt"
- "os"
-
- priv_val "github.com/tendermint/tendermint/types/priv_validator"
-)
-
-func main() {
- if len(os.Args) < 2 {
- fmt.Println("USAGE: priv_val_converter ")
- os.Exit(1)
- }
- file := os.Args[1]
- _, err := priv_val.UpgradePrivValidator(file)
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-}
diff --git a/types/priv_validator/wire.go b/types/priv_validator/wire.go
new file mode 100644
index 000000000..688910834
--- /dev/null
+++ b/types/priv_validator/wire.go
@@ -0,0 +1,13 @@
+package privval
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+ RegisterSocketPVMsg(cdc)
+}
diff --git a/types/priv_validator_test.go b/types/priv_validator_test.go
deleted file mode 100644
index edfcdf58c..000000000
--- a/types/priv_validator_test.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package types
-
-import (
- "encoding/hex"
- "encoding/json"
- "fmt"
- "os"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- crypto "github.com/tendermint/go-crypto"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-func TestGenLoadValidator(t *testing.T) {
- assert := assert.New(t)
-
- _, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorFS(tempFilePath)
-
- height := int64(100)
- privVal.LastHeight = height
- privVal.Save()
- addr := privVal.GetAddress()
-
- privVal = LoadPrivValidatorFS(tempFilePath)
- assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same")
- assert.Equal(height, privVal.LastHeight, "expected privval.LastHeight to have been saved")
-}
-
-func TestLoadOrGenValidator(t *testing.T) {
- assert := assert.New(t)
-
- _, tempFilePath := cmn.Tempfile("priv_validator_")
- if err := os.Remove(tempFilePath); err != nil {
- t.Error(err)
- }
- privVal := LoadOrGenPrivValidatorFS(tempFilePath)
- addr := privVal.GetAddress()
- privVal = LoadOrGenPrivValidatorFS(tempFilePath)
- assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same")
-}
-
-func TestUnmarshalValidator(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
-
- // create some fixed values
- addrStr := "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456"
- pubStr := "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
- privStr := "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
- addrBytes, _ := hex.DecodeString(addrStr)
- pubBytes, _ := hex.DecodeString(pubStr)
- privBytes, _ := hex.DecodeString(privStr)
-
- // prepend type byte
- pubKey, err := crypto.PubKeyFromBytes(append([]byte{1}, pubBytes...))
- require.Nil(err, "%+v", err)
- privKey, err := crypto.PrivKeyFromBytes(append([]byte{1}, privBytes...))
- require.Nil(err, "%+v", err)
-
- serialized := fmt.Sprintf(`{
- "address": "%s",
- "pub_key": {
- "type": "ed25519",
- "data": "%s"
- },
- "last_height": 0,
- "last_round": 0,
- "last_step": 0,
- "last_signature": null,
- "priv_key": {
- "type": "ed25519",
- "data": "%s"
- }
-}`, addrStr, pubStr, privStr)
-
- val := PrivValidatorFS{}
- err = json.Unmarshal([]byte(serialized), &val)
- require.Nil(err, "%+v", err)
-
- // make sure the values match
- assert.EqualValues(addrBytes, val.GetAddress())
- assert.EqualValues(pubKey, val.GetPubKey())
- assert.EqualValues(privKey, val.PrivKey)
-
- // export it and make sure it is the same
- out, err := json.Marshal(val)
- require.Nil(err, "%+v", err)
- assert.JSONEq(serialized, string(out))
-}
-
-func TestSignVote(t *testing.T) {
- assert := assert.New(t)
-
- _, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorFS(tempFilePath)
-
- block1 := BlockID{[]byte{1, 2, 3}, PartSetHeader{}}
- block2 := BlockID{[]byte{3, 2, 1}, PartSetHeader{}}
- height, round := int64(10), 1
- voteType := VoteTypePrevote
-
- // sign a vote for first time
- vote := newVote(privVal.Address, 0, height, round, voteType, block1)
- err := privVal.SignVote("mychainid", vote)
- assert.NoError(err, "expected no error signing vote")
-
- // try to sign the same vote again; should be fine
- err = privVal.SignVote("mychainid", vote)
- assert.NoError(err, "expected no error on signing same vote")
-
- // now try some bad votes
- cases := []*Vote{
- newVote(privVal.Address, 0, height, round-1, voteType, block1), // round regression
- newVote(privVal.Address, 0, height-1, round, voteType, block1), // height regression
- newVote(privVal.Address, 0, height-2, round+4, voteType, block1), // height regression and different round
- newVote(privVal.Address, 0, height, round, voteType, block2), // different block
- }
-
- for _, c := range cases {
- err = privVal.SignVote("mychainid", c)
- assert.Error(err, "expected error on signing conflicting vote")
- }
-
- // try signing a vote with a different time stamp
- sig := vote.Signature
- vote.Timestamp = vote.Timestamp.Add(time.Duration(1000))
- err = privVal.SignVote("mychainid", vote)
- assert.NoError(err)
- assert.Equal(sig, vote.Signature)
-}
-
-func TestSignProposal(t *testing.T) {
- assert := assert.New(t)
-
- _, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorFS(tempFilePath)
-
- block1 := PartSetHeader{5, []byte{1, 2, 3}}
- block2 := PartSetHeader{10, []byte{3, 2, 1}}
- height, round := int64(10), 1
-
- // sign a proposal for first time
- proposal := newProposal(height, round, block1)
- err := privVal.SignProposal("mychainid", proposal)
- assert.NoError(err, "expected no error signing proposal")
-
- // try to sign the same proposal again; should be fine
- err = privVal.SignProposal("mychainid", proposal)
- assert.NoError(err, "expected no error on signing same proposal")
-
- // now try some bad Proposals
- cases := []*Proposal{
- newProposal(height, round-1, block1), // round regression
- newProposal(height-1, round, block1), // height regression
- newProposal(height-2, round+4, block1), // height regression and different round
- newProposal(height, round, block2), // different block
- }
-
- for _, c := range cases {
- err = privVal.SignProposal("mychainid", c)
- assert.Error(err, "expected error on signing conflicting proposal")
- }
-
- // try signing a proposal with a different time stamp
- sig := proposal.Signature
- proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000))
- err = privVal.SignProposal("mychainid", proposal)
- assert.NoError(err)
- assert.Equal(sig, proposal.Signature)
-}
-
-func TestDifferByTimestamp(t *testing.T) {
- _, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorFS(tempFilePath)
-
- block1 := PartSetHeader{5, []byte{1, 2, 3}}
- height, round := int64(10), 1
- chainID := "mychainid"
-
- // test proposal
- {
- proposal := newProposal(height, round, block1)
- err := privVal.SignProposal(chainID, proposal)
- assert.NoError(t, err, "expected no error signing proposal")
- signBytes := proposal.SignBytes(chainID)
- sig := proposal.Signature
- timeStamp := clipToMS(proposal.Timestamp)
-
- // manipulate the timestamp. should get changed back
- proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond)
- var emptySig crypto.Signature
- proposal.Signature = emptySig
- err = privVal.SignProposal("mychainid", proposal)
- assert.NoError(t, err, "expected no error on signing same proposal")
-
- assert.Equal(t, timeStamp, proposal.Timestamp)
- assert.Equal(t, signBytes, proposal.SignBytes(chainID))
- assert.Equal(t, sig, proposal.Signature)
- }
-
- // test vote
- {
- voteType := VoteTypePrevote
- blockID := BlockID{[]byte{1, 2, 3}, PartSetHeader{}}
- vote := newVote(privVal.Address, 0, height, round, voteType, blockID)
- err := privVal.SignVote("mychainid", vote)
- assert.NoError(t, err, "expected no error signing vote")
-
- signBytes := vote.SignBytes(chainID)
- sig := vote.Signature
- timeStamp := clipToMS(vote.Timestamp)
-
- // manipulate the timestamp. should get changed back
- vote.Timestamp = vote.Timestamp.Add(time.Millisecond)
- var emptySig crypto.Signature
- vote.Signature = emptySig
- err = privVal.SignVote("mychainid", vote)
- assert.NoError(t, err, "expected no error on signing same vote")
-
- assert.Equal(t, timeStamp, vote.Timestamp)
- assert.Equal(t, signBytes, vote.SignBytes(chainID))
- assert.Equal(t, sig, vote.Signature)
- }
-}
-
-func newVote(addr Address, idx int, height int64, round int, typ byte, blockID BlockID) *Vote {
- return &Vote{
- ValidatorAddress: addr,
- ValidatorIndex: idx,
- Height: height,
- Round: round,
- Type: typ,
- Timestamp: time.Now().UTC(),
- BlockID: blockID,
- }
-}
-
-func newProposal(height int64, round int, partsHeader PartSetHeader) *Proposal {
- return &Proposal{
- Height: height,
- Round: round,
- BlockPartsHeader: partsHeader,
- Timestamp: time.Now().UTC(),
- }
-}
-
-func clipToMS(t time.Time) time.Time {
- nano := t.UnixNano()
- million := int64(1000000)
- nano = (nano / million) * million
- return time.Unix(0, nano).UTC()
-}
diff --git a/types/proposal.go b/types/proposal.go
index c240756bb..95008897b 100644
--- a/types/proposal.go
+++ b/types/proposal.go
@@ -6,7 +6,6 @@ import (
"time"
"github.com/tendermint/go-crypto"
- "github.com/tendermint/tendermint/wire"
)
var (
@@ -51,10 +50,7 @@ func (p *Proposal) String() string {
// SignBytes returns the Proposal bytes for signing
func (p *Proposal) SignBytes(chainID string) []byte {
- bz, err := wire.MarshalJSON(CanonicalJSONOnceProposal{
- ChainID: chainID,
- Proposal: CanonicalProposal(p),
- })
+ bz, err := cdc.MarshalJSON(CanonicalProposal(chainID, p))
if err != nil {
panic(err)
}
diff --git a/types/proposal_test.go b/types/proposal_test.go
index 610f76855..43fb7c209 100644
--- a/types/proposal_test.go
+++ b/types/proposal_test.go
@@ -5,8 +5,6 @@ import (
"time"
"github.com/stretchr/testify/require"
-
- wire "github.com/tendermint/tendermint/wire"
)
var testProposal *Proposal
@@ -29,7 +27,7 @@ func TestProposalSignable(t *testing.T) {
signBytes := testProposal.SignBytes("test_chain_id")
signStr := string(signBytes)
- expected := `{"chain_id":"test_chain_id","proposal":{"block_parts_header":{"hash":"626C6F636B7061727473","total":111},"height":12345,"pol_block_id":{},"pol_round":-1,"round":23456,"timestamp":"2018-02-11T07:09:22.765Z"}}`
+ expected := `{"@chain_id":"test_chain_id","@type":"proposal","block_parts_header":{"hash":"626C6F636B7061727473","total":111},"height":12345,"pol_block_id":{},"pol_round":-1,"round":23456,"timestamp":"2018-02-11T07:09:22.765Z"}`
if signStr != expected {
t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr)
}
@@ -37,38 +35,38 @@ func TestProposalSignable(t *testing.T) {
func TestProposalString(t *testing.T) {
str := testProposal.String()
- expected := `Proposal{12345/23456 111:626C6F636B70 (-1,:0:000000000000) {} @ 2018-02-11T07:09:22.765Z}`
+ expected := `Proposal{12345/23456 111:626C6F636B70 (-1,:0:000000000000) @ 2018-02-11T07:09:22.765Z}`
if str != expected {
t.Errorf("Got unexpected string for Proposal. Expected:\n%v\nGot:\n%v", expected, str)
}
}
func TestProposalVerifySignature(t *testing.T) {
- privVal := GenPrivValidatorFS("")
+ privVal := NewMockPV()
pubKey := privVal.GetPubKey()
prop := NewProposal(4, 2, PartSetHeader{777, []byte("proper")}, 2, BlockID{})
signBytes := prop.SignBytes("test_chain_id")
// sign it
- signature, err := privVal.Signer.Sign(signBytes)
+ err := privVal.SignProposal("test_chain_id", prop)
require.NoError(t, err)
// verify the same proposal
- valid := pubKey.VerifyBytes(prop.SignBytes("test_chain_id"), signature)
+ valid := pubKey.VerifyBytes(signBytes, prop.Signature)
require.True(t, valid)
// serialize, deserialize and verify again....
newProp := new(Proposal)
- bs, err := wire.MarshalBinary(prop)
+ bs, err := cdc.MarshalBinary(prop)
require.NoError(t, err)
- err = wire.UnmarshalBinary(bs, &newProp)
+ err = cdc.UnmarshalBinary(bs, &newProp)
require.NoError(t, err)
// verify the transmitted proposal
newSignBytes := newProp.SignBytes("test_chain_id")
require.Equal(t, string(signBytes), string(newSignBytes))
- valid = pubKey.VerifyBytes(newSignBytes, signature)
+ valid = pubKey.VerifyBytes(newSignBytes, newProp.Signature)
require.True(t, valid)
}
@@ -79,9 +77,9 @@ func BenchmarkProposalWriteSignBytes(b *testing.B) {
}
func BenchmarkProposalSign(b *testing.B) {
- privVal := GenPrivValidatorFS("")
+ privVal := NewMockPV()
for i := 0; i < b.N; i++ {
- _, err := privVal.Signer.Sign(testProposal.SignBytes("test_chain_id"))
+ err := privVal.SignProposal("test_chain_id", testProposal)
if err != nil {
b.Error(err)
}
@@ -89,12 +87,12 @@ func BenchmarkProposalSign(b *testing.B) {
}
func BenchmarkProposalVerifySignature(b *testing.B) {
- signBytes := testProposal.SignBytes("test_chain_id")
- privVal := GenPrivValidatorFS("")
- signature, _ := privVal.Signer.Sign(signBytes)
+ privVal := NewMockPV()
+ err := privVal.SignProposal("test_chain_id", testProposal)
+ require.Nil(b, err)
pubKey := privVal.GetPubKey()
for i := 0; i < b.N; i++ {
- pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), signature)
+ pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), testProposal.Signature)
}
}
diff --git a/types/results.go b/types/results.go
index 71834664d..326cee48d 100644
--- a/types/results.go
+++ b/types/results.go
@@ -2,7 +2,6 @@ package types
import (
abci "github.com/tendermint/abci/types"
- wire "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/merkle"
)
@@ -18,7 +17,8 @@ type ABCIResult struct {
// Hash returns the canonical hash of the ABCIResult
func (a ABCIResult) Hash() []byte {
- return tmHash(a)
+ bz := aminoHash(a)
+ return bz
}
// ABCIResults wraps the deliver tx results to return a proof
@@ -42,7 +42,7 @@ func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult {
// Bytes serializes the ABCIResponse using wire
func (a ABCIResults) Bytes() []byte {
- bz, err := wire.MarshalBinary(a)
+ bz, err := cdc.MarshalBinary(a)
if err != nil {
panic(err)
}
diff --git a/types/results_test.go b/types/results_test.go
index 88624e8c8..009e2693d 100644
--- a/types/results_test.go
+++ b/types/results_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestABCIResults(t *testing.T) {
@@ -14,13 +15,15 @@ func TestABCIResults(t *testing.T) {
e := ABCIResult{Code: 14, Data: []byte("foo")}
f := ABCIResult{Code: 14, Data: []byte("bar")}
- // nil and []byte{} should produce same hash
- assert.Equal(t, a.Hash(), b.Hash())
+ // Nil and []byte{} should produce the same hash.
+ require.Equal(t, a.Hash(), a.Hash())
+ require.Equal(t, b.Hash(), b.Hash())
+ require.Equal(t, a.Hash(), b.Hash())
- // a and b should be the same, don't go in results
+ // a and b should be the same, don't go in results.
results := ABCIResults{a, c, d, e, f}
- // make sure each result hashes properly
+ // Make sure each result hashes properly.
var last []byte
for i, res := range results {
h := res.Hash()
@@ -28,8 +31,7 @@ func TestABCIResults(t *testing.T) {
last = h
}
- // make sure that we can get a root hash from results
- // and verify proofs
+ // Make sure that we can get a root hash from results and verify proofs.
root := results.Hash()
assert.NotEmpty(t, root)
diff --git a/types/signable.go b/types/signable.go
index 19829ede7..cc6498882 100644
--- a/types/signable.go
+++ b/types/signable.go
@@ -9,8 +9,3 @@ package types
type Signable interface {
SignBytes(chainID string) []byte
}
-
-// HashSignBytes is a convenience method for getting the hash of the bytes of a signable
-func HashSignBytes(chainID string, o Signable) []byte {
- return tmHash(o.SignBytes(chainID))
-}
diff --git a/types/test_util.go b/types/test_util.go
index 73e53eb19..f21c2831f 100644
--- a/types/test_util.go
+++ b/types/test_util.go
@@ -4,7 +4,7 @@ import "time"
func MakeCommit(blockID BlockID, height int64, round int,
voteSet *VoteSet,
- validators []*PrivValidatorFS) (*Commit, error) {
+ validators []PrivValidator) (*Commit, error) {
// all sign
for i := 0; i < len(validators); i++ {
@@ -28,8 +28,8 @@ func MakeCommit(blockID BlockID, height int64, round int,
return voteSet.MakeCommit(), nil
}
-func signAddVote(privVal *PrivValidatorFS, vote *Vote, voteSet *VoteSet) (signed bool, err error) {
- vote.Signature, err = privVal.Signer.Sign(vote.SignBytes(voteSet.ChainID()))
+func signAddVote(privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bool, err error) {
+ err = privVal.SignVote(voteSet.ChainID(), vote)
if err != nil {
return false, err
}
diff --git a/types/tx.go b/types/tx.go
index fc1d27212..e7247693a 100644
--- a/types/tx.go
+++ b/types/tx.go
@@ -18,7 +18,7 @@ type Tx []byte
// Hash computes the RIPEMD160 hash of the wire encoded transaction.
func (tx Tx) Hash() []byte {
- return wireHasher(tx).Hash()
+ return aminoHasher(tx).Hash()
}
// String returns the hex-encoded transaction as a string.
@@ -66,9 +66,7 @@ func (txs Txs) IndexByHash(hash []byte) int {
}
// Proof returns a simple merkle proof for this node.
-//
// Panics if i < 0 or i >= len(txs)
-//
// TODO: optimize this!
func (txs Txs) Proof(i int) TxProof {
l := len(txs)
@@ -95,7 +93,7 @@ type TxProof struct {
Proof merkle.SimpleProof
}
-// LeadHash returns the hash of the transaction this proof refers to.
+// LeafHash returns the hash of the transaction this proof refers to.
func (tp TxProof) LeafHash() []byte {
return tp.Data.Hash()
}
@@ -106,7 +104,12 @@ func (tp TxProof) Validate(dataHash []byte) error {
if !bytes.Equal(dataHash, tp.RootHash) {
return errors.New("Proof matches different data hash")
}
-
+ if tp.Index < 0 {
+ return errors.New("Proof index cannot be negative")
+ }
+ if tp.Total <= 0 {
+ return errors.New("Proof total must be positive")
+ }
valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash)
if !valid {
return errors.New("Proof is not internally consistent")
diff --git a/types/tx_test.go b/types/tx_test.go
index 340bd5a6b..2a93ceb31 100644
--- a/types/tx_test.go
+++ b/types/tx_test.go
@@ -6,7 +6,6 @@ import (
"github.com/stretchr/testify/assert"
- wire "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common"
ctest "github.com/tendermint/tmlibs/test"
)
@@ -69,9 +68,9 @@ func TestValidTxProof(t *testing.T) {
// read-write must also work
var p2 TxProof
- bin, err := wire.MarshalBinary(proof)
+ bin, err := cdc.MarshalBinary(proof)
assert.Nil(err)
- err = wire.UnmarshalBinary(bin, &p2)
+ err = cdc.UnmarshalBinary(bin, &p2)
if assert.Nil(err, "%d: %d: %+v", h, i, err) {
assert.Nil(p2.Validate(root), "%d: %d", h, i)
}
@@ -97,7 +96,7 @@ func testTxProofUnchangable(t *testing.T) {
// make sure it is valid to start with
assert.Nil(proof.Validate(root))
- bin, err := wire.MarshalBinary(proof)
+ bin, err := cdc.MarshalBinary(proof)
assert.Nil(err)
// try mutating the data and make sure nothing breaks
@@ -109,16 +108,17 @@ func testTxProofUnchangable(t *testing.T) {
}
}
-// this make sure the proof doesn't deserialize into something valid
+// This makes sure that the proof doesn't deserialize into something valid.
func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) {
var proof TxProof
- err := wire.UnmarshalBinary(bad, &proof)
+ err := cdc.UnmarshalBinary(bad, &proof)
if err == nil {
err = proof.Validate(root)
if err == nil {
- // okay, this can happen if we have a slightly different total
- // (where the path ends up the same), if it is something else, we have
- // a real problem
+ // XXX Fix simple merkle proofs so the following is *not* OK.
+ // This can happen if we have a slightly different total (where the
+ // path ends up the same). If it is something else, we have a real
+ // problem.
assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good)
}
}
diff --git a/types/validator.go b/types/validator.go
index a9b42fe8c..46dc61d07 100644
--- a/types/validator.go
+++ b/types/validator.go
@@ -71,7 +71,7 @@ func (v *Validator) String() string {
// Hash computes the unique ID of a validator with a given voting power.
// It excludes the Accum value, which changes with every round.
func (v *Validator) Hash() []byte {
- return tmHash(struct {
+ return aminoHash(struct {
Address Address
PubKey crypto.PubKey
VotingPower int64
@@ -82,14 +82,13 @@ func (v *Validator) Hash() []byte {
})
}
-//--------------------------------------------------------------------------------
-// For testing...
+//----------------------------------------
+// RandValidator
// RandValidator returns a randomized validator, useful for testing.
// UNSTABLE
-func RandValidator(randPower bool, minPower int64) (*Validator, *PrivValidatorFS) {
- _, tempFilePath := cmn.Tempfile("priv_validator_")
- privVal := GenPrivValidatorFS(tempFilePath)
+func RandValidator(randPower bool, minPower int64) (*Validator, PrivValidator) {
+ privVal := NewMockPV()
votePower := minPower
if randPower {
votePower += int64(cmn.RandUint32())
diff --git a/types/validator_set.go b/types/validator_set.go
index 4b84f85d7..fa6a2ad9c 100644
--- a/types/validator_set.go
+++ b/types/validator_set.go
@@ -7,7 +7,6 @@ import (
"sort"
"strings"
- "github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/merkle"
)
@@ -302,10 +301,10 @@ func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string
blockID BlockID, height int64, commit *Commit) error {
if newSet.Size() != len(commit.Precommits) {
- return errors.Errorf("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits))
+ return cmn.NewError("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits))
}
if height != commit.Height() {
- return errors.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
+ return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
}
oldVotingPower := int64(0)
@@ -320,13 +319,13 @@ func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string
}
if precommit.Height != height {
// return certerr.ErrHeightMismatch(height, precommit.Height)
- return errors.Errorf("Blocks don't match - %d vs %d", round, precommit.Round)
+ return cmn.NewError("Blocks don't match - %d vs %d", round, precommit.Round)
}
if precommit.Round != round {
- return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
+ return cmn.NewError("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
}
if precommit.Type != VoteTypePrecommit {
- return errors.Errorf("Invalid commit -- not precommit @ index %v", idx)
+ return cmn.NewError("Invalid commit -- not precommit @ index %v", idx)
}
if !blockID.Equals(precommit.BlockID) {
continue // Not an error, but doesn't count
@@ -342,7 +341,7 @@ func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string
// Validate signature old school
precommitSignBytes := precommit.SignBytes(chainID)
if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
- return errors.Errorf("Invalid commit -- invalid signature: %v", precommit)
+ return cmn.NewError("Invalid commit -- invalid signature: %v", precommit)
}
// Good precommit!
oldVotingPower += ov.VotingPower
@@ -356,10 +355,10 @@ func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string
}
if oldVotingPower <= valSet.TotalVotingPower()*2/3 {
- return errors.Errorf("Invalid commit -- insufficient old voting power: got %v, needed %v",
+ return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v",
oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
} else if newVotingPower <= newSet.TotalVotingPower()*2/3 {
- return errors.Errorf("Invalid commit -- insufficient cur voting power: got %v, needed %v",
+ return cmn.NewError("Invalid commit -- insufficient cur voting power: got %v, needed %v",
newVotingPower, (newSet.TotalVotingPower()*2/3 + 1))
}
return nil
@@ -429,9 +428,9 @@ func (ac accumComparable) Less(o interface{}) bool {
// RandValidatorSet returns a randomized validator set, useful for testing.
// NOTE: PrivValidator are in order.
// UNSTABLE
-func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []*PrivValidatorFS) {
+func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) {
vals := make([]*Validator, numValidators)
- privValidators := make([]*PrivValidatorFS, numValidators)
+ privValidators := make([]PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privValidator := RandValidator(false, votingPower)
vals[i] = val
diff --git a/types/validator_set_test.go b/types/validator_set_test.go
index 8769ac5ff..8db53a533 100644
--- a/types/validator_set_test.go
+++ b/types/validator_set_test.go
@@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/assert"
crypto "github.com/tendermint/go-crypto"
- wire "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -198,7 +197,7 @@ func newValidator(address []byte, power int64) *Validator {
func randPubKey() crypto.PubKey {
var pubKey [32]byte
copy(pubKey[:], cmn.RandBytes(32))
- return crypto.PubKeyEd25519(pubKey).Wrap()
+ return crypto.PubKeyEd25519(pubKey)
}
func randValidator_() *Validator {
@@ -216,7 +215,7 @@ func randValidatorSet(numValidators int) *ValidatorSet {
}
func (valSet *ValidatorSet) toBytes() []byte {
- bz, err := wire.MarshalBinary(valSet)
+ bz, err := cdc.MarshalBinary(valSet)
if err != nil {
panic(err)
}
@@ -224,7 +223,7 @@ func (valSet *ValidatorSet) toBytes() []byte {
}
func (valSet *ValidatorSet) fromBytes(b []byte) {
- err := wire.UnmarshalBinary(b, &valSet)
+ err := cdc.UnmarshalBinary(b, &valSet)
if err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
panic(err)
diff --git a/types/vote.go b/types/vote.go
index ceb6e985e..e4ead612a 100644
--- a/types/vote.go
+++ b/types/vote.go
@@ -7,7 +7,6 @@ import (
"time"
crypto "github.com/tendermint/go-crypto"
- "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common"
)
@@ -73,10 +72,7 @@ type Vote struct {
}
func (vote *Vote) SignBytes(chainID string) []byte {
- bz, err := wire.MarshalJSON(CanonicalJSONOnceVote{
- chainID,
- CanonicalVote(vote),
- })
+ bz, err := cdc.MarshalJSON(CanonicalVote(chainID, vote))
if err != nil {
panic(err)
}
diff --git a/types/vote_set_test.go b/types/vote_set_test.go
index 4a09d04d3..d424667b1 100644
--- a/types/vote_set_test.go
+++ b/types/vote_set_test.go
@@ -11,7 +11,7 @@ import (
)
// NOTE: privValidators are in order
-func randVoteSet(height int64, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []*PrivValidatorFS) {
+func randVoteSet(height int64, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []PrivValidator) {
valSet, privValidators := RandValidatorSet(numValidators, votingPower)
return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators
}
diff --git a/types/vote_test.go b/types/vote_test.go
index a4a0f309f..263d2c351 100644
--- a/types/vote_test.go
+++ b/types/vote_test.go
@@ -5,8 +5,6 @@ import (
"time"
"github.com/stretchr/testify/require"
-
- wire "github.com/tendermint/tendermint/wire"
)
func examplePrevote() *Vote {
@@ -45,7 +43,7 @@ func TestVoteSignable(t *testing.T) {
signBytes := vote.SignBytes("test_chain_id")
signStr := string(signBytes)
- expected := `{"chain_id":"test_chain_id","vote":{"block_id":{"hash":"68617368","parts":{"hash":"70617274735F68617368","total":1000000}},"height":12345,"round":2,"timestamp":"2017-12-25T03:00:01.234Z","type":2}}`
+ expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"68617368","parts":{"hash":"70617274735F68617368","total":1000000}},"height":12345,"round":2,"timestamp":"2017-12-25T03:00:01.234Z","type":2}`
if signStr != expected {
// NOTE: when this fails, you probably want to fix up consensus/replay_test too
t.Errorf("Got unexpected sign string for Vote. Expected:\n%v\nGot:\n%v", expected, signStr)
@@ -58,8 +56,8 @@ func TestVoteString(t *testing.T) {
in string
out string
}{
- {"Precommit", examplePrecommit().String(), `Vote{56789:616464720000 12345/02/2(Precommit) 686173680000 {} @ 2017-12-25T03:00:01.234Z}`},
- {"Prevote", examplePrevote().String(), `Vote{56789:616464720000 12345/02/1(Prevote) 686173680000 {} @ 2017-12-25T03:00:01.234Z}`},
+ {"Precommit", examplePrecommit().String(), `Vote{56789:616464720000 12345/02/2(Precommit) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
+ {"Prevote", examplePrevote().String(), `Vote{56789:616464720000 12345/02/1(Prevote) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
}
for _, tt := range tc {
@@ -73,31 +71,31 @@ func TestVoteString(t *testing.T) {
}
func TestVoteVerifySignature(t *testing.T) {
- privVal := GenPrivValidatorFS("")
+ privVal := NewMockPV()
pubKey := privVal.GetPubKey()
vote := examplePrecommit()
signBytes := vote.SignBytes("test_chain_id")
// sign it
- signature, err := privVal.Signer.Sign(signBytes)
+ err := privVal.SignVote("test_chain_id", vote)
require.NoError(t, err)
// verify the same vote
- valid := pubKey.VerifyBytes(vote.SignBytes("test_chain_id"), signature)
+ valid := pubKey.VerifyBytes(vote.SignBytes("test_chain_id"), vote.Signature)
require.True(t, valid)
// serialize, deserialize and verify again....
precommit := new(Vote)
- bs, err := wire.MarshalBinary(vote)
+ bs, err := cdc.MarshalBinary(vote)
require.NoError(t, err)
- err = wire.UnmarshalBinary(bs, &precommit)
+ err = cdc.UnmarshalBinary(bs, &precommit)
require.NoError(t, err)
// verify the transmitted vote
newSignBytes := precommit.SignBytes("test_chain_id")
require.Equal(t, string(signBytes), string(newSignBytes))
- valid = pubKey.VerifyBytes(newSignBytes, signature)
+ valid = pubKey.VerifyBytes(newSignBytes, precommit.Signature)
require.True(t, valid)
}
diff --git a/types/wire.go b/types/wire.go
new file mode 100644
index 000000000..bd5c4497d
--- /dev/null
+++ b/types/wire.go
@@ -0,0 +1,12 @@
+package types
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+}
diff --git a/version/version.go b/version/version.go
index a8fbfa84f..b0123eb1f 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,13 +1,13 @@
package version
const Maj = "0"
-const Min = "18"
+const Min = "19"
const Fix = "0"
var (
// Version is the current version of Tendermint
// Must be a string because scripts like dist.sh read this file.
- Version = "0.18.0"
+ Version = "0.19.0"
// GitCommit is the current HEAD set using ldflags.
GitCommit string
diff --git a/wire/wire.go b/wire/wire.go
deleted file mode 100644
index 9d0d2c208..000000000
--- a/wire/wire.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package wire
-
-import (
- "github.com/tendermint/go-wire"
-)
-
-/*
-// Expose access to a global wire codec
-// TODO: maybe introduce some Context object
-// containing logger, config, codec that can
-// be threaded through everything to avoid this global
-var cdc *wire.Codec
-
-func init() {
- cdc = wire.NewCodec()
- crypto.RegisterWire(cdc)
-}
-*/
-
-// Just a flow through to go-wire.
-// To be used later for the global codec
-
-func MarshalBinary(o interface{}) ([]byte, error) {
- return wire.MarshalBinary(o)
-}
-
-func UnmarshalBinary(bz []byte, ptr interface{}) error {
- return wire.UnmarshalBinary(bz, ptr)
-}
-
-func MarshalJSON(o interface{}) ([]byte, error) {
- return wire.MarshalJSON(o)
-}
-
-func UnmarshalJSON(jsonBz []byte, ptr interface{}) error {
- return wire.UnmarshalJSON(jsonBz, ptr)
-}
-
-type ConcreteType = wire.ConcreteType
-
-func RegisterInterface(o interface{}, ctypes ...ConcreteType) *wire.TypeInfo {
- return wire.RegisterInterface(o, ctypes...)
-}
-
-const RFC3339Millis = wire.RFC3339Millis
-
-/*
-
-func RegisterInterface(ptr interface{}, opts *wire.InterfaceOptions) {
- cdc.RegisterInterface(ptr, opts)
-}
-
-func RegisterConcrete(o interface{}, name string, opts *wire.ConcreteOptions) {
- cdc.RegisterConcrete(o, name, opts)
-}
-
-//-------------------------------
-
-const RFC3339Millis = wire.RFC3339Millis
-*/