Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-20 03:32:49 +00:00)

Compare commits: tmp...tessr/tm-d (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 97fd8f136e | |
| | 823f2acb14 | |
@@ -6,11 +6,11 @@ import (
"encoding/json"
"fmt"

- dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/version"
+ "github.com/tendermint/tm-db/memdb"
)

var (

@@ -71,7 +71,7 @@ type Application struct {
}

func NewApplication() *Application {
- state := loadState(dbm.NewMemDB())
+ state := loadState(memdb.NewDB())
return &Application{state: state}
}
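The hunks above swap the generic tm-db constructor for the dedicated in-memory subpackage. A minimal sketch of the pattern, assuming the memdb package from the tm-db v0.6.5 line used by this branch exposes NewDB plus the usual tm-db Get/Set/Close methods:

package main

import (
	"fmt"

	// Old (tm-db v0.6.4): dbm "github.com/tendermint/tm-db"; db := dbm.NewMemDB()
	// New (this branch):  each backend lives in its own subpackage.
	"github.com/tendermint/tm-db/memdb"
)

func main() {
	db := memdb.NewDB() // purely in-memory, nothing touches disk
	defer db.Close()

	if err := db.Set([]byte("key"), []byte("value")); err != nil {
		panic(err)
	}
	val, err := db.Get([]byte("key"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", val)
}

The same one-line substitution (dbm.NewMemDB() to memdb.NewDB()) is what the test-file hunks further down apply at every call site.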
@@ -7,13 +7,12 @@ import (
"strconv"
"strings"

- dbm "github.com/tendermint/tm-db"
-
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
+ "github.com/tendermint/tm-db/goleveldb"
)

const (

@@ -37,7 +36,7 @@ type PersistentKVStoreApplication struct {

func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
name := "kvstore"
- db, err := dbm.NewGoLevelDB(name, dbDir)
+ db, err := goleveldb.NewDB(name, dbDir)
if err != nil {
panic(err)
}
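Here the persistent application opens its LevelDB store through the new goleveldb subpackage instead of dbm.NewGoLevelDB. A short sketch of a caller migrating, assuming goleveldb.NewDB keeps the (name, dir) signature shown in the hunk above and returns the usual tm-db database handle:

package main

import (
	"log"

	"github.com/tendermint/tm-db/goleveldb"
)

func main() {
	// Before: db, err := dbm.NewGoLevelDB("kvstore", "/tmp/kvstore-data")
	// After:  the LevelDB backend is addressed directly; the (name, dir)
	// pair matches the call in the diff.
	db, err := goleveldb.NewDB("kvstore", "/tmp/kvstore-data")
	if err != nil {
		log.Fatalf("open leveldb: %v", err)
	}
	defer db.Close()

	if err := db.Set([]byte("height"), []byte("1")); err != nil {
		log.Fatalf("set: %v", err)
	}
}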
@@ -13,6 +13,7 @@ import (
"github.com/spf13/cobra"

dbm "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/goleveldb"

"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"

@@ -115,7 +116,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
witnessesAddrs = strings.Split(witnessAddrsJoined, ",")
}

- lightDB, err := dbm.NewGoLevelDB("light-client-db", dir)
+ lightDB, err := goleveldb.NewDB("light-client-db", dir)
if err != nil {
return fmt.Errorf("can't create a db: %w", err)
}
@@ -6,7 +6,7 @@ import (
"strings"

"github.com/spf13/cobra"
- tmdb "github.com/tendermint/tm-db"
+ tmdb "github.com/tendermint/tm-db/metadb"

abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
@@ -4,6 +4,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
db "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/metadb"
)

// ServiceProvider takes a config and a logger and returns a ready to go Node.

@@ -21,6 +22,6 @@ type DBProvider func(*DBContext) (db.DB, error)
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (db.DB, error) {
- dbType := db.BackendType(ctx.Config.DBBackend)
- return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
+ dbType := metadb.BackendType(ctx.Config.DBBackend)
+ return metadb.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
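DefaultDBProvider still returns the db.DB interface, but backend resolution now goes through the metadb subpackage. A sketch of the same pattern outside the node package, assuming metadb.BackendType and metadb.NewDB behave like the calls in the hunk above; the parameters stand in for ctx.ID, ctx.Config.DBBackend and ctx.Config.DBDir():

package main

import (
	"fmt"

	db "github.com/tendermint/tm-db"
	"github.com/tendermint/tm-db/metadb"
)

// openNodeDB mirrors the new DefaultDBProvider body: the backend name from the
// config is converted to a metadb.BackendType and the store is opened (or
// created) inside the node's data directory.
func openNodeDB(id, backend, dir string) (db.DB, error) {
	dbType := metadb.BackendType(backend)
	return metadb.NewDB(id, dbType, dir)
}

func main() {
	// "memdb" keeps the example self-contained; a real node would pass the
	// backend string from its config (e.g. "goleveldb") - an assumption here.
	store, err := openNodeDB("blockstore", "memdb", "/tmp/tm-data")
	if err != nil {
		panic(err)
	}
	defer store.Close()
	fmt.Println("opened blockstore")
}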
go.mod (2 changed lines)

@@ -33,7 +33,7 @@ require (
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.7.0
- github.com/tendermint/tm-db v0.6.4
+ github.com/tendermint/tm-db v0.6.5
github.com/vektra/mockery/v2 v2.9.0
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
go.sum (22 changed lines; only the tm-db bump is marked below, the remaining entries are shown as rendered)

@@ -50,7 +50,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=

@@ -67,7 +66,6 @@ github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+q
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=

@@ -186,12 +184,9 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/denis-tingajkin/go-header v0.4.2 h1:jEeSF4sdv8/3cT/WY8AgDHUoItNSoEZ7qg9dX7pc218=
github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k=
github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA=
github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=

@@ -199,7 +194,6 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=

@@ -218,11 +212,8 @@ github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni6
github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=

@@ -495,7 +486,6 @@ github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=

@@ -808,7 +798,6 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ
github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ=
github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=

@@ -858,10 +847,9 @@ github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D6
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U=
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
- github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ=
- github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw=
+ github.com/tendermint/tm-db v0.6.5 h1:vWv/FRqi14AKmW0tbpoPKUv47/LdsZgxdHHIyiO79wA=
+ github.com/tendermint/tm-db v0.6.5/go.mod h1:hnkblbtGOQtt6hsFY2zLLgwJ4inOeYbcIeaWDAAP46k=
github.com/tetafro/godot v1.4.7 h1:zBaoSY4JRVVz33y/qnODsdaKj2yAaMr91HCbqHCifVc=
github.com/tetafro/godot v1.4.7/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8=

@@ -914,8 +902,7 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=

@@ -1127,6 +1114,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -21,7 +21,7 @@ import (
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
- dbm "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/memdb"
)

type reactorTestSuite struct {

@@ -100,8 +100,8 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
rts.app[nodeID] = proxy.NewAppConns(proxy.NewLocalClientCreator(&abci.BaseApplication{}))
require.NoError(t, rts.app[nodeID].Start())

- blockDB := dbm.NewMemDB()
- stateDB := dbm.NewMemDB()
+ blockDB := memdb.NewDB()
+ stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
@@ -11,7 +11,6 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"

@@ -29,6 +28,7 @@ import (
sf "github.com/tendermint/tendermint/state/test/factory"
tmstore "github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

type mockPeer struct {

@@ -167,9 +167,9 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.NoError(t, err)
- db := dbm.NewMemDB()
+ db := memdb.NewDB()
stateStore := sm.NewStore(db)
- blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
+ blockStore := tmstore.NewBlockStore(memdb.NewDB())
appl = sm.NewBlockExecutor(
stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
err = stateStore.Save(state)

@@ -489,8 +489,8 @@ func newReactorStore(
panic(fmt.Errorf("error start app: %w", err))
}

- stateDB := dbm.NewMemDB()
- blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
+ stateDB := memdb.NewDB()
+ blockStore := tmstore.NewBlockStore(memdb.NewDB())
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
@@ -10,6 +10,7 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

abcicli "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/evidence"

@@ -23,7 +24,7 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
- dbm "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/memdb"
)

// Byzantine node sends two different prevotes (nil and blockID) to the same

@@ -43,7 +44,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
for i := 0; i < nValidators; i++ {
func() {
logger := consensusLogger().With("test", "byzantine", "validator", i)
- stateDB := dbm.NewMemDB() // each state needs its own db
+ stateDB := memdb.NewDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)

@@ -57,7 +58,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})

- blockDB := dbm.NewMemDB()
+ blockDB := memdb.NewDB()
blockStore := store.NewBlockStore(blockDB)

// one for mempool, one for consensus

@@ -73,7 +74,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}

// Make a full instance of the evidence pool
- evidenceDB := dbm.NewMemDB()
+ evidenceDB := memdb.NewDB()
evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore)
require.NoError(t, err)
@@ -16,8 +16,6 @@ import (

"path"

- dbm "github.com/tendermint/tm-db"
-
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"

@@ -36,6 +34,7 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

const (

@@ -397,7 +396,7 @@ func newStateWithConfig(
pv types.PrivValidator,
app abci.Application,
) *State {
- blockStore := store.NewBlockStore(dbm.NewMemDB())
+ blockStore := store.NewBlockStore(memdb.NewDB())
return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockStore)
}

@@ -423,7 +422,7 @@ func newStateWithConfigAndBlockStore(
evpool := sm.EmptyEvidencePool{}

// Make State
- stateDB := dbm.NewMemDB()
+ stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
if err := stateStore.Save(state); err != nil { // for save height 1's validators info
panic(err)

@@ -721,7 +720,7 @@ func randConsensusState(
configRootDirs := make([]string, 0, nValidators)

for i := 0; i < nValidators; i++ {
- blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
+ blockStore := store.NewBlockStore(memdb.NewDB()) // each state needs its own db
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
@@ -11,14 +11,13 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

- dbm "github.com/tendermint/tm-db"
-
"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
mempl "github.com/tendermint/tendermint/internal/mempool"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

// for testing

@@ -124,8 +123,8 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
config := configSetup(t)

state, privVals := randGenesisState(config, 1, false, 10)
- stateStore := sm.NewStore(dbm.NewMemDB())
- blockStore := store.NewBlockStore(dbm.NewMemDB())
+ stateStore := sm.NewStore(memdb.NewDB())
+ blockStore := store.NewBlockStore(memdb.NewDB())
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore)
err := stateStore.Save(state)
require.NoError(t, err)

@@ -151,8 +150,8 @@ func TestMempoolRmBadTx(t *testing.T) {

state, privVals := randGenesisState(config, 1, false, 10)
app := NewCounterApplication()
- stateStore := sm.NewStore(dbm.NewMemDB())
- blockStore := store.NewBlockStore(dbm.NewMemDB())
+ stateStore := sm.NewStore(memdb.NewDB())
+ blockStore := store.NewBlockStore(memdb.NewDB())
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockStore)
err := stateStore.Save(state)
require.NoError(t, err)
@@ -31,7 +31,7 @@ import (
statemocks "github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
- dbm "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/memdb"
)

var (

@@ -328,7 +328,7 @@ func TestReactorWithEvidence(t *testing.T) {
logger := consensusLogger()

for i := 0; i < n; i++ {
- stateDB := dbm.NewMemDB() // each state needs its own db
+ stateDB := memdb.NewDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)

@@ -342,7 +342,7 @@ func TestReactorWithEvidence(t *testing.T) {
app.InitChain(abci.RequestInitChain{Validators: vals})

pv := privVals[i]
- blockDB := dbm.NewMemDB()
+ blockDB := memdb.NewDB()
blockStore := store.NewBlockStore(blockDB)

// one for mempool, one for consensus
@@ -10,8 +10,6 @@ import (
"strconv"
"strings"

- dbm "github.com/tendermint/tm-db"
-
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"

@@ -20,6 +18,7 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
+ dbm "github.com/tendermint/tm-db/metadb"
)

const (
@@ -17,7 +17,6 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"

@@ -36,6 +35,8 @@ import (
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
+ dbm "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/memdb"
)

// These tests ensure we can always recover from failure at any part of the consensus process.

@@ -59,7 +60,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
- blockStore := store.NewBlockStore(dbm.NewMemDB())
+ blockStore := store.NewBlockStore(memdb.NewDB())
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
state,

@@ -148,8 +149,8 @@ LOOP:

// create consensus state from a clean slate
logger := log.NewNopLogger()
- blockDB := dbm.NewMemDB()
- stateDB := dbm.NewMemDB()
+ blockDB := memdb.NewDB()
+ stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())

@@ -691,7 +692,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
if testValidatorsChange {
testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
- stateDB = dbm.NewMemDB()
+ stateDB = memdb.NewDB()

genesisState = sim.GenesisState
config = sim.Config

@@ -745,7 +746,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2)
- stateDB1 := dbm.NewMemDB()
+ stateDB1 := memdb.NewDB()
stateStore := sm.NewStore(stateDB1)
err := stateStore.Save(genesisState)
require.NoError(t, err)

@@ -1155,7 +1156,7 @@ func stateAndStore(
config *cfg.Config,
pubKey crypto.PubKey,
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
- stateDB := dbm.NewMemDB()
+ stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
state.Version.Consensus.App = appVersion
@@ -11,7 +11,6 @@ import (
"time"

"github.com/stretchr/testify/require"
- db "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/kvstore"
cfg "github.com/tendermint/tendermint/config"

@@ -21,6 +20,7 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a

@@ -50,7 +50,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
if err != nil {
return fmt.Errorf("failed to read genesis file: %w", err)
}
- blockStoreDB := db.NewMemDB()
+ blockStoreDB := memdb.NewDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/memdb"

"github.com/tendermint/tendermint/internal/evidence"
"github.com/tendermint/tendermint/internal/evidence/mocks"

@@ -34,7 +35,7 @@ func TestEvidencePoolBasic(t *testing.T) {
var (
height = int64(1)
stateStore = &smmocks.Store{}
- evidenceDB = dbm.NewMemDB()
+ evidenceDB = memdb.NewDB()
blockStore = &mocks.BlockStore{}
)

@@ -92,7 +93,7 @@ func TestAddExpiredEvidence(t *testing.T) {
val = types.NewMockPV()
height = int64(30)
stateStore = initializeValidatorState(t, val, height)
- evidenceDB = dbm.NewMemDB()
+ evidenceDB = memdb.NewDB()
blockStore = &mocks.BlockStore{}
expiredEvidenceTime = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
expiredHeight = int64(2)

@@ -292,7 +293,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) {
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)

- pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err := evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)

hash := ev.Hash()

@@ -329,13 +330,13 @@ func TestRecoverPendingEvidence(t *testing.T) {
height := int64(10)
val := types.NewMockPV()
valAddress := val.PrivKey.PubKey().Address()
- evidenceDB := dbm.NewMemDB()
+ evidenceDB := memdb.NewDB()
stateStore := initializeValidatorState(t, val, height)

state, err := stateStore.Load()
require.NoError(t, err)

- blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
+ blockStore := initializeBlockStore(memdb.NewDB(), state, valAddress)

// create previous pool and populate it
pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore)

@@ -386,7 +387,7 @@ func TestRecoverPendingEvidence(t *testing.T) {
}

func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, height int64) sm.Store {
- stateDB := dbm.NewMemDB()
+ stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
state := sm.State{
ChainID: evidenceChainID,

@@ -466,10 +467,10 @@ func makeCommit(height int64, valAddr []byte) *types.Commit {
func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, types.MockPV) {
val := types.NewMockPV()
valAddress := val.PrivKey.PubKey().Address()
- evidenceDB := dbm.NewMemDB()
+ evidenceDB := memdb.NewDB()
stateStore := initializeValidatorState(t, val, height)
state, _ := stateStore.Load()
- blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
+ blockStore := initializeBlockStore(memdb.NewDB(), state, valAddress)

pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore)
require.NoError(t, err, "test evidence pool could not be created")
@@ -12,8 +12,6 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

- dbm "github.com/tendermint/tm-db"
-
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/evidence"

@@ -24,6 +22,7 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

var (

@@ -73,7 +72,7 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite {
evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
for nodeID := range rts.network.Nodes {
logger := rts.logger.With("validator", idx)
- evidenceDB := dbm.NewMemDB()
+ evidenceDB := memdb.NewDB()
blockStore := &mocks.BlockStore{}
state, _ := stateStores[idx].Load()
blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta {
@@ -9,8 +9,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

- dbm "github.com/tendermint/tm-db"
-
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/evidence"

@@ -21,6 +19,7 @@ import (
sm "github.com/tendermint/tendermint/state"
smmocks "github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

const (

@@ -91,7 +90,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
- pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err := evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)

evList := types.EvidenceList{ev}

@@ -113,20 +112,20 @@

// duplicate evidence should be rejected
evList = types.EvidenceList{ev, ev}
- pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err = evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)
assert.Error(t, pool.CheckEvidence(evList))

// If evidence is submitted with an altered timestamp it should return an error
ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute)
- pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err = evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)
assert.Error(t, pool.AddEvidence(ev))
ev.Timestamp = defaultEvidenceTime

// Evidence submitted with a different validator power should fail
ev.TotalVotingPower = 1
- pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err = evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)
assert.Error(t, pool.AddEvidence(ev))
ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower()

@@ -167,7 +166,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit)
blockStore.On("Height").Return(nodeHeight)
- pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err := evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)

// check that the evidence pool correctly verifies the evidence

@@ -185,7 +184,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
oldBlockStore.On("Height").Return(nodeHeight)
require.Equal(t, defaultEvidenceTime, oldBlockStore.LoadBlockMeta(nodeHeight).Header.Time)

- pool, err = evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, oldBlockStore)
+ pool, err = evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, oldBlockStore)
require.NoError(t, err)
assert.Error(t, pool.CheckEvidence(types.EvidenceList{ev}))
}

@@ -273,7 +272,7 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)

- pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err := evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)

evList := types.EvidenceList{ev}

@@ -359,7 +358,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)

- pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err := evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)

evList := types.EvidenceList{ev}

@@ -451,7 +450,7 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) {
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}})

- pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
+ pool, err := evidence.NewPool(log.TestingLogger(), memdb.NewDB(), stateStore, blockStore)
require.NoError(t, err)

evList := types.EvidenceList{goodEv}
@@ -8,13 +8,13 @@ import (

"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
- dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

// Network sets up an in-memory network that can be used for high-level P2P

@@ -238,7 +238,7 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
transport := n.memoryNetwork.CreateTransport(nodeID)
require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")

- peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(nodeID, memdb.NewDB(), p2p.PeerManagerOptions{
MinRetryTime: 10 * time.Millisecond,
MaxRetryTime: 100 * time.Millisecond,
RetryTimeJitter: time.Millisecond,
@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/types"
- dbm "github.com/tendermint/tm-db"
+ "github.com/tendermint/tm-db/memdb"
)

func TestPeerScoring(t *testing.T) {

@@ -17,7 +17,7 @@ func TestPeerScoring(t *testing.T) {
selfID := types.NodeIDFromPubKey(selfKey.PubKey())

// create a mock peer manager
- db := dbm.NewMemDB()
+ db := memdb.NewDB()
peerManager, err := NewPeerManager(selfID, db, PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()
@@ -9,10 +9,10 @@ import (

"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/require"
- dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tm-db/memdb"
)

// FIXME: We should probably have some randomized property-based tests for the

@@ -110,11 +110,11 @@ func TestPeerManagerOptions_Validate(t *testing.T) {

func TestNewPeerManager(t *testing.T) {
// Zero options should be valid.
- _, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ _, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Invalid options should error.
- _, err = p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ _, err = p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PersistentPeers: []types.NodeID{"foo"},
})
require.Error(t, err)

@@ -148,7 +148,7 @@ func TestNewPeerManager_Persistence(t *testing.T) {
}

// Create an initial peer manager and add the peers.
- db := dbm.NewMemDB()
+ db := memdb.NewDB()
peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{
PersistentPeers: []types.NodeID{aID},
PeerScores: map[types.NodeID]p2p.PeerScore{bID: 1},

@@ -197,7 +197,7 @@ func TestNewPeerManager_SelfIDChange(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}

- db := dbm.NewMemDB()
+ db := memdb.NewDB()
peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{})
require.NoError(t, err)

@@ -222,7 +222,7 @@ func TestPeerManager_Add(t *testing.T) {
bID := types.NodeID(strings.Repeat("b", 40))
cID := types.NodeID(strings.Repeat("c", 40))

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PersistentPeers: []types.NodeID{aID, cID},
MaxPeers: 2,
MaxConnected: 2,

@@ -275,7 +275,7 @@ func TestPeerManager_Add(t *testing.T) {
func TestPeerManager_DialNext(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Add an address. DialNext should return it.

@@ -302,7 +302,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) {
MinRetryTime: 100 * time.Millisecond,
MaxRetryTime: 500 * time.Millisecond,
}
- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options)
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), options)
require.NoError(t, err)

added, err := peerManager.Add(a)

@@ -344,7 +344,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) {
func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Spawn a goroutine to add a peer after a delay.

@@ -364,7 +364,7 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) {
}

func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) {
- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
})
require.NoError(t, err)

@@ -404,7 +404,7 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) {

func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) {
options := p2p.PeerManagerOptions{MinRetryTime: 200 * time.Millisecond}
- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options)
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), options)
require.NoError(t, err)

a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

@@ -432,7 +432,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) {
func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

added, err := peerManager.Add(a)

@@ -462,7 +462,7 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
})
require.NoError(t, err)

@@ -500,7 +500,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}
e := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("e", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PeerScores: map[types.NodeID]p2p.PeerScore{
a.NodeID: 0,
b.NodeID: 1,

@@ -582,7 +582,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
MaxConnected: 1,
MaxConnectedUpgrade: 2,

@@ -624,7 +624,7 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
bID := types.NodeID(strings.Repeat("b", 40))
b := p2p.NodeAddress{Protocol: "memory", NodeID: bID}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
})
require.NoError(t, err)

@@ -671,7 +671,7 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) {
{Protocol: "tcp", NodeID: bID, Hostname: "::1"},
}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

for _, address := range addresses {

@@ -704,7 +704,7 @@ func TestPeerManager_DialFailed(t *testing.T) {
bID := types.NodeID(strings.Repeat("b", 40))
b := p2p.NodeAddress{Protocol: "memory", NodeID: bID}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

added, err := peerManager.Add(a)

@@ -739,7 +739,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
MaxConnected: 1,
MaxConnectedUpgrade: 2,

@@ -784,7 +784,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Marking a as dialed twice should error.

@@ -811,7 +811,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) {
}

func TestPeerManager_Dialed_Self(t *testing.T) {
- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Dialing self should error.

@@ -823,7 +823,7 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
})
require.NoError(t, err)

@@ -853,7 +853,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
MaxConnectedUpgrade: 1,
PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1},

@@ -891,7 +891,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
func TestPeerManager_Dialed_Unknown(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Marking an unknown node as dialed should error.

@@ -903,7 +903,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
MaxConnectedUpgrade: 2,
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},

@@ -946,7 +946,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
MaxConnectedUpgrade: 1,
PeerScores: map[types.NodeID]p2p.PeerScore{

@@ -999,7 +999,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
MaxConnectedUpgrade: 1,
PeerScores: map[types.NodeID]p2p.PeerScore{

@@ -1047,7 +1047,7 @@ func TestPeerManager_Accepted(t *testing.T) {
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Accepting a connection from self should error.

@@ -1093,7 +1093,7 @@ func TestPeerManager_Accepted_MaxConnected(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
})
require.NoError(t, err)

@@ -1122,7 +1122,7 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) {
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PeerScores: map[types.NodeID]p2p.PeerScore{
c.NodeID: 1,
d.NodeID: 2,

@@ -1167,7 +1167,7 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PeerScores: map[types.NodeID]p2p.PeerScore{
b.NodeID: 1,
c.NodeID: 1,

@@ -1210,7 +1210,7 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
PeerScores: map[types.NodeID]p2p.PeerScore{
b.NodeID: 1,
c.NodeID: 1,

@@ -1256,7 +1256,7 @@ func TestPeerManager_Ready(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

sub := peerManager.Subscribe()

@@ -1291,7 +1291,7 @@ func TestPeerManager_Ready(t *testing.T) {
func TestPeerManager_EvictNext(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

added, err := peerManager.Add(a)

@@ -1324,7 +1324,7 @@ func TestPeerManager_EvictNext(t *testing.T) {
func TestPeerManager_EvictNext_WakeOnError(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

added, err := peerManager.Add(a)

@@ -1351,7 +1351,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
MaxConnectedUpgrade: 1,
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1},

@@ -1389,7 +1389,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
MaxConnectedUpgrade: 1,
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1},

@@ -1419,7 +1419,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

added, err := peerManager.Add(a)

@@ -1455,7 +1455,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) {
func TestPeerManager_Disconnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

sub := peerManager.Subscribe()

@@ -1509,7 +1509,7 @@ func TestPeerManager_Disconnected(t *testing.T) {
func TestPeerManager_Errored(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// Erroring an unknown peer does nothing.

@@ -1545,7 +1545,7 @@ func TestPeerManager_Errored(t *testing.T) {
func TestPeerManager_Subscribe(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

// This tests all subscription events for full peer lifecycles.

@@ -1606,7 +1606,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
func TestPeerManager_Subscribe_Close(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}

- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
+ peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
sub := peerManager.Subscribe()
|
||||
@@ -1633,7 +1633,7 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) {
|
||||
|
||||
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
|
||||
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
s1 := peerManager.Subscribe()
|
||||
@@ -1677,7 +1677,7 @@ func TestPeerManager_Close(t *testing.T) {
|
||||
|
||||
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
|
||||
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
|
||||
MinRetryTime: 10 * time.Second,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -1716,7 +1716,7 @@ func TestPeerManager_Advertise(t *testing.T) {
|
||||
dID := types.NodeID(strings.Repeat("d", 40))
|
||||
|
||||
// Create an initial peer manager and add the peers.
|
||||
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
|
||||
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{
|
||||
PeerScores: map[types.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -1764,7 +1764,7 @@ func TestPeerManager_SetHeight_GetHeight(t *testing.T) {
|
||||
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
|
||||
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
|
||||
|
||||
db := dbm.NewMemDB()
|
||||
db := memdb.NewDB()
|
||||
peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
"time"

"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
@@ -16,6 +15,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
proto "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

const (
@@ -298,7 +298,7 @@ func setupSingle(t *testing.T) *singleTestReactor {

peerCh := make(chan p2p.PeerUpdate, chBuf)
peerUpdates := p2p.NewPeerUpdates(peerCh, chBuf)
peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(nodeID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)

reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates)

@@ -16,7 +16,6 @@ import (
gogotypes "github.com/gogo/protobuf/types"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/crypto"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
@@ -25,6 +24,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

func echoReactor(channel *p2p.Channel) {
@@ -98,7 +98,7 @@ func TestRouter_Channel_Basic(t *testing.T) {
t.Cleanup(leaktest.Check(t))

// Set up a router with no transports (so no peers).
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -370,7 +370,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
mockTransport.On("Accept").Once().Return(nil, io.EOF)

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -425,7 +425,7 @@ func TestRouter_AcceptPeers_Error(t *testing.T) {
mockTransport.On("Close").Return(nil)

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -459,7 +459,7 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) {
mockTransport.On("Close").Return(nil)

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -507,7 +507,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
mockTransport.On("Accept").Once().Return(nil, io.EOF)

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -600,7 +600,7 @@ func TestRouter_DialPeers(t *testing.T) {
}

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -677,7 +677,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
}

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -758,7 +758,7 @@ func TestRouter_EvictPeers(t *testing.T) {
mockTransport.On("Accept").Once().Return(nil, io.EOF)

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -822,7 +822,7 @@ func TestRouter_ChannelCompatability(t *testing.T) {
mockTransport.On("Accept").Once().Return(nil, io.EOF)

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()

@@ -872,7 +872,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
mockTransport.On("Accept").Once().Return(nil, io.EOF)

// Set up and start the router.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
peerManager, err := p2p.NewPeerManager(selfID, memdb.NewDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
defer peerManager.Close()


@@ -10,7 +10,6 @@ import (
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
@@ -25,6 +24,7 @@ import (
smmocks "github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

type reactorTestSuite struct {
@@ -119,7 +119,7 @@ func setup(
)

rts.stateStore = &smmocks.Store{}
rts.blockStore = store.NewBlockStore(dbm.NewMemDB())
rts.blockStore = store.NewBlockStore(memdb.NewDB())

cfg := config.DefaultStateSyncConfig()

@@ -6,8 +6,6 @@ import (
"strings"
"time"

dbm "github.com/tendermint/tm-db"

tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
@@ -18,6 +16,7 @@ import (
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

//go:generate ../../scripts/mockery_generate.sh StateProvider
@@ -71,7 +70,7 @@ func NewLightClientStateProvider(
}

lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:],
lightdb.New(dbm.NewMemDB()), light.Logger(logger))
lightdb.New(memdb.NewDB()), light.Logger(logger))
if err != nil {
return nil, err
}
@@ -105,7 +104,7 @@ func NewLightClientStateProviderFromDispatcher(
}

lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:],
lightdb.New(dbm.NewMemDB()), light.Logger(logger))
lightdb.New(memdb.NewDB()), light.Logger(logger))
if err != nil {
return nil, err
}

@@ -5,13 +5,12 @@ import (
"testing"
"time"

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

// NOTE: block is produced every minute. Make sure the verification time
@@ -75,7 +74,7 @@ func BenchmarkSequence(b *testing.B) {
},
benchmarkFullNode,
[]provider.Provider{benchmarkFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
light.SequentialVerification(),
)
@@ -107,7 +106,7 @@ func BenchmarkBisection(b *testing.B) {
},
benchmarkFullNode,
[]provider.Provider{benchmarkFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
if err != nil {
@@ -138,7 +137,7 @@ func BenchmarkBackwards(b *testing.B) {
},
benchmarkFullNode,
[]provider.Provider{benchmarkFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
if err != nil {

@@ -12,8 +12,6 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
@@ -21,6 +19,7 @@ import (
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

const (
@@ -218,7 +217,7 @@ func TestClient_SequentialVerification(t *testing.T) {
trustOptions,
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.SequentialVerification(),
light.Logger(log.TestingLogger()),
)
@@ -338,7 +337,7 @@ func TestClient_SkippingVerification(t *testing.T) {
trustOptions,
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.SkippingVerification(light.DefaultTrustLevel),
light.Logger(log.TestingLogger()),
)
@@ -391,7 +390,7 @@ func TestClientLargeBisectionVerification(t *testing.T) {
},
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
require.NoError(t, err)
@@ -418,7 +417,7 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
},
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
require.NoError(t, err)
@@ -445,7 +444,7 @@ func TestClient_Cleanup(t *testing.T) {
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -467,7 +466,7 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
// 1. options.Hash == trustedHeader.Hash
t.Run("hashes should match", func(t *testing.T) {
mockNode := &provider_mocks.Provider{}
trustedStore := dbs.New(dbm.NewMemDB())
trustedStore := dbs.New(memdb.NewDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)

@@ -492,7 +491,7 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {

// 2. options.Hash != trustedHeader.Hash
t.Run("hashes should not match", func(t *testing.T) {
trustedStore := dbs.New(dbm.NewMemDB())
trustedStore := dbs.New(memdb.NewDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)

@@ -538,7 +537,7 @@ func TestClient_Update(t *testing.T) {
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -563,7 +562,7 @@ func TestClient_Concurrency(t *testing.T) {
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -609,7 +608,7 @@ func TestClient_AddProviders(t *testing.T) {
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -645,7 +644,7 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
trustOptions,
mockDeadNode,
[]provider.Provider{mockFullNode, mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)

@@ -683,7 +682,7 @@ func TestClient_BackwardsVerification(t *testing.T) {
},
mockLargeFullNode,
[]provider.Provider{mockLargeFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -760,7 +759,7 @@ func TestClient_BackwardsVerification(t *testing.T) {
},
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err, idx)
@@ -774,7 +773,7 @@ func TestClient_BackwardsVerification(t *testing.T) {

func TestClient_NewClientFromTrustedStore(t *testing.T) {
// 1) Initiate DB and fill with a "trusted" header
db := dbs.New(dbm.NewMemDB())
db := dbs.New(memdb.NewDB())
err := db.SaveLightBlock(l1)
require.NoError(t, err)
mockNode := &provider_mocks.Provider{}
@@ -833,7 +832,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) {
trustOptions,
mockFullNode,
[]provider.Provider{mockBadNode1, mockBadNode2},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
// witness should have behaved properly -> no error
@@ -889,7 +888,7 @@ func TestClient_TrustedValidatorSet(t *testing.T) {
trustOptions,
mockFullNode,
[]provider.Provider{mockBadValSetNode, mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -921,7 +920,7 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) {
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
light.PruningSize(1),
)
@@ -1011,7 +1010,7 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
trustOptions,
mockBadNode,
[]provider.Provider{mockBadNode, mockBadNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
)
require.NoError(t, err)

@@ -1049,7 +1048,7 @@ func TestClientHandlesContexts(t *testing.T) {
trustOptions,
mockNode,
[]provider.Provider{mockNode, mockNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
)
require.Error(t, ctxTimeOut.Err())
require.Error(t, err)
@@ -1062,7 +1061,7 @@ func TestClientHandlesContexts(t *testing.T) {
trustOptions,
mockNode,
[]provider.Provider{mockNode, mockNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
)
require.NoError(t, err)

@@ -9,14 +9,13 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
@@ -87,7 +86,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -198,7 +197,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) {
},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
testCase.lightOption,
)
@@ -297,7 +296,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
},
mockPrimary,
[]provider.Provider{mockWitness, accomplice},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
light.MaxClockDrift(1*time.Second),
light.MaxBlockLag(1*time.Second),
@@ -357,7 +356,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
},
mockPrimary,
[]provider.Provider{mockLaggingWitness, accomplice},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
light.MaxClockDrift(1*time.Second),
light.MaxBlockLag(1*time.Second),
@@ -391,7 +390,7 @@ func TestClientDivergentTraces1(t *testing.T) {
},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.Error(t, err)
@@ -419,7 +418,7 @@ func TestClientDivergentTraces2(t *testing.T) {
},
mockPrimaryNode,
[]provider.Provider{mockDeadNode, mockDeadNode, mockPrimaryNode},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -457,7 +456,7 @@ func TestClientDivergentTraces3(t *testing.T) {
},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@@ -495,7 +494,7 @@ func TestClientDivergentTraces4(t *testing.T) {
},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
dbs.New(memdb.NewDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)

@@ -63,7 +63,7 @@ This package provides three major things:

Example usage:

db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
db, err := goleveldb.NewDB("light-client-db", dbDir)
if err != nil {
// handle error
}

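The constructor swap shown in the documentation hunk above is the same one applied throughout this compare: memdb.NewDB() replaces dbm.NewMemDB(), and goleveldb.NewDB(name, dir) replaces dbm.NewGoLevelDB(name, dir). As a minimal standalone sketch of the new calls (import paths as imported in these files; the "example-db" name, the "." directory, and the assumption that both stores keep the usual tm-db DB interface with a Close method are illustrative, not part of the diff):

	package main

	import (
		"log"

		"github.com/tendermint/tm-db/goleveldb"
		"github.com/tendermint/tm-db/memdb"
	)

	func main() {
		// In-memory backend, as the tests in this compare use.
		mem := memdb.NewDB()
		defer mem.Close()

		// Persistent GoLevelDB backend, as the light client example above uses.
		disk, err := goleveldb.NewDB("example-db", ".")
		if err != nil {
			log.Fatal(err)
		}
		defer disk.Close()
	}
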
@@ -7,8 +7,6 @@ import (
"os"
"time"

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
@@ -16,6 +14,7 @@ import (
httpp "github.com/tendermint/tendermint/light/provider/http"
dbs "github.com/tendermint/tendermint/light/store/db"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tm-db/goleveldb"
)

// Manually getting light blocks and verifying them.
@@ -55,7 +54,7 @@ func ExampleClient() {
stdlog.Fatal(err)
}

db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
db, err := goleveldb.NewDB("light-client-db", dbDir)
if err != nil {
stdlog.Fatal(err)
}

@@ -8,7 +8,6 @@ import (
"time"

"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/libs/log"
@@ -18,6 +17,7 @@ import (
dbs "github.com/tendermint/tendermint/light/store/db"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/goleveldb"
)

// NOTE: these are ports of the tests from example_test.go but
@@ -53,7 +53,7 @@ func TestClientIntegration_Update(t *testing.T) {
block, err := waitForBlock(ctx, primary, 2)
require.NoError(t, err)

db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
db, err := goleveldb.NewDB("light-client-db", dbDir)
require.NoError(t, err)

c, err := light.NewClient(
@@ -111,7 +111,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
block, err := waitForBlock(ctx, primary, 2)
require.NoError(t, err)

db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
db, err := goleveldb.NewDB("light-client-db", dbDir)
require.NoError(t, err)

c, err := light.NewClient(ctx,

@@ -8,18 +8,17 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/test/factory"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
"github.com/tendermint/tm-db/memdb"
)

func TestLast_FirstLightBlockHeight(t *testing.T) {
dbStore := New(dbm.NewMemDB())
dbStore := New(memdb.NewDB())

// Empty store
height, err := dbStore.LastLightBlockHeight()
@@ -44,7 +43,7 @@ func TestLast_FirstLightBlockHeight(t *testing.T) {
}

func Test_SaveLightBlock(t *testing.T) {
dbStore := New(dbm.NewMemDB())
dbStore := New(memdb.NewDB())

// Empty store
h, err := dbStore.LightBlock(1)
@@ -74,7 +73,7 @@ func Test_SaveLightBlock(t *testing.T) {
}

func Test_LightBlockBefore(t *testing.T) {
dbStore := New(dbm.NewMemDB())
dbStore := New(memdb.NewDB())

assert.Panics(t, func() {
_, _ = dbStore.LightBlockBefore(0)
@@ -95,7 +94,7 @@ func Test_LightBlockBefore(t *testing.T) {
}

func Test_Prune(t *testing.T) {
dbStore := New(dbm.NewMemDB())
dbStore := New(memdb.NewDB())

// Empty store
assert.EqualValues(t, 0, dbStore.Size())
@@ -132,7 +131,7 @@ func Test_Prune(t *testing.T) {
}

func Test_Concurrency(t *testing.T) {
dbStore := New(dbm.NewMemDB())
dbStore := New(memdb.NewDB())

var wg sync.WaitGroup
for i := 1; i <= 100; i++ {

@@ -15,6 +15,7 @@ import (
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"

"github.com/tendermint/tendermint/abci/example/kvstore"
cfg "github.com/tendermint/tendermint/config"
@@ -248,8 +249,8 @@ func TestCreateProposalBlock(t *testing.T) {
mp.SetLogger(logger)

// Make EvidencePool
evidenceDB := dbm.NewMemDB()
blockStore := store.NewBlockStore(dbm.NewMemDB())
evidenceDB := memdb.NewDB()
blockStore := store.NewBlockStore(memdb.NewDB())
evidencePool, err := evidence.NewPool(logger, evidenceDB, stateStore, blockStore)
require.NoError(t, err)

@@ -322,7 +323,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
const height int64 = 1
state, stateDB, _ := state(1, height)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())
const maxBytes int64 = 16384
const partSize uint32 = 256
state.ConsensusParams.Block.MaxBytes = maxBytes
@@ -383,7 +384,7 @@ func TestMaxProposalBlockSize(t *testing.T) {

state, stateDB, _ := state(types.MaxVotesCount, int64(1))
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())
const maxBytes int64 = 1024 * 1024 * 2
state.ConsensusParams.Block.MaxBytes = maxBytes
proposerAddr, _ := state.Validators.GetByIndex(0)
@@ -614,7 +615,7 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
})

// save validators to db for 2 heights
stateDB := dbm.NewMemDB()
stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
if err := stateStore.Save(s); err != nil {
panic(err)
@@ -637,7 +638,7 @@ func TestLoadStateFromGenesis(t *testing.T) {
func loadStatefromGenesis(t *testing.T) sm.State {
t.Helper()

stateDB := dbm.NewMemDB()
stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
config := cfg.ResetTestRoot("load_state_from_genesis")

@@ -7,14 +7,13 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

func TestBlockchainInfo(t *testing.T) {
@@ -81,7 +80,7 @@ func TestBlockResults(t *testing.T) {
}

env := &Environment{}
env.StateStore = sm.NewStore(dbm.NewMemDB())
env.StateStore = sm.NewStore(memdb.NewDB())
err := env.StateStore.SaveABCIResponses(100, results)
require.NoError(t, err)
env.BlockStore = mockBlockStore{height: 100}

@@ -11,6 +11,7 @@ import (
"github.com/google/orderedcode"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"
)

func makeKey(t *testing.T, elems ...interface{}) []byte {
@@ -71,7 +72,7 @@ func getNewPrefixKeys(t *testing.T, val int) map[string][]byte {
}

func getLegacyDatabase(t *testing.T) (int, dbm.DB) {
db := dbm.NewMemDB()
db := memdb.NewDB()
batch := db.NewBatch()
ct := 0

@@ -166,11 +167,11 @@ func TestMigration(t *testing.T) {
})
t.Run("Replacement", func(t *testing.T) {
t.Run("MissingKey", func(t *testing.T) {
db := dbm.NewMemDB()
db := memdb.NewDB()
require.NoError(t, replaceKey(db, keyID("hi"), nil))
})
t.Run("ReplacementFails", func(t *testing.T) {
db := dbm.NewMemDB()
db := memdb.NewDB()
key := keyID("hi")
require.NoError(t, db.Set(key, []byte("world")))
require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) {
@@ -178,7 +179,7 @@ func TestMigration(t *testing.T) {
}))
})
t.Run("KeyDisapears", func(t *testing.T) {
db := dbm.NewMemDB()
db := memdb.NewDB()
key := keyID("hi")
require.NoError(t, db.Set(key, []byte("world")))
require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) {

@@ -24,7 +24,7 @@ import (
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"
)

var (
@@ -42,7 +42,7 @@ func TestApplyBlock(t *testing.T) {

state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)

@@ -197,7 +197,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()
evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)

blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())

blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mmock.Mempool{}, evpool, blockStore)
@@ -355,7 +355,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) {

state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())

blockExec := sm.NewBlockExecutor(
stateStore,
@@ -428,7 +428,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {

state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),

@@ -6,6 +6,7 @@ import (
"time"

dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto"
@@ -109,7 +110,7 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida
AppHash: nil,
})

stateDB := dbm.NewMemDB()
stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
if err := stateStore.Save(s); err != nil {
panic(err)

@@ -11,10 +11,11 @@ import (
blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
"github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"
)

func TestBlockIndexer(t *testing.T) {
store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events"))
store := db.NewPrefixDB(memdb.NewDB(), []byte("block_events"))
indexer := blockidxkv.New(store)

require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{

@@ -21,7 +21,7 @@ import (
kv "github.com/tendermint/tendermint/state/indexer/sink/kv"
psql "github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"
)

var psqldb *sql.DB
@@ -55,7 +55,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
pool, err := setupDB(t)
assert.Nil(t, err)

store := db.NewMemDB()
store := memdb.NewDB()
eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink}
assert.True(t, indexer.KVSinkEnabled(eventSinks))
assert.True(t, indexer.IndexingEnabled(eventSinks))

@@ -8,26 +8,28 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/state/indexer"
kvtx "github.com/tendermint/tendermint/state/indexer/tx/kv"
"github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"
)

func TestType(t *testing.T) {
kvSink := NewEventSink(db.NewMemDB())
kvSink := NewEventSink(memdb.NewDB())
assert.Equal(t, indexer.KV, kvSink.Type())
}

func TestStop(t *testing.T) {
kvSink := NewEventSink(db.NewMemDB())
kvSink := NewEventSink(memdb.NewDB())
assert.Nil(t, kvSink.Stop())
}

func TestBlockFuncs(t *testing.T) {
store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events"))
store := db.NewPrefixDB(memdb.NewDB(), []byte("block_events"))
indexer := NewEventSink(store)

require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{
@@ -158,7 +160,7 @@ func TestBlockFuncs(t *testing.T) {
}

func TestTxSearchWithCancelation(t *testing.T) {
indexer := NewEventSink(db.NewMemDB())
indexer := NewEventSink(memdb.NewDB())

txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -180,7 +182,7 @@ func TestTxSearchWithCancelation(t *testing.T) {
}

func TestTxSearchDeprecatedIndexing(t *testing.T) {
esdb := db.NewMemDB()
esdb := memdb.NewDB()
indexer := NewEventSink(esdb)

// index tx using events indexing (composite key)
@@ -260,7 +262,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) {
}

func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
indexer := NewEventSink(db.NewMemDB())
indexer := NewEventSink(memdb.NewDB())

txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -282,7 +284,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
}

func TestTxSearchMultipleTxs(t *testing.T) {
indexer := NewEventSink(db.NewMemDB())
indexer := NewEventSink(memdb.NewDB())

// indexed first, but bigger height (to test the order of transactions)
txResult := txResultWithEvents([]abci.Event{

@@ -7,11 +7,10 @@ import (
"io/ioutil"
"testing"

dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/goleveldb"
)

func BenchmarkTxSearch(b *testing.B) {
@@ -20,7 +19,7 @@ func BenchmarkTxSearch(b *testing.B) {
b.Errorf("failed to create temporary directory: %s", err)
}

db, err := dbm.NewGoLevelDB("benchmark_tx_search_test", dbDir)
db, err := goleveldb.NewDB("benchmark_tx_search_test", dbDir)
if err != nil {
b.Errorf("failed to create database: %s", err)
}

@@ -11,17 +11,16 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

db "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/pubsub/query"
tmrand "github.com/tendermint/tendermint/libs/rand"
indexer "github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
)

func TestTxIndex(t *testing.T) {
txIndexer := NewTxIndex(db.NewMemDB())
txIndexer := NewTxIndex(memdb.NewDB())

tx := types.Tx("HELLO WORLD")
txResult := &abci.TxResult{
@@ -67,7 +66,7 @@ func TestTxIndex(t *testing.T) {
}

func TestTxSearch(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(memdb.NewDB())

txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -147,7 +146,7 @@ func TestTxSearch(t *testing.T) {
}

func TestTxSearchWithCancelation(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(memdb.NewDB())

txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -165,7 +164,7 @@ func TestTxSearchWithCancelation(t *testing.T) {
}

func TestTxSearchDeprecatedIndexing(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(memdb.NewDB())

// index tx using events indexing (composite key)
txResult1 := txResultWithEvents([]abci.Event{
@@ -244,7 +243,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) {
}

func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(memdb.NewDB())

txResult := txResultWithEvents([]abci.Event{
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -266,7 +265,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
}

func TestTxSearchMultipleTxs(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB())
indexer := NewTxIndex(memdb.NewDB())

// indexed first, but bigger height (to test the order of transactions)
txResult := txResultWithEvents([]abci.Event{

@@ -13,8 +13,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/ed25519"
@@ -23,6 +21,7 @@ import (
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db/metadb"
)

// setupTestCase does setup common to all test cases.

@@ -8,8 +8,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
@@ -19,6 +17,8 @@ import (
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-db/memdb"
dbm "github.com/tendermint/tm-db/metadb"
)

const (
@@ -27,7 +27,7 @@ const (
)

func TestStoreBootstrap(t *testing.T) {
stateDB := dbm.NewMemDB()
stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
val, _ := factory.RandValidator(true, 10)
val2, _ := factory.RandValidator(true, 10)
@@ -53,7 +53,7 @@ func TestStoreBootstrap(t *testing.T) {
}

func TestStoreLoadValidators(t *testing.T) {
stateDB := dbm.NewMemDB()
stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
val, _ := factory.RandValidator(true, 10)
val2, _ := factory.RandValidator(true, 10)
@@ -140,7 +140,7 @@ func BenchmarkLoadValidators(b *testing.B) {
}

func TestStoreLoadConsensusParams(t *testing.T) {
stateDB := dbm.NewMemDB()
stateDB := memdb.NewDB()
stateStore := sm.NewStore(stateDB)
err := stateStore.Save(makeRandomStateFromConsensusParams(types.DefaultConsensusParams(), 1, 1))
require.NoError(t, err)
@@ -182,7 +182,7 @@ func TestPruneStates(t *testing.T) {
for name, tc := range testcases {
tc := tc
t.Run(name, func(t *testing.T) {
db := dbm.NewMemDB()
db := memdb.NewDB()

stateStore := sm.NewStore(db)
pk := ed25519.GenPrivKey().PubKey()

@@ -22,7 +22,7 @@ import (
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"
)

const validationTestsStopHeight int64 = 10
@@ -34,7 +34,7 @@ func TestValidateBlockHeader(t *testing.T) {

state, stateDB, privVals := makeState(3, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
@@ -121,7 +121,7 @@ func TestValidateBlockCommit(t *testing.T) {

state, stateDB, privVals := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
@@ -242,7 +242,7 @@ func TestValidateBlockEvidence(t *testing.T) {

state, stateDB, privVals := makeState(4, 1)
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(memdb.NewDB())
defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

evpool := &mocks.EvidencePool{}

@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tm-db/memdb"

cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
@@ -47,7 +48,7 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit {

func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
blockDB := dbm.NewMemDB()
blockDB := memdb.NewDB()
state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
@@ -56,7 +57,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu
}

func freshBlockStore() (*BlockStore, dbm.DB) {
db := dbm.NewMemDB()
db := memdb.NewDB()
return NewBlockStore(db), db
}

@@ -296,7 +297,7 @@ func TestLoadBaseMeta(t *testing.T) {
defer os.RemoveAll(config.RootDir)
state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
require.NoError(t, err)
bs := NewBlockStore(dbm.NewMemDB())
bs := NewBlockStore(memdb.NewDB())

for h := int64(1); h <= 10; h++ {
block := factory.MakeBlock(state, h, new(types.Commit))
@@ -352,7 +353,7 @@ func TestPruneBlocks(t *testing.T) {
defer os.RemoveAll(config.RootDir)
state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
require.NoError(t, err)
db := dbm.NewMemDB()
db := memdb.NewDB()
bs := NewBlockStore(db)
assert.EqualValues(t, 0, bs.Base())
assert.EqualValues(t, 0, bs.Height())
