lite2/client_benchmark_test.go (new file, 104 lines)
@@ -0,0 +1,104 @@
package lite

import (
	"testing"
	"time"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/lite2/provider"
	mockp "github.com/tendermint/tendermint/lite2/provider/mock"
	dbs "github.com/tendermint/tendermint/lite2/store/db"
	dbm "github.com/tendermint/tm-db"
)

// NOTE: a block is produced every minute. Make sure the verification time
// passed to VerifyHeaderAtHeight is consistent with the size of the
// blockchain. Benchmarking may take some time, so it can be useful to bound
// the run time or the number of iterations with the -benchtime flag,
// e.g. -benchtime 5m or -benchtime 100x.
//
// Remember that none of these benchmarks account for network latency.
var (
	largeFullNode    = mockp.New(GenMockNode(chainID, 1000, 100, 1, bTime))
	genesisHeader, _ = largeFullNode.SignedHeader(1)
)

func BenchmarkSequence(b *testing.B) {
	c, err := NewClient(
		chainID,
		TrustOptions{
			Period: 24 * time.Hour,
			Height: 1,
			Hash:   genesisHeader.Hash(),
		},
		largeFullNode,
		[]provider.Provider{largeFullNode},
		dbs.New(dbm.NewMemDB(), chainID),
		Logger(log.TestingLogger()),
		SequentialVerification(),
	)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()

	for n := 0; n < b.N; n++ {
		_, err = c.VerifyHeaderAtHeight(1000, bTime.Add(1000*time.Minute))
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkBisection(b *testing.B) {
	c, err := NewClient(
		chainID,
		TrustOptions{
			Period: 24 * time.Hour,
			Height: 1,
			Hash:   genesisHeader.Hash(),
		},
		largeFullNode,
		[]provider.Provider{largeFullNode},
		dbs.New(dbm.NewMemDB(), chainID),
		Logger(log.TestingLogger()),
	)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()

	for n := 0; n < b.N; n++ {
		_, err = c.VerifyHeaderAtHeight(1000, bTime.Add(1000*time.Minute))
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkBackwards(b *testing.B) {
	trustedHeader, _ := largeFullNode.SignedHeader(0)
	c, err := NewClient(
		chainID,
		TrustOptions{
			Period: 24 * time.Hour,
			Height: trustedHeader.Height,
			Hash:   trustedHeader.Hash(),
		},
		largeFullNode,
		[]provider.Provider{largeFullNode},
		dbs.New(dbm.NewMemDB(), chainID),
		Logger(log.TestingLogger()),
	)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()

	for n := 0; n < b.N; n++ {
		_, err = c.VerifyHeaderAtHeight(1, bTime)
		if err != nil {
			b.Fatal(err)
		}
	}
}
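
As the NOTE at the top of the file says, -benchtime bounds each benchmark by duration or by iteration count. A typical invocation from the repository root might look like the following; the ./lite2 package path matches the file location above, and the -bench regexp can be narrowed to a single benchmark:

go test -run=^$ -bench='Benchmark(Sequence|Bisection|Backwards)' -benchtime=10x ./lite2

The -run=^$ part skips the package's regular tests so only the benchmarks execute.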
@@ -160,3 +160,61 @@ func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTi
		Commit: pkz.signHeader(header, first, last),
	}
}

func (pkz privKeys) ChangeKeys(delta int) privKeys {
	newKeys := pkz[delta:]
	return newKeys.Extend(delta)
}

// GenMockNode generates the headers and validator sets needed to create a
// full mock node, with blocks up to height blockSize and with variation in
// the validator sets between blocks. Blocks are produced at one-minute
// intervals.
// NOTE: expected to be used with a large validator set, ~100 validators.
func GenMockNode(
	chainID string,
	blockSize int64,
	valSize int,
	valVariation float32,
	bTime time.Time) (
	string,
	map[int64]*types.SignedHeader,
	map[int64]*types.ValidatorSet) {

	var (
		headers         = make(map[int64]*types.SignedHeader, blockSize)
		valset          = make(map[int64]*types.ValidatorSet, blockSize)
		keys            = genPrivKeys(valSize)
		totalVariation  = valVariation
		valVariationInt int
		newKeys         privKeys
	)

	valVariationInt = int(totalVariation)
	totalVariation = -float32(valVariationInt)
	newKeys = keys.ChangeKeys(valVariationInt)

	// genesis header and vals
	lastHeader := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Minute), nil,
		keys.ToValidators(2, 2), newKeys.ToValidators(2, 2), []byte("app_hash"), []byte("cons_hash"),
		[]byte("results_hash"), 0, len(keys))
	currentHeader := lastHeader
	headers[1] = currentHeader
	valset[1] = keys.ToValidators(2, 2)
	keys = newKeys

	for height := int64(2); height <= blockSize; height++ {
		totalVariation += valVariation
		valVariationInt = int(totalVariation)
		totalVariation = -float32(valVariationInt)
		newKeys = keys.ChangeKeys(valVariationInt)
		currentHeader = keys.GenSignedHeaderLastBlockID(chainID, height, bTime.Add(time.Duration(height)*time.Minute),
			nil,
			keys.ToValidators(2, 2), newKeys.ToValidators(2, 2), []byte("app_hash"), []byte("cons_hash"),
			[]byte("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()})
		headers[height] = currentHeader
		valset[height] = keys.ToValidators(2, 2)
		lastHeader = currentHeader
		keys = newKeys
	}

	return chainID, headers, valset
}
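
To make the churn pattern concrete, here is a minimal standalone sketch of the ChangeKeys rotation defined above, with plain strings standing in for private keys (the keys type and the new-%d labels are illustrative, not part of the diff): the first delta entries are dropped and delta fresh ones appended, so the set size stays constant while membership rotates.

package main

import "fmt"

type keys []string

// Extend appends n freshly generated entries, mirroring privKeys.Extend.
func (k keys) Extend(n int) keys {
	out := append(keys{}, k...)
	for i := 0; i < n; i++ {
		out = append(out, fmt.Sprintf("new-%d", i))
	}
	return out
}

// ChangeKeys drops the first delta entries and extends by delta, as above.
func (k keys) ChangeKeys(delta int) keys {
	newKeys := k[delta:]
	return newKeys.Extend(delta)
}

func main() {
	k := keys{"v1", "v2", "v3", "v4"}
	fmt.Println(k.ChangeKeys(2)) // [v3 v4 new-0 new-1]
}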
@@ -174,9 +174,10 @@ func verifyNewHeaderAndVals(
	}

	if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) {
-		return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X)",
+		return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d",
			untrustedHeader.ValidatorsHash,
			untrustedVals.Hash(),
+			untrustedHeader.Height,
		)
	}
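
This hunk's only change is that the mismatch error now reports the offending height. A minimal standalone version of the check, using fmt.Errorf in place of the errors package and a pared-down header type (both simplifications are mine), behaves like this:

package main

import (
	"bytes"
	"fmt"
)

// header is a trimmed stand-in for the full header type.
type header struct {
	Height         int64
	ValidatorsHash []byte
}

// checkVals mirrors the comparison above: the header's declared validator
// hash must equal the hash of the validator set the caller supplied.
func checkVals(h header, suppliedValsHash []byte) error {
	if !bytes.Equal(h.ValidatorsHash, suppliedValsHash) {
		return fmt.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d",
			h.ValidatorsHash, suppliedValsHash, h.Height)
	}
	return nil
}

func main() {
	err := checkVals(header{Height: 42, ValidatorsHash: []byte{0xAA}}, []byte{0xBB})
	fmt.Println(err) // ... (AA) to match those that were supplied (BB) at height 42
}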