Mirror of https://github.com/tendermint/tendermint.git, synced 2025-12-23 06:15:19 +00:00
test: add the loadtime report tool (#9351)
This pull request adds the report tool and modifies the loadtime libraries to better support its use.
go.mod
@@ -257,6 +257,7 @@ require (
 	golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/tools v0.1.12 // indirect
+	gonum.org/v1/gonum v0.11.0 // indirect
 	google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/ini.v1 v1.66.6 // indirect
go.sum
@@ -1660,6 +1660,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
 gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
 gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
 gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
 google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
@@ -1,8 +1,9 @@
 GOMOD="github.com/tendermint/tendermint/test/loadtime"
-OUTPUT?=build/loadtime
+OUTPUT?=build/

 build:
-	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) .
+	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT)load ./cmd/load/
+	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT)report ./cmd/report/
 .PHONY: build

 check-proto-gen-deps:
@@ -1,32 +1,66 @@
 # loadtime

-This directory contains `loadtime`, a tool for generating transaction load against Tendermint.
-`loadtime` generates transactions that contain the timestamp corresponding to when they were generated
-as well as additional metadata to track the variables used when generating the load.
+This directory contains the `loadtime` tools, a set of tools for generating
+transaction load against Tendermint and measuring their resulting latency.
+`loadtime` generates transactions that contain the timestamp of when they were
+generated as well as additional metadata to track the variables used when
+generating the load.

+
-## Building loadtime
+## Building the tool set

-The `Makefile` contains a target for building the `loadtime` tool.
+The `Makefile` contains a target for building the `loadtime` tools.

-The following command will build the tool and place the resulting binary in `./build/loadtime`.
+The following command will build the tool and place the resulting binaries in `./build/`.

 ```bash
 make build
 ```

-## Use
+## `load`

-`loadtime` leverages the [tm-load-test](https://github.com/informalsystems/tm-load-test)
+The `load` binary is built when `make build` is invoked. The `load` tool generates
+transactions and broadcasts them to Tendermint.
+
+`load` leverages the [tm-load-test](https://github.com/informalsystems/tm-load-test)
 framework. As a result, all flags and options specified on the `tm-load-test` apply to
-`loadtime`.
+`load`.

 Below is a basic invocation for generating load against a Tendermint websocket running
 on `localhost:25567`

 ```bash
-loadtime \
+./build/load \
     -c 1 -T 10 -r 1000 -s 1024 \
     --broadcast-tx-method sync \
     --endpoints ws://localhost:26657/websocket
 ```
+
+## `report`
+
+The `report` binary is built when `make build` is invoked. The `report` tool
+reads all of the blocks from the specified blockstore database and calculates
+transaction latency metrics. `report` reads transactions generated by `load`
+and uses the difference between the timestamp contained in the transaction and
+the timestamp of the block the transaction was executed in to determine transaction latency.
+`report` outputs a set of metrics calculated on the list of latencies, including
+minimum, maximum, and average latency as well as the standard deviation.
+
+Below is a basic invocation of the report tool with a data directory under `/home/test/.tendermint/data/`
+where the data was saved in a `goleveldb` database.
+
+```bash
+./build/report --database-type goleveldb --data-dir ~/.tendermint/data
+```
+
+The `report` tool also supports outputting the raw data as `csv`. This can be
+useful if you want to use a more powerful tool to aggregate and analyze the data.
+
+Below is an invocation of the report tool that outputs the data to a `csv` file
+in `out.csv`
+
+```bash
+./build/report --database-type goleveldb --data-dir ~/.tendermint/data --csv out.csv
+```
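The CSV written by `--csv` is a single `duration_ns` column with one latency per row, in nanoseconds (see `toRecords` in `cmd/report/main.go` below). A minimal sketch of the kind of follow-up analysis the README mentions might look like the following; it is not part of this commit, and the `out.csv` path and the percentile helper are assumptions made for illustration:

```go
package main

import (
	"encoding/csv"
	"log"
	"os"
	"sort"
	"strconv"
	"time"
)

func main() {
	// Open the file produced by: ./build/report ... --csv out.csv (path assumed).
	f, err := os.Open("out.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	rows, err := csv.NewReader(f).ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	if len(rows) < 2 {
		log.Fatal("no data rows in csv")
	}

	// Skip the "duration_ns" header row and parse each latency as nanoseconds.
	latencies := make([]time.Duration, 0, len(rows)-1)
	for _, row := range rows[1:] {
		ns, err := strconv.ParseInt(row[0], 10, 64)
		if err != nil {
			log.Fatal(err)
		}
		latencies = append(latencies, time.Duration(ns))
	}
	sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })

	// Nearest-rank style percentile over the sorted latencies.
	percentile := func(q float64) time.Duration {
		idx := int(q * float64(len(latencies)-1))
		return latencies[idx]
	}
	log.Printf("n=%d p50=%s p95=%s p99=%s",
		len(latencies), percentile(0.50), percentile(0.95), percentile(0.99))
}
```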
@@ -4,7 +4,7 @@ set -euo pipefail

 # A basic invocation of the loadtime tool.

-./build/loadtime \
+./build/load \
     -c 1 -T 10 -r 1000 -s 1024 \
     --broadcast-tx-method sync \
     --endpoints ws://localhost:26657/websocket
@@ -1,13 +1,10 @@
 package main

 import (
-	"crypto/rand"
 	"fmt"

 	"github.com/informalsystems/tm-load-test/pkg/loadtest"
 	"github.com/tendermint/tendermint/test/loadtime/payload"
-	"google.golang.org/protobuf/proto"
-	"google.golang.org/protobuf/types/known/timestamppb"
 )

 // Ensure all of the interfaces are correctly satisfied.
@@ -23,10 +20,9 @@ type ClientFactory struct{}
 // TxGenerator holds the set of information that will be used to generate
 // each transaction.
 type TxGenerator struct {
-	conns            uint64
-	rate             uint64
-	size             uint64
-	payloadSizeBytes uint64
+	conns uint64
+	rate  uint64
+	size  uint64
 }

 func main() {
@@ -42,11 +38,10 @@ func main() {
 }

 func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error {
-	psb, err := payload.CalculateUnpaddedSizeBytes()
+	psb, err := payload.MaxUnpaddedSize()
 	if err != nil {
 		return err
 	}
-
 	if psb > cfg.Size {
 		return fmt.Errorf("payload size exceeds configured size")
 	}
@@ -54,36 +49,17 @@ func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error {
 }

 func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) {
-	psb, err := payload.CalculateUnpaddedSizeBytes()
-	if err != nil {
-		return nil, err
-	}
 	return &TxGenerator{
-		conns:            uint64(cfg.Connections),
-		rate:             uint64(cfg.Rate),
-		size:             uint64(cfg.Size),
-		payloadSizeBytes: uint64(psb),
+		conns: uint64(cfg.Connections),
+		rate:  uint64(cfg.Rate),
+		size:  uint64(cfg.Size),
 	}, nil
 }

 func (c *TxGenerator) GenerateTx() ([]byte, error) {
-	p := &payload.Payload{
-		Time:        timestamppb.Now(),
+	return payload.NewBytes(&payload.Payload{
 		Connections: c.conns,
 		Rate:        c.rate,
 		Size:        c.size,
-		Padding:     make([]byte, c.size-c.payloadSizeBytes),
-	}
-	_, err := rand.Read(p.Padding)
-	if err != nil {
-		return nil, err
-	}
-	b, err := proto.Marshal(p)
-	if err != nil {
-		return nil, err
-	}
-
-	// prepend a single key so that the kv store only ever stores a single
-	// transaction instead of storing all tx and ballooning in size.
-	return append([]byte("a="), b...), nil
+	})
 }
test/loadtime/cmd/report/main.go (new file)
@@ -0,0 +1,87 @@
package main

import (
	"encoding/csv"
	"flag"
	"fmt"
	"log"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/test/loadtime/report"
	dbm "github.com/tendermint/tm-db"
)

var (
	db     = flag.String("database-type", "goleveldb", "the type of database holding the blockstore")
	dir    = flag.String("data-dir", "", "path to the directory containing the tendermint databases")
	csvOut = flag.String("csv", "", "dump the extracted latencies as raw csv for use in additional tooling")
)

func main() {
	flag.Parse()
	if *db == "" {
		log.Fatalf("must specify a database-type")
	}
	if *dir == "" {
		log.Fatalf("must specify a data-dir")
	}
	d := strings.TrimPrefix(*dir, "~/")
	if d != *dir {
		h, err := os.UserHomeDir()
		if err != nil {
			panic(err)
		}
		d = h + "/" + d
	}
	_, err := os.Stat(d)
	if err != nil {
		panic(err)
	}
	dbType := dbm.BackendType(*db)
	db, err := dbm.NewDB("blockstore", dbType, d)
	if err != nil {
		panic(err)
	}
	s := store.NewBlockStore(db)
	defer s.Close()
	r, err := report.GenerateFromBlockStore(s)
	if err != nil {
		panic(err)
	}
	if *csvOut != "" {
		cf, err := os.Create(*csvOut)
		if err != nil {
			panic(err)
		}
		w := csv.NewWriter(cf)
		err = w.WriteAll(toRecords(r.All))
		if err != nil {
			panic(err)
		}
		return
	}

	fmt.Printf(""+
		"Total Valid Tx: %d\n"+
		"Total Invalid Tx: %d\n"+
		"Total Negative Latencies: %d\n"+
		"Minimum Latency: %s\n"+
		"Maximum Latency: %s\n"+
		"Average Latency: %s\n"+
		"Standard Deviation: %s\n", len(r.All), r.ErrorCount, r.NegativeCount, r.Min, r.Max, r.Avg, r.StdDev)
}

func toRecords(l []time.Duration) [][]string {
	res := make([][]string, len(l)+1)

	res[0] = make([]string, 1)
	res[0][0] = "duration_ns"
	for i, v := range l {
		res[1+i] = []string{strconv.FormatInt(int64(v), 10)}
	}
	return res
}
test/loadtime/payload/payload.go (new file)
@@ -0,0 +1,87 @@
package payload

import (
	"bytes"
	"crypto/rand"
	"errors"
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

const keyPrefix = "a="

// NewBytes generates a new payload and returns the encoded representation of
// the payload as a slice of bytes. NewBytes uses the fields on the passed-in
// Payload to create the payload.
func NewBytes(p *Payload) ([]byte, error) {
	p.Padding = make([]byte, 1)
	if p.Time == nil {
		p.Time = timestamppb.Now()
	}
	us, err := CalculateUnpaddedSize(p)
	if err != nil {
		return nil, err
	}
	if p.Size < uint64(us) {
		return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", p.Size, us)
	}
	p.Padding = make([]byte, p.Size-uint64(us))
	_, err = rand.Read(p.Padding)
	if err != nil {
		return nil, err
	}
	b, err := proto.Marshal(p)
	if err != nil {
		return nil, err
	}

	// prepend a single key so that the kv store only ever stores a single
	// transaction instead of storing all tx and ballooning in size.
	return append([]byte(keyPrefix), b...), nil
}

// FromBytes extracts a payload from the byte representation of the payload.
// FromBytes leaves the padding untouched, returning it to the caller to handle
// or discard per their preference.
func FromBytes(b []byte) (*Payload, error) {
	p := &Payload{}
	tr := bytes.TrimPrefix(b, []byte(keyPrefix))
	if bytes.Equal(b, tr) {
		return nil, errors.New("payload bytes missing key prefix")
	}
	err := proto.Unmarshal(tr, p)
	if err != nil {
		return nil, err
	}
	return p, nil
}

// MaxUnpaddedSize returns the maximum size that a payload may be if no padding
// is included.
func MaxUnpaddedSize() (int, error) {
	p := &Payload{
		Time:        timestamppb.Now(),
		Connections: math.MaxUint64,
		Rate:        math.MaxUint64,
		Size:        math.MaxUint64,
		Padding:     make([]byte, 1),
	}
	return CalculateUnpaddedSize(p)
}

// CalculateUnpaddedSize calculates the size of the passed-in payload for the
// purpose of determining how much padding to add to reach the target size.
// CalculateUnpaddedSize returns an error if the payload Padding field is longer than 1.
func CalculateUnpaddedSize(p *Payload) (int, error) {
	if len(p.Padding) != 1 {
		return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding))
	}
	b, err := proto.Marshal(p)
	if err != nil {
		return 0, err
	}
	return len(b) + len(keyPrefix), nil
}
test/loadtime/payload/payload_test.go (new file)
@@ -0,0 +1,50 @@
package payload_test

import (
	"testing"

	"github.com/tendermint/tendermint/test/loadtime/payload"
)

const payloadSizeTarget = 1024 // 1kb

func TestSize(t *testing.T) {
	s, err := payload.MaxUnpaddedSize()
	if err != nil {
		t.Fatalf("calculating max unpadded size %s", err)
	}
	if s > payloadSizeTarget {
		t.Fatalf("unpadded payload size %d exceeds target %d", s, payloadSizeTarget)
	}
}

func TestRoundTrip(t *testing.T) {
	const (
		testConns = 512
		testRate  = 4
	)
	b, err := payload.NewBytes(&payload.Payload{
		Size:        payloadSizeTarget,
		Connections: testConns,
		Rate:        testRate,
	})
	if err != nil {
		t.Fatalf("generating payload %s", err)
	}
	if len(b) < payloadSizeTarget {
		t.Fatalf("payload size in bytes %d less than expected %d", len(b), payloadSizeTarget)
	}
	p, err := payload.FromBytes(b)
	if err != nil {
		t.Fatalf("reading payload %s", err)
	}
	if p.Size != payloadSizeTarget {
		t.Fatalf("payload size value %d does not match expected %d", p.Size, payloadSizeTarget)
	}
	if p.Connections != testConns {
		t.Fatalf("payload connections value %d does not match expected %d", p.Connections, testConns)
	}
	if p.Rate != testRate {
		t.Fatalf("payload rate value %d does not match expected %d", p.Rate, testRate)
	}
}
@@ -1,23 +0,0 @@
package payload

import (
	"math"

	"google.golang.org/protobuf/proto"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

func CalculateUnpaddedSizeBytes() (int, error) {
	p := &Payload{
		Time:        timestamppb.Now(),
		Connections: math.MaxUint64,
		Rate:        math.MaxUint64,
		Size:        math.MaxUint64,
		Padding:     make([]byte, 1),
	}
	b, err := proto.Marshal(p)
	if err != nil {
		return 0, err
	}
	return len(b), nil
}
@@ -1,19 +0,0 @@
package payload_test

import (
	"testing"

	"github.com/tendermint/tendermint/test/loadtime/payload"
)

const payloadSizeTarget = 1024 // 1kb

func TestCalculateSize(t *testing.T) {
	s, err := payload.CalculateUnpaddedSizeBytes()
	if err != nil {
		t.Fatalf("calculating unpadded size %s", err)
	}
	if s > payloadSizeTarget {
		t.Fatalf("unpadded payload size %d exceeds target %d", s, payloadSizeTarget)
	}
}
test/loadtime/report/report.go (new file)
@@ -0,0 +1,153 @@
package report

import (
	"math"
	"sync"
	"time"

	"github.com/tendermint/tendermint/test/loadtime/payload"
	"github.com/tendermint/tendermint/types"
	"gonum.org/v1/gonum/stat"
)

// BlockStore defines the set of methods needed by the report generator from
// Tendermint's store.Blockstore type. Using an interface allows for tests to
// more easily simulate the required behavior without having to use the more
// complex real API.
type BlockStore interface {
	Height() int64
	Base() int64
	LoadBlock(int64) *types.Block
}

// Report contains the data calculated from reading the timestamped transactions
// of each block found in the blockstore.
type Report struct {
	Max, Min, Avg, StdDev time.Duration

	// ErrorCount is the number of parsing errors encountered while reading the
	// transaction data. Parsing errors may occur if a transaction not generated
	// by the payload package is submitted to the chain.
	ErrorCount int

	// NegativeCount is the number of negative durations encountered while
	// reading the transaction data. A negative duration means that
	// a transaction timestamp was greater than the timestamp of the block it
	// was included in and likely indicates an issue with the experimental
	// setup.
	NegativeCount int

	// All contains all data points gathered from all valid transactions.
	// The order of the contents of All is not guaranteed to match the order of transactions
	// in the chain.
	All []time.Duration
}

// GenerateFromBlockStore creates a Report using the data in the provided
// BlockStore.
func GenerateFromBlockStore(s BlockStore) (Report, error) {
	type payloadData struct {
		l   time.Duration
		err error
	}
	type txData struct {
		tx []byte
		bt time.Time
	}

	// Deserializing to proto can be slow but does not depend on other data
	// and can therefore be done in parallel.
	// Deserializing in parallel does mean that the resulting data is
	// not guaranteed to be delivered in the same order it was given to the
	// worker pool.
	const poolSize = 16

	txc := make(chan txData)
	pdc := make(chan payloadData, poolSize)

	wg := &sync.WaitGroup{}
	wg.Add(poolSize)
	for i := 0; i < poolSize; i++ {
		go func() {
			defer wg.Done()
			for b := range txc {
				p, err := payload.FromBytes(b.tx)
				if err != nil {
					pdc <- payloadData{err: err}
					continue
				}

				l := b.bt.Sub(p.Time.AsTime())
				pdc <- payloadData{l: l}
			}
		}()
	}
	go func() {
		wg.Wait()
		close(pdc)
	}()

	r := Report{
		Max: 0,
		Min: math.MaxInt64,
	}
	var sum int64
	go func() {
		base, height := s.Base(), s.Height()
		prev := s.LoadBlock(base)
		for i := base + 1; i < height; i++ {
			// Data from two adjacent blocks are used here simultaneously,
			// blocks of height H and H+1. The transactions of the block of
			// height H are used with the timestamp from the block of height
			// H+1. This is done because the timestamp from H+1 is calculated
			// by using the precommits submitted at height H. The timestamp in
			// block H+1 represents the time at which block H was committed.
			//
			// In the (very unlikely) event that the very last block of the
			// chain contains payload transactions, those transactions will not
			// be used in the latency calculations because the last block whose
			// transactions are used is the block one before the last.
			cur := s.LoadBlock(i)
			for _, tx := range prev.Data.Txs {
				txc <- txData{tx: tx, bt: cur.Time}
			}
			prev = cur
		}
		close(txc)
	}()
	for pd := range pdc {
		if pd.err != nil {
			r.ErrorCount++
			continue
		}
		r.All = append(r.All, pd.l)
		if pd.l > r.Max {
			r.Max = pd.l
		}
		if pd.l < r.Min {
			r.Min = pd.l
		}
		if int64(pd.l) < 0 {
			r.NegativeCount++
		}
		// Using an int64 here makes an assumption about the scale and quantity of the data we are processing.
		// If all latencies were 2 seconds, we would need around 4 billion records to overflow this.
		// We are therefore assuming that the data does not exceed these bounds.
		sum += int64(pd.l)
	}
	if len(r.All) == 0 {
		r.Min = 0
		return r, nil
	}
	r.Avg = time.Duration(sum / int64(len(r.All)))
	r.StdDev = time.Duration(int64(stat.StdDev(toFloat(r.All), nil)))
	return r, nil
}

func toFloat(in []time.Duration) []float64 {
	r := make([]float64, len(in))
	for i, v := range in {
		r[i] = float64(int64(v))
	}
	return r
}
test/loadtime/report/report_test.go (new file)
@@ -0,0 +1,114 @@
package report_test

import (
	"testing"
	"time"

	"github.com/tendermint/tendermint/test/loadtime/payload"
	"github.com/tendermint/tendermint/test/loadtime/report"
	"github.com/tendermint/tendermint/types"
	"google.golang.org/protobuf/types/known/timestamppb"
)

type mockBlockStore struct {
	base   int64
	blocks []*types.Block
}

func (m *mockBlockStore) Height() int64 {
	return m.base + int64(len(m.blocks))
}

func (m *mockBlockStore) Base() int64 {
	return m.base
}

func (m *mockBlockStore) LoadBlock(i int64) *types.Block {
	return m.blocks[i-m.base]
}

func TestGenerateReport(t *testing.T) {
	t1 := time.Now()
	b1, err := payload.NewBytes(&payload.Payload{
		Time: timestamppb.New(t1.Add(-10 * time.Second)),
		Size: 1024,
	})
	if err != nil {
		t.Fatalf("generating payload %s", err)
	}
	b2, err := payload.NewBytes(&payload.Payload{
		Time: timestamppb.New(t1.Add(-4 * time.Second)),
		Size: 1024,
	})
	if err != nil {
		t.Fatalf("generating payload %s", err)
	}
	b3, err := payload.NewBytes(&payload.Payload{
		Time: timestamppb.New(t1.Add(2 * time.Second)),
		Size: 1024,
	})
	t2 := t1.Add(time.Second)
	if err != nil {
		t.Fatalf("generating payload %s", err)
	}
	s := &mockBlockStore{
		blocks: []*types.Block{
			{
				Data: types.Data{
					Txs: []types.Tx{b1, b2},
				},
			},
			{
				// The timestamp from block H+1 is used to calculate the
				// latency for the transactions in block H.
				Header: types.Header{
					Time: t1,
				},
				Data: types.Data{
					Txs: []types.Tx{[]byte("error")},
				},
			},
			{
				Data: types.Data{
					Txs: []types.Tx{b3, b3},
				},
			},
			{
				Header: types.Header{
					Time: t2,
				},
				Data: types.Data{
					Txs: []types.Tx{},
				},
			},
		},
	}
	r, err := report.GenerateFromBlockStore(s)
	if err != nil {
		t.Fatalf("generating report %s", err)
	}
	if len(r.All) != 4 {
		t.Fatalf("report contained different number of data points from expected. Expected %d but contained %d", 4, len(r.All))
	}
	if r.ErrorCount != 1 {
		t.Fatalf("ErrorCount did not match expected. Expected %d but contained %d", 1, r.ErrorCount)
	}
	if r.NegativeCount != 2 {
		t.Fatalf("NegativeCount did not match expected. Expected %d but contained %d", 2, r.NegativeCount)
	}
	if r.Avg != 3*time.Second {
		t.Fatalf("Avg did not match expected. Expected %s but contained %s", 3*time.Second, r.Avg)
	}
	if r.Min != -time.Second {
		t.Fatalf("Min did not match expected. Expected %s but contained %s", -time.Second, r.Min)
	}
	if r.Max != 10*time.Second {
		t.Fatalf("Max did not match expected. Expected %s but contained %s", 10*time.Second, r.Max)
	}
	// Verified using online standard deviation calculator:
	// https://www.calculator.net/standard-deviation-calculator.html?numberinputs=10%2C+4%2C+-1%2C+-1&ctype=s&x=45&y=12
	expectedStdDev := 5228129047 * time.Nanosecond
	if r.StdDev != expectedStdDev {
		t.Fatalf("StdDev did not match expected. Expected %s but contained %s", expectedStdDev, r.StdDev)
	}
}
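The `expectedStdDev` value in the test above can also be reproduced with the same `gonum` call the report package uses. The following is a minimal sketch (not part of this commit) over the four latencies the mock blockstore yields (10s, 4s, -1s, -1s):

```go
package main

import (
	"fmt"
	"time"

	"gonum.org/v1/gonum/stat"
)

func main() {
	// The four latencies produced by the mock blockstore in the test: 10s, 4s, -1s, -1s.
	latencies := []time.Duration{10 * time.Second, 4 * time.Second, -time.Second, -time.Second}
	xs := make([]float64, len(latencies))
	for i, d := range latencies {
		xs[i] = float64(d) // nanoseconds as float64, mirroring report.toFloat
	}
	// stat.StdDev returns the sample standard deviation (n-1 denominator),
	// so this prints 5.228129047s, matching the test's expectedStdDev.
	fmt.Println(time.Duration(int64(stat.StdDev(xs, nil))))
}
```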