* add separated runs by UUID (#9367)
This _should_ be the last piece needed for this tool.
This allows the tool to generate reports on multiple experimental runs that may have been performed against the same chain.
The `load` tool has been updated to generate a `UUID` on startup to uniquely identify each experimental run. The `report` tool separates all of the results it reads by `UUID` and performs separate calculations for each discovered experiment.
Sample output is as follows:
```
Experiment ID: 6bd7d1e8-d82c-4dbe-a1b3-40ab99e4fa30
Connections: 1
Rate: 1000
Size: 1024
Total Valid Tx: 9000
Total Negative Latencies: 0
Minimum Latency: 86.632837ms
Maximum Latency: 1.151089602s
Average Latency: 813.759361ms
Standard Deviation: 225.189977ms
Experiment ID: 453960af-6295-4282-aed6-367fc17c0de0
Connections: 1
Rate: 1000
Size: 1024
Total Valid Tx: 9000
Total Negative Latencies: 0
Minimum Latency: 79.312992ms
Maximum Latency: 1.162446243s
Average Latency: 422.755139ms
Standard Deviation: 241.832475ms
Total Invalid Tx: 0
```
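For illustration, here is a minimal standalone sketch of the ID handling this relies on (not the tool itself): each `load` invocation creates a single UUID with `github.com/google/uuid` and copies its raw bytes into the `Id` field of every payload it generates, which is what lets `report` group latencies per run. Names outside the `uuid` package are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// One UUID is generated per experimental run; every transaction produced
// during that run carries the same 16 bytes in its payload Id field.
func main() {
	u := [16]byte(uuid.New()) // one ID per run, as the load tool does on startup
	id := u[:]                // stored as raw bytes in the payload

	fmt.Printf("experiment id: %s\n", uuid.UUID(u))
	fmt.Printf("payload id field: %x\n", id)
}
```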
closes: #9352
#### PR checklist
- [ ] Tests written/updated, or no tests needed
- [ ] `CHANGELOG_PENDING.md` updated, or no changelog entry needed
- [ ] Updated relevant documentation (`docs/`) and code comments, or no documentation updates needed
(cherry picked from commit 1067ba1571)
# Conflicts:
# go.mod
* fix merge conflict
* fix lint
Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
Co-authored-by: William Banfield <wbanfield@gmail.com>
go.mod (14 lines changed):
```
@@ -9,7 +9,9 @@ require (
github.com/adlio/schema v1.3.3
github.com/btcsuite/btcd v0.22.1
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/bufbuild/buf v1.7.0
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/creachadair/taskgroup v0.3.2
github.com/fortytw2/leaktest v1.3.0
github.com/go-kit/kit v0.12.0
github.com/go-kit/log v0.2.1
@@ -32,6 +34,12 @@ require (
github.com/spf13/cobra v1.5.0
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.8.0
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
)

require (
github.com/gofrs/uuid v4.2.0+incompatible
github.com/google/uuid v1.3.0
github.com/tendermint/tm-db v0.6.6
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
golang.org/x/net v0.0.0-20220726230323-06994584191e
@@ -39,16 +47,14 @@ require (
)

require (
github.com/bufbuild/buf v1.7.0
github.com/creachadair/taskgroup v0.3.2
github.com/golangci/golangci-lint v1.49.0
github.com/prometheus/common v0.34.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
github.com/vektra/mockery/v2 v2.14.0
)

require (
github.com/informalsystems/tm-load-test v1.0.0
gonum.org/v1/gonum v0.11.0
google.golang.org/protobuf v1.28.1
)

@@ -118,7 +124,6 @@ require (
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gofrs/uuid v4.2.0+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.3 // indirect
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
@@ -256,7 +261,6 @@ require (
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.12 // indirect
gonum.org/v1/gonum v0.11.0 // indirect
google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect
gopkg.in/ini.v1 v1.66.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
```
go.sum (2 lines changed):
```
@@ -491,6 +491,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
```
````
@@ -54,7 +54,6 @@ where the data was saved in a `goleveldb` database.
./build/report --database-type goleveldb --data-dir ~/.tendermint/data
```

The `report` tool also supports outputting the raw data as `csv`. This can be
useful if you want to use a more powerful tool to aggregate and analyze the data.

@@ -64,3 +63,7 @@ in `out.csv`
```bash
./build/report --database-type goleveldb --data-dir ~/.tendermint/data --csv out.csv
```

The `report` tool outputs the data for each experiment separately, identified
by the UUID generated by the `load` tool at the start of the experiment. It also
outputs the experimental values used for the run.
````
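Because each CSV row is now tagged with its experiment, downstream tooling can simply group by the first column. As a hedged sketch — assuming the column layout produced by `toCSVRecords` further down in this diff (`experiment_id, duration_ns, connections, rate, size`) and the `out.csv` file name from the README example — this counts data points per experiment using only the standard library:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"os"
)

// Reads the CSV emitted by `./build/report ... --csv out.csv` and counts
// how many latency samples each experiment contributed.
func main() {
	f, err := os.Open("out.csv")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	rows, err := csv.NewReader(f).ReadAll()
	if err != nil {
		panic(err)
	}
	if len(rows) == 0 {
		panic("csv file is empty")
	}

	counts := map[string]int{}
	for _, row := range rows[1:] { // skip the header row
		counts[row[0]]++ // first column is experiment_id
	}
	for id, n := range counts {
		fmt.Printf("experiment %s: %d transactions\n", id, n)
	}
}
```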
```
@@ -3,6 +3,7 @@ package main
import (
"fmt"

"github.com/google/uuid"
"github.com/informalsystems/tm-load-test/pkg/loadtest"
"github.com/tendermint/tendermint/test/loadtime/payload"
)
@@ -20,6 +21,7 @@ type ClientFactory struct{}
// TxGenerator holds the set of information that will be used to generate
// each transaction.
type TxGenerator struct {
id []byte
conns uint64
rate uint64
size uint64
@@ -49,7 +51,9 @@ func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error {
}

func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) {
u := [16]byte(uuid.New())
return &TxGenerator{
id: u[:],
conns: uint64(cfg.Connections),
rate: uint64(cfg.Rate),
size: uint64(cfg.Size),
@@ -61,5 +65,6 @@ func (c *TxGenerator) GenerateTx() ([]byte, error) {
Connections: c.conns,
Rate: c.rate,
Size: c.size,
Id: c.id,
})
}
```
```
@@ -8,7 +8,6 @@ import (
"os"
"strconv"
"strings"
"time"

"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/test/loadtime/report"
@@ -48,7 +47,7 @@ func main() {
}
s := store.NewBlockStore(db)
defer s.Close()
r, err := report.GenerateFromBlockStore(s)
rs, err := report.GenerateFromBlockStore(s)
if err != nil {
panic(err)
}
@@ -58,30 +57,47 @@ func main() {
panic(err)
}
w := csv.NewWriter(cf)
err = w.WriteAll(toRecords(r.All))
err = w.WriteAll(toCSVRecords(rs.List()))
if err != nil {
panic(err)
}
return
}
for _, r := range rs.List() {
fmt.Printf(""+
"Experiment ID: %s\n\n"+
"\tConnections: %d\n"+
"\tRate: %d\n"+
"\tSize: %d\n\n"+
"\tTotal Valid Tx: %d\n"+
"\tTotal Negative Latencies: %d\n"+
"\tMinimum Latency: %s\n"+
"\tMaximum Latency: %s\n"+
"\tAverage Latency: %s\n"+
"\tStandard Deviation: %s\n\n", r.ID, r.Connections, r.Rate, r.Size, len(r.All), r.NegativeCount, r.Min, r.Max, r.Avg, r.StdDev) //nolint:lll

fmt.Printf(""+
"Total Valid Tx: %d\n"+
"Total Invalid Tx: %d\n"+
"Total Negative Latencies: %d\n"+
"Minimum Latency: %s\n"+
"Maximum Latency: %s\n"+
"Average Latency: %s\n"+
"Standard Deviation: %s\n", len(r.All), r.ErrorCount, r.NegativeCount, r.Min, r.Max, r.Avg, r.StdDev)
}
fmt.Printf("Total Invalid Tx: %d\n", rs.ErrorCount())
}

func toRecords(l []time.Duration) [][]string {
res := make([][]string, len(l)+1)
func toCSVRecords(rs []report.Report) [][]string {
total := 0
for _, v := range rs {
total += len(v.All)
}
res := make([][]string, total+1)

res[0] = make([]string, 1)
res[0][0] = "duration_ns"
for i, v := range l {
res[1+i] = []string{strconv.FormatInt(int64(v), 10)}
res[0] = []string{"experiment_id", "duration_ns", "connections", "rate", "size"}
offset := 1
for _, r := range rs {
idStr := r.ID.String()
connStr := strconv.FormatInt(int64(r.Connections), 10)
rateStr := strconv.FormatInt(int64(r.Rate), 10)
sizeStr := strconv.FormatInt(int64(r.Size), 10)
for i, v := range r.All {
res[offset+i] = []string{idStr, strconv.FormatInt(int64(v), 10), connStr, rateStr, sizeStr}
}
offset += len(r.All)
}
return res
}
```
```
@@ -21,6 +21,9 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// Payload is the structure of the loadtime transaction. Proto has a compact
// encoded representation, making it ideal for the loadtime usecase which aims to
// keep the generated transactions small.
type Payload struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -30,7 +33,8 @@ type Payload struct {
Rate uint64 `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
Time *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time,proto3" json:"time,omitempty"`
Padding []byte `protobuf:"bytes,5,opt,name=padding,proto3" json:"padding,omitempty"`
Id []byte `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"`
Padding []byte `protobuf:"bytes,6,opt,name=padding,proto3" json:"padding,omitempty"`
}

func (x *Payload) Reset() {
@@ -93,6 +97,13 @@ func (x *Payload) GetTime() *timestamppb.Timestamp {
return nil
}

func (x *Payload) GetId() []byte {
if x != nil {
return x.Id
}
return nil
}

func (x *Payload) GetPadding() []byte {
if x != nil {
return x.Padding
@@ -107,7 +118,7 @@ var file_payload_payload_proto_rawDesc = []byte{
0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d,
0x65, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x07, 0x50,
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, 0x0a, 0x07, 0x50,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65,
@@ -116,7 +127,8 @@ var file_payload_payload_proto_rawDesc = []byte{
0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65,
0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28,
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64,
0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d,
0x69, 0x6e, 0x74, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2f, 0x74,
```
```
@@ -9,9 +9,10 @@ import "google/protobuf/timestamp.proto";
// encoded representation, making it ideal for the loadtime usecase which aims to
// keep the generated transactions small.
message Payload {
uint64 connections = 1;
uint64 rate = 2;
uint64 size = 3;
google.protobuf.Timestamp time = 4;
bytes padding = 5;
uint64 connections = 1;
uint64 rate = 2;
uint64 size = 3;
google.protobuf.Timestamp time = 4;
bytes id = 5;
bytes padding = 6;
}
```
```
@@ -1,8 +1,10 @@
package payload_test

import (
"bytes"
"testing"

"github.com/google/uuid"
"github.com/tendermint/tendermint/test/loadtime/payload"
)

@@ -23,10 +25,12 @@ func TestRoundTrip(t *testing.T) {
testConns = 512
testRate = 4
)
testID := [16]byte(uuid.New())
b, err := payload.NewBytes(&payload.Payload{
Size: payloadSizeTarget,
Connections: testConns,
Rate: testRate,
Id: testID[:],
})
if err != nil {
t.Fatalf("generating payload %s", err)
@@ -47,4 +51,7 @@ func TestRoundTrip(t *testing.T) {
if p.Rate != testRate {
t.Fatalf("payload rate value %d does not match expected %d", p.Rate, testRate)
}
if !bytes.Equal(p.Id, testID[:]) {
t.Fatalf("payload ID value %d does not match expected %d", p.Id, testID)
}
}
```
```
@@ -5,6 +5,7 @@ import (
"sync"
"time"

"github.com/gofrs/uuid"
"github.com/tendermint/tendermint/test/loadtime/payload"
"github.com/tendermint/tendermint/types"
"gonum.org/v1/gonum/stat"
@@ -23,12 +24,9 @@ type BlockStore interface {
// Report contains the data calculated from reading the timestamped transactions
// of each block found in the blockstore.
type Report struct {
Max, Min, Avg, StdDev time.Duration

// ErrorCount is the number of parsing errors encountered while reading the
// transaction data. Parsing errors may occur if a transaction not generated
// by the payload package is submitted to the chain.
ErrorCount int
ID uuid.UUID
Rate, Connections, Size uint64
Max, Min, Avg, StdDev time.Duration

// NegativeCount is the number of negative durations encountered while
// reading the transaction data. A negative duration means that
@@ -41,19 +39,93 @@ type Report struct {
// The order of the contents of All is not guaranteed to be match the order of transactions
// in the chain.
All []time.Duration

// used for calculating average during report creation.
sum int64
}

type Reports struct {
s map[uuid.UUID]Report
l []Report

// errorCount is the number of parsing errors encountered while reading the
// transaction data. Parsing errors may occur if a transaction not generated
// by the payload package is submitted to the chain.
errorCount int
}

func (rs *Reports) List() []Report {
return rs.l
}

func (rs *Reports) ErrorCount() int {
return rs.errorCount
}

func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, conns, rate, size uint64) {
r, ok := rs.s[id]
if !ok {
r = Report{
Max: 0,
Min: math.MaxInt64,
ID: id,
Connections: conns,
Rate: rate,
Size: size,
}
rs.s[id] = r
}
r.All = append(r.All, l)
if l > r.Max {
r.Max = l
}
if l < r.Min {
r.Min = l
}
if int64(l) < 0 {
r.NegativeCount++
}
// Using an int64 here makes an assumption about the scale and quantity of the data we are processing.
// If all latencies were 2 seconds, we would need around 4 billion records to overflow this.
// We are therefore assuming that the data does not exceed these bounds.
r.sum += int64(l)
rs.s[id] = r
}

func (rs *Reports) calculateAll() {
rs.l = make([]Report, 0, len(rs.s))
for _, r := range rs.s {
if len(r.All) == 0 {
r.Min = 0
rs.l = append(rs.l, r)
continue
}
r.Avg = time.Duration(r.sum / int64(len(r.All)))
r.StdDev = time.Duration(int64(stat.StdDev(toFloat(r.All), nil)))
rs.l = append(rs.l, r)
}
}

func (rs *Reports) addError() {
rs.errorCount++
}

// GenerateFromBlockStore creates a Report using the data in the provided
// BlockStore.
func GenerateFromBlockStore(s BlockStore) (Report, error) {
func GenerateFromBlockStore(s BlockStore) (*Reports, error) {
type payloadData struct {
l time.Duration
err error
id uuid.UUID
l time.Duration
connections, rate, size uint64
err error
}
type txData struct {
tx []byte
bt time.Time
}
reports := &Reports{
s: make(map[uuid.UUID]Report),
}

// Deserializing to proto can be slow but does not depend on other data
// and can therefore be done in parallel.
@@ -78,7 +150,14 @@ func GenerateFromBlockStore(s BlockStore) (Report, error) {
}

l := b.bt.Sub(p.Time.AsTime())
pdc <- payloadData{l: l}
b := (*[16]byte)(p.Id)
pdc <- payloadData{
l: l,
id: uuid.UUID(*b),
connections: p.Connections,
rate: p.Rate,
size: p.Size,
}
}
}()
}
@@ -87,11 +166,6 @@ func GenerateFromBlockStore(s BlockStore) (Report, error) {
close(pdc)
}()

r := Report{
Max: 0,
Min: math.MaxInt64,
}
var sum int64
go func() {
base, height := s.Base(), s.Height()
prev := s.LoadBlock(base)
@@ -117,31 +191,13 @@ func GenerateFromBlockStore(s BlockStore) (Report, error) {
}()
for pd := range pdc {
if pd.err != nil {
r.ErrorCount++
reports.addError()
continue
}
r.All = append(r.All, pd.l)
if pd.l > r.Max {
r.Max = pd.l
}
if pd.l < r.Min {
r.Min = pd.l
}
if int64(pd.l) < 0 {
r.NegativeCount++
}
// Using an int64 here makes an assumption about the scale and quantity of the data we are processing.
// If all latencies were 2 seconds, we would need around 4 billion records to overflow this.
// We are therefore assuming that the data does not exceed these bounds.
sum += int64(pd.l)
reports.addDataPoint(pd.id, pd.l, pd.connections, pd.rate, pd.size)
}
if len(r.All) == 0 {
r.Min = 0
return r, nil
}
r.Avg = time.Duration(sum / int64(len(r.All)))
r.StdDev = time.Duration(int64(stat.StdDev(toFloat(r.All), nil)))
return r, nil
reports.calculateAll()
return reports, nil
}

func toFloat(in []time.Duration) []float64 {
```
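As a quick sanity check of the overflow comment carried into `addDataPoint` above ("around 4 billion records" of 2-second latencies before an int64 nanosecond sum overflows), here is a tiny standalone calculation; it is only an illustration of that stated assumption:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// How many 2-second latency samples fit into an int64 sum of nanoseconds
// before it overflows?
func main() {
	perSample := int64(2 * time.Second)    // 2e9 ns per sample
	fmt.Println(math.MaxInt64 / perSample) // ~4.6e9, i.e. "around 4 billion" samples
}
```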
```
@@ -4,6 +4,7 @@ import (
"testing"
"time"

"github.com/google/uuid"
"github.com/tendermint/tendermint/test/loadtime/payload"
"github.com/tendermint/tendermint/test/loadtime/report"
"github.com/tendermint/tendermint/types"
@@ -29,7 +30,9 @@ func (m *mockBlockStore) LoadBlock(i int64) *types.Block {

func TestGenerateReport(t *testing.T) {
t1 := time.Now()
u := [16]byte(uuid.New())
b1, err := payload.NewBytes(&payload.Payload{
Id: u[:],
Time: timestamppb.New(t1.Add(-10 * time.Second)),
Size: 1024,
})
@@ -37,6 +40,7 @@ func TestGenerateReport(t *testing.T) {
t.Fatalf("generating payload %s", err)
}
b2, err := payload.NewBytes(&payload.Payload{
Id: u[:],
Time: timestamppb.New(t1.Add(-4 * time.Second)),
Size: 1024,
})
@@ -44,6 +48,7 @@ func TestGenerateReport(t *testing.T) {
t.Fatalf("generating payload %s", err)
}
b3, err := payload.NewBytes(&payload.Payload{
Id: u[:],
Time: timestamppb.New(t1.Add(2 * time.Second)),
Size: 1024,
})
@@ -83,16 +88,21 @@ func TestGenerateReport(t *testing.T) {
},
},
}
r, err := report.GenerateFromBlockStore(s)
rs, err := report.GenerateFromBlockStore(s)
if err != nil {
t.Fatalf("generating report %s", err)
}
if rs.ErrorCount() != 1 {
t.Fatalf("ErrorCount did not match expected. Expected %d but contained %d", 1, rs.ErrorCount())
}
rl := rs.List()
if len(rl) != 1 {
t.Fatalf("number of reports did not match expected. Expected %d but contained %d", 1, len(rl))
}
r := rl[0]
if len(r.All) != 4 {
t.Fatalf("report contained different number of data points from expected. Expected %d but contained %d", 4, len(r.All)) //nolint:lll
}
if r.ErrorCount != 1 {
t.Fatalf("ErrorCount did not match expected. Expected %d but contained %d", 1, r.ErrorCount)
}
if r.NegativeCount != 2 {
t.Fatalf("NegativeCount did not match expected. Expected %d but contained %d", 2, r.NegativeCount)
}
```