tendermint/internal/p2p/peermanager_scoring_test.go
Sam Kleinman 83526cacbc p2p: peer store and dialing changes (0.35.x backport) (#8740)

package p2p

import (
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/types"
)
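
// TestPeerScoring checks that a peer's score tracked by the peer manager moves
// up and down in response to PeerStatusGood and PeerStatusBad updates, both
// when events are processed synchronously and when they arrive via a
// subscription.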
func TestPeerScoring(t *testing.T) {
	// copied from p2p_test shared variables
	selfKey := ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd})
	selfID := types.NodeIDFromPubKey(selfKey.PubKey())

	// create a mock peer manager
	db := dbm.NewMemDB()
	peerManager, err := NewPeerManager(selfID, db, PeerManagerOptions{})
	require.NoError(t, err)
	defer peerManager.Close()

	// create a fake node
	id := types.NodeID(strings.Repeat("a1", 20))
	added, err := peerManager.Add(NodeAddress{NodeID: id, Protocol: "memory"})
	require.NoError(t, err)
	require.True(t, added)

	t.Run("Synchronous", func(t *testing.T) {
		// update the manager and make sure it's correct
		require.Zero(t, peerManager.Scores()[id])

		// add a bunch of good status updates and watch things increase.
		for i := 1; i < 10; i++ {
			peerManager.processPeerEvent(PeerUpdate{
				NodeID: id,
				Status: PeerStatusGood,
			})
			require.EqualValues(t, i, peerManager.Scores()[id])
		}

		// add a bunch of bad status updates and watch the score decrease again.
		for i := 9; i > 0; i-- {
			peerManager.processPeerEvent(PeerUpdate{
				NodeID: id,
				Status: PeerStatusBad,
			})
			require.EqualValues(t, i-1, peerManager.Scores()[id])
		}
	})

	t.Run("AsynchronousIncrement", func(t *testing.T) {
		start := peerManager.Scores()[id]
		pu := peerManager.Subscribe()
		defer pu.Close()
		pu.SendUpdate(PeerUpdate{
			NodeID: id,
			Status: PeerStatusGood,
		})
		require.Eventually(t,
			func() bool { return start+1 == peerManager.Scores()[id] },
			time.Second,
			time.Millisecond,
			"startAt=%d score=%d", start, peerManager.Scores()[id])
	})

	t.Run("AsynchronousDecrement", func(t *testing.T) {
		start := peerManager.Scores()[id]
		pu := peerManager.Subscribe()
		defer pu.Close()
		pu.SendUpdate(PeerUpdate{
			NodeID: id,
			Status: PeerStatusBad,
		})
		require.Eventually(t,
			func() bool { return start-1 == peerManager.Scores()[id] },
			time.Second,
			time.Millisecond,
			"startAt=%d score=%d", start, peerManager.Scores()[id])
	})
}
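
// makeMockPeerStore builds an in-memory peerStore populated with the given peers.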
func makeMockPeerStore(t *testing.T, peers ...peerInfo) *peerStore {
	t.Helper()
	s, err := newPeerStore(dbm.NewMemDB())
	if err != nil {
		t.Fatal(err)
	}
	for idx := range peers {
		if err := s.Set(peers[idx]); err != nil {
			t.Fatal(err)
		}
	}
	return s
}
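
// TestPeerRanking verifies that Ranked orders peers by score, with
// higher-scored peers first.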
func TestPeerRanking(t *testing.T) {
	t.Run("InactiveSecond", func(t *testing.T) {
		t.Skip("inactive status is not currently factored into peer rank.")

		store := makeMockPeerStore(t,
			peerInfo{ID: "second", Inactive: true},
			peerInfo{ID: "first", Inactive: false},
		)

		ranked := store.Ranked()
		if len(ranked) != 2 {
			t.Fatal("missing peer in ranked output")
		}
		if ranked[0].ID != "first" {
			t.Error("active peer should be ranked first")
		}
		if ranked[1].ID != "second" {
			t.Error("inactive peer should be ranked second")
		}
	})

	t.Run("ScoreOrder", func(t *testing.T) {
		for _, test := range []struct {
			Name   string
			First  int64
			Second int64
		}{
			{
				Name:   "Mirror",
				First:  100,
				Second: -100,
			},
			{
				Name:   "VeryLow",
				First:  0,
				Second: -100,
			},
			{
				Name:   "High",
				First:  300,
				Second: 256,
			},
		} {
			t.Run(test.Name, func(t *testing.T) {
				store := makeMockPeerStore(t,
					peerInfo{
						ID:           "second",
						MutableScore: test.Second,
					},
					peerInfo{
						ID:           "first",
						MutableScore: test.First,
					})

				ranked := store.Ranked()
				if len(ranked) != 2 {
					t.Fatal("missing peer in ranked output")
				}
				if ranked[0].ID != "first" {
					t.Error("higher-scored peer should be ranked first")
				}
				if ranked[1].ID != "second" {
					t.Error("lower-scored peer should be ranked second")
				}
			})
		}
	})
}
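
// TestLastDialed verifies that LastDialed reports the most recent dial
// attempt across a peer's addresses and whether that attempt succeeded.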
func TestLastDialed(t *testing.T) {
	t.Run("Zero", func(t *testing.T) {
		p := &peerInfo{}
		ts, ok := p.LastDialed()
		if !ts.IsZero() {
			t.Error("timestamp should be zero:", ts)
		}
		if ok {
			t.Error("peer reported success, despite none")
		}
	})

	t.Run("NeverDialed", func(t *testing.T) {
		p := &peerInfo{
			AddressInfo: map[NodeAddress]*peerAddressInfo{
				{NodeID: "kip"}:    {},
				{NodeID: "merlin"}: {},
			},
		}
		ts, ok := p.LastDialed()
		if !ts.IsZero() {
			t.Error("timestamp should be zero:", ts)
		}
		if ok {
			t.Error("peer reported success, despite none")
		}
	})

	t.Run("Ordered", func(t *testing.T) {
		base := time.Now()
		for _, test := range []struct {
			Name            string
			SuccessTime     time.Time
			FailTime        time.Time
			ExpectedSuccess bool
		}{
			{
				Name: "Zero",
			},
			{
				Name:            "Success",
				SuccessTime:     base.Add(time.Hour),
				FailTime:        base,
				ExpectedSuccess: true,
			},
			{
				Name:            "Equal",
				SuccessTime:     base,
				FailTime:        base,
				ExpectedSuccess: true,
			},
			{
				Name:            "Failure",
				SuccessTime:     base,
				FailTime:        base.Add(time.Hour),
				ExpectedSuccess: false,
			},
		} {
			t.Run(test.Name, func(t *testing.T) {
				p := &peerInfo{
					AddressInfo: map[NodeAddress]*peerAddressInfo{
						{NodeID: "kip"}:    {LastDialSuccess: test.SuccessTime},
						{NodeID: "merlin"}: {LastDialFailure: test.FailTime},
					},
				}

				ts, ok := p.LastDialed()
				if test.ExpectedSuccess && !ts.Equal(test.SuccessTime) {
					if !ts.Equal(test.FailTime) {
						t.Fatal("got unexpected timestamp:", ts)
					}
					t.Error("last dialed time reported incorrect value:", ts)
				}
				if !test.ExpectedSuccess && !ts.Equal(test.FailTime) {
					if !ts.Equal(test.SuccessTime) {
						t.Fatal("got unexpected timestamp:", ts)
					}
					t.Error("last dialed time reported incorrect value:", ts)
				}
				if test.ExpectedSuccess != ok {
					t.Error("test reported incorrect outcome for last dialed type")
				}
			})
		}
	})
}