Merge branch 'master' into callum/p2p-provider

Callum Waters
2021-08-27 11:27:59 +02:00
83 changed files with 3308 additions and 920 deletions

View File

@@ -107,11 +107,11 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
switch opt["p2p"].(P2PMode) {
case NewP2PMode:
manifest.DisableLegacyP2P = true
manifest.UseLegacyP2P = true
case LegacyP2PMode:
manifest.DisableLegacyP2P = false
manifest.UseLegacyP2P = false
case HybridP2PMode:
manifest.DisableLegacyP2P = false
manifest.UseLegacyP2P = true
p2pNodeFactor = 2
default:
return manifest, fmt.Errorf("unknown p2p mode %s", opt["p2p"])
@@ -138,9 +138,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
node := generateNode(r, e2e.ModeSeed, 0, manifest.InitialHeight, false)
if p2pNodeFactor == 0 {
node.DisableLegacyP2P = manifest.DisableLegacyP2P
node.UseLegacyP2P = manifest.UseLegacyP2P
} else if p2pNodeFactor%i == 0 {
node.DisableLegacyP2P = !manifest.DisableLegacyP2P
node.UseLegacyP2P = !manifest.UseLegacyP2P
}
manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node
@@ -162,9 +162,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2)
if p2pNodeFactor == 0 {
node.DisableLegacyP2P = manifest.DisableLegacyP2P
node.UseLegacyP2P = manifest.UseLegacyP2P
} else if p2pNodeFactor%i == 0 {
node.DisableLegacyP2P = !manifest.DisableLegacyP2P
node.UseLegacyP2P = !manifest.UseLegacyP2P
}
manifest.Nodes[name] = node
@@ -198,9 +198,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
node := generateNode(r, e2e.ModeFull, startAt, manifest.InitialHeight, false)
if p2pNodeFactor == 0 {
node.DisableLegacyP2P = manifest.DisableLegacyP2P
node.UseLegacyP2P = manifest.UseLegacyP2P
} else if p2pNodeFactor%i == 0 {
node.DisableLegacyP2P = !manifest.DisableLegacyP2P
node.UseLegacyP2P = !manifest.UseLegacyP2P
}
manifest.Nodes[fmt.Sprintf("full%02d", i)] = node
}
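
All three node loops above apply the same rule: HybridP2PMode sets p2pNodeFactor = 2, and a node is flipped to the opposite of the manifest-wide UseLegacyP2P whenever p2pNodeFactor%i == 0; if neither branch matches, the field keeps whatever generateNode gave it. The loops must count from 1, since the modulo by i would panic on zero once p2pNodeFactor is non-zero. A standalone sketch (not code from this commit) of which branch fires for the first few node indices:

package main

import "fmt"

func main() {
    p2pNodeFactor := 2 // value set by HybridP2PMode above

    for i := 1; i <= 5; i++ {
        switch {
        case p2pNodeFactor == 0:
            fmt.Printf("node %02d: inherits manifest.UseLegacyP2P\n", i)
        case p2pNodeFactor%i == 0:
            fmt.Printf("node %02d: gets !manifest.UseLegacyP2P\n", i)
        default:
            fmt.Printf("node %02d: UseLegacyP2P left as generateNode set it\n", i)
        }
    }
}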

View File

@@ -59,8 +59,8 @@ type Manifest struct {
// by individual nodes.
LogLevel string `toml:"log_level"`
// DisableLegacyP2P enables use of the new p2p layer for all nodes in a test.
DisableLegacyP2P bool `toml:"disable_legacy_p2p"`
// UseLegacyP2P uses the legacy p2p layer for all nodes in a test.
UseLegacyP2P bool `toml:"use_legacy_p2p"`
// QueueType describes the type of queue that the system uses internally
QueueType string `toml:"queue_type"`
@@ -148,8 +148,8 @@ type ManifestNode struct {
// level.
LogLevel string `toml:"log_level"`
// UseNewP2P enables use of the new p2p layer for this node.
DisableLegacyP2P bool `toml:"disable_legacy_p2p"`
// UseLegacyP2P enables use of the legacy p2p layer for this node.
UseLegacyP2P bool `toml:"use_legacy_p2p"`
}
// Save saves the testnet manifest to a file.

View File

@@ -96,7 +96,7 @@ type Node struct {
PersistentPeers []*Node
Perturbations []Perturbation
LogLevel string
DisableLegacyP2P bool
UseLegacyP2P bool
QueueType string
}
@@ -181,7 +181,7 @@ func LoadTestnet(file string) (*Testnet, error) {
Perturbations: []Perturbation{},
LogLevel: manifest.LogLevel,
QueueType: manifest.QueueType,
DisableLegacyP2P: manifest.DisableLegacyP2P || nodeManifest.DisableLegacyP2P,
UseLegacyP2P: manifest.UseLegacyP2P && nodeManifest.UseLegacyP2P,
}
if node.StartAt == testnet.InitialHeight {
@@ -426,16 +426,6 @@ func (t Testnet) ArchiveNodes() []*Node {
return nodes
}
// RandomNode returns a random non-seed node.
func (t Testnet) RandomNode() *Node {
for {
node := t.Nodes[rand.Intn(len(t.Nodes))]
if node.Mode != ModeSeed {
return node
}
}
}
// IPv6 returns true if the testnet is an IPv6 network.
func (t Testnet) IPv6() bool {
return t.IP.IP.To4() == nil
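
Note the combinator change in LoadTestnet above: the manifest- and node-level flags were merged with || for DisableLegacyP2P but are merged with && for UseLegacyP2P. The rule is unchanged once the flag's sense is inverted; a quick standalone check of that equivalence (illustration only, not code from this commit):

package main

import "fmt"

func main() {
    // With use = !disable at each level, De Morgan's law gives
    // !(disableManifest || disableNode) == useManifest && useNode,
    // so the && form preserves the old behaviour under the renamed flag.
    for _, disableManifest := range []bool{false, true} {
        for _, disableNode := range []bool{false, true} {
            useManifest, useNode := !disableManifest, !disableNode
            oldDisable := disableManifest || disableNode
            newUse := useManifest && useNode
            fmt.Printf("manifest=%v node=%v  old disable=%v  new use=%v  consistent=%v\n",
                disableManifest, disableNode, oldDisable, newUse, newUse == !oldDisable)
        }
    }
}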

View File

@@ -3,6 +3,7 @@ package main
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"math/rand"
@@ -29,7 +30,21 @@ const lightClientEvidenceRatio = 4
// DuplicateVoteEvidence.
func InjectEvidence(testnet *e2e.Testnet, amount int) error {
// select a random node
targetNode := testnet.RandomNode()
var targetNode *e2e.Node
for i := 0; i < len(testnet.Nodes)-1; i++ {
targetNode = testnet.Nodes[rand.Intn(len(testnet.Nodes))] // nolint: gosec
if targetNode.Mode == e2e.ModeSeed {
targetNode = nil
continue
}
break
}
if targetNode == nil {
return errors.New("could not find node to inject evidence into")
}
logger.Info(fmt.Sprintf("Injecting evidence through %v (amount: %d)...", targetNode.Name, amount))
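
The dropped Testnet.RandomNode helper (see the testnet.go hunk above) looped until it found a non-seed node, which could spin forever on a seed-only testnet; the replacement bounds the number of attempts and returns an error instead. A minimal standalone sketch of that bounded-retry pattern, with a hypothetical pickNonSeed function (not part of the commit):

package main

import (
    "errors"
    "fmt"
    "math/rand"
)

// pickNonSeed is a hypothetical stand-in for the selection loop above: it
// makes a bounded number of random draws and fails rather than looping forever.
func pickNonSeed(modes []string, attempts int) (int, error) {
    for i := 0; i < attempts; i++ {
        idx := rand.Intn(len(modes))
        if modes[idx] == "seed" {
            continue
        }
        return idx, nil
    }
    return 0, errors.New("could not find a non-seed node")
}

func main() {
    idx, err := pickNonSeed([]string{"seed", "validator", "full"}, 10)
    fmt.Println(idx, err)
}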

View File

@@ -1,6 +1,7 @@
package main
import (
"container/ring"
"context"
"crypto/rand"
"errors"
@@ -93,34 +94,64 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, siz
// loadProcess processes transactions
func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) {
// Each worker gets its own client to each node, which allows for some
// concurrency while still bounding it.
clients := map[string]*rpchttp.HTTP{}
// Each worker gets its own client to each usable node, which
// allows for some concurrency while still bounding it.
clients := make([]*rpchttp.HTTP, 0, len(testnet.Nodes))
var err error
for tx := range chTx {
node := testnet.RandomNode()
client, ok := clients[node.Name]
if !ok {
client, err = node.Client()
if err != nil {
continue
}
// check that the node is up
_, err = client.Health(ctx)
if err != nil {
continue
}
clients[node.Name] = client
}
if _, err = client.BroadcastTxSync(ctx, tx); err != nil {
for idx := range testnet.Nodes {
// Construct a list of usable nodes for creating the
// load. Don't send load through seed nodes because
// they do not provide the RPC endpoints required to
// broadcast transactions.
if testnet.Nodes[idx].Mode == e2e.ModeSeed {
continue
}
chSuccess <- tx
client, err := testnet.Nodes[idx].Client()
if err != nil {
continue
}
clients = append(clients, client)
}
if len(clients) == 0 {
panic("no clients to process load")
}
// Put the clients in a ring so they can be used in a
// round-robin fashion.
clientRing := ring.New(len(clients))
for idx := range clients {
clientRing.Value = clients[idx]
clientRing = clientRing.Next()
}
var err error
for {
select {
case <-ctx.Done():
return
case tx := <-chTx:
clientRing = clientRing.Next()
client := clientRing.Value.(*rpchttp.HTTP)
if _, err := client.Health(ctx); err != nil {
continue
}
if _, err = client.BroadcastTxSync(ctx, tx); err != nil {
continue
}
select {
case chSuccess <- tx:
continue
case <-ctx.Done():
return
}
}
}
}
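
The rewritten loadProcess builds all clients up front (skipping seed nodes), places them in a container/ring, and walks the ring round-robin for each transaction instead of picking a random node per send. A small standalone example of the round-robin ring pattern (illustration only; the names here are hypothetical):

package main

import (
    "container/ring"
    "fmt"
)

func main() {
    // Build a ring the size of the client list and populate it, as the
    // diff above does for *rpchttp.HTTP clients.
    clients := []string{"validator01", "validator02", "full01"}
    r := ring.New(len(clients))
    for _, c := range clients {
        r.Value = c
        r = r.Next()
    }

    // Advance the ring once per unit of work for round-robin dispatch.
    for i := 0; i < 7; i++ {
        r = r.Next()
        fmt.Printf("tx %d -> %s\n", i, r.Value.(string))
    }
}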

View File

@@ -238,7 +238,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) {
cfg.RPC.PprofListenAddress = ":6060"
cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false))
cfg.P2P.AddrBookStrict = false
cfg.P2P.DisableLegacy = node.DisableLegacyP2P
cfg.P2P.UseLegacy = node.UseLegacyP2P
cfg.P2P.QueueType = node.QueueType
cfg.DBBackend = node.Database
cfg.StateSync.DiscoveryTime = 5 * time.Second
@@ -345,17 +345,17 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) {
// MakeAppConfig generates an ABCI application config for a node.
func MakeAppConfig(node *e2e.Node) ([]byte, error) {
cfg := map[string]interface{}{
"chain_id": node.Testnet.Name,
"dir": "data/app",
"listen": AppAddressUNIX,
"mode": node.Mode,
"proxy_port": node.ProxyPort,
"protocol": "socket",
"persist_interval": node.PersistInterval,
"snapshot_interval": node.SnapshotInterval,
"retain_blocks": node.RetainBlocks,
"key_type": node.PrivvalKey.Type(),
"disable_legacy_p2p": node.DisableLegacyP2P,
"chain_id": node.Testnet.Name,
"dir": "data/app",
"listen": AppAddressUNIX,
"mode": node.Mode,
"proxy_port": node.ProxyPort,
"protocol": "socket",
"persist_interval": node.PersistInterval,
"snapshot_interval": node.SnapshotInterval,
"retain_blocks": node.RetainBlocks,
"key_type": node.PrivvalKey.Type(),
"use_legacy_p2p": node.UseLegacyP2P,
}
switch node.ABCIProtocol {
case e2e.ProtocolUNIX:

View File

@@ -9,6 +9,9 @@ import (
)
func Start(testnet *e2e.Testnet) error {
if len(testnet.Nodes) == 0 {
return fmt.Errorf("no nodes in testnet")
}
// Nodes are already sorted by name. Sort them by name then startAt,
// which gives the overall order startAt, mode, name.
@@ -25,12 +28,11 @@ func Start(testnet *e2e.Testnet) error {
}
return false
})
sort.SliceStable(nodeQueue, func(i, j int) bool {
return nodeQueue[i].StartAt < nodeQueue[j].StartAt
})
if len(nodeQueue) == 0 {
return fmt.Errorf("no nodes in testnet")
}
if nodeQueue[0].StartAt > 0 {
return fmt.Errorf("no initial nodes in testnet")
}
@@ -49,9 +51,15 @@ func Start(testnet *e2e.Testnet) error {
logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort))
}
networkHeight := testnet.InitialHeight
// Wait for initial height
logger.Info(fmt.Sprintf("Waiting for initial height %v...", testnet.InitialHeight))
block, blockID, err := waitForHeight(testnet, testnet.InitialHeight)
logger.Info("Waiting for initial height",
"height", networkHeight,
"nodes", len(testnet.Nodes)-len(nodeQueue),
"pending", len(nodeQueue))
block, blockID, err := waitForHeight(testnet, networkHeight)
if err != nil {
return err
}
@@ -66,12 +74,28 @@ func Start(testnet *e2e.Testnet) error {
}
}
// Start up remaining nodes
for _, node := range nodeQueue {
logger.Info(fmt.Sprintf("Starting node %v at height %v...", node.Name, node.StartAt))
if _, _, err := waitForHeight(testnet, node.StartAt); err != nil {
return err
if node.StartAt > networkHeight {
// if we're starting a node that's ahead of
// the last known height of the network, then
// we should make sure that the rest of the
// network has reached at least the height
// that this node will start at before we
// start the node.
networkHeight = node.StartAt
logger.Info("Waiting for network to advance before starting catch up node",
"node", node.Name,
"height", networkHeight)
if _, _, err := waitForHeight(testnet, networkHeight); err != nil {
return err
}
}
logger.Info("Starting catch up node", "node", node.Name, "height", node.StartAt)
if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil {
return err
}
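
The catch-up loop now tracks networkHeight so it only blocks when the next node's StartAt is ahead of the last height the runner has already waited for; since nodeQueue is sorted by StartAt, no node waits on a height the network has already passed. A standalone sketch of that monotone wait pattern, with a hypothetical waitForHeight stub (not code from this commit):

package main

import "fmt"

// waitForHeight is a hypothetical stub standing in for the runner helper of
// the same name; here it just reports which heights would actually block.
func waitForHeight(h int64) {
    fmt.Printf("waiting for network height %d\n", h)
}

func main() {
    networkHeight := int64(1000) // initial height already reached above
    startAts := []int64{1000, 1000, 1005, 1005, 1010}

    for _, startAt := range startAts {
        if startAt > networkHeight {
            // Only wait when the target moves forward; nodes whose StartAt
            // is at or below the known height start immediately.
            networkHeight = startAt
            waitForHeight(networkHeight)
        }
        fmt.Printf("starting node at height %d\n", startAt)
    }
}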

View File

@@ -32,11 +32,12 @@ func TestNet_Peers(t *testing.T) {
seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself
}
for _, peerInfo := range netInfo.Peers {
peer := node.Testnet.LookupNode(peerInfo.NodeInfo.Moniker)
require.NotNil(t, peer, "unknown node %v", peerInfo.NodeInfo.Moniker)
require.Equal(t, peer.IP.String(), peerInfo.RemoteIP,
"unexpected IP address for peer %v", peer.Name)
seen[peerInfo.NodeInfo.Moniker] = true
id := peerInfo.ID
peer := node.Testnet.LookupNode(string(id))
require.NotNil(t, peer, "unknown node %v", id)
require.Contains(t, peerInfo.URL, peer.IP.String(),
"unexpected IP address for peer %v", id)
seen[string(id)] = true
}
for name := range seen {