rpc: decouple test fixtures from node implementation (#6533)

@@ -1,4 +1,4 @@
-// Code generated by mockery v2.5.1. DO NOT EDIT.
+// Code generated by mockery 2.7.4. DO NOT EDIT.
 
 package mocks
 
@@ -796,3 +796,8 @@ func (_m *Client) String() string {
 
 	return r0
 }
+
+// Wait provides a mock function with given fields:
+func (_m *Client) Wait() {
+	_m.Called()
+}
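
The regenerated mock picks up a Wait method because the client interface now embeds the full service contract (see the Service interface hunk below). A minimal compile-time check, assuming the mock lives in rpc/client/mocks as in this repository:

    package mocks_test

    import (
    	rpcclient "github.com/tendermint/tendermint/rpc/client"
    	"github.com/tendermint/tendermint/rpc/client/mocks"
    )

    // Compile-time check: the regenerated mock must implement the whole
    // client interface, including the newly added Wait() method.
    var _ rpcclient.Client = (*mocks.Client)(nil)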
@@ -50,6 +50,9 @@ type Service interface {
 
 	// SetLogger sets a logger.
 	SetLogger(log.Logger)
+
+	// Wait blocks until the service is stopped.
+	Wait()
 }
 
 /*
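
The new Wait method follows the usual quit-channel pattern: Stop closes a channel, and Wait blocks until that happens. A hypothetical minimal implementation of the contract, not taken from this repository:

    package main

    // demoService is a hypothetical type illustrating the Stop/Wait
    // contract implied by the interface addition above.
    type demoService struct{ quit chan struct{} }

    func newDemoService() *demoService { return &demoService{quit: make(chan struct{})} }

    // Stop closes the quit channel, unblocking every Wait caller.
    func (s *demoService) Stop() error { close(s.quit); return nil }

    // Wait blocks until the service is stopped.
    func (s *demoService) Wait() { <-s.quit }

    func main() {
    	s := newDemoService()
    	go func() { _ = s.Stop() }()
    	s.Wait() // returns once Stop has run
    }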
@@ -21,10 +21,17 @@ import (
 
 // Automatically getting new headers and verifying them.
 func ExampleClient_Update() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	conf := rpctest.CreateConfig()
+
 	// Start a test application
 	app := kvstore.NewApplication()
-	n := rpctest.StartTendermint(app, rpctest.SuppressStdout)
-	defer func() { rpctest.StopTendermint(n) }()
+	_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
+	if err != nil {
+		stdlog.Fatal(err)
+	}
+	defer func() { _ = closer(ctx) }()
 
 	// give Tendermint time to generate some blocks
 	time.Sleep(5 * time.Second)
@@ -35,17 +42,14 @@ func ExampleClient_Update() {
 	}
 	defer os.RemoveAll(dbDir)
 
-	var (
-		config  = n.Config()
-		chainID = config.ChainID()
-	)
+	chainID := conf.ChainID()
 
-	primary, err := httpp.New(chainID, config.RPC.ListenAddress)
+	primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
 	if err != nil {
 		stdlog.Fatal(err)
 	}
 
-	block, err := primary.LightBlock(context.Background(), 2)
+	block, err := primary.LightBlock(ctx, 2)
 	if err != nil {
 		stdlog.Fatal(err)
 	}
@@ -56,7 +60,7 @@ func ExampleClient_Update() {
 	}
 
 	c, err := light.NewClient(
-		context.Background(),
+		ctx,
 		chainID,
 		light.TrustOptions{
 			Period: 504 * time.Hour, // 21 days
@@ -79,7 +83,7 @@ func ExampleClient_Update() {
 
 	time.Sleep(2 * time.Second)
 
-	h, err := c.Update(context.Background(), time.Now())
+	h, err := c.Update(ctx, time.Now())
 	if err != nil {
 		stdlog.Fatal(err)
 	}
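
Both light-client examples now share one teardown idiom: StartTendermint hands back a closer, and a single deferred call replaces the old StopTendermint cleanup. The idiom in isolation, with a hypothetical start function standing in for the real helper:

    package main

    import (
    	"context"
    	"fmt"
    )

    // teardown mirrors the ServiceCloser shape introduced by this commit.
    type teardown func(context.Context) error

    // start stands in for rpctest.StartTendermint; it hands back the cleanup hook.
    func start(ctx context.Context) (teardown, error) {
    	return func(context.Context) error {
    		fmt.Println("node stopped, directories removed")
    		return nil
    	}, nil
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	cleanup, err := start(ctx)
    	if err != nil {
    		panic(err)
    	}
    	// One deferred closer tears everything down, even on early return.
    	defer func() { _ = cleanup(ctx) }()
    }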
@@ -94,10 +98,18 @@ func ExampleClient_Update() {
 
 // Manually getting light blocks and verifying them.
 func ExampleClient_VerifyLightBlockAtHeight() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	conf := rpctest.CreateConfig()
+
 	// Start a test application
 	app := kvstore.NewApplication()
-	n := rpctest.StartTendermint(app, rpctest.SuppressStdout)
-	defer func() { rpctest.StopTendermint(n) }()
+
+	_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
+	if err != nil {
+		stdlog.Fatal(err)
+	}
+	defer func() { _ = closer(ctx) }()
 
 	// give Tendermint time to generate some blocks
 	time.Sleep(5 * time.Second)
@@ -108,17 +120,14 @@ func ExampleClient_VerifyLightBlockAtHeight() {
 	}
 	defer os.RemoveAll(dbDir)
 
-	var (
-		config  = n.Config()
-		chainID = config.ChainID()
-	)
+	chainID := conf.ChainID()
 
-	primary, err := httpp.New(chainID, config.RPC.ListenAddress)
+	primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
 	if err != nil {
 		stdlog.Fatal(err)
 	}
 
-	block, err := primary.LightBlock(context.Background(), 2)
+	block, err := primary.LightBlock(ctx, 2)
 	if err != nil {
 		stdlog.Fatal(err)
 	}
@@ -128,8 +137,7 @@ func ExampleClient_VerifyLightBlockAtHeight() {
 		stdlog.Fatal(err)
 	}
 
-	c, err := light.NewClient(
-		context.Background(),
+	c, err := light.NewClient(ctx,
 		chainID,
 		light.TrustOptions{
 			Period: 504 * time.Hour, // 21 days

@@ -3,16 +3,16 @@ package http_test
 import (
 	"context"
 	"fmt"
-	"os"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/tendermint/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/libs/service"
 	"github.com/tendermint/tendermint/light/provider"
 	lighthttp "github.com/tendermint/tendermint/light/provider/http"
-	"github.com/tendermint/tendermint/node"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
 	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
 	rpctest "github.com/tendermint/tendermint/rpc/test"
@@ -33,23 +33,30 @@ func TestNewProvider(t *testing.T) {
 	require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}")
 }
 
-func NodeSuite(t *testing.T) *node.Node {
+// NodeSuite initiates and runs a full node instance in the
+// background, stopping it once the test is completed
+func NodeSuite(t *testing.T) (service.Service, *config.Config) {
+	t.Helper()
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	conf := rpctest.CreateConfig()
+
 	// start a tendermint node in the background to test against
 	app := kvstore.NewApplication()
 	app.RetainBlocks = 9
-	node := rpctest.StartTendermint(app)
+
+	node, closer, err := rpctest.StartTendermint(ctx, conf, app)
+	require.NoError(t, err)
 	t.Cleanup(func() {
-		rpctest.StopTendermint(node)
-		os.RemoveAll(node.Config().RootDir)
+		_ = closer(ctx)
+		cancel()
 	})
-	return node
+	return node, conf
 }
 
 func TestProvider(t *testing.T) {
-	n := NodeSuite(t)
-	cfg := n.Config()
+	_, cfg := NodeSuite(t)
 	rpcAddr := cfg.RPC.ListenAddress
 	genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
 	require.NoError(t, err)
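
The reworked fixture returns only a generic service handle plus the config a test needs to reach the node, and registers its own teardown via t.Cleanup. A hypothetical consumer, sketched against the signatures above:

    package http_test

    import "testing"

    // TestFixtureSketch is a hypothetical example of consuming the reworked
    // fixture: the concrete node type never leaks into the test body.
    func TestFixtureSketch(t *testing.T) {
    	_, conf := NodeSuite(t) // node starts; cleanup is already registered
    	rpcAddr := conf.RPC.ListenAddress
    	_ = rpcAddr // dial the RPC endpoint from here
    }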
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.5.1. DO NOT EDIT.
+// Code generated by mockery 2.7.4. DO NOT EDIT.
 
 package mocks
 
@@ -327,3 +327,8 @@ func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool {
 
 	return r0
 }
+
+// Wait provides a mock function with given fields:
+func (_m *Peer) Wait() {
+	_m.Called()
+}

@@ -27,9 +27,9 @@ func MakeTxKV() ([]byte, []byte, []byte) {
 }
 
 func TestHeaderEvents(t *testing.T) {
-	n := NodeSuite(t)
+	n, conf := NodeSuite(t)
 
-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		i, c := i, c
 		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
 			// start for this test it if it wasn't already running
@@ -56,8 +56,8 @@ func TestHeaderEvents(t *testing.T) {
 
 // subscribe to new blocks and make sure height increments by 1
 func TestBlockEvents(t *testing.T) {
-	n := NodeSuite(t)
-	for _, c := range GetClients(t, n) {
+	n, conf := NodeSuite(t)
+	for _, c := range GetClients(t, n, conf) {
 		c := c
 		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
 
@@ -105,8 +105,8 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "a
 func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") }
 
 func testTxEventsSent(t *testing.T, broadcastMethod string) {
-	n := NodeSuite(t)
-	for _, c := range GetClients(t, n) {
+	n, conf := NodeSuite(t)
+	for _, c := range GetClients(t, n, conf) {
 		c := c
 		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
 
@@ -167,21 +167,24 @@ func TestClientsResubscribe(t *testing.T) {
 }
 
 func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) {
-	n := NodeSuite(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
 
-	c := getHTTPClient(t, n)
+	c := getHTTPClient(t, conf)
 
 	// on Subscribe
-	_, err := c.Subscribe(context.Background(), "TestHeaderEvents",
+	_, err := c.Subscribe(ctx, "TestHeaderEvents",
 		types.QueryForEvent(types.EventNewBlockHeader).String())
 	assert.Error(t, err)
 
 	// on Unsubscribe
-	err = c.Unsubscribe(context.Background(), "TestHeaderEvents",
+	err = c.Unsubscribe(ctx, "TestHeaderEvents",
 		types.QueryForEvent(types.EventNewBlockHeader).String())
 	assert.Error(t, err)
 
 	// on UnsubscribeAll
-	err = c.UnsubscribeAll(context.Background(), "TestHeaderEvents")
+	err = c.UnsubscribeAll(ctx, "TestHeaderEvents")
 	assert.Error(t, err)
 }
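
The i, c := i, c and c := c lines above shadow the range variables so each t.Run closure captures its own copy; without them, every subtest launched in the loop would observe the last iteration's values. The gotcha in isolation (behavior of Go versions contemporary with this change):

    package main

    import "fmt"

    func main() {
    	funcs := make([]func(), 0, 3)
    	for i := 0; i < 3; i++ {
    		i := i // shadow the loop variable, as the tests above do
    		funcs = append(funcs, func() { fmt.Println(i) })
    	}
    	for _, f := range funcs {
    		f() // prints 0, 1, 2; without the shadow line it would print 3, 3, 3
    	}
    }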
@@ -112,36 +112,37 @@ func makeEvidences(
 }
 
 func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) {
-	n := NodeSuite(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, config := NodeSuite(t)
+
+	// previous versions of this test used a shared fixture with
+	// other tests, and in this version we give it a little time
+	// for the node to make progress before running the test
+	time.Sleep(10 * time.Millisecond)
 
-	var (
-		config  = n.Config()
-		chainID = config.ChainID()
-	)
+	chainID := config.ChainID()
 
 	pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
 	require.NoError(t, err)
 
-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, config) {
 		correct, fakes := makeEvidences(t, pv, chainID)
 		t.Logf("client %d", i)
 
-		result, err := c.BroadcastEvidence(context.Background(), correct)
+		result, err := c.BroadcastEvidence(ctx, correct)
 		require.NoError(t, err, "BroadcastEvidence(%s) failed", correct)
 		assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash")
 
-		status, err := c.Status(context.Background())
+		status, err := c.Status(ctx)
 		require.NoError(t, err)
 		err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil)
 		require.NoError(t, err)
 
 		ed25519pub := pv.Key.PubKey.(ed25519.PubKey)
 		rawpub := ed25519pub.Bytes()
-		result2, err := c.ABCIQuery(context.Background(), "/val", rawpub)
+		result2, err := c.ABCIQuery(ctx, "/val", rawpub)
 		require.NoError(t, err)
 		qres := result2.Response
 		require.True(t, qres.IsOK())
@@ -157,14 +158,15 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) {
 	require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value))
 
 	for _, fake := range fakes {
-		_, err := c.BroadcastEvidence(context.Background(), fake)
+		_, err := c.BroadcastEvidence(ctx, fake)
 		require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake)
 	}
 	}
 }
 
 func TestBroadcastEmptyEvidence(t *testing.T) {
-	for _, c := range GetClients(t, NodeSuite(t)) {
+	n, conf := NodeSuite(t)
+	for _, c := range GetClients(t, n, conf) {
 		_, err := c.BroadcastEvidence(context.Background(), nil)
 		assert.Error(t, err)
 	}

@@ -13,17 +13,25 @@ import (
 )
 
 func ExampleHTTP_simple() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	// Start a tendermint node (and kvstore) in the background to test against
 	app := kvstore.NewApplication()
-	node := rpctest.StartTendermint(app, rpctest.SuppressStdout)
-	defer rpctest.StopTendermint(node)
+	conf := rpctest.CreateConfig()
 
-	// Create our RPC client
-	rpcAddr := node.Config().RPC.ListenAddress
-	c, err := rpchttp.New(rpcAddr)
+	_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
 	if err != nil {
 		log.Fatal(err) //nolint:gocritic
 	}
+	defer func() { _ = closer(ctx) }()
+
+	// Create our RPC client
+	rpcAddr := conf.RPC.ListenAddress
+	c, err := rpchttp.New(rpcAddr)
+	if err != nil {
+		log.Fatal(err)
+	}
 
 	// Create a transaction
 	k := []byte("name")
@@ -66,19 +74,25 @@ func ExampleHTTP_simple() {
 }
 
 func ExampleHTTP_batching() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	// Start a tendermint node (and kvstore) in the background to test against
 	app := kvstore.NewApplication()
-	node := rpctest.StartTendermint(app, rpctest.SuppressStdout)
+	conf := rpctest.CreateConfig()
 
-	// Create our RPC client
-	rpcAddr := node.Config().RPC.ListenAddress
+	_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
+	if err != nil {
+		log.Fatal(err) //nolint:gocritic
+	}
+	defer func() { _ = closer(ctx) }()
+
+	rpcAddr := conf.RPC.ListenAddress
 	c, err := rpchttp.New(rpcAddr)
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	defer rpctest.StopTendermint(node)
-
 	// Create our two transactions
 	k1 := []byte("firstName")
 	v1 := []byte("satoshi")
@@ -98,7 +112,7 @@ func ExampleHTTP_batching() {
 	// Broadcast the transaction and wait for it to commit (rather use
 	// c.BroadcastTxSync though in production).
 	if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil {
-		log.Fatal(err) // nolint:gocritic
+		log.Fatal(err)
 	}
 }
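
For reference, the batch client used in ExampleHTTP_batching queues requests locally and only performs them when Send is called. A sketch of that lifecycle, assuming a reachable node at rpcAddr and the batch methods shown in the example:

    package examples

    import (
    	"context"

    	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
    )

    // queueAndSend sketches the batch lifecycle: queued calls are not sent
    // until Send flushes them in a single round trip.
    func queueAndSend(ctx context.Context, rpcAddr string) error {
    	c, err := rpchttp.New(rpcAddr)
    	if err != nil {
    		return err
    	}
    	batch := c.NewBatch()
    	if _, err := batch.ABCIInfo(ctx); err != nil {
    		return err
    	}
    	_, err = batch.Send(ctx) // performs the queued call(s)
    	return err
    }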
@@ -29,6 +29,8 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
+//go:generate mockery --case underscore --name Client
+
 // Client wraps most important rpc calls a client would make if you want to
 // listen for events, test if it also implements events.EventSwitch.
 type Client interface {

@@ -9,9 +9,8 @@ import (
 	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
-	nm "github.com/tendermint/tendermint/node"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
-	"github.com/tendermint/tendermint/rpc/core"
+	rpccore "github.com/tendermint/tendermint/rpc/core"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
@@ -41,21 +40,28 @@ type Local struct {
 	*types.EventBus
 	Logger log.Logger
 	ctx    *rpctypes.Context
-	env    *core.Environment
+	env    *rpccore.Environment
 }
 
-// NewLocal configures a client that calls the Node directly.
-func New(node *nm.Node) *Local {
+// NodeService describes the portion of the node interface that the
+// local RPC client constructor needs to build a local client.
+type NodeService interface {
+	ConfigureRPC() (*rpccore.Environment, error)
+	EventBus() *types.EventBus
+}
+
+// New configures a client that calls the Node directly.
+func New(node NodeService) (*Local, error) {
 	env, err := node.ConfigureRPC()
 	if err != nil {
-		node.Logger.Error("Error configuring RPC", "err", err)
+		return nil, err
 	}
 	return &Local{
 		EventBus: node.EventBus(),
 		Logger:   log.NewNopLogger(),
 		ctx:      &rpctypes.Context{},
 		env:      env,
-	}
+	}, nil
 }
 
 var _ rpcclient.Client = (*Local)(nil)
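
rpclocal.New now accepts anything satisfying the two-method NodeService interface instead of the concrete node type, and reports configuration errors to its caller. A construction sketch under those assumptions; buildLocalClient is a hypothetical helper mirroring what GetClients does later in this diff:

    package clientutil

    import (
    	"errors"

    	"github.com/tendermint/tendermint/libs/service"
    	rpclocal "github.com/tendermint/tendermint/rpc/client/local"
    )

    // buildLocalClient narrows a generic service handle to the NodeService
    // interface that the local RPC client constructor requires.
    func buildLocalClient(svc service.Service) (*rpclocal.Local, error) {
    	node, ok := svc.(rpclocal.NodeService)
    	if !ok {
    		return nil, errors.New("service does not expose ConfigureRPC/EventBus")
    	}
    	return rpclocal.New(node)
    }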
@@ -1,6 +1,7 @@
 package client_test
 
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -8,26 +9,31 @@ import (
 
 	"github.com/stretchr/testify/require"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
-	"github.com/tendermint/tendermint/node"
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/libs/service"
 	rpctest "github.com/tendermint/tendermint/rpc/test"
 )
 
-func NodeSuite(t *testing.T) *node.Node {
+func NodeSuite(t *testing.T) (service.Service, *config.Config) {
+	t.Helper()
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	conf := rpctest.CreateConfig()
+
 	// start a tendermint node in the background to test against
 	dir, err := ioutil.TempDir("/tmp", fmt.Sprint("rpc-client-test-", t.Name()))
 	require.NoError(t, err)
 
 	app := kvstore.NewPersistentKVStoreApplication(dir)
-	n := rpctest.StartTendermint(app)
 
+	node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
+	require.NoError(t, err)
 	t.Cleanup(func() {
-		// and shut down proper at the end
-		rpctest.StopTendermint(n)
+		_ = closer(ctx)
+		cancel()
 		app.Close()
 
 		_ = os.RemoveAll(dir)
 	})
 
-	return n
+	return node, conf
 }

@@ -824,3 +824,8 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP
 
 	return r0, r1
 }
+
+// Wait provides a mock function with given fields:
+func (_m *Client) Wait() {
+	_m.Called()
+}

@@ -15,11 +15,12 @@ import (
 	"github.com/stretchr/testify/require"
 
 	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/config"
 	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/libs/log"
 	tmmath "github.com/tendermint/tendermint/libs/math"
+	"github.com/tendermint/tendermint/libs/service"
 	mempl "github.com/tendermint/tendermint/mempool"
-	"github.com/tendermint/tendermint/node"
 	"github.com/tendermint/tendermint/rpc/client"
 	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
 	rpclocal "github.com/tendermint/tendermint/rpc/client/local"
@@ -28,14 +29,10 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-var (
-	ctx = context.Background()
-)
-
-func getHTTPClient(t *testing.T, n *node.Node) *rpchttp.HTTP {
+func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP {
 	t.Helper()
 
-	rpcAddr := n.Config().RPC.ListenAddress
+	rpcAddr := conf.RPC.ListenAddress
 	c, err := rpchttp.New(rpcAddr)
 	require.NoError(t, err)
 
@@ -43,22 +40,31 @@ func getHTTPClient(t *testing.T, n *node.Node) *rpchttp.HTTP {
 	return c
 }
 
-func getHTTPClientWithTimeout(t *testing.T, n *node.Node, timeout time.Duration) *rpchttp.HTTP {
+func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Duration) *rpchttp.HTTP {
 	t.Helper()
-	rpcAddr := n.Config().RPC.ListenAddress
+
+	rpcAddr := conf.RPC.ListenAddress
 	c, err := rpchttp.NewWithTimeout(rpcAddr, timeout)
 	require.NoError(t, err)
 
 	c.SetLogger(log.TestingLogger())
 
 	return c
 }
 
 // GetClients returns a slice of clients for table-driven tests
-func GetClients(t *testing.T, n *node.Node) []client.Client {
+func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client.Client {
+	t.Helper()
+
+	node, ok := ns.(rpclocal.NodeService)
+	require.True(t, ok)
+
+	ncl, err := rpclocal.New(node)
+	require.NoError(t, err)
+
 	return []client.Client{
-		getHTTPClient(t, n),
-		rpclocal.New(n),
+		getHTTPClient(t, conf),
+		ncl,
 	}
 }
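
After this change an HTTP client needs only the config, and a local client needs only a service handle that can be narrowed to NodeService. A hypothetical table-driven consumer of GetClients:

    package client_test

    import "testing"

    // TestAllTransportsSketch is a hypothetical example of iterating over
    // the HTTP and local clients GetClients now builds from (service, config).
    func TestAllTransportsSketch(t *testing.T) {
    	n, conf := NodeSuite(t)
    	for _, c := range GetClients(t, n, conf) {
    		c := c
    		_ = c // exercise each client transport here
    	}
    }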
@@ -72,7 +78,7 @@ func TestNilCustomHTTPClient(t *testing.T) {
 }
 
 func TestCustomHTTPClient(t *testing.T) {
-	conf := NodeSuite(t).Config()
+	_, conf := NodeSuite(t)
 	remote := conf.RPC.ListenAddress
 	c, err := rpchttp.NewWithClient(remote, http.DefaultClient)
 	require.Nil(t, err)
@@ -82,7 +88,7 @@
 }
 
 func TestCorsEnabled(t *testing.T) {
-	conf := NodeSuite(t).Config()
+	_, conf := NodeSuite(t)
 	origin := conf.RPC.CORSAllowedOrigins[0]
 	remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http")
 
@@ -102,9 +108,9 @@ func TestStatus(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	n := NodeSuite(t)
-	for i, c := range GetClients(t, n) {
-		moniker := n.Config().Moniker
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {
+		moniker := conf.Moniker
 		status, err := c.Status(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.Equal(t, moniker, status.NodeInfo.Moniker)
@@ -115,8 +121,9 @@
 func TestInfo(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	n, conf := NodeSuite(t)
 
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	for i, c := range GetClients(t, n, conf) {
 		// status, err := c.Status()
 		// require.Nil(t, err, "%+v", err)
 		info, err := c.ABCIInfo(ctx)
@@ -128,10 +135,14 @@
 }
 
 func TestNetInfo(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		netinfo, err := nc.NetInfo(context.Background())
+		netinfo, err := nc.NetInfo(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, netinfo.Listening)
 		assert.Equal(t, 0, len(netinfo.Peers))
@@ -139,11 +150,15 @@
 }
 
 func TestDumpConsensusState(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {
 		// FIXME: fix server so it doesn't panic on invalid input
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		cons, err := nc.DumpConsensusState(context.Background())
+		cons, err := nc.DumpConsensusState(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.NotEmpty(t, cons.RoundState)
 		assert.Empty(t, cons.Peers)
@@ -151,30 +166,44 @@
 }
 
 func TestConsensusState(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		// FIXME: fix server so it doesn't panic on invalid input
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		cons, err := nc.ConsensusState(context.Background())
+		cons, err := nc.ConsensusState(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.NotEmpty(t, cons.RoundState)
 	}
 }
 
 func TestHealth(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		_, err := nc.Health(context.Background())
+		_, err := nc.Health(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 	}
 }
 
 func TestGenesisAndValidators(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {
 
 		// make sure this is the right genesis file
-		gen, err := c.Genesis(context.Background())
+		gen, err := c.Genesis(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		// get the genesis validator
 		require.Equal(t, 1, len(gen.Genesis.Validators))
@@ -182,7 +211,7 @@
 
 	// get the current validators
 	h := int64(1)
-	vals, err := c.Validators(context.Background(), &h, nil, nil)
+	vals, err := c.Validators(ctx, &h, nil, nil)
 	require.Nil(t, err, "%d: %+v", i, err)
 	require.Equal(t, 1, len(vals.Validators))
 	require.Equal(t, 1, vals.Count)
@@ -199,7 +228,9 @@ func TestGenesisChunked(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	for _, c := range GetClients(t, NodeSuite(t)) {
+	n, conf := NodeSuite(t)
+
+	for _, c := range GetClients(t, n, conf) {
 		first, err := c.GenesisChunked(ctx, 0)
 		require.NoError(t, err)
 
@@ -221,17 +252,22 @@
 }
 
 func TestABCIQuery(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		// write something
 		k, v, tx := MakeTxKV()
-		bres, err := c.BroadcastTxCommit(context.Background(), tx)
+		bres, err := c.BroadcastTxCommit(ctx, tx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		apph := bres.Height + 1 // this is where the tx will be applied to the state
 
 		// wait before querying
 		err = client.WaitForHeight(c, apph, nil)
 		require.NoError(t, err)
-		res, err := c.ABCIQuery(context.Background(), "/key", k)
+		res, err := c.ABCIQuery(ctx, "/key", k)
 		qres := res.Response
 		if assert.Nil(t, err) && assert.True(t, qres.IsOK()) {
 			assert.EqualValues(t, v, qres.Value)
@@ -241,10 +277,12 @@
 
 // Make some app checks
 func TestAppCalls(t *testing.T) {
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
 
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 
 		// get an offset of height to avoid racing and guessing
 		s, err := c.Status(ctx)
@@ -339,21 +377,26 @@
 }
 
 func TestBlockchainInfo(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		err := client.WaitForHeight(c, 10, nil)
 		require.NoError(t, err)
 
-		res, err := c.BlockchainInfo(context.Background(), 0, 0)
+		res, err := c.BlockchainInfo(ctx, 0, 0)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, res.LastHeight > 0)
 		assert.True(t, len(res.BlockMetas) > 0)
 
-		res, err = c.BlockchainInfo(context.Background(), 1, 1)
+		res, err = c.BlockchainInfo(ctx, 1, 1)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, res.LastHeight > 0)
 		assert.True(t, len(res.BlockMetas) == 1)
 
-		res, err = c.BlockchainInfo(context.Background(), 1, 10000)
+		res, err = c.BlockchainInfo(ctx, 1, 10000)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, res.LastHeight > 0)
 		assert.True(t, len(res.BlockMetas) < 100)
@@ -361,7 +404,7 @@
 		assert.NotNil(t, m)
 	}
 
-	res, err = c.BlockchainInfo(context.Background(), 10000, 1)
+	res, err = c.BlockchainInfo(ctx, 10000, 1)
 	require.NotNil(t, err)
 	assert.Nil(t, res)
 	assert.Contains(t, err.Error(), "can't be greater than max")
@@ -369,15 +412,15 @@
 }
 
 func TestBroadcastTxSync(t *testing.T) {
-	n := NodeSuite(t)
+	n, conf := NodeSuite(t)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
 	// TODO (melekes): use mempool which is set on RPC rather than getting it from node
-	mempool := n.Mempool()
+	mempool := getMempool(t, n)
 	initMempoolSize := mempool.Size()
 
-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
 		bres, err := c.BroadcastTxSync(ctx, tx)
 		require.Nil(t, err, "%d: %+v", i, err)
@@ -391,14 +434,23 @@
 	}
 }
 
+func getMempool(t *testing.T, srv service.Service) mempl.Mempool {
+	t.Helper()
+	n, ok := srv.(interface {
+		Mempool() mempl.Mempool
+	})
+	require.True(t, ok)
+	return n.Mempool()
+}
+
 func TestBroadcastTxCommit(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	n := NodeSuite(t)
+	n, conf := NodeSuite(t)
 
-	mempool := n.Mempool()
-	for i, c := range GetClients(t, n) {
+	mempool := getMempool(t, n)
+	for i, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
 		bres, err := c.BroadcastTxCommit(ctx, tx)
 		require.Nil(t, err, "%d: %+v", i, err)
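
getMempool works by asserting the service handle against an anonymous interface: the test asks for any implementation with a Mempool method rather than importing the concrete node type. The same technique in isolation, with hypothetical types:

    package main

    import "fmt"

    type starter interface{ Start() error }

    type fullNode struct{}

    func (fullNode) Start() error    { return nil }
    func (fullNode) Mempool() string { return "mempool handle" }

    func main() {
    	var s starter = fullNode{}
    	// Assert to an anonymous interface naming only the method we need,
    	// keeping the concrete type out of the caller entirely.
    	n, ok := s.(interface{ Mempool() string })
    	fmt.Println(ok, n.Mempool())
    }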
@@ -410,12 +462,16 @@ func TestBroadcastTxCommit(t *testing.T) {
 }
 
 func TestUnconfirmedTxs(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, _, tx := MakeTxKV()
 	ch := make(chan *abci.Response, 1)
-	n := NodeSuite(t)
-	mempool := n.Mempool()
 
-	err := mempool.CheckTx(context.Background(), tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
+	n, conf := NodeSuite(t)
+	mempool := getMempool(t, n)
+	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
 
 	require.NoError(t, err)
 
 	// wait for tx to arrive in mempoool.
@@ -425,10 +481,10 @@
 		t.Error("Timed out waiting for CheckTx callback")
 	}
 
-	for _, c := range GetClients(t, n) {
+	for _, c := range GetClients(t, n, conf) {
 		mc := c.(client.MempoolClient)
 		limit := 1
-		res, err := mc.UnconfirmedTxs(context.Background(), &limit)
+		res, err := mc.UnconfirmedTxs(ctx, &limit)
 		require.NoError(t, err)
 
 		assert.Equal(t, 1, res.Count)
@@ -441,12 +497,16 @@
 	}
 }
 
 func TestNumUnconfirmedTxs(t *testing.T) {
-	_, _, tx := MakeTxKV()
-	n := NodeSuite(t)
-	ch := make(chan *abci.Response, 1)
-	mempool := n.Mempool()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	err := mempool.CheckTx(context.Background(), tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
+	_, _, tx := MakeTxKV()
+
+	n, conf := NodeSuite(t)
+	ch := make(chan *abci.Response, 1)
+	mempool := getMempool(t, n)
+
+	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
 	require.NoError(t, err)
 
 	// wait for tx to arrive in mempoool.
@@ -457,10 +517,10 @@
 	}
 
 	mempoolSize := mempool.Size()
-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		mc, ok := c.(client.MempoolClient)
 		require.True(t, ok, "%d", i)
-		res, err := mc.NumUnconfirmedTxs(context.Background())
+		res, err := mc.NumUnconfirmedTxs(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 
 		assert.Equal(t, mempoolSize, res.Count)
@@ -472,12 +532,13 @@
 	}
 }
 
 func TestCheckTx(t *testing.T) {
-	n := NodeSuite(t)
-	mempool := n.Mempool()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	for _, c := range GetClients(t, n) {
+	n, conf := NodeSuite(t)
+	mempool := getMempool(t, n)
+
+	for _, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
 
 		res, err := c.CheckTx(ctx, tx)
@@ -489,12 +550,13 @@
 	}
 }
 
 func TestTx(t *testing.T) {
-	// first we broadcast a tx
-	n := NodeSuite(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	n, conf := NodeSuite(t)
 
-	c := getHTTPClient(t, n)
+	c := getHTTPClient(t, conf)
+
+	// first we broadcast a tx
 	_, _, tx := MakeTxKV()
 	bres, err := c.BroadcastTxCommit(ctx, tx)
 	require.Nil(t, err, "%+v", err)
@@ -518,7 +580,7 @@
 		{false, true, nil},
 	}
 
-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		for j, tc := range cases {
 			t.Logf("client %d, case %d", i, j)
 
@@ -550,8 +612,8 @@ func TestTxSearchWithTimeout(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	n := NodeSuite(t)
-	timeoutClient := getHTTPClientWithTimeout(t, n, 10*time.Second)
+	_, conf := NodeSuite(t)
+	timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second)
 
 	_, _, tx := MakeTxKV()
 	_, err := timeoutClient.BroadcastTxCommit(ctx, tx)
@@ -564,8 +626,8 @@
 }
 
 func TestTxSearch(t *testing.T) {
-	n := NodeSuite(t)
-	c := getHTTPClient(t, n)
+	n, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
 
 	// first we broadcast a few txs
 	for i := 0; i < 10; i++ {
@@ -584,7 +646,7 @@
 	find := result.Txs[len(result.Txs)-1]
 	anotherTxHash := types.Tx("a different tx").Hash()
 
-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		t.Logf("client %d", i)
 
 		// now we query for the tx.
@@ -688,18 +750,22 @@
 }
 
 func TestBatchedJSONRPCCalls(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
-	testBatchedJSONRPCCalls(t, c)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
+	testBatchedJSONRPCCalls(ctx, t, c)
 }
 
-func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
+func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) {
 	k1, v1, tx1 := MakeTxKV()
 	k2, v2, tx2 := MakeTxKV()
 
 	batch := c.NewBatch()
-	r1, err := batch.BroadcastTxCommit(context.Background(), tx1)
+	r1, err := batch.BroadcastTxCommit(ctx, tx1)
 	require.NoError(t, err)
-	r2, err := batch.BroadcastTxCommit(context.Background(), tx2)
+	r2, err := batch.BroadcastTxCommit(ctx, tx2)
 	require.NoError(t, err)
 	require.Equal(t, 2, batch.Count())
 	bresults, err := batch.Send(ctx)
@@ -718,9 +784,9 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
 	err = client.WaitForHeight(c, apph, nil)
 	require.NoError(t, err)
 
-	q1, err := batch.ABCIQuery(context.Background(), "/key", k1)
+	q1, err := batch.ABCIQuery(ctx, "/key", k1)
 	require.NoError(t, err)
-	q2, err := batch.ABCIQuery(context.Background(), "/key", k2)
+	q2, err := batch.ABCIQuery(ctx, "/key", k2)
 	require.NoError(t, err)
 	require.Equal(t, 2, batch.Count())
 	qresults, err := batch.Send(ctx)
@@ -742,14 +808,18 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
 }
 
 func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
 	_, _, tx1 := MakeTxKV()
 	_, _, tx2 := MakeTxKV()
 
 	batch := c.NewBatch()
-	_, err := batch.BroadcastTxCommit(context.Background(), tx1)
+	_, err := batch.BroadcastTxCommit(ctx, tx1)
 	require.NoError(t, err)
-	_, err = batch.BroadcastTxCommit(context.Background(), tx2)
+	_, err = batch.BroadcastTxCommit(ctx, tx2)
 	require.NoError(t, err)
 	// we should have 2 requests waiting
 	require.Equal(t, 2, batch.Count())
@@ -760,27 +830,35 @@
 }
 
 func TestSendingEmptyRequestBatch(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
 	batch := c.NewBatch()
 	_, err := batch.Send(ctx)
 	require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error")
 }
 
 func TestClearingEmptyRequestBatch(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
 	batch := c.NewBatch()
 	require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result")
 }
 
 func TestConcurrentJSONRPCBatching(t *testing.T) {
-	n := NodeSuite(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
 	var wg sync.WaitGroup
-	c := getHTTPClient(t, n)
+	c := getHTTPClient(t, conf)
 	for i := 0; i < 50; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			testBatchedJSONRPCCalls(t, c)
+			testBatchedJSONRPCCalls(ctx, t, c)
 		}()
 	}
 	wg.Wait()

@@ -7,27 +7,35 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/tendermint/tendermint/abci/example/kvstore"
-	"github.com/tendermint/tendermint/node"
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/libs/service"
 	core_grpc "github.com/tendermint/tendermint/rpc/grpc"
 	rpctest "github.com/tendermint/tendermint/rpc/test"
 )
 
-func NodeSuite(t *testing.T) *node.Node {
+func NodeSuite(t *testing.T) (service.Service, *config.Config) {
+	t.Helper()
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	conf := rpctest.CreateConfig()
+
 	// start a tendermint node in the background to test against
 	app := kvstore.NewApplication()
-	node := rpctest.StartTendermint(app)
+
+	node, closer, err := rpctest.StartTendermint(ctx, conf, app)
+	require.NoError(t, err)
 	t.Cleanup(func() {
-		rpctest.StopTendermint(node)
+		_ = closer(ctx)
+		cancel()
 	})
-	return node
+	return node, conf
 }
 
 func TestBroadcastTx(t *testing.T) {
-	n := NodeSuite(t)
+	_, conf := NodeSuite(t)
 
-	res, err := rpctest.GetGRPCClient(n.Config()).BroadcastTx(
+	res, err := rpctest.GetGRPCClient(conf).BroadcastTx(
 		context.Background(),
 		&core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")},
 	)

@@ -9,10 +9,10 @@ import (
 	"time"
 
 	abci "github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/libs/log"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	"github.com/tendermint/tendermint/libs/service"
 	nm "github.com/tendermint/tendermint/node"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
@@ -26,12 +26,6 @@
 // control.
 type Options struct {
 	suppressStdout bool
-	recreateConfig bool
 }
 
-var defaultOptions = Options{
-	suppressStdout: false,
-	recreateConfig: false,
-}
-
 func waitForRPC(ctx context.Context, conf *cfg.Config) {
@@ -88,7 +82,7 @@ func makeAddrs() (string, string, string) {
 		fmt.Sprintf("tcp://127.0.0.1:%d", randPort())
 }
 
-func createConfig() *cfg.Config {
+func CreateConfig() *cfg.Config {
 	pathname := makePathname()
 	c := cfg.ResetTestRoot(pathname)
 
@@ -106,74 +100,65 @@ func GetGRPCClient(conf *cfg.Config) core_grpc.BroadcastAPIClient {
 	return core_grpc.StartGRPCClient(grpcAddr)
 }
 
-// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized
-func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+type ServiceCloser func(context.Context) error
 
-	nodeOpts := defaultOptions
+func StartTendermint(ctx context.Context,
+	conf *cfg.Config,
+	app abci.Application,
+	opts ...func(*Options)) (service.Service, ServiceCloser, error) {
+
+	nodeOpts := &Options{}
 	for _, opt := range opts {
-		opt(&nodeOpts)
+		opt(nodeOpts)
 	}
-	node := NewTendermint(app, &nodeOpts)
-	err := node.Start()
-	if err != nil {
-		panic(err)
-	}
-
-	cfg := node.Config()
-	// wait for rpc
-	waitForRPC(ctx, cfg)
-	waitForGRPC(ctx, cfg)
-
-	if !nodeOpts.suppressStdout {
-		fmt.Println("Tendermint running!")
-	}
-
-	return node
-}
-
-// StopTendermint stops a test tendermint server, waits until it's stopped and
-// cleans up test/config files.
-func StopTendermint(node *nm.Node) {
-	if err := node.Stop(); err != nil {
-		node.Logger.Error("Error when tryint to stop node", "err", err)
-	}
-	node.Wait()
-	os.RemoveAll(node.Config().RootDir)
-}
-
-// NewTendermint creates a new tendermint server and sleeps forever
-func NewTendermint(app abci.Application, opts *Options) *nm.Node {
-	// Create & start node
-	config := createConfig()
 	var logger log.Logger
-	if opts.suppressStdout {
+	if nodeOpts.suppressStdout {
 		logger = log.NewNopLogger()
 	} else {
 		logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
 		logger = log.NewFilter(logger, log.AllowError())
 	}
-	pvKeyFile := config.PrivValidatorKeyFile()
-	pvKeyStateFile := config.PrivValidatorStateFile()
+	pvKeyFile := conf.PrivValidatorKeyFile()
+	pvKeyStateFile := conf.PrivValidatorStateFile()
 	pv, err := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile)
 	if err != nil {
-		panic(err)
+		return nil, func(_ context.Context) error { return nil }, err
 	}
 	papp := proxy.NewLocalClientCreator(app)
-	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+	nodeKey, err := p2p.LoadOrGenNodeKey(conf.NodeKeyFile())
 	if err != nil {
-		panic(err)
+		return nil, func(_ context.Context) error { return nil }, err
 	}
-	node, err := nm.NewNode(config, pv, nodeKey, papp,
-		nm.DefaultGenesisDocProviderFunc(config),
+	node, err := nm.NewNode(conf, pv, nodeKey, papp,
+		nm.DefaultGenesisDocProviderFunc(conf),
 		nm.DefaultDBProvider,
-		nm.DefaultMetricsProvider(config.Instrumentation),
+		nm.DefaultMetricsProvider(conf.Instrumentation),
 		logger)
 	if err != nil {
-		panic(err)
+		return nil, func(_ context.Context) error { return nil }, err
 	}
-	return node
+
+	err = node.Start()
+	if err != nil {
+		return nil, func(_ context.Context) error { return nil }, err
+	}
+
+	// wait for rpc
+	waitForRPC(ctx, conf)
+	waitForGRPC(ctx, conf)
+
+	if !nodeOpts.suppressStdout {
+		fmt.Println("Tendermint running!")
+	}
+
+	return node, func(ctx context.Context) error {
+		if err := node.Stop(); err != nil {
+			logger.Error("Error when trying to stop node", "err", err)
+		}
+		node.Wait()
+		os.RemoveAll(conf.RootDir)
+		return nil
+	}, nil
 }
 
 // SuppressStdout is an option that tries to make sure the RPC test Tendermint
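
Taken together, the reworked helpers are driven as below; a hypothetical test, with the closer semantics taken from the hunk above (the closer stops the node and removes conf.RootDir):

    package rpctest_sketch

    import (
    	"context"
    	"testing"

    	"github.com/tendermint/tendermint/abci/example/kvstore"
    	rpctest "github.com/tendermint/tendermint/rpc/test"
    )

    // TestLifecycleSketch shows the intended call pattern: build a config,
    // start the node under a cancellable context, and defer the returned
    // closer instead of calling the removed StopTendermint.
    func TestLifecycleSketch(t *testing.T) {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	conf := rpctest.CreateConfig()
    	node, closer, err := rpctest.StartTendermint(ctx, conf, kvstore.NewApplication(), rpctest.SuppressStdout)
    	if err != nil {
    		t.Fatal(err)
    	}
    	defer func() { _ = closer(ctx) }()

    	if !node.IsRunning() { // IsRunning comes from the service.Service interface
    		t.Fatal("expected the node to be running")
    	}
    }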
||||