mirror of https://github.com/tendermint/tendermint.git
cleanup: unused parameters (#8372)
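
Note: the hunks below are variations on one cleanup: a parameter or a return value that no caller ever uses is dropped, and every call site is updated in the same commit. As a rough illustration of the basic case, here is a minimal, self-contained Go sketch; the names (queue, newQueue, the chBuf-style buffer argument) are hypothetical and not taken from the repository.

    package main

    import "fmt"

    // Before the cleanup, a helper might accept a buffer size it never reads:
    //
    //     func newQueue(name string, chBuf uint) *queue { return &queue{name: name} }
    //
    // Removing the dead parameter shrinks the signature and every call site.
    type queue struct{ name string }

    func newQueue(name string) *queue { // hypothetical helper
        return &queue{name: name}
    }

    func main() {
        q := newQueue("blocksync") // previously: newQueue("blocksync", 0)
        fmt.Println(q.name)
    }
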
@@ -50,7 +50,6 @@ func setup(
     genDoc *types.GenesisDoc,
     privVal types.PrivValidator,
     maxBlockHeights []int64,
-    chBuf uint,
 ) *reactorTestSuite {
     t.Helper()

@@ -228,7 +227,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
     genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
     maxBlockHeight := int64(64)

-    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
+    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})

     require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

@@ -268,7 +267,7 @@ func TestReactor_SyncTime(t *testing.T) {
     genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
     maxBlockHeight := int64(101)

-    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
+    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})
     require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
     rts.start(ctx, t)

@@ -296,7 +295,7 @@ func TestReactor_NoBlockResponse(t *testing.T) {
     genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
     maxBlockHeight := int64(65)

-    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
+    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})

     require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

@@ -348,7 +347,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
     valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
     genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())

-    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)
+    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0})

     require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

@@ -429,7 +429,7 @@ func (h *Handshaker) ReplayBlocks(
     if err != nil {
         return nil, err
     }
-    mockApp, err := newMockProxyApp(ctx, h.logger, appHash, abciResponses)
+    mockApp, err := newMockProxyApp(h.logger, appHash, abciResponses)
     if err != nil {
         return nil, err
     }
@@ -57,7 +57,6 @@ func (emptyMempool) CloseWAL() {}
 // the real app.

 func newMockProxyApp(
-    ctx context.Context,
     logger log.Logger,
     appHash []byte,
     abciResponses *tmstate.ABCIResponses,
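
Note: newMockProxyApp above is one of several functions in this commit that lose a context.Context they never consult; keeping such a parameter suggests cancellation support that does not exist. A self-contained sketch of the same idea with hypothetical names (newMockApp is not the repository's function):

    package main

    import "fmt"

    type mockApp struct{ appHash []byte }

    // Before: newMockApp(ctx context.Context, appHash []byte) *mockApp — the ctx
    // was never used inside the constructor.
    // After: the parameter is gone, so the signature matches the behaviour.
    func newMockApp(appHash []byte) *mockApp { // hypothetical stand-in
        return &mockApp{appHash: appHash}
    }

    func main() {
        app := newMockApp([]byte{0xde, 0xad}) // callers no longer thread a ctx through
        fmt.Printf("%x\n", app.appHash)
    }
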
@@ -46,7 +46,7 @@ type reactorTestSuite struct {
     numStateStores int
 }

-func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite {
+func setup(ctx context.Context, t *testing.T, stateStores []sm.Store) *reactorTestSuite {
     t.Helper()

     pID := make([]byte, 16)
@@ -245,7 +245,7 @@ func TestReactorMultiDisconnect(t *testing.T) {
     stateDB1 := initializeValidatorState(ctx, t, val, height)
     stateDB2 := initializeValidatorState(ctx, t, val, height)

-    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 20)
+    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2})
     primary := rts.nodes[0]
     secondary := rts.nodes[1]

@@ -290,7 +290,7 @@ func TestReactorBroadcastEvidence(t *testing.T) {
         stateDBs[i] = initializeValidatorState(ctx, t, val, height)
     }

-    rts := setup(ctx, t, stateDBs, 0)
+    rts := setup(ctx, t, stateDBs)

     rts.start(ctx, t)

@@ -348,7 +348,7 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) {
     stateDB1 := initializeValidatorState(ctx, t, val, height1)
     stateDB2 := initializeValidatorState(ctx, t, val, height2)

-    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100)
+    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2})
     rts.start(ctx, t)

     primary := rts.nodes[0]
@@ -382,7 +382,7 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) {
     stateDB1 := initializeValidatorState(ctx, t, val, height)
     stateDB2 := initializeValidatorState(ctx, t, val, height)

-    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100)
+    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2})
     primary := rts.nodes[0]
     secondary := rts.nodes[1]

@@ -423,7 +423,7 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) {
     stateDB1 := initializeValidatorState(ctx, t, val, height)
     stateDB2 := initializeValidatorState(ctx, t, val, height)

-    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 0)
+    rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2})

     primary := rts.nodes[0]
     secondary := rts.nodes[1]
@@ -482,7 +482,7 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) {
         stateDBs[i] = initializeValidatorState(ctx, t, val, height)
     }

-    rts := setup(ctx, t, stateDBs, 0)
+    rts := setup(ctx, t, stateDBs)
     rts.start(ctx, t)

     evList := createEvidenceList(ctx, t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence)
@@ -228,7 +228,7 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) {
     goldenFilepath := filepath.Join("testdata", t.Name()+".golden")
     if *update {
         t.Logf("Updating golden test vector file %s", goldenFilepath)
-        data := createGoldenTestVectors(t)
+        data := createGoldenTestVectors()
         require.NoError(t, os.WriteFile(goldenFilepath, []byte(data), 0644))
     }
     f, err := os.Open(goldenFilepath)
@@ -306,7 +306,7 @@ func readLots(t *testing.T, wg *sync.WaitGroup, conn io.Reader, n int) {
 // Creates the data for a test vector file.
 // The file format is:
 // Hex(diffie_hellman_secret), loc_is_least, Hex(recvSecret), Hex(sendSecret), Hex(challenge)
-func createGoldenTestVectors(t *testing.T) string {
+func createGoldenTestVectors() string {
     data := ""
     for i := 0; i < 32; i++ {
         randSecretVector := tmrand.Bytes(32)
@@ -151,14 +151,14 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
     defer cancel()

     r := setupSingle(ctx, t)
-    peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
+    peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID()}
     added, err := r.manager.Add(peer)
     require.NoError(t, err)
     require.True(t, added)

     addresses := make([]p2pproto.PexAddress, 101)
     for i := 0; i < len(addresses); i++ {
-        nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
+        nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID()}
         addresses[i] = p2pproto.PexAddress{
             URL: nodeAddress.String(),
         }
@@ -730,6 +730,6 @@ func newNodeID(t *testing.T, id string) types.NodeID {
     return nodeID
 }

-func randomNodeID(t *testing.T) types.NodeID {
+func randomNodeID() types.NodeID {
     return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
 }
@@ -54,8 +54,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
     assert.False(t, indexer.IndexingEnabled([]indexer.EventSink{}))

     // event sink setup
-    pool, err := setupDB(t)
-    assert.NoError(t, err)
+    pool := setupDB(t)

     store := dbm.NewMemDB()
     eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink}
@@ -133,7 +132,7 @@ func resetDB(t *testing.T) {
     assert.NoError(t, err)
 }

-func setupDB(t *testing.T) (*dockertest.Pool, error) {
+func setupDB(t *testing.T) *dockertest.Pool {
     t.Helper()
     pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL"))
     assert.NoError(t, err)
@@ -187,7 +186,7 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) {
     err = migrator.Apply(psqldb, sm)
     assert.NoError(t, err)

-    return pool, nil
+    return pool
 }

 func teardown(t *testing.T, pool *dockertest.Pool) error {
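
Note: setupDB now reports failures through the *testing.T it already receives (via assert.NoError inside the helper) instead of returning an error, so callers shrink from `pool, err := setupDB(t)` plus an extra assertion to `pool := setupDB(t)`. A self-contained sketch of that helper shape using only the standard library; openFixture and its temp-file fixture are hypothetical, not the repository's code.

    package example

    import (
        "os"
        "testing"
    )

    // openFixture fails the calling test itself, so callers never see an error value.
    func openFixture(t *testing.T) *os.File { // hypothetical helper
        t.Helper()
        f, err := os.CreateTemp(t.TempDir(), "fixture")
        if err != nil {
            t.Fatalf("creating fixture: %v", err)
        }
        return f
    }

    func TestFixture(t *testing.T) {
        f := openFixture(t) // no error to check at the call site
        defer f.Close()
        if f.Name() == "" {
            t.Fatal("expected a named fixture file")
        }
    }
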
@@ -379,7 +379,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t
         }

         wg.Add(1)
-        go func(p *BlockProvider, peer types.NodeID) {
+        go func(peer types.NodeID) {
             defer wg.Done()

             timer := time.NewTimer(0)
@@ -424,7 +424,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t
                 }
             }

-        }(p, peer)
+        }(peer)
     }
     sig := make(chan struct{})
     go func() { wg.Wait(); close(sig) }()
@@ -17,7 +17,6 @@ import (
     "github.com/tendermint/tendermint/crypto"
     sm "github.com/tendermint/tendermint/internal/state"
     "github.com/tendermint/tendermint/internal/state/test/factory"
-    "github.com/tendermint/tendermint/libs/log"
     tmrand "github.com/tendermint/tendermint/libs/rand"
     tmtime "github.com/tendermint/tendermint/libs/time"
     "github.com/tendermint/tendermint/types"
@@ -46,7 +45,7 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
         commitSigs)
 }

-func makeStateAndBlockStore(dir string, logger log.Logger) (sm.State, *BlockStore, cleanupFunc, error) {
+func makeStateAndBlockStore(dir string) (sm.State, *BlockStore, cleanupFunc, error) {
     cfg, err := config.ResetTestRoot(dir, "blockchain_reactor_test")
     if err != nil {
         return sm.State{}, nil, nil, err
@@ -81,7 +80,7 @@ func TestMain(m *testing.M) {
     }
     var cleanup cleanupFunc

-    state, _, cleanup, err = makeStateAndBlockStore(dir, log.NewNopLogger())
+    state, _, cleanup, err = makeStateAndBlockStore(dir)
     if err != nil {
         stdlog.Fatal(err)
     }
@@ -103,7 +102,7 @@ func TestMain(m *testing.M) {

 // TODO: This test should be simplified ...
 func TestBlockStoreSaveLoadBlock(t *testing.T) {
-    state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir(), log.NewNopLogger())
+    state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir())
     defer cleanup()
     require.NoError(t, err)
     require.Equal(t, bs.Base(), int64(0), "initially the base should be zero")
@@ -492,7 +491,7 @@ func TestLoadBlockMeta(t *testing.T) {
 }

 func TestBlockFetchAtHeight(t *testing.T) {
-    state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir(), log.NewNopLogger())
+    state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir())
     defer cleanup()
     require.NoError(t, err)
     require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
@@ -13,25 +13,25 @@ import (
     tmprotobits "github.com/tendermint/tendermint/proto/tendermint/libs/bits"
 )

-func randBitArray(bits int) (*BitArray, []byte) {
+func randBitArray(bits int) *BitArray {
     src := tmrand.Bytes((bits + 7) / 8)
     bA := NewBitArray(bits)
     for i := 0; i < len(src); i++ {
         for j := 0; j < 8; j++ {
             if i*8+j >= bits {
-                return bA, src
+                return bA
             }
             setBit := src[i]&(1<<uint(j)) > 0
             bA.SetIndex(i*8+j, setBit)
         }
     }
-    return bA, src
+    return bA
 }

 func TestAnd(t *testing.T) {

-    bA1, _ := randBitArray(51)
-    bA2, _ := randBitArray(31)
+    bA1 := randBitArray(51)
+    bA2 := randBitArray(31)
     bA3 := bA1.And(bA2)

     var bNil *BitArray
@@ -54,9 +54,8 @@ func TestAnd(t *testing.T) {
 }

 func TestOr(t *testing.T) {
-
-    bA1, _ := randBitArray(51)
-    bA2, _ := randBitArray(31)
+    bA1 := randBitArray(51)
+    bA2 := randBitArray(31)
     bA3 := bA1.Or(bA2)

     bNil := (*BitArray)(nil)
@@ -191,10 +190,7 @@ func TestEmptyFull(t *testing.T) {
 }

 func TestUpdateNeverPanics(t *testing.T) {
-    newRandBitArray := func(n int) *BitArray {
-        ba, _ := randBitArray(n)
-        return ba
-    }
+    newRandBitArray := func(n int) *BitArray { return randBitArray(n) }
     pairs := []struct {
         a, b *BitArray
     }{
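
Note: randBitArray used to return the random source bytes alongside the *BitArray, but the callers shown above always discarded them with a blank identifier; dropping the second return value removes the `, _` noise. A self-contained sketch of the same cleanup with hypothetical names (randBools stands in for randBitArray):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // Before: func randBools(n int) ([]bool, []byte) — callers wrote `bits, _ := randBools(8)`.
    // After: the never-used second value is dropped.
    func randBools(n int) []bool { // hypothetical stand-in
        out := make([]bool, n)
        for i := range out {
            out[i] = rand.Intn(2) == 1
        }
        return out
    }

    func main() {
        bits := randBools(8) // previously: bits, _ := randBools(8)
        fmt.Println(bits)
    }
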
@@ -260,11 +260,8 @@ func makeNode(
     node.rpcEnv.EvidencePool = evPool
     node.evPool = evPool

-    mpReactor, mp, err := createMempoolReactor(logger, cfg, proxyApp, stateStore, nodeMetrics.mempool,
+    mpReactor, mp := createMempoolReactor(logger, cfg, proxyApp, stateStore, nodeMetrics.mempool,
         peerManager.Subscribe, node.router.OpenChannel, peerManager.GetHeight)
-    if err != nil {
-        return nil, combineCloseError(err, makeCloser(closers))
-    }
     node.rpcEnv.Mempool = mp
     node.services = append(node.services, mpReactor)

@@ -172,7 +172,7 @@ func createMempoolReactor(
     peerEvents p2p.PeerEventSubscriber,
     chCreator p2p.ChannelCreator,
     peerHeight func(types.NodeID) int64,
-) (service.Service, mempool.Mempool, error) {
+) (service.Service, mempool.Mempool) {
     logger = logger.With("module", "mempool")

     mp := mempool.NewTxMempool(
@@ -197,7 +197,7 @@ func createMempoolReactor(
         mp.EnableTxsAvailable()
     }

-    return reactor, mp, nil
+    return reactor, mp
 }

 func createEvidenceReactor(
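
Note: judging by the return statement above, createMempoolReactor only ever returned a nil error, so the error result and the dead `if err != nil` branch in makeNode go away together. A self-contained sketch of the caller-side effect, with hypothetical names (newReactor is not the repository's constructor):

    package main

    import "fmt"

    type reactor struct{ name string }

    // Before: func newReactor(name string) (*reactor, error) { return &reactor{name}, nil }
    // The error was always nil, so the signature drops it and callers lose a branch.
    func newReactor(name string) *reactor { // hypothetical stand-in
        return &reactor{name: name}
    }

    func main() {
        // Previously:
        //     r, err := newReactor("mempool")
        //     if err != nil { /* unreachable */ }
        r := newReactor("mempool")
        fmt.Println(r.name)
    }
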
@@ -115,7 +115,7 @@ func getAllSeenCommits(ctx context.Context, db dbm.DB) ([]toMigrate, error) {
     return scData, nil
 }

-func renameRecord(ctx context.Context, db dbm.DB, keep toMigrate) error {
+func renameRecord(db dbm.DB, keep toMigrate) error {
     wantKey := makeKeyFromPrefix(prefixSeenCommit)
     if bytes.Equal(keep.key, wantKey) {
         return nil // we already did this conversion
@@ -143,7 +143,7 @@ func renameRecord(ctx context.Context, db dbm.DB, keep toMigrate) error {
     return cerr
 }

-func deleteRecords(ctx context.Context, db dbm.DB, scData []toMigrate) error {
+func deleteRecords(db dbm.DB, scData []toMigrate) error {
     // delete all the remaining stale values in a single batch
     batch := db.NewBatch()

@@ -179,7 +179,7 @@ func Migrate(ctx context.Context, db dbm.DB) error {
     // retain only the latest.
     keep, remove := scData[0], scData[1:]

-    if err := renameRecord(ctx, db, keep); err != nil {
+    if err := renameRecord(db, keep); err != nil {
         return fmt.Errorf("renaming seen commit record: %w", err)
     }

@@ -189,7 +189,7 @@ func Migrate(ctx context.Context, db dbm.DB) error {

     // Remove any older seen commits. Prior to v0.35, we kept these records for
     // all heights, but v0.35 keeps only the latest.
-    if err := deleteRecords(ctx, db, remove); err != nil {
+    if err := deleteRecords(db, remove); err != nil {
         return fmt.Errorf("writing data: %w", err)
     }

@@ -134,7 +134,7 @@ func NewCLI(logger log.Logger) *CLI {
             if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // wait for network to settle before tests
                 return err
             }
-            if err := Test(logger, cli.testnet); err != nil {
+            if err := Test(cli.testnet); err != nil {
                 return err
             }
             return nil
@@ -259,7 +259,7 @@ func NewCLI(logger log.Logger) *CLI {
         Use: "test",
         Short: "Runs test cases against a running testnet",
         RunE: func(cmd *cobra.Command, args []string) error {
-            return Test(logger, cli.testnet)
+            return Test(cli.testnet)
         },
     })

@@ -3,12 +3,11 @@ package main
 import (
     "os"

-    "github.com/tendermint/tendermint/libs/log"
     e2e "github.com/tendermint/tendermint/test/e2e/pkg"
 )

 // Test runs test cases under tests/
-func Test(logger log.Logger, testnet *e2e.Testnet) error {
+func Test(testnet *e2e.Testnet) error {
     err := os.Setenv("E2E_MANIFEST", testnet.File)
     if err != nil {
         return err
@@ -295,7 +295,14 @@ func TestMockEvidenceValidateBasic(t *testing.T) {

 func makeVote(
     ctx context.Context,
-    t *testing.T, val PrivValidator, chainID string, valIndex int32, height int64, round int32, step int, blockID BlockID,
+    t *testing.T,
+    val PrivValidator,
+    chainID string,
+    valIndex int32,
+    height int64,
+    round int32,
+    step int,
+    blockID BlockID,
     time time.Time,
 ) *Vote {
     pubKey, err := val.GetPubKey(ctx)
@@ -14,11 +14,11 @@ import (

 func TestABCIPubKey(t *testing.T) {
     pkEd := ed25519.GenPrivKey().PubKey()
-    err := testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519)
+    err := testABCIPubKey(t, pkEd)
     assert.NoError(t, err)
 }

-func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) error {
+func testABCIPubKey(t *testing.T, pk crypto.PubKey) error {
     abciPubKey, err := encoding.PubKeyToProto(pk)
     require.NoError(t, err)
     pk2, err := encoding.PubKeyFromProto(abciPubKey)