Mirror of https://github.com/tendermint/tendermint.git
* ci: Fix linter complaint (#9645)
Fixes a very silly linter complaint that makes absolutely no sense and is blocking the merging of several PRs.
---
#### PR checklist
- [x] Tests written/updated, or no tests needed
- [x] `CHANGELOG_PENDING.md` updated, or no changelog entry needed
- [x] Updated relevant documentation (`docs/`) and code comments, or no documentation updates needed
(cherry picked from commit 83b7f4ad5b)
# Conflicts:
# .github/workflows/lint.yml
# .golangci.yml
# cmd/tendermint/commands/debug/util.go
* Resolve conflicts
Signed-off-by: Thane Thomson <connect@thanethomson.com>
* ci: Sync golangci-lint config with main
Minus the spelling configuration that restricts spelling to US English
only.
Signed-off-by: Thane Thomson <connect@thanethomson.com>
* make format
Signed-off-by: Thane Thomson <connect@thanethomson.com>
* Remove usage of deprecated io/ioutil package
Signed-off-by: Thane Thomson <connect@thanethomson.com>
* Remove unused mockBlockStore
Signed-off-by: Thane Thomson <connect@thanethomson.com>
* blockchain/v2: Remove unused method
Signed-off-by: Thane Thomson <connect@thanethomson.com>
* Bulk fix lints
Signed-off-by: Thane Thomson <connect@thanethomson.com>
* lint: Ignore auto-generated query PEG
Signed-off-by: Thane Thomson <connect@thanethomson.com>
Signed-off-by: Thane Thomson <connect@thanethomson.com>
Co-authored-by: Thane Thomson <connect@thanethomson.com>
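Most of the mechanical churn in the diff below comes from removing the deprecated io/ioutil package. As a rough sketch (not part of the diff; the package, function, and file names here are made up for illustration), these are the replacements applied throughout:

// Go 1.16+ replacements for the deprecated io/ioutil package, as applied in this commit:
//   ioutil.ReadFile  -> os.ReadFile
//   ioutil.WriteFile -> os.WriteFile
//   ioutil.ReadDir   -> os.ReadDir (returns []os.DirEntry instead of []os.FileInfo)
//   ioutil.TempDir   -> os.MkdirTemp
//   ioutil.TempFile  -> os.CreateTemp
//   ioutil.ReadAll   -> io.ReadAll
//   ioutil.Discard   -> io.Discard
package example

import "os"

// writeCopy reads src and writes its contents into a fresh temporary
// directory, using only the non-deprecated APIs.
func writeCopy(src string) (string, error) {
	data, err := os.ReadFile(src) // was ioutil.ReadFile
	if err != nil {
		return "", err
	}
	dir, err := os.MkdirTemp("", "example") // was ioutil.TempDir
	if err != nil {
		return "", err
	}
	out := dir + "/copy"
	if err := os.WriteFile(out, data, 0o600); err != nil { // was ioutil.WriteFile
		return "", err
	}
	return out, nil
}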
.github/workflows/lint.yml (vendored)
@@ -25,8 +25,7 @@ jobs:
go.sum
- uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.47.3
version: v1.50.1
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF
@@ -2,20 +2,12 @@ linters:
enable:
- asciicheck
- bodyclose
# - deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
# - gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- revive
@@ -23,50 +15,32 @@ linters:
- gosimple
- govet
- ineffassign
# - interfacer
- lll
# - maligned
# - misspell
- misspell
- nakedret
# - nolintlint
- nolintlint
- prealloc
- staticcheck
# - structcheck // to be fixed by golangci-lint
- stylecheck
# - typecheck
- typecheck
- unconvert
# - unparam
# - unused
- varcheck
# - whitespace
# - wsl
disable:
- unused
- deadcode
- nolintlint

issues:
exclude-rules:
- path: _test\.go
linters:
- gosec
- linters:
- lll
source: "https://"
max-same-issues: 50

linters-settings:
dogsled:
max-blank-identifiers: 3
golint:
min-confidence: 0
maligned:
suggest-new: true
# govet:
# check-shadowing: true
revive:
min-confidence: 0
misspell:
locale: US
ignore-words:
- behaviour

run:
skip-files:
- libs/pubsub/query/query.peg.go
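Two practical consequences of the config above show up repeatedly in the hunks that follow: misspell is now enforced (likely why the recieved/received typo fix appears in the consensus state machine), and nolint directives are written in the machine-readable form without a space after the comment marker. A small illustration, with lll standing in for whichever linter is being suppressed:

// old style, flagged by nolint-format checks
// nolint: lll

// new style, as used throughout this change
//nolint:lll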
@@ -2,7 +2,7 @@ package kvstore

import (
"fmt"
"io/ioutil"
"os"
"sort"
"testing"

@@ -71,7 +71,7 @@ func TestKVStoreKV(t *testing.T) {
}

func TestPersistentKVStoreKV(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
@@ -87,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
}

func TestPersistentKVStoreInfo(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
@@ -114,12 +114,11 @@ func TestPersistentKVStoreInfo(t *testing.T) {
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}

}

// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
@@ -181,7 +180,6 @@ func TestValUpdates(t *testing.T) {
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)

}

func makeApplyBlock(
@@ -189,7 +187,8 @@ func makeApplyBlock(
kvstore types.Application,
heightInt int,
diff []types.ValidatorUpdate,
txs ...[]byte) {
txs ...[]byte,
) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
@@ -207,7 +206,6 @@ func makeApplyBlock(
kvstore.Commit()

valsEqual(t, diff, resEndBlock.ValidatorUpdates)

}

// order doesn't matter
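Many of the hunks that follow do nothing but reflow multi-line function signatures: the last parameter gains a trailing comma and the closing parenthesis moves to its own line, with the body untouched. A minimal before/after sketch on a made-up function:

// before
func styleBefore(a int,
	b string) error {
	return nil
}

// after
func styleAfter(a int,
	b string,
) error {
	return nil
}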
@@ -62,24 +62,7 @@ func (mp mockPeer) Get(string) interface{} { return struct{}{} }
func (mp mockPeer) SetRemovalFailed() {}
func (mp mockPeer) GetRemovalFailed() bool { return false }

type mockBlockStore struct {
blocks map[int64]*types.Block
}

func (ml *mockBlockStore) Height() int64 {
return int64(len(ml.blocks))
}

func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
return ml.blocks[height]
}

func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
ml.blocks[block.Height] = block
}

type mockBlockApplier struct {
}
type mockBlockApplier struct{}

// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
@@ -351,9 +334,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
// }

func TestReactorHelperMode(t *testing.T) {
var (
channelID = byte(0x40)
)
channelID := byte(0x40)

config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
@@ -466,7 +447,8 @@ type testApp struct {
}

func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
*types.GenesisDoc, []types.PrivValidator) {
*types.GenesisDoc, []types.PrivValidator,
) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
@@ -491,7 +473,8 @@ func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower
func newReactorStore(
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
maxBlockHeight int64,
) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
if len(privVals) != 1 {
panic("only support one validator")
}
@@ -515,7 +498,8 @@ func newReactorStore(

db := dbm.NewMemDB()
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false},
DiscardABCIResponses: false,
},
)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})

@@ -52,10 +52,6 @@ func (rt *Routine) setLogger(logger log.Logger) {
rt.logger = logger
}

func (rt *Routine) setMetrics(metrics *Metrics) {
rt.metrics = metrics
}

func (rt *Routine) start() {
rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
@@ -3,7 +3,6 @@ package debug

import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
@@ -82,7 +81,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
start := time.Now().UTC()

tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
if err != nil {
logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
return

@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -68,7 +67,6 @@ func zipDir(src, dest string) error {
_, err = io.Copy(headerWriter, file)
return err
})

}

// copyFile copies a file from src to dest and returns an error upon failure. The
@@ -111,5 +109,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error {
return fmt.Errorf("failed to encode state dump: %w", err)
}

return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
}

@@ -3,7 +3,6 @@ package debug
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -56,7 +55,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {

// Create a temporary directory which will contain all the state dumps and
// relevant files and directories that will be compressed into a file.
tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp")
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
if err != nil {
return fmt.Errorf("failed to create temporary directory: %w", err)
}

@@ -3,7 +3,7 @@ package debug
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"path"
@@ -67,16 +67,17 @@ func copyConfig(home, dir string) error {
func dumpProfile(dir, addr, profile string, debug int) error {
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

resp, err := http.Get(endpoint) //nolint: gosec
//nolint:gosec,nolintlint
resp, err := http.Get(endpoint)
if err != nil {
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
}
defer resp.Body.Close()

body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read %s profile response body: %w", profile, err)
}

return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
}
@@ -9,6 +9,8 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

dbm "github.com/tendermint/tm-db"

abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
@@ -16,7 +18,6 @@ import (
"github.com/tendermint/tendermint/state/mocks"
txmocks "github.com/tendermint/tendermint/state/txindex/mocks"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)

const (
@@ -2,7 +2,6 @@ package commands

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -18,9 +17,7 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
)

var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)
var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")

// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
@@ -88,7 +85,6 @@ func TestRootHome(t *testing.T) {
}

func TestRootFlagsEnv(t *testing.T) {

// defaults
defaults := cfg.DefaultConfig()
defaultLogLvl := defaults.LogLevel
@@ -116,7 +112,6 @@ func TestRootFlagsEnv(t *testing.T) {
}

func TestRootConfig(t *testing.T) {

// write non-default config
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
@@ -140,7 +135,7 @@ func TestRootConfig(t *testing.T) {

// XXX: path must match cfg.defaultConfigPath
configFilePath := filepath.Join(defaultRoot, "config")
err := tmos.EnsureDir(configFilePath, 0700)
err := tmos.EnsureDir(configFilePath, 0o700)
require.Nil(t, err)

// write the non-defaults to a different path
@@ -168,5 +163,5 @@ func WriteConfigVals(dir string, vals map[string]string) error {
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
}
cfile := filepath.Join(dir, "config.toml")
return ioutil.WriteFile(cfile, []byte(data), 0600)
return os.WriteFile(cfile, []byte(data), 0o600)
}

@@ -27,7 +27,6 @@ func TestDefaultConfig(t *testing.T) {
assert.Equal("/foo/bar", cfg.GenesisFile())
assert.Equal("/opt/data", cfg.DBDir())
assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())

}

func TestConfigValidateBasic(t *testing.T) {
@@ -140,7 +139,7 @@ func TestFastSyncConfigValidateBasic(t *testing.T) {
assert.Error(t, cfg.ValidateBasic())
}

// nolint: lll
//nolint:lll
func TestConsensusConfig_ValidateBasic(t *testing.T) {
testcases := map[string]struct {
modify func(*ConsensusConfig)
@@ -3,7 +3,7 @@ package config
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"text/template"
@@ -12,7 +12,7 @@ import (
)

// DefaultDirPerm is the default permissions used when creating directories.
const DefaultDirPerm = 0700
const DefaultDirPerm = 0o700

var configTemplate *template.Template

@@ -63,7 +63,7 @@ func WriteConfigFile(configFilePath string, config *Config) {
panic(err)
}

tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0644)
tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644)
}

// Note: any changes to the comments/variables/mapstructure
@@ -544,7 +544,7 @@ func ResetTestRoot(testName string) *Config {

func ResetTestRootWithChainID(testName string, chainID string) *Config {
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName))
if err != nil {
panic(err)
}
@@ -571,11 +571,11 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
chainID = "tendermint_test"
}
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0o644)
}
// we always overwrite the priv val
tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0o644)
tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0o644)

config := TestConfig().SetRoot(rootDir)
return config
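The permission-literal changes in the config hunks above (and in later files) do not change any values: 0o700, 0o644, and friends are the same octal numbers as 0700 and 0644, just written with the explicit 0o prefix that the updated formatting rules prefer. A one-line illustration:

// Both declarations define the same permission bits (rwx for owner only, decimal 448).
const dirPermOld = 0700
const dirPermNew = 0o700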
@@ -1,7 +1,6 @@
package config

import (
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -23,7 +22,7 @@ func TestEnsureRoot(t *testing.T) {
require := require.New(t)

// setup temp dir for test
tmpDir, err := ioutil.TempDir("", "config-test")
tmpDir, err := os.MkdirTemp("", "config-test")
require.Nil(err)
defer os.RemoveAll(tmpDir)

@@ -31,7 +30,7 @@ func TestEnsureRoot(t *testing.T) {
EnsureRoot(tmpDir)

// make sure config is set properly
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
require.Nil(err)

if !checkConfig(string(data)) {
@@ -52,7 +51,7 @@ func TestEnsureTestRoot(t *testing.T) {
rootDir := cfg.RootDir

// make sure config is set properly
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
require.Nil(err)

if !checkConfig(string(data)) {
@@ -68,7 +67,7 @@ func checkConfig(configFile string) bool {
var valid bool

// list of words we expect in the config
var elems = []string{
elems := []string{
"moniker",
"seeds",
"proxy_app",
@@ -4,8 +4,8 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"sync"
@@ -15,8 +15,6 @@ import (
"github.com/go-kit/log/term"
"github.com/stretchr/testify/require"

"path"

dbm "github.com/tendermint/tm-db"

abcicli "github.com/tendermint/tendermint/abci/client"
@@ -92,8 +90,8 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida
func (vs *validatorStub) signVote(
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader) (*types.Vote, error) {

header types.PartSetHeader,
) (*types.Vote, error) {
pubKey, err := vs.PrivValidator.GetPubKey()
if err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err)
@@ -141,7 +139,8 @@ func signVotes(
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
vss ...*validatorStub) []*types.Vote {
vss ...*validatorStub,
) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, voteType, hash, header)
@@ -452,7 +451,7 @@ func newStateWithConfigAndBlockStore(

func loadPrivValidator(config *cfg.Config) *privval.FilePV {
privValidatorKeyFile := config.PrivValidatorKeyFile()
ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
ensureDir(filepath.Dir(privValidatorKeyFile), 0o700)
privValidatorStateFile := config.PrivValidatorStateFile()
privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
privValidator.Reset()
@@ -479,7 +478,8 @@ func randState(nValidators int) (*State, []*validatorStub) {
//-------------------------------------------------------------------------------

func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
errorMessage string) {
errorMessage string,
) {
select {
case <-time.After(timeout):
break
@@ -657,7 +657,8 @@ func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) {
}

func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32,
voteType tmproto.SignedMsgType) {
voteType tmproto.SignedMsgType,
) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewVote event")
@@ -713,7 +714,8 @@ func consensusLogger() log.Logger {
}

func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
appFunc func() abci.Application, configOpts ...func(*cfg.Config),
) ([]*State, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*State, nValidators)
logger := consensusLogger()
@@ -729,7 +731,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
for _, opt := range configOpts {
opt(thisConfig)
}
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
@@ -766,7 +768,7 @@ func randConsensusNetWithPeers(
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal
if i == 0 {
peer0Config = thisConfig
}
@@ -774,11 +776,11 @@ func randConsensusNetWithPeers(
if i < nValidators {
privVal = privVals[i]
} else {
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
if err != nil {
panic(err)
}
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
if err != nil {
panic(err)
}
@@ -900,7 +902,7 @@ func newCounter() abci.Application {
}

func newPersistentKVStore() abci.Application {
dir, err := ioutil.TempDir("", "persistent-kvstore")
dir, err := os.MkdirTemp("", "persistent-kvstore")
if err != nil {
panic(err)
}
@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -65,7 +64,8 @@ func TestMain(m *testing.M) {
// wal writer when we need to, instead of with every message.

func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store,
) {
logger := log.TestingLogger()
state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
@@ -78,7 +78,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
)
cs.SetLogger(logger)

bytes, _ := ioutil.ReadFile(cs.config.WalFile())
bytes, _ := os.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)

err := cs.Start()
@@ -126,14 +126,18 @@ func TestWALCrash(t *testing.T) {
initFn func(dbm.DB, *State, context.Context)
heightToStop int64
}{
{"empty block",
{
"empty block",
func(stateDB dbm.DB, cs *State, ctx context.Context) {},
1},
{"many non-empty blocks",
1,
},
{
"many non-empty blocks",
func(stateDB dbm.DB, cs *State, ctx context.Context) {
go sendTxs(ctx, cs)
},
3},
3,
},
}

for i, tc := range testCases {
@@ -146,7 +150,8 @@ func TestWALCrash(t *testing.T) {
}

func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
initFn func(dbm.DB, *State, context.Context), heightToStop int64,
) {
walPanicked := make(chan error)
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}

@@ -284,7 +289,8 @@ func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }

func (w *crashingWAL) SearchForEndHeight(
height int64,
options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
options *WALSearchOptions,
) (rd io.ReadCloser, found bool, err error) {
return w.next.SearchForEndHeight(height, options)
}

@@ -588,7 +594,7 @@ func TestHandshakeReplayNone(t *testing.T) {
func TestMockProxyApp(t *testing.T) {
sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange
logger := log.TestingLogger()
var validTxs, invalidTxs = 0, 0
validTxs, invalidTxs := 0, 0
txIndex := 0

assert.NotPanics(t, func() {
@@ -636,7 +642,7 @@ func TestMockProxyApp(t *testing.T) {
}

func tempWALWithData(data []byte) string {
walFile, err := ioutil.TempFile("", "wal")
walFile, err := os.CreateTemp("", "wal")
if err != nil {
panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
}
@@ -794,7 +800,8 @@ func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp pro
}

func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store,
state sm.State, chain []*types.Block, nBlocks int, mode uint) {
state sm.State, chain []*types.Block, nBlocks int, mode uint,
) {
// start a new app without handshake, play nBlocks blocks
if err := proxyApp.Start(); err != nil {
panic(err)
@@ -831,7 +838,6 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store,
default:
panic(fmt.Sprintf("unknown mode %v", mode))
}

}

func buildTMStateFromChain(
@@ -840,7 +846,8 @@ func buildTMStateFromChain(
state sm.State,
chain []*types.Block,
nBlocks int,
mode uint) sm.State {
mode uint,
) sm.State {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(
kvstore.NewPersistentKVStoreApplication(
@@ -983,8 +990,8 @@ func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl
}

func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {

privVal types.PrivValidator, height int64,
) (*types.Block, *types.PartSet) {
lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
if height > 1 {
vote, _ := types.MakeVote(
@@ -1066,8 +1073,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
case EndHeightMessage:
// if its not the first one, we have a full block
if thisBlockParts != nil {
var pbb = new(tmproto.Block)
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
pbb := new(tmproto.Block)
bz, err := io.ReadAll(thisBlockParts.GetReader())
if err != nil {
panic(err)
}
@@ -1106,11 +1113,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
}
}
// grab the last block too
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
bz, err := io.ReadAll(thisBlockParts.GetReader())
if err != nil {
panic(err)
}
var pbb = new(tmproto.Block)
pbb := new(tmproto.Block)
err = proto.Unmarshal(bz, pbb)
if err != nil {
panic(err)
@@ -1154,7 +1161,8 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {
func stateAndStore(
config *cfg.Config,
pubKey crypto.PubKey,
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
appVersion uint64,
) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
@@ -1192,6 +1200,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
return bs.chain[int64(len(bs.chain))-1]
}

func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
@@ -1202,9 +1211,11 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}

func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
return bs.commits[height-1]
}

func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
@@ -4,7 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"runtime/debug"
"sort"
@@ -468,7 +468,6 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error

// SetProposal inputs a proposal.
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {

if peerID == "" {
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
} else {
@@ -481,7 +480,6 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {

// AddProposalBlockPart inputs a part of the proposal block.
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error {

if peerID == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
} else {
@@ -499,7 +497,6 @@ func (cs *State) SetProposalAndBlock(
parts *types.PartSet,
peerID p2p.ID,
) error {

if err := cs.SetProposal(proposal, peerID); err != nil {
return err
}
@@ -821,7 +818,7 @@ func (cs *State) handleMsg(mi msgInfo) {

// We unlock here to yield to any routines that need to read the the RoundState.
// Previously, this code held the lock from the point at which the final block
// part was recieved until the block executed against the application.
// part was received until the block executed against the application.
// This prevented the reactor from being able to retrieve the most updated
// version of the RoundState. The reactor needs the updated RoundState to
// gossip the now completed block.
@@ -937,7 +934,6 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
default:
panic(fmt.Sprintf("invalid timeout step: %v", ti.Step))
}

}

func (cs *State) handleTxsAvailable() {
@@ -1181,7 +1177,6 @@ func (cs *State) isProposalComplete() bool {
}
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()

}

// Create the next block to propose and return it. Returns nil block upon error.
@@ -1885,12 +1880,12 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
)
}
if added && cs.ProposalBlockParts.IsComplete() {
bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader())
bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader())
if err != nil {
return added, err
}

var pbb = new(tmproto.Block)
pbb := new(tmproto.Block)
err = proto.Unmarshal(bz, pbb)
if err != nil {
return added, err
@@ -3,7 +3,6 @@ package consensus
import (
"bytes"
"crypto/rand"
"io/ioutil"
"os"
"path/filepath"

@@ -27,7 +26,7 @@ const (
)

func TestWALTruncate(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
walDir, err := os.MkdirTemp("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)

@@ -109,7 +108,7 @@ func TestWALEncoderDecoder(t *testing.T) {
}

func TestWALWrite(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
walDir, err := os.MkdirTemp("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
walFile := filepath.Join(walDir, "wal")
@@ -177,7 +176,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
}

func TestWALPeriodicSync(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
walDir, err := os.MkdirTemp("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)

@@ -269,18 +268,23 @@ func BenchmarkWalDecode512B(b *testing.B) {
func BenchmarkWalDecode10KB(b *testing.B) {
benchmarkWalDecode(b, 10*1024)
}

func BenchmarkWalDecode100KB(b *testing.B) {
benchmarkWalDecode(b, 100*1024)
}

func BenchmarkWalDecode1MB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024)
}

func BenchmarkWalDecode10MB(b *testing.B) {
benchmarkWalDecode(b, 10*1024*1024)
}

func BenchmarkWalDecode100MB(b *testing.B) {
benchmarkWalDecode(b, 100*1024*1024)
}

func BenchmarkWalDecode1GB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024*1024)
}
@@ -3,7 +3,7 @@ package armor
import (
"bytes"
"fmt"
"io/ioutil"
"io"

"golang.org/x/crypto/openpgp/armor" //nolint: staticcheck
)
@@ -31,7 +31,7 @@ func DecodeArmor(armorStr string) (blockType string, headers map[string]string,
if err != nil {
return "", nil, nil, err
}
data, err = ioutil.ReadAll(block.Body)
data, err = io.ReadAll(block.Body)
if err != nil {
return "", nil, nil, err
}

@@ -4,6 +4,7 @@ package mocks

import (
mock "github.com/stretchr/testify/mock"

types "github.com/tendermint/tendermint/types"
)
@@ -1,7 +1,6 @@
package autofile

import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
@@ -24,7 +23,7 @@ func TestSIGHUP(t *testing.T) {
})

// First, create a temporary directory and move into it
dir, err := ioutil.TempDir("", "sighup_test")
dir, err := os.MkdirTemp("", "sighup_test")
require.NoError(t, err)
t.Cleanup(func() {
os.RemoveAll(dir)
@@ -49,7 +48,7 @@ func TestSIGHUP(t *testing.T) {
require.NoError(t, err)

// Move into a different temporary directory
otherDir, err := ioutil.TempDir("", "sighup_test_other")
otherDir, err := os.MkdirTemp("", "sighup_test_other")
require.NoError(t, err)
defer os.RemoveAll(otherDir)
err = os.Chdir(otherDir)
@@ -79,7 +78,7 @@ func TestSIGHUP(t *testing.T) {
}

// The current directory should be empty
files, err := ioutil.ReadDir(".")
files, err := os.ReadDir(".")
require.NoError(t, err)
assert.Empty(t, files)
}
@@ -87,7 +86,7 @@ func TestSIGHUP(t *testing.T) {
// // Manually modify file permissions, close, and reopen using autofile:
// // We expect the file permissions to be changed back to the intended perms.
// func TestOpenAutoFilePerms(t *testing.T) {
// file, err := ioutil.TempFile("", "permission_test")
// file, err := os.CreateTemp("", "permission_test")
// require.NoError(t, err)
// err = file.Close()
// require.NoError(t, err)
@@ -113,7 +112,7 @@ func TestSIGHUP(t *testing.T) {

func TestAutoFileSize(t *testing.T) {
// First, create an AutoFile writing to a tempfile dir
f, err := ioutil.TempFile("", "sighup_test")
f, err := os.CreateTemp("", "sighup_test")
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
@@ -2,7 +2,6 @@ package autofile

import (
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -17,7 +16,7 @@ import (
func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group {
testID := tmrand.Str(12)
testDir := "_test_" + testID
err := tmos.EnsureDir(testDir, 0700)
err := tmos.EnsureDir(testDir, 0o700)
require.NoError(t, err, "Error creating dir")

headPath := testDir + "/myfile"
@@ -122,7 +121,7 @@ func TestRotateFile(t *testing.T) {
}
}()

dir, err := ioutil.TempDir("", "rotate_test")
dir, err := os.MkdirTemp("", "rotate_test")
require.NoError(t, err)
defer os.RemoveAll(dir)
err = os.Chdir(dir)
@@ -151,21 +150,21 @@ func TestRotateFile(t *testing.T) {
require.NoError(t, err)

// Read g.Head.Path+"000"
body1, err := ioutil.ReadFile(g.Head.Path + ".000")
body1, err := os.ReadFile(g.Head.Path + ".000")
assert.NoError(t, err, "Failed to read first rolled file")
if string(body1) != "Line 1\nLine 2\nLine 3\n" {
t.Errorf("got unexpected contents: [%v]", string(body1))
}

// Read g.Head.Path
body2, err := ioutil.ReadFile(g.Head.Path)
body2, err := os.ReadFile(g.Head.Path)
assert.NoError(t, err, "Failed to read first rolled file")
if string(body2) != "Line 4\nLine 5\nLine 6\n" {
t.Errorf("got unexpected contents: [%v]", string(body2))
}

// Make sure there are no files in the current, temporary directory
files, err := ioutil.ReadDir(".")
files, err := os.ReadDir(".")
require.NoError(t, err)
assert.Empty(t, files)
@@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"

@@ -19,7 +18,7 @@ func WriteConfigVals(dir string, vals map[string]string) error {
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
}
cfile := filepath.Join(dir, "config.toml")
return ioutil.WriteFile(cfile, []byte(data), 0600)
return os.WriteFile(cfile, []byte(data), 0o600)
}

// RunWithArgs executes the given command with the specified command line args
@@ -2,7 +2,7 @@ package cli

import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"testing"
@@ -27,8 +27,11 @@ func TestSetupEnv(t *testing.T) {
{nil, map[string]string{"DEMO_FOOBAR": "good"}, "good"},
{nil, map[string]string{"DEMOFOOBAR": "silly"}, "silly"},
// and that cli overrides env...
{[]string{"--foobar", "important"},
map[string]string{"DEMO_FOOBAR": "ignored"}, "important"},
{
[]string{"--foobar", "important"},
map[string]string{"DEMO_FOOBAR": "ignored"},
"important",
},
}

for idx, tc := range cases {
@@ -55,7 +58,7 @@ func TestSetupEnv(t *testing.T) {
}

func tempDir() string {
cdir, err := ioutil.TempDir("", "test-cli")
cdir, err := os.MkdirTemp("", "test-cli")
if err != nil {
panic(err)
}
@@ -2,7 +2,7 @@ package log_test

import (
"bytes"
"io/ioutil"
"io"
"strings"
"testing"

@@ -90,11 +90,11 @@ func TestError(t *testing.T) {
}

func BenchmarkTMLoggerSimple(b *testing.B) {
benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage)
benchmarkRunner(b, log.NewTMLogger(io.Discard), baseInfoMessage)
}

func BenchmarkTMLoggerContextual(b *testing.B) {
benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), withInfoMessage)
benchmarkRunner(b, log.NewTMLogger(io.Discard), withInfoMessage)
}

func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) {
@@ -3,7 +3,7 @@ package log_test
import (
"bytes"
"errors"
"io/ioutil"
"io"
"math"
"regexp"
"testing"
@@ -62,16 +62,16 @@ func TestTMFmtLogger(t *testing.T) {
}

func BenchmarkTMFmtLoggerSimple(b *testing.B) {
benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), baseMessage)
benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), baseMessage)
}

func BenchmarkTMFmtLoggerContextual(b *testing.B) {
benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), withMessage)
benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), withMessage)
}

func TestTMFmtLoggerConcurrency(t *testing.T) {
t.Parallel()
testConcurrency(t, log.NewTMFmtLogger(ioutil.Discard), 10000)
testConcurrency(t, log.NewTMFmtLogger(io.Discard), 10000)
}

func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) {
@@ -83,7 +83,7 @@ func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Log
}
}

// nolint: errcheck // ignore errors
//nolint:errcheck // ignore errors
var (
baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") }
withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") }
@@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/signal"
"syscall"
@@ -62,11 +61,11 @@ func FileExists(filePath string) bool {
}

func ReadFile(filePath string) ([]byte, error) {
return ioutil.ReadFile(filePath)
return os.ReadFile(filePath)
}

func MustReadFile(filePath string) []byte {
fileBytes, err := ioutil.ReadFile(filePath)
fileBytes, err := os.ReadFile(filePath)
if err != nil {
Exit(fmt.Sprintf("MustReadFile failed: %v", err))
return nil
@@ -75,7 +74,7 @@ func MustReadFile(filePath string) []byte {
}

func WriteFile(filePath string, contents []byte, mode os.FileMode) error {
return ioutil.WriteFile(filePath, contents, mode)
return os.WriteFile(filePath, contents, mode)
}

func MustWriteFile(filePath string, contents []byte, mode os.FileMode) {
@@ -3,7 +3,6 @@ package os
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -12,7 +11,7 @@ import (
)

func TestCopyFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "example")
tmpfile, err := os.CreateTemp("", "example")
if err != nil {
t.Fatal(err)
}
@@ -29,7 +28,7 @@ func TestCopyFile(t *testing.T) {
if _, err := os.Stat(copyfile); os.IsNotExist(err) {
t.Fatal("copy should exist")
}
data, err := ioutil.ReadFile(copyfile)
data, err := os.ReadFile(copyfile)
if err != nil {
t.Fatal(err)
}
@@ -40,35 +39,35 @@ func TestCopyFile(t *testing.T) {
}

func TestEnsureDir(t *testing.T) {
tmp, err := ioutil.TempDir("", "ensure-dir")
tmp, err := os.MkdirTemp("", "ensure-dir")
require.NoError(t, err)
defer os.RemoveAll(tmp)

// Should be possible to create a new directory.
err = EnsureDir(filepath.Join(tmp, "dir"), 0755)
err = EnsureDir(filepath.Join(tmp, "dir"), 0o755)
require.NoError(t, err)
require.DirExists(t, filepath.Join(tmp, "dir"))

// Should succeed on existing directory.
err = EnsureDir(filepath.Join(tmp, "dir"), 0755)
err = EnsureDir(filepath.Join(tmp, "dir"), 0o755)
require.NoError(t, err)

// Should fail on file.
err = ioutil.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644)
err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0o644)
require.NoError(t, err)
err = EnsureDir(filepath.Join(tmp, "file"), 0755)
err = EnsureDir(filepath.Join(tmp, "file"), 0o755)
require.Error(t, err)

// Should allow symlink to dir.
err = os.Symlink(filepath.Join(tmp, "dir"), filepath.Join(tmp, "linkdir"))
require.NoError(t, err)
err = EnsureDir(filepath.Join(tmp, "linkdir"), 0755)
err = EnsureDir(filepath.Join(tmp, "linkdir"), 0o755)
require.NoError(t, err)

// Should error on symlink to file.
err = os.Symlink(filepath.Join(tmp, "file"), filepath.Join(tmp, "linkfile"))
require.NoError(t, err)
err = EnsureDir(filepath.Join(tmp, "linkfile"), 0755)
err = EnsureDir(filepath.Join(tmp, "linkfile"), 0o755)
require.Error(t, err)
}

@@ -76,7 +75,7 @@ func TestEnsureDir(t *testing.T) {
// the origin is positively a non-directory and that it is ready for copying.
// See https://github.com/tendermint/tendermint/issues/6427
func TestTrickedTruncation(t *testing.T) {
tmpDir, err := ioutil.TempDir(os.TempDir(), "pwn_truncate")
tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate")
if err != nil {
t.Fatal(err)
}
@@ -84,12 +83,12 @@ func TestTrickedTruncation(t *testing.T) {

originalWALPath := filepath.Join(tmpDir, "wal")
originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!")
if err := ioutil.WriteFile(originalWALPath, originalWALContent, 0755); err != nil {
if err := os.WriteFile(originalWALPath, originalWALContent, 0o755); err != nil {
t.Fatal(err)
}

// 1. Sanity check.
readWAL, err := ioutil.ReadFile(originalWALPath)
readWAL, err := os.ReadFile(originalWALPath)
if err != nil {
t.Fatal(err)
}
@@ -104,7 +103,7 @@ func TestTrickedTruncation(t *testing.T) {
}

// 3. Check the WAL's content
reReadWAL, err := ioutil.ReadFile(originalWALPath)
reReadWAL, err := os.ReadFile(originalWALPath)
if err != nil {
t.Fatal(err)
}
@@ -1,4 +1,3 @@
//nolint
package query

import (
@@ -48,7 +48,7 @@ func (r *Rand) init() {
}

func (r *Rand) reset(seed int64) {
r.rand = mrand.New(mrand.NewSource(seed)) //nolint:gosec // G404: Use of weak random number generator
r.rand = mrand.New(mrand.NewSource(seed)) //nolint:gosec,nolintlint // G404: Use of weak random number generator
}

//----------------------------------------
@@ -5,7 +5,6 @@ package tempfile
import (
"bytes"
"fmt"
"io/ioutil"
"os"
testing "testing"

@@ -18,16 +17,16 @@ func TestWriteFileAtomic(t *testing.T) {
var (
data = []byte(tmrand.Str(tmrand.Intn(2048)))
old = tmrand.Bytes(tmrand.Intn(2048))
perm os.FileMode = 0600
perm os.FileMode = 0o600
)

f, err := ioutil.TempFile("/tmp", "write-atomic-test-")
f, err := os.CreateTemp("/tmp", "write-atomic-test-")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())

if err = ioutil.WriteFile(f.Name(), old, 0600); err != nil {
if err = os.WriteFile(f.Name(), old, 0o600); err != nil {
t.Fatal(err)
}

@@ -35,7 +34,7 @@ func TestWriteFileAtomic(t *testing.T) {
t.Fatal(err)
}

rData, err := ioutil.ReadFile(f.Name())
rData, err := os.ReadFile(f.Name())
if err != nil {
t.Fatal(err)
}
@@ -69,7 +68,7 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) {
firstFileRand := randWriteFileSuffix()
atomicWriteFileRand = defaultSeed
fname := "/tmp/" + atomicWriteFilePrefix + firstFileRand
f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777)
f, err := os.OpenFile(fname, atomicWriteFileFlag, 0o777)
defer os.Remove(fname)
// Defer here, in case there is a panic in WriteFileAtomic.
defer os.Remove(fileToWrite)
@@ -77,14 +76,14 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) {
require.NoError(t, err)
_, err = f.WriteString(testString)
require.NoError(t, err)
err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0o777)
require.NoError(t, err)
// Check that the first atomic file was untouched
firstAtomicFileBytes, err := ioutil.ReadFile(fname)
firstAtomicFileBytes, err := os.ReadFile(fname)
require.NoError(t, err, "Error reading first atomic file")
require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten")
// Check that the resultant file is correct
resultantFileBytes, err := ioutil.ReadFile(fileToWrite)
resultantFileBytes, err := os.ReadFile(fileToWrite)
require.NoError(t, err, "Error reading resultant file")
require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes")

@@ -113,7 +112,7 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) {
for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ {
fileRand := randWriteFileSuffix()
fname := "/tmp/" + atomicWriteFilePrefix + fileRand
f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777)
f, err := os.OpenFile(fname, atomicWriteFileFlag, 0o777)
require.Nil(t, err)
_, err = f.WriteString(fmt.Sprintf(testString, i))
require.NoError(t, err)
@@ -124,21 +123,21 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) {
// Defer here, in case there is a panic in WriteFileAtomic.
defer os.Remove(fileToWrite)

err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0o777)
require.NoError(t, err)
// Check that all intermittent atomic file were untouched
atomicWriteFileRand = defaultSeed
for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ {
fileRand := randWriteFileSuffix()
fname := "/tmp/" + atomicWriteFilePrefix + fileRand
firstAtomicFileBytes, err := ioutil.ReadFile(fname)
firstAtomicFileBytes, err := os.ReadFile(fname)
require.Nil(t, err, "Error reading first atomic file")
require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes,
"atomic write file %d was overwritten", i)
}

// Check that the resultant file is correct
resultantFileBytes, err := ioutil.ReadFile(fileToWrite)
resultantFileBytes, err := os.ReadFile(fileToWrite)
require.Nil(t, err, "Error reading resultant file")
require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes")
}
@@ -3,7 +3,6 @@ package light_test
import (
"context"
"fmt"
"io/ioutil"
stdlog "log"
"os"
"testing"
@@ -25,7 +24,7 @@ func ExampleClient_Update() {
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)

dbDir, err := ioutil.TempDir("", "light-client-example")
dbDir, err := os.MkdirTemp("", "light-client-example")
if err != nil {
stdlog.Fatal(err)
}
@@ -93,7 +92,7 @@ func ExampleClient_VerifyLightBlockAtHeight() {
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)

dbDir, err := ioutil.TempDir("", "light-client-example")
dbDir, err := os.MkdirTemp("", "light-client-example")
if err != nil {
stdlog.Fatal(err)
}
@@ -62,7 +62,7 @@ func makeHealthFunc(c *lrpc.Client) rpcHealthFunc {

type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error)

//nolint: interfacer
//nolint:interfacer
func makeStatusFunc(c *lrpc.Client) rpcStatusFunc {
return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
return c.Status(ctx.Context())
@@ -259,8 +259,8 @@ type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string,

func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc {
return func(ctx *rpctypes.Context, path string, data bytes.HexBytes,
height int64, prove bool) (*ctypes.ResultABCIQuery, error) {

height int64, prove bool,
) (*ctypes.ResultABCIQuery, error) {
return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{
Height: height,
Prove: prove,
@@ -278,7 +278,7 @@ func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc {

type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)

//nolint: interfacer
//nolint:interfacer
func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc {
return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
return c.BroadcastEvidence(ctx.Context(), ev)
@@ -9,6 +9,7 @@ import (
"time"

"github.com/creachadair/taskgroup"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/clist"

@@ -7,6 +7,7 @@ import (
"time"

"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/mempool"
)
node/node.go
@@ -273,7 +273,6 @@ func createAndStartIndexerService(
|
||||
eventBus *types.EventBus,
|
||||
logger log.Logger,
|
||||
) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
|
||||
|
||||
var (
|
||||
txIndexer txindex.TxIndexer
|
||||
blockIndexer indexer.BlockIndexer
|
||||
@@ -322,8 +321,8 @@ func doHandshake(
|
||||
genDoc *types.GenesisDoc,
|
||||
eventBus types.BlockEventPublisher,
|
||||
proxyApp proxy.AppConns,
|
||||
consensusLogger log.Logger) error {
|
||||
|
||||
consensusLogger log.Logger,
|
||||
) error {
|
||||
handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
|
||||
handshaker.SetLogger(consensusLogger)
|
||||
handshaker.SetEventBus(eventBus)
|
||||
@@ -373,7 +372,6 @@ func createMempoolAndMempoolReactor(
|
||||
memplMetrics *mempl.Metrics,
|
||||
logger log.Logger,
|
||||
) (mempl.Mempool, p2p.Reactor) {
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV1:
|
||||
mp := mempoolv1.NewTxMempool(
|
||||
@@ -424,8 +422,8 @@ func createMempoolAndMempoolReactor(
|
||||
}
|
||||
|
||||
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
|
||||
stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
|
||||
|
||||
stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger,
|
||||
) (*evidence.Reactor, *evidence.Pool, error) {
|
||||
evidenceDB, err := dbProvider(&DBContext{"evidence", config})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -447,8 +445,8 @@ func createBlockchainReactor(config *cfg.Config,
|
||||
blockExec *sm.BlockExecutor,
|
||||
blockStore *store.BlockStore,
|
||||
fastSync bool,
|
||||
logger log.Logger) (bcReactor p2p.Reactor, err error) {
|
||||
|
||||
logger log.Logger,
|
||||
) (bcReactor p2p.Reactor, err error) {
|
||||
switch config.FastSync.Version {
|
||||
case "v0":
|
||||
bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
|
||||
@@ -474,8 +472,8 @@ func createConsensusReactor(config *cfg.Config,
|
||||
csMetrics *cs.Metrics,
|
||||
waitSync bool,
|
||||
eventBus *types.EventBus,
|
||||
consensusLogger log.Logger) (*cs.Reactor, *cs.State) {
|
||||
|
||||
consensusLogger log.Logger,
|
||||
) (*cs.Reactor, *cs.State) {
|
||||
consensusState := cs.NewState(
|
||||
config.Consensus,
|
||||
state.Copy(),
|
||||
@@ -577,8 +575,8 @@ func createSwitch(config *cfg.Config,
|
||||
evidenceReactor *evidence.Reactor,
|
||||
nodeInfo p2p.NodeInfo,
|
||||
nodeKey *p2p.NodeKey,
|
||||
p2pLogger log.Logger) *p2p.Switch {
|
||||
|
||||
p2pLogger log.Logger,
|
||||
) *p2p.Switch {
|
||||
sw := p2p.NewSwitch(
|
||||
config.P2P,
|
||||
transport,
|
||||
@@ -600,8 +598,8 @@ func createSwitch(config *cfg.Config,
|
||||
}
|
||||
|
||||
func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
|
||||
p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
|
||||
|
||||
p2pLogger log.Logger, nodeKey *p2p.NodeKey,
|
||||
) (pex.AddrBook, error) {
|
||||
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
|
||||
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
|
||||
|
||||
@@ -627,8 +625,8 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
|
||||
}
|
||||
|
||||
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
|
||||
sw *p2p.Switch, logger log.Logger) *pex.Reactor {
|
||||
|
||||
sw *p2p.Switch, logger log.Logger,
|
||||
) *pex.Reactor {
|
||||
// TODO persistent peers ? so we can have their DNS addrs saved
|
||||
pexReactor := pex.NewReactor(addrBook,
|
||||
&pex.ReactorConfig{
|
||||
@@ -650,7 +648,8 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
|
||||
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
|
||||
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
|
||||
stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
|
||||
stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
|
||||
stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
|
||||
) error {
|
||||
ssR.Logger.Info("Starting state sync")
|
||||
|
||||
if stateProvider == nil {
|
||||
@@ -712,8 +711,8 @@ func NewNode(config *cfg.Config,
|
||||
dbProvider DBProvider,
|
||||
metricsProvider MetricsProvider,
|
||||
logger log.Logger,
|
||||
options ...Option) (*Node, error) {
|
||||
|
||||
options ...Option,
|
||||
) (*Node, error) {
|
||||
blockStore, stateDB, err := initDBs(config, dbProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -895,6 +894,7 @@ func NewNode(config *cfg.Config,
 	if config.RPC.PprofListenAddress != "" {
 		go func() {
 			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
+			//nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts
 			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
 		}()
 	}

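For reference, a hedged alternative to suppressing G114: the gosec warning goes away if the pprof endpoint is served from an explicit http.Server with a timeout instead of the bare helper. The address and timeout values below are illustrative, not the node's defaults.

	package main

	import (
		"net/http"
		_ "net/http/pprof" // registers the pprof handlers on http.DefaultServeMux
		"time"
	)

	// servePprof satisfies gosec G114 by setting a read-header timeout.
	func servePprof(addr string) error {
		srv := &http.Server{
			Addr:              addr,
			Handler:           http.DefaultServeMux,
			ReadHeaderTimeout: 10 * time.Second,
		}
		return srv.ListenAndServe()
	}

	func main() {
		if err := servePprof("localhost:6060"); err != nil {
			panic(err)
		}
	}
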
@@ -1212,7 +1212,6 @@ func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
}
|
||||
|
||||
return listeners, nil
|
||||
|
||||
}
|
||||
|
||||
// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
|
||||
@@ -1385,9 +1384,7 @@ func makeNodeInfo(

 //------------------------------------------------------------------------------

-var (
-	genesisDocKey = []byte("genesisDoc")
-)
+var genesisDocKey = []byte("genesisDoc")

 // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
 // database, or creates one using the given genesisDocProvider. On success this also
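A small sketch of the declaration style enforced by this hunk (the extra names in the grouped form are illustrative): a parenthesized var block is kept only when it declares more than one name, and a single entry collapses to a one-line declaration.

	package node

	// Single entry: one-line form.
	var genesisDocKey = []byte("genesisDoc")

	// Multiple entries: the grouped form stays.
	var (
		stateKey  = []byte("stateKey")
		paramsKey = []byte("paramsKey")
	)
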
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
@@ -70,7 +70,7 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {

 // LoadNodeKey loads NodeKey located in filePath.
 func LoadNodeKey(filePath string) (*NodeKey, error) {
-	jsonBytes, err := ioutil.ReadFile(filePath)
+	jsonBytes, err := os.ReadFile(filePath)
 	if err != nil {
 		return nil, err
 	}

@@ -88,7 +88,7 @@ func (nodeKey *NodeKey) SaveAs(filePath string) error {
 	if err != nil {
 		return err
 	}
-	err = ioutil.WriteFile(filePath, jsonBytes, 0600)
+	err = os.WriteFile(filePath, jsonBytes, 0o600)
 	if err != nil {
 		return err
 	}
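These two hunks show the two mechanical changes that repeat across the commit: os.ReadFile/os.WriteFile replace their io/ioutil counterparts one for one, and octal permissions use the 0o prefix. A minimal sketch with an illustrative file name:

	package main

	import (
		"log"
		"os"
	)

	func main() {
		// 0o600 and 0600 denote the same bits (owner read/write only);
		// the 0o spelling is the one the updated lint config prefers.
		if err := os.WriteFile("node_key.json", []byte(`{"id":"example"}`), 0o600); err != nil {
			log.Fatal(err)
		}
		jsonBytes, err := os.ReadFile("node_key.json")
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("read %d bytes", len(jsonBytes))
	}
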
|
||||
@@ -3,7 +3,6 @@ package pex
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
@@ -718,7 +717,7 @@ func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetA
|
||||
}
|
||||
|
||||
func createTempFileName(prefix string) string {
|
||||
f, err := ioutil.TempFile("", prefix)
|
||||
f, err := os.CreateTemp("", prefix)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package pex
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@@ -20,9 +19,7 @@ import (
|
||||
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
|
||||
)
|
||||
|
||||
var (
|
||||
cfg *config.P2PConfig
|
||||
)
|
||||
var cfg *config.P2PConfig
|
||||
|
||||
func init() {
|
||||
cfg = config.DefaultP2PConfig()
|
||||
@@ -73,7 +70,7 @@ func TestPEXReactorRunning(t *testing.T) {
|
||||
switches := make([]*p2p.Switch, N)
|
||||
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -208,7 +205,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
|
||||
|
||||
func TestCheckSeeds(t *testing.T) {
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -227,8 +224,10 @@ func TestCheckSeeds(t *testing.T) {
|
||||
|
||||
// 4. test create peer with all seeds having unresolvable DNS fails
|
||||
badPeerConfig := &ReactorConfig{
|
||||
Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
|
||||
"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"},
|
||||
Seeds: []string{
|
||||
"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
|
||||
"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657",
|
||||
},
|
||||
}
|
||||
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
|
||||
require.Error(t, peerSwitch.Start())
|
||||
@@ -236,9 +235,11 @@ func TestCheckSeeds(t *testing.T) {
|
||||
|
||||
// 5. test create peer with one good seed address succeeds
|
||||
badPeerConfig = &ReactorConfig{
|
||||
Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
|
||||
Seeds: []string{
|
||||
"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
|
||||
"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657",
|
||||
seed.NetAddress().String()},
|
||||
seed.NetAddress().String(),
|
||||
},
|
||||
}
|
||||
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
@@ -247,7 +248,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
|
||||
func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -267,7 +268,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
|
||||
func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) {
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -296,7 +297,7 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) {
|
||||
|
||||
func TestPEXReactorSeedMode(t *testing.T) {
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -335,7 +336,7 @@ func TestPEXReactorSeedMode(t *testing.T) {
|
||||
|
||||
func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) {
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -373,7 +374,7 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) {
|
||||
|
||||
func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) {
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -409,7 +410,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) {
|
||||
switches := make([]*p2p.Switch, N)
|
||||
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@@ -646,7 +647,7 @@ func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch {
|
||||
|
||||
func createReactor(conf *ReactorConfig) (r *Reactor, book AddrBook) {
|
||||
// directory to store address book
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
dir, err := os.MkdirTemp("", "pex_reactor")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -677,7 +678,6 @@ func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch {
|
||||
}
|
||||
|
||||
func TestPexVectors(t *testing.T) {
|
||||
|
||||
addr := tmp2p.NetAddress{
|
||||
ID: "1",
|
||||
IP: "127.0.0.1",
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -25,9 +25,7 @@ import (
|
||||
"github.com/tendermint/tendermint/p2p/conn"
|
||||
)
|
||||
|
||||
var (
|
||||
cfg *config.P2PConfig
|
||||
)
|
||||
var cfg *config.P2PConfig
|
||||
|
||||
func init() {
|
||||
cfg = config.DefaultP2PConfig()
|
||||
@@ -99,7 +97,8 @@ func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switc
|
||||
func initSwitchFunc(i int, sw *Switch) *Switch {
|
||||
sw.SetAddrBook(&AddrBookMock{
|
||||
Addrs: make(map[string]struct{}),
|
||||
OurAddrs: make(map[string]struct{})})
|
||||
OurAddrs: make(map[string]struct{}),
|
||||
})
|
||||
|
||||
// Make two reactors of two channels each
|
||||
sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
|
||||
@@ -400,7 +399,7 @@ func TestSwitchStopPeerForError(t *testing.T) {
|
||||
resp, err := http.Get(s.URL)
|
||||
require.NoError(t, err)
|
||||
defer resp.Body.Close()
|
||||
buf, _ := ioutil.ReadAll(resp.Body)
|
||||
buf, _ := io.ReadAll(resp.Body)
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
@@ -681,9 +680,11 @@ func (et errorTransport) NetAddress() NetAddress {
|
||||
func (et errorTransport) Accept(c peerConfig) (Peer, error) {
|
||||
return nil, et.acceptErr
|
||||
}
|
||||
|
||||
func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (errorTransport) Cleanup(Peer) {
|
||||
panic("not implemented")
|
||||
}
|
||||
@@ -838,7 +839,6 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestSwitchRemovalErr(t *testing.T) {
|
||||
|
||||
sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch {
|
||||
return initSwitchFunc(i, sw)
|
||||
})
|
||||
|
||||
@@ -5,7 +5,6 @@ package trust
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
@@ -17,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func TestTrustMetricStoreSaveLoad(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "trust_test")
|
||||
dir, err := os.MkdirTemp("", "trust_test")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(dir)
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
@@ -299,7 +299,6 @@ type statusInfo struct {
|
||||
}
|
||||
|
||||
func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
|
||||
|
||||
message := "<u:GetExternalIPAddress xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
|
||||
"</u:GetExternalIPAddress>"
|
||||
|
||||
@@ -312,7 +311,7 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
|
||||
return
|
||||
}
|
||||
var envelope Envelope
|
||||
data, err := ioutil.ReadAll(response.Body)
|
||||
data, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -350,7 +349,8 @@ func (n *upnpNAT) AddPortMapping(
|
||||
externalPort,
|
||||
internalPort int,
|
||||
description string,
|
||||
timeout int) (mappedExternalPort int, err error) {
|
||||
timeout int,
|
||||
) (mappedExternalPort int, err error) {
|
||||
// A single concatenation would break ARM compilation.
|
||||
message := "<u:AddPortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
|
||||
"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort)
|
||||
@@ -374,7 +374,7 @@ func (n *upnpNAT) AddPortMapping(
|
||||
// TODO: check response to see if the port was forwarded
|
||||
// log.Println(message, response)
|
||||
// JAE:
|
||||
// body, err := ioutil.ReadAll(response.Body)
|
||||
// body, err := io.ReadAll(response.Body)
|
||||
// fmt.Println(string(body), err)
|
||||
mappedExternalPort = externalPort
|
||||
_ = response
|
||||
@@ -382,7 +382,6 @@ func (n *upnpNAT) AddPortMapping(
|
||||
}
|
||||
|
||||
func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
|
||||
|
||||
message := "<u:DeletePortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
|
||||
"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) +
|
||||
"</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" +
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
@@ -64,7 +64,7 @@ func (pvKey FilePVKey) Save() {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := tempfile.WriteFileAtomic(outFile, jsonBytes, 0600); err != nil {
|
||||
if err := tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -90,7 +90,6 @@ type FilePVLastSignState struct {
|
||||
// we have already signed for this HRS, and can reuse the existing signature).
|
||||
// It panics if the HRS matches the arguments, there's a SignBytes, but no Signature.
|
||||
func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) {
|
||||
|
||||
if lss.Height > height {
|
||||
return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height)
|
||||
}
|
||||
@@ -133,7 +132,7 @@ func (lss *FilePVLastSignState) Save() {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600)
|
||||
err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -188,7 +187,7 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) *FilePV {
|
||||
|
||||
// If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState.
|
||||
func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV {
|
||||
keyJSONBytes, err := ioutil.ReadFile(keyFilePath)
|
||||
keyJSONBytes, err := os.ReadFile(keyFilePath)
|
||||
if err != nil {
|
||||
tmos.Exit(err.Error())
|
||||
}
|
||||
@@ -206,7 +205,7 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV {
|
||||
pvState := FilePVLastSignState{}
|
||||
|
||||
if loadState {
|
||||
stateJSONBytes, err := ioutil.ReadFile(stateFilePath)
|
||||
stateJSONBytes, err := os.ReadFile(stateFilePath)
|
||||
if err != nil {
|
||||
tmos.Exit(err.Error())
|
||||
}
|
||||
@@ -384,8 +383,8 @@ func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error
|
||||
|
||||
// Persist height/round/step and signature
|
||||
func (pv *FilePV) saveSigned(height int64, round int32, step int8,
|
||||
signBytes []byte, sig []byte) {
|
||||
|
||||
signBytes []byte, sig []byte,
|
||||
) {
|
||||
pv.LastSignState.Height = height
|
||||
pv.LastSignState.Round = round
|
||||
pv.LastSignState.Step = step
|
||||
|
||||
@@ -3,7 +3,6 @@ package privval
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -23,9 +22,9 @@ import (
|
||||
func TestGenLoadValidator(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
|
||||
require.Nil(t, err)
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
|
||||
require.Nil(t, err)
|
||||
|
||||
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
|
||||
@@ -41,9 +40,9 @@ func TestGenLoadValidator(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestResetValidator(t *testing.T) {
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
|
||||
require.Nil(t, err)
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
|
||||
require.Nil(t, err)
|
||||
|
||||
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
|
||||
@@ -72,9 +71,9 @@ func TestResetValidator(t *testing.T) {
|
||||
func TestLoadOrGenValidator(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
|
||||
require.Nil(t, err)
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
|
||||
require.Nil(t, err)
|
||||
|
||||
tempKeyFilePath := tempKeyFile.Name()
|
||||
@@ -159,9 +158,9 @@ func TestUnmarshalValidatorKey(t *testing.T) {
|
||||
func TestSignVote(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
|
||||
require.Nil(t, err)
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
|
||||
require.Nil(t, err)
|
||||
|
||||
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
|
||||
@@ -169,10 +168,14 @@ func TestSignVote(t *testing.T) {
|
||||
randbytes := tmrand.Bytes(tmhash.Size)
|
||||
randbytes2 := tmrand.Bytes(tmhash.Size)
|
||||
|
||||
block1 := types.BlockID{Hash: randbytes,
|
||||
PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}}
|
||||
block2 := types.BlockID{Hash: randbytes2,
|
||||
PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}}
|
||||
block1 := types.BlockID{
|
||||
Hash: randbytes,
|
||||
PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes},
|
||||
}
|
||||
block2 := types.BlockID{
|
||||
Hash: randbytes2,
|
||||
PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2},
|
||||
}
|
||||
|
||||
height, round := int64(10), int32(1)
|
||||
voteType := tmproto.PrevoteType
|
||||
@@ -212,9 +215,9 @@ func TestSignVote(t *testing.T) {
|
||||
func TestSignProposal(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
|
||||
require.Nil(t, err)
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
|
||||
require.Nil(t, err)
|
||||
|
||||
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
|
||||
@@ -222,10 +225,14 @@ func TestSignProposal(t *testing.T) {
|
||||
randbytes := tmrand.Bytes(tmhash.Size)
|
||||
randbytes2 := tmrand.Bytes(tmhash.Size)
|
||||
|
||||
block1 := types.BlockID{Hash: randbytes,
|
||||
PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}}
|
||||
block2 := types.BlockID{Hash: randbytes2,
|
||||
PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}}
|
||||
block1 := types.BlockID{
|
||||
Hash: randbytes,
|
||||
PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes},
|
||||
}
|
||||
block2 := types.BlockID{
|
||||
Hash: randbytes2,
|
||||
PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2},
|
||||
}
|
||||
height, round := int64(10), int32(1)
|
||||
|
||||
// sign a proposal for first time
|
||||
@@ -260,9 +267,9 @@ func TestSignProposal(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDifferByTimestamp(t *testing.T) {
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
|
||||
require.Nil(t, err)
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
|
||||
require.Nil(t, err)
|
||||
|
||||
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
|
||||
@@ -321,7 +328,8 @@ func TestDifferByTimestamp(t *testing.T) {
|
||||
}
|
||||
|
||||
func newVote(addr types.Address, idx int32, height int64, round int32,
|
||||
typ tmproto.SignedMsgType, blockID types.BlockID) *types.Vote {
|
||||
typ tmproto.SignedMsgType, blockID types.BlockID,
|
||||
) *types.Vote {
|
||||
return &types.Vote{
|
||||
ValidatorAddress: addr,
|
||||
ValidatorIndex: idx,
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package privval
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
@@ -29,7 +28,7 @@ type listenerTestCase struct {
|
||||
// testUnixAddr will attempt to obtain a platform-independent temporary file
|
||||
// name for a Unix socket
|
||||
func testUnixAddr() (string, error) {
|
||||
f, err := ioutil.TempFile("", "tendermint-privval-test-*")
|
||||
f, err := os.CreateTemp("", "tendermint-privval-test-*")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
|
||||
@@ -4,6 +4,7 @@ package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
|
||||
@@ -4,6 +4,7 @@ package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
)
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package client_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
@@ -14,7 +13,7 @@ var node *nm.Node
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// start a tendermint node (and kvstore) in the background to test against
|
||||
dir, err := ioutil.TempDir("/tmp", "rpc-client-test")
|
||||
dir, err := os.MkdirTemp("/tmp", "rpc-client-test")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -217,7 +217,7 @@ func (c *Client) Call(
|
||||
|
||||
defer httpResponse.Body.Close()
|
||||
|
||||
responseBytes, err := ioutil.ReadAll(httpResponse.Body)
|
||||
responseBytes, err := io.ReadAll(httpResponse.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response body: %w", err)
|
||||
}
|
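A minimal sketch of the io.ReadAll migration shown in this hunk: since Go 1.16, ioutil.ReadAll is only a thin wrapper around io.ReadAll, so the call site changes in the package name alone. The URL below is illustrative.

	package main

	import (
		"fmt"
		"io"
		"log"
		"net/http"
	)

	func main() {
		resp, err := http.Get("http://localhost:26657/status")
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()

		body, err := io.ReadAll(resp.Body) // formerly ioutil.ReadAll
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(body), "bytes")
	}
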
||||
@@ -265,7 +265,7 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque
|
||||
|
||||
defer httpResponse.Body.Close()
|
||||
|
||||
responseBytes, err := ioutil.ReadAll(httpResponse.Body)
|
||||
responseBytes, err := io.ReadAll(httpResponse.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read response body: %w", err)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -21,7 +21,7 @@ func TestHTTPClientMakeHTTPDialer(t *testing.T) {
|
||||
defer tsTLS.Close()
|
||||
// This silences a TLS handshake error, caused by the dialer just immediately
|
||||
// disconnecting, which we can just ignore.
|
||||
tsTLS.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
|
||||
tsTLS.Config.ErrorLog = log.New(io.Discard, "", 0)
|
||||
|
||||
for _, testURL := range []string{ts.URL, tsTLS.URL} {
|
||||
u, err := newParsedURL(testURL)
|
||||
|
||||
@@ -3,7 +3,7 @@ package client
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
@@ -52,8 +52,8 @@ func NewURI(remote string) (*URIClient, error) {
|
||||
|
||||
// Call issues a POST form HTTP request.
|
||||
func (c *URIClient) Call(ctx context.Context, method string,
|
||||
params map[string]interface{}, result interface{}) (interface{}, error) {
|
||||
|
||||
params map[string]interface{}, result interface{},
|
||||
) (interface{}, error) {
|
||||
values, err := argsToURLValues(params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode params: %w", err)
|
||||
@@ -76,7 +76,7 @@ func (c *URIClient) Call(ctx context.Context, method string,
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
responseBytes, err := ioutil.ReadAll(resp.Body)
|
||||
responseBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read response body: %w", err)
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"sort"
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
// jsonrpc calls grab the given method's function info and runs reflect.Call
|
||||
func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
b, err := ioutil.ReadAll(r.Body)
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
res := types.RPCInvalidRequestError(nil,
|
||||
fmt.Errorf("error reading request body: %w", err),
|
||||
@@ -128,7 +128,6 @@ func mapParamsToArgs(
|
||||
params map[string]json.RawMessage,
|
||||
argsOffset int,
|
||||
) ([]reflect.Value, error) {
|
||||
|
||||
values := make([]reflect.Value, len(rpcFunc.argNames))
|
||||
for i, argName := range rpcFunc.argNames {
|
||||
argType := rpcFunc.args[i+argsOffset]
|
||||
@@ -153,7 +152,6 @@ func arrayParamsToArgs(
|
||||
params []json.RawMessage,
|
||||
argsOffset int,
|
||||
) ([]reflect.Value, error) {
|
||||
|
||||
if len(rpcFunc.argNames) != len(params) {
|
||||
return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)",
|
||||
len(rpcFunc.argNames), rpcFunc.argNames, len(params), params)
|
||||
|
||||
@@ -3,7 +3,7 @@ package server
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
@@ -66,7 +66,7 @@ func TestRPCParams(t *testing.T) {
|
||||
defer res.Body.Close()
|
||||
// Always expecting back a JSONRPCResponse
|
||||
assert.NotZero(t, res.StatusCode, "#%d: should always return code", i)
|
||||
blob, err := ioutil.ReadAll(res.Body)
|
||||
blob, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: err reading body: %v", i, err)
|
||||
continue
|
||||
@@ -113,7 +113,7 @@ func TestJSONRPCID(t *testing.T) {
|
||||
res := rec.Result()
|
||||
// Always expecting back a JSONRPCResponse
|
||||
assert.NotZero(t, res.StatusCode, "#%d: should always return code", i)
|
||||
blob, err := ioutil.ReadAll(res.Body)
|
||||
blob, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: err reading body: %v", i, err)
|
||||
continue
|
||||
@@ -143,7 +143,7 @@ func TestRPCNotification(t *testing.T) {
|
||||
|
||||
// Always expecting back a JSONRPCResponse
|
||||
require.True(t, statusOK(res.StatusCode), "should always return 2XX")
|
||||
blob, err := ioutil.ReadAll(res.Body)
|
||||
blob, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
require.Nil(t, err, "reading from the body should not give back an error")
|
||||
require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server")
|
||||
@@ -179,7 +179,7 @@ func TestRPCNotificationInBatch(t *testing.T) {
|
||||
res := rec.Result()
|
||||
// Always expecting back a JSONRPCResponse
|
||||
assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i)
|
||||
blob, err := ioutil.ReadAll(res.Body)
|
||||
blob, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: err reading body: %v", i, err)
|
||||
continue
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -102,7 +102,7 @@ func TestServeTLS(t *testing.T) {
|
||||
defer res.Body.Close()
|
||||
assert.Equal(t, http.StatusOK, res.StatusCode)
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
body, err := io.ReadAll(res.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("some body"), body)
|
||||
}
|
||||
@@ -115,7 +115,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
|
||||
err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"}))
|
||||
require.NoError(t, err)
|
||||
resp := w.Result()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
@@ -135,7 +135,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
|
||||
types.NewRPCSuccessResponse(id, &sampleResult{"world"}))
|
||||
require.NoError(t, err)
|
||||
resp = w.Result()
|
||||
body, err = ioutil.ReadAll(resp.Body)
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -167,7 +167,7 @@ func TestWriteRPCResponseHTTPError(t *testing.T) {
|
||||
types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo")))
|
||||
require.NoError(t, err)
|
||||
resp := w.Result()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
@@ -277,7 +276,7 @@ func newTestBlockHeader() types.EventDataNewBlockHeader {
|
||||
// readSchema loads the indexing database schema file
|
||||
func readSchema() ([]*schema.Migration, error) {
|
||||
const filename = "schema.sql"
|
||||
contents, err := ioutil.ReadFile(filename)
|
||||
contents, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
state "github.com/tendermint/tendermint/state"
|
||||
|
||||
types "github.com/tendermint/tendermint/types"
|
||||
|
||||
@@ -4,6 +4,7 @@ package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
state "github.com/tendermint/tendermint/state"
|
||||
|
||||
tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
@@ -82,7 +82,6 @@ type State struct {
|
||||
|
||||
// Copy makes a copy of the State for mutating.
|
||||
func (state State) Copy() State {
|
||||
|
||||
return State{
|
||||
Version: state.Version,
|
||||
ChainID: state.ChainID,
|
||||
@@ -239,7 +238,6 @@ func (state State) MakeBlock(
|
||||
evidence []types.Evidence,
|
||||
proposerAddress []byte,
|
||||
) (*types.Block, *types.PartSet) {
|
||||
|
||||
// Build base block with block data.
|
||||
block := types.MakeBlock(height, txs, commit, evidence)
|
||||
|
||||
@@ -303,7 +301,7 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) {
|
||||
|
||||
// MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file.
|
||||
func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) {
|
||||
genDocJSON, err := ioutil.ReadFile(genDocFile)
|
||||
genDocJSON, err := os.ReadFile(genDocFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err)
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func BenchmarkTxSearch(b *testing.B) {
|
||||
dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test")
|
||||
dbDir, err := os.MkdirTemp("", "benchmark_tx_search_test")
|
||||
if err != nil {
|
||||
b.Errorf("failed to create temporary directory: %s", err)
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
@@ -260,7 +259,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTxIndexDuplicatePreviouslySuccessful(t *testing.T) {
|
||||
var mockTx = types.Tx("MOCK_TX_HASH")
|
||||
mockTx := types.Tx("MOCK_TX_HASH")
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
@@ -426,7 +425,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult {
|
||||
}
|
||||
|
||||
func benchmarkTxIndex(txsCount int64, b *testing.B) {
|
||||
dir, err := ioutil.TempDir("", "tx_index_db")
|
||||
dir, err := os.MkdirTemp("", "tx_index_db")
|
||||
require.NoError(b, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
query "github.com/tendermint/tendermint/libs/pubsub/query"
|
||||
|
||||
txindex "github.com/tendermint/tendermint/state/txindex"
|
||||
|
||||
@@ -3,7 +3,6 @@ package statesync
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -42,7 +41,7 @@ type chunkQueue struct {
|
||||
// newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage.
|
||||
// Callers must call Close() when done.
|
||||
func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) {
|
||||
dir, err := ioutil.TempDir(tempDir, "tm-statesync")
|
||||
dir, err := os.MkdirTemp(tempDir, "tm-statesync")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err)
|
||||
}
|
||||
@@ -84,7 +83,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) {
|
||||
}
|
||||
|
||||
path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10))
|
||||
err := ioutil.WriteFile(path, chunk.Chunk, 0600)
|
||||
err := os.WriteFile(path, chunk.Chunk, 0o600)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err)
|
||||
}
|
||||
@@ -209,7 +208,7 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) {
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
body, err := ioutil.ReadFile(path)
|
||||
body, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load chunk %v: %w", index, err)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package statesync
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
@@ -36,20 +35,20 @@ func TestNewChunkQueue_TempDir(t *testing.T) {
|
||||
Hash: []byte{7},
|
||||
Metadata: nil,
|
||||
}
|
||||
dir, err := ioutil.TempDir("", "newchunkqueue")
|
||||
dir, err := os.MkdirTemp("", "newchunkqueue")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
queue, err := newChunkQueue(snapshot, dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
files, err := os.ReadDir(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, files, 1)
|
||||
|
||||
err = queue.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
files, err = ioutil.ReadDir(dir)
|
||||
files, err = os.ReadDir(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, files, 0)
|
||||
}
|
||||
|
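Unlike the other io/ioutil replacements, os.ReadDir is not a pure rename: it returns []fs.DirEntry rather than []fs.FileInfo. Tests that only count or name entries, as above, compile unchanged; callers that need sizes or modification times must call Info() on each entry. A sketch with an illustrative path:

	package main

	import (
		"fmt"
		"log"
		"os"
	)

	func main() {
		entries, err := os.ReadDir(".")
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range entries {
			info, err := e.Info() // fetch fs.FileInfo only where it is needed
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(e.Name(), info.Size())
		}
	}
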
||||
@@ -6,6 +6,7 @@ import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
state "github.com/tendermint/tendermint/state"
|
||||
|
||||
types "github.com/tendermint/tendermint/types"
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
// nolint: gosec
|
||||
package app
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -30,7 +28,7 @@ type SnapshotStore struct {
|
||||
// NewSnapshotStore creates a new snapshot store.
|
||||
func NewSnapshotStore(dir string) (*SnapshotStore, error) {
|
||||
store := &SnapshotStore{dir: dir}
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := store.loadMetadata(); err != nil {
|
||||
@@ -45,7 +43,7 @@ func (s *SnapshotStore) loadMetadata() error {
|
||||
file := filepath.Join(s.dir, "metadata.json")
|
||||
metadata := []abci.Snapshot{}
|
||||
|
||||
bz, err := ioutil.ReadFile(file)
|
||||
bz, err := os.ReadFile(file)
|
||||
switch {
|
||||
case errors.Is(err, os.ErrNotExist):
|
||||
case err != nil:
|
||||
@@ -72,7 +70,7 @@ func (s *SnapshotStore) saveMetadata() error {
|
||||
// save the file to a new file and move it to make saving atomic.
|
||||
newFile := filepath.Join(s.dir, "metadata.json.new")
|
||||
file := filepath.Join(s.dir, "metadata.json")
|
||||
err = ioutil.WriteFile(newFile, bz, 0644) //nolint: gosec
|
||||
err = os.WriteFile(newFile, bz, 0o644) //nolint: gosec
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -93,7 +91,8 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) {
|
||||
Hash: hashItems(state.Values),
|
||||
Chunks: byteChunks(bz),
|
||||
}
|
||||
err = ioutil.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644)
|
||||
//nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less
|
||||
err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0o644)
|
||||
if err != nil {
|
||||
return abci.Snapshot{}, err
|
||||
}
|
||||
@@ -122,7 +121,7 @@ func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([
|
||||
defer s.RUnlock()
|
||||
for _, snapshot := range s.metadata {
|
||||
if snapshot.Height == height && snapshot.Format == format {
|
||||
bz, err := ioutil.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height)))
|
||||
bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// nolint: gosec
|
||||
package app
|
||||
|
||||
import (
|
||||
@@ -6,15 +5,16 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const stateFileName = "app_state.json"
|
||||
const prevStateFileName = "prev_app_state.json"
|
||||
const (
|
||||
stateFileName = "app_state.json"
|
||||
prevStateFileName = "prev_app_state.json"
|
||||
)
|
||||
|
||||
// State is the application state.
|
||||
type State struct {
|
||||
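The hunk above merges two adjacent single-line const declarations into one block; behaviour is identical and only the layout changes. A compilable sketch of the resulting form:

	package app

	const (
		stateFileName     = "app_state.json"
		prevStateFileName = "prev_app_state.json"
	)
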
@@ -52,11 +52,11 @@ func NewState(dir string, persistInterval uint64) (*State, error) {
|
||||
// load loads state from disk. It does not take out a lock, since it is called
|
||||
// during construction.
|
||||
func (s *State) load() error {
|
||||
bz, err := ioutil.ReadFile(s.currentFile)
|
||||
bz, err := os.ReadFile(s.currentFile)
|
||||
if err != nil {
|
||||
// if the current state doesn't exist then we try recover from the previous state
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
bz, err = ioutil.ReadFile(s.previousFile)
|
||||
bz, err = os.ReadFile(s.previousFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read both current and previous state (%q): %w",
|
||||
s.previousFile, err)
|
||||
@@ -82,7 +82,8 @@ func (s *State) save() error {
|
||||
// We write the state to a separate file and move it to the destination, to
|
||||
// make it atomic.
|
||||
newFile := fmt.Sprintf("%v.new", s.currentFile)
|
||||
err = ioutil.WriteFile(newFile, bz, 0644)
|
||||
//nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less
|
||||
err = os.WriteFile(newFile, bz, 0o644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write state to %q: %w", s.currentFile, err)
|
||||
}
|
||||
@@ -160,7 +161,7 @@ func (s *State) Commit() (uint64, []byte, error) {
|
||||
}
|
||||
|
||||
func (s *State) Rollback() error {
|
||||
bz, err := ioutil.ReadFile(s.previousFile)
|
||||
bz, err := os.ReadFile(s.previousFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read state from %q: %w", s.previousFile, err)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// nolint: gosec
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -58,11 +57,12 @@ func NewCLI() *CLI {
|
||||
|
||||
// generate generates manifests in a directory.
|
||||
func (cli *CLI) generate(dir string, groups int) error {
|
||||
err := os.MkdirAll(dir, 0755)
|
||||
err := os.MkdirAll(dir, 0o755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)))
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// nolint: goconst
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -56,6 +55,8 @@ func LoadConfig(file string) (*Config, error) {
|
||||
|
||||
// Validate validates the configuration. We don't do exhaustive config
|
||||
// validation here, instead relying on Testnet.Validate() to handle it.
|
||||
//
|
||||
//nolint:goconst
|
||||
func (cfg Config) Validate() error {
|
||||
switch {
|
||||
case cfg.ChainID == "":
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// nolint: gosec
|
||||
package e2e
|
||||
|
||||
import (
|
||||
@@ -26,9 +25,11 @@ const (
|
||||
networkIPv6 = "fd80:b10c::/48"
|
||||
)
|
||||
|
||||
type Mode string
|
||||
type Protocol string
|
||||
type Perturbation string
|
||||
type (
|
||||
Mode string
|
||||
Protocol string
|
||||
Perturbation string
|
||||
)
|
||||
|
||||
const (
|
||||
ModeValidator Mode = "validator"
|
||||
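Similarly, the type declarations above are folded into a single type block; the named string types behave exactly as before. A sketch of the grouped form:

	package e2e

	type (
		Mode         string
		Protocol     string
		Perturbation string
	)
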
@@ -415,6 +416,7 @@ func (t Testnet) ArchiveNodes() []*Node {
|
||||
// RandomNode returns a random non-seed node.
|
||||
func (t Testnet) RandomNode() *Node {
|
||||
for {
|
||||
//nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
node := t.Nodes[rand.Intn(len(t.Nodes))]
|
||||
if node.Mode != ModeSeed {
|
||||
return node
|
||||
@@ -491,7 +493,7 @@ type keyGenerator struct {
|
||||
|
||||
func newKeyGenerator(seed int64) *keyGenerator {
|
||||
return &keyGenerator{
|
||||
random: rand.New(rand.NewSource(seed)),
|
||||
random: rand.New(rand.NewSource(seed)), //nolint:gosec
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
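The keyGenerator hunk above illustrates the pattern used throughout these test helpers: the package-wide "// nolint: gosec" header is removed and the suppression is attached only to the line that deliberately uses math/rand (gosec G404). A standalone sketch, with an illustrative explanation comment:

	package e2e

	import "math/rand"

	type keyGenerator struct {
		random *rand.Rand
	}

	func newKeyGenerator(seed int64) *keyGenerator {
		return &keyGenerator{
			random: rand.New(rand.NewSource(seed)), //nolint:gosec // deterministic keys are fine for test networks
		}
	}
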
@@ -1,4 +1,3 @@
|
||||
// nolint: gosec
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -10,6 +9,7 @@ import (
|
||||
|
||||
// execute executes a shell command.
|
||||
func exec(args ...string) error {
|
||||
//nolint:gosec // G204: Subprocess launched with a potential tainted input or cmd arguments
|
||||
cmd := osexec.Command(args[0], args[1:]...)
|
||||
out, err := cmd.CombinedOutput()
|
||||
switch err := err.(type) {
|
||||
@@ -24,6 +24,7 @@ func exec(args ...string) error {
|
||||
|
||||
// execVerbose executes a shell command while displaying its output.
|
||||
func execVerbose(args ...string) error {
|
||||
//nolint:gosec // G204: Subprocess launched with a potential tainted input or cmd arguments
|
||||
cmd := osexec.Command(args[0], args[1:]...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//nolint: gosec
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -7,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
@@ -53,7 +51,8 @@ func Setup(testnet *e2e.Testnet) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644)
|
||||
//nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less
|
||||
err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0o644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -76,7 +75,7 @@ func Setup(testnet *e2e.Testnet) error {
|
||||
if node.Mode == e2e.ModeLight && strings.Contains(dir, "app") {
|
||||
continue
|
||||
}
|
||||
err := os.MkdirAll(dir, 0755)
|
||||
err := os.MkdirAll(dir, 0o755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -92,7 +91,8 @@ func Setup(testnet *e2e.Testnet) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644)
|
||||
//nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less
|
||||
err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0o644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -401,11 +401,12 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error {
|
||||
|
||||
// FIXME Apparently there's no function to simply load a config file without
|
||||
// involving the entire Viper apparatus, so we'll just resort to regexps.
|
||||
bz, err := ioutil.ReadFile(cfgPath)
|
||||
bz, err := os.ReadFile(cfgPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bz = regexp.MustCompile(`(?m)^trust_height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_height = %v`, height)))
|
||||
bz = regexp.MustCompile(`(?m)^trust_hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_hash = "%X"`, hash)))
|
||||
return ioutil.WriteFile(cfgPath, bz, 0644)
|
||||
//nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less
|
||||
return os.WriteFile(cfgPath, bz, 0o644)
|
||||
}
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
package v0_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0"
|
||||
)
|
||||
|
||||
@@ -25,7 +26,7 @@ func TestMempoolTestdataCases(t *testing.T) {
|
||||
}()
|
||||
f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name()))
|
||||
require.NoError(t, err)
|
||||
input, err := ioutil.ReadAll(f)
|
||||
input, err := io.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
mempoolv0.Fuzz(input)
|
||||
})
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
package v1_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
mempoolv1 "github.com/tendermint/tendermint/test/fuzz/mempool/v1"
|
||||
)
|
||||
|
||||
@@ -25,7 +26,7 @@ func TestMempoolTestdataCases(t *testing.T) {
|
||||
}()
|
||||
f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name()))
|
||||
require.NoError(t, err)
|
||||
input, err := ioutil.ReadAll(f)
|
||||
input, err := io.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
mempoolv1.Fuzz(input)
|
||||
})
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//nolint: gosec
|
||||
package addr
|
||||
|
||||
import (
|
||||
@@ -25,6 +24,7 @@ func Fuzz(data []byte) int {
|
||||
}
|
||||
|
||||
// Also, make sure PickAddress always returns a non-nil address.
|
||||
//nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
bias := rand.Intn(100)
|
||||
if p := addrBook.PickAddress(bias); p == nil {
|
||||
panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)",
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
//nolint: gosec
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
@@ -27,7 +25,7 @@ func initCorpus(baseDir string) {
|
||||
|
||||
// create "corpus" directory
|
||||
corpusDir := filepath.Join(baseDir, "corpus")
|
||||
if err := os.MkdirAll(corpusDir, 0755); err != nil {
|
||||
if err := os.MkdirAll(corpusDir, 0o755); err != nil {
|
||||
log.Fatalf("Creating %q err: %v", corpusDir, err)
|
||||
}
|
||||
|
||||
@@ -49,7 +47,8 @@ func initCorpus(baseDir string) {
|
||||
log.Fatalf("can't marshal %v: %v", addr, err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filename, bz, 0644); err != nil {
|
||||
//nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less
|
||||
if err := os.WriteFile(filename, bz, 0o644); err != nil {
|
||||
log.Fatalf("can't write %v to %q: %v", addr, filename, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
//nolint: gosec
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
@@ -22,11 +20,12 @@ func main() {
|
||||
initCorpus(*baseDir)
|
||||
}
|
||||
|
||||
//nolint:gosec
|
||||
func initCorpus(rootDir string) {
|
||||
log.SetFlags(0)
|
||||
|
||||
corpusDir := filepath.Join(rootDir, "corpus")
|
||||
if err := os.MkdirAll(corpusDir, 0755); err != nil {
|
||||
if err := os.MkdirAll(corpusDir, 0o755); err != nil {
|
||||
log.Fatalf("Creating %q err: %v", corpusDir, err)
|
||||
}
|
||||
sizes := []int{0, 1, 2, 17, 5, 31}
|
||||
@@ -73,7 +72,7 @@ func initCorpus(rootDir string) {
|
||||
|
||||
filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n))
|
||||
|
||||
if err := ioutil.WriteFile(filename, bz, 0644); err != nil {
|
||||
if err := os.WriteFile(filename, bz, 0o644); err != nil {
|
||||
log.Fatalf("can't write %X to %q: %v", bz, filename, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
//nolint: gosec
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -21,7 +19,7 @@ func initCorpus(baseDir string) {
|
||||
log.SetFlags(0)
|
||||
|
||||
corpusDir := filepath.Join(baseDir, "corpus")
|
||||
if err := os.MkdirAll(corpusDir, 0755); err != nil {
|
||||
if err := os.MkdirAll(corpusDir, 0o755); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -39,7 +37,8 @@ func initCorpus(baseDir string) {
|
||||
for i, datum := range data {
|
||||
filename := filepath.Join(corpusDir, fmt.Sprintf("%d", i))
|
||||
|
||||
if err := ioutil.WriteFile(filename, []byte(datum), 0644); err != nil {
|
||||
//nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less
|
||||
if err := os.WriteFile(filename, []byte(datum), 0o644); err != nil {
|
||||
log.Fatalf("can't write %v to %q: %v", datum, filename, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ package handler
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
@@ -29,7 +29,7 @@ func Fuzz(data []byte) int {
|
||||
rec := httptest.NewRecorder()
|
||||
mux.ServeHTTP(rec, req)
|
||||
res := rec.Result()
|
||||
blob, err := ioutil.ReadAll(res.Body)
|
||||
blob, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/informalsystems/tm-load-test/pkg/loadtest"
|
||||
|
||||
"github.com/tendermint/tendermint/test/loadtime/payload"
|
||||
)
|
||||
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/test/loadtime/report"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/tendermint/tendermint/test/loadtime/payload"
|
||||
)
|
||||
|
||||
|
||||
@@ -6,9 +6,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
"gonum.org/v1/gonum/stat"
|
||||
|
||||
"github.com/tendermint/tendermint/test/loadtime/payload"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"gonum.org/v1/gonum/stat"
|
||||
)
|
||||
|
||||
// BlockStore defines the set of methods needed by the report generator from
|
||||
|
||||
@@ -5,10 +5,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"github.com/tendermint/tendermint/test/loadtime/payload"
|
||||
"github.com/tendermint/tendermint/test/loadtime/report"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
type mockBlockStore struct {
|
||||
|
||||
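The import hunks above move dbm "github.com/tendermint/tm-db", gonum, and timestamppb out of the local-package group. A layout-only sketch of the grouping convention they converge on: standard library, then third-party modules, then tendermint-local packages, separated by blank lines so gofmt sorts each group independently (blank imports keep the sketch compilable):

	package report

	import (
		_ "time"

		_ "github.com/google/uuid"
		_ "google.golang.org/protobuf/types/known/timestamppb"

		_ "github.com/tendermint/tendermint/types"
	)
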
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
@@ -350,12 +350,12 @@ func (cs *State) enterPrecommit(height int64, round int32) {
|
||||
} else {
|
||||
defaultEnterPrecommit(cs, height, round)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (cs *State) addVote(
|
||||
vote *types.Vote,
|
||||
peerID p2p.ID) (added bool, err error) {
|
||||
peerID p2p.ID,
|
||||
) (added bool, err error) {
|
||||
cs.Logger.Debug(
|
||||
"addVote",
|
||||
"voteHeight",
|
||||
@@ -448,9 +448,7 @@ var (
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
var (
|
||||
msgQueueSize = 1000
|
||||
)
|
||||
var msgQueueSize = 1000
|
||||
|
||||
// msgs from the reactor which may update the state
|
||||
type msgInfo struct {
|
||||
@@ -732,7 +730,6 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error

// SetProposal inputs a proposal.
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {

if peerID == "" {
cs.internalMsgQueue <- msgInfo{&tmcon.ProposalMessage{Proposal: proposal}, ""}
} else {
@@ -745,7 +742,6 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {

// AddProposalBlockPart inputs a part of the proposal block.
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error {

if peerID == "" {
cs.internalMsgQueue <- msgInfo{&tmcon.BlockPartMessage{Height: height, Round: round, Part: part}, ""}
} else {
@@ -1073,7 +1069,6 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
default:
panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step))
}

}

func (cs *State) handleTxsAvailable() {
@@ -1253,7 +1248,6 @@ func (cs *State) isProposalComplete() bool {
}
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()

}

// Create the next block to propose and return it. Returns nil block upon error.
@@ -1716,12 +1710,12 @@ func (cs *State) addProposalBlockPart(msg *tmcon.BlockPartMessage, peerID p2p.ID
)
}
if added && cs.ProposalBlockParts.IsComplete() {
bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader())
bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader())
if err != nil {
return added, err
}

var pbb = new(tmproto.Block)
pbb := new(tmproto.Block)
err = proto.Unmarshal(bz, pbb)
if err != nil {
return added, err

@@ -62,7 +62,7 @@ import (
// a map of misbehaviors to be executed by the maverick node
func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) {
// check if string is empty in which case we run a normal node
var misbehaviors = make(map[int64]cs.Misbehavior)
misbehaviors := make(map[int64]cs.Misbehavior)
if str == "" {
return misbehaviors, nil
}
@@ -141,7 +141,6 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int6
logger,
misbehaviors,
)

}

// MetricsProvider returns a consensus, p2p and mempool Metrics.
@@ -300,7 +299,6 @@ func createAndStartIndexerService(
eventBus *types.EventBus,
logger log.Logger,
) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {

var (
txIndexer txindex.TxIndexer
blockIndexer indexer.BlockIndexer
@@ -337,8 +335,8 @@ func doHandshake(
genDoc *types.GenesisDoc,
eventBus types.BlockEventPublisher,
proxyApp proxy.AppConns,
consensusLogger log.Logger) error {

consensusLogger log.Logger,
) error {
handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
handshaker.SetLogger(consensusLogger)
handshaker.SetEventBus(eventBus)
@@ -382,8 +380,8 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
}

func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (p2p.Reactor, mempl.Mempool) {

state sm.State, memplMetrics *mempl.Metrics, logger log.Logger,
) (p2p.Reactor, mempl.Mempool) {
switch config.Mempool.Version {
case cfg.MempoolV1:
mp := mempoolv1.NewTxMempool(
@@ -435,8 +433,8 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
}

func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {

stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger,
) (*evidence.Reactor, *evidence.Pool, error) {
evidenceDB, err := dbProvider(&DBContext{"evidence", config})
if err != nil {
return nil, nil, err
@@ -459,8 +457,8 @@ func createBlockchainReactor(config *cfg.Config,
blockExec *sm.BlockExecutor,
blockStore *store.BlockStore,
fastSync bool,
logger log.Logger) (bcReactor p2p.Reactor, err error) {

logger log.Logger,
) (bcReactor p2p.Reactor, err error) {
switch config.FastSync.Version {
case "v0":
bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
@@ -487,8 +485,8 @@ func createConsensusReactor(config *cfg.Config,
waitSync bool,
eventBus *types.EventBus,
consensusLogger log.Logger,
misbehaviors map[int64]cs.Misbehavior) (*cs.Reactor, *cs.State) {

misbehaviors map[int64]cs.Misbehavior,
) (*cs.Reactor, *cs.State) {
consensusState := cs.NewState(
config.Consensus,
state.Copy(),
@@ -591,8 +589,8 @@ func createSwitch(config *cfg.Config,
evidenceReactor *evidence.Reactor,
nodeInfo p2p.NodeInfo,
nodeKey *p2p.NodeKey,
p2pLogger log.Logger) *p2p.Switch {

p2pLogger log.Logger,
) *p2p.Switch {
sw := p2p.NewSwitch(
config.P2P,
transport,
@@ -614,8 +612,8 @@ func createSwitch(config *cfg.Config,
}

func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {

p2pLogger log.Logger, nodeKey *p2p.NodeKey,
) (pex.AddrBook, error) {
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))

@@ -641,8 +639,8 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
}

func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
sw *p2p.Switch, logger log.Logger) *pex.Reactor {

sw *p2p.Switch, logger log.Logger,
) *pex.Reactor {
// TODO persistent peers ? so we can have their DNS addrs saved
pexReactor := pex.NewReactor(addrBook,
&pex.ReactorConfig{
@@ -664,7 +662,8 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
) error {
ssR.Logger.Info("Starting state sync")

if stateProvider == nil {
@@ -727,8 +726,8 @@ func NewNode(config *cfg.Config,
metricsProvider MetricsProvider,
logger log.Logger,
misbehaviors map[int64]cs.Misbehavior,
options ...Option) (*Node, error) {

options ...Option,
) (*Node, error) {
blockStore, stateDB, err := initDBs(config, dbProvider)
if err != nil {
return nil, err
@@ -908,6 +907,7 @@ func NewNode(config *cfg.Config,
if config.RPC.PprofListenAddress != "" {
go func() {
logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
//nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts
logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
}()
}

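The pprof hunk adds a //nolint directive rather than changing any behavior: gosec's G114 warns that a bare http.ListenAndServe has no timeouts, and with nolintlint in the linter set a suppression is expected to name the linters and carry an explanation. A sketch of both the suppression and the timeout-based alternative; the addresses below are made up and the server fields are standard net/http:

    package main

    import (
        "log"
        "net/http"
        "time"
    )

    func main() {
        // Suppression, as in the diff above: name the linters and explain why.
        //nolint:gosec // G114: debug endpoint, timeouts intentionally omitted
        go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()

        // Alternative that satisfies gosec: an explicit server with a timeout set.
        srv := &http.Server{
            Addr:              "localhost:8080",
            ReadHeaderTimeout: 5 * time.Second,
        }
        log.Fatal(srv.ListenAndServe())
    }
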
@@ -1381,9 +1381,7 @@ func makeNodeInfo(

//------------------------------------------------------------------------------

var (
genesisDocKey = []byte("genesisDoc")
)
var genesisDocKey = []byte("genesisDoc")

// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
// database, or creates one using the given genesisDocProvider and persists the

@@ -3,7 +3,7 @@ package node
import (
"errors"
"fmt"
"io/ioutil"
"os"

"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
@@ -62,11 +62,10 @@ func (pvKey FilePVKey) Save() {
if err != nil {
panic(err)
}
err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600)
err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600)
if err != nil {
panic(err)
}

}

//-------------------------------------------------------------------------------

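The 0600 to 0o600 changes are purely notational: Go 1.13 introduced the 0o prefix for octal literals, and modern formatting rewrites bare leading-zero octals to it. Both spellings have the same value, as this small standalone check shows:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // 0600 and 0o600 are the same integer; only the spelling differs.
        fmt.Println(0600 == 0o600) // true

        // Typical use: an owner-read/write file mode, written with the 0o prefix.
        if err := os.WriteFile("example.txt", []byte("data"), 0o600); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }
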
@@ -90,7 +89,6 @@ type FilePVLastSignState struct {
// we have already signed for this HRS, and can reuse the existing signature).
// It panics if the HRS matches the arguments, there's a SignBytes, but no Signature.
func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) {

if lss.Height > height {
return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height)
}
@@ -133,7 +131,7 @@ func (lss *FilePVLastSignState) Save() {
if err != nil {
panic(err)
}
err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600)
err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600)
if err != nil {
panic(err)
}
@@ -185,7 +183,7 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) *FilePV {

// If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState.
func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV {
keyJSONBytes, err := ioutil.ReadFile(keyFilePath)
keyJSONBytes, err := os.ReadFile(keyFilePath)
if err != nil {
tmos.Exit(err.Error())
}
@@ -203,7 +201,7 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV {
pvState := FilePVLastSignState{}

if loadState {
stateJSONBytes, err := ioutil.ReadFile(stateFilePath)
stateJSONBytes, err := os.ReadFile(stateFilePath)
if err != nil {
tmos.Exit(err.Error())
}
@@ -347,8 +345,8 @@ func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error

// Persist height/round/step and signature
func (pv *FilePV) saveSigned(height int64, round int32, step int8,
signBytes []byte, sig []byte) {

signBytes []byte, sig []byte,
) {
pv.LastSignState.Height = height
pv.LastSignState.Round = round
pv.LastSignState.Step = step

@@ -2,7 +2,6 @@ package internal

import (
"fmt"
"io/ioutil"
"os"
"testing"
"time"
@@ -187,7 +186,7 @@ func cleanup(cfg TestHarnessConfig) {
}

func makeTempFile(name, content string) string {
tempFile, err := ioutil.TempFile("", fmt.Sprintf("%s-*", name))
tempFile, err := os.CreateTemp("", fmt.Sprintf("%s-*", name))
if err != nil {
panic(err)
}

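The remaining ioutil call sites map onto the os package with unchanged semantics: ioutil.ReadFile becomes os.ReadFile, ioutil.WriteFile becomes os.WriteFile, and ioutil.TempFile becomes os.CreateTemp. A small self-contained sketch of the replacements:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // ioutil.TempFile("", "demo-*")  ->  os.CreateTemp("", "demo-*")
        f, err := os.CreateTemp("", "demo-*")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())

        // ioutil.WriteFile(...)  ->  os.WriteFile(...)
        if err := os.WriteFile(f.Name(), []byte("hello"), 0o600); err != nil {
            panic(err)
        }

        // ioutil.ReadFile(...)  ->  os.ReadFile(...)
        data, err := os.ReadFile(f.Name())
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", data)
    }
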
@@ -3,7 +3,6 @@ package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
@@ -136,7 +135,7 @@ func extractKey(tmhome, outputPath string) {
stateFile := filepath.Join(internal.ExpandPath(tmhome), "data", "priv_validator_state.json")
fpv := privval.LoadFilePV(keyFile, stateFile)
pkb := []byte(fpv.Key.PrivKey.(ed25519.PrivKey))
if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil {
if err := os.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0o600); err != nil {
logger.Info("Failed to write private key", "output", outputPath, "err", err)
os.Exit(1)
}

@@ -23,7 +23,7 @@ type EventBusSubscriber interface {

type Subscription interface {
Out() <-chan tmpubsub.Message
Cancelled() <-chan struct{} //nolint: misspell
Cancelled() <-chan struct{}
Err() error
}

@@ -5,7 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"time"

"github.com/tendermint/tendermint/crypto"
@@ -52,7 +52,7 @@ func (genDoc *GenesisDoc) SaveAs(file string) error {
if err != nil {
return err
}
return tmos.WriteFile(file, genDocBytes, 0644)
return tmos.WriteFile(file, genDocBytes, 0o644)
}

// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
@@ -126,7 +126,7 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {

// GenesisDocFromFile reads JSON data from a file and unmarshalls it into a GenesisDoc.
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
jsonBlob, err := ioutil.ReadFile(genDocFile)
jsonBlob, err := os.ReadFile(genDocFile)
if err != nil {
return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err)
}

@@ -1,7 +1,6 @@
package types

import (
"io/ioutil"
"os"
"testing"

@@ -122,7 +121,7 @@ func TestGenesisGood(t *testing.T) {
}

func TestGenesisSaveAs(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "genesis")
tmpfile, err := os.CreateTemp("", "genesis")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())

@@ -1,7 +1,7 @@
package types

import (
"io/ioutil"
"io"
"testing"

"github.com/stretchr/testify/assert"
@@ -57,7 +57,7 @@ func TestBasicPartSet(t *testing.T) {

// Reconstruct data, assert that they are equal.
data2Reader := partSet2.GetReader()
data2, err := ioutil.ReadAll(data2Reader)
data2, err := io.ReadAll(data2Reader)
require.NoError(t, err)

assert.Equal(t, data, data2)
@@ -145,8 +145,10 @@ func TestParSetHeaderProtoBuf(t *testing.T) {
expPass bool
}{
{"success empty", &PartSetHeader{}, true},
{"success",
&PartSetHeader{Total: 1, Hash: []byte("hash")}, true},
{
"success",
&PartSetHeader{Total: 1, Hash: []byte("hash")}, true,
},
}

for _, tc := range testCases {
@@ -162,7 +164,6 @@ func TestParSetHeaderProtoBuf(t *testing.T) {
}

func TestPartProtoBuf(t *testing.T) {

proof := merkle.Proof{
Total: 1,
Index: 1,
@@ -175,8 +176,10 @@ func TestPartProtoBuf(t *testing.T) {
}{
{"failure empty", &Part{}, false},
{"failure nil", nil, false},
{"success",
&Part{Index: 1, Bytes: tmrand.Bytes(32), Proof: proof}, true},
{
"success",
&Part{Index: 1, Bytes: tmrand.Bytes(32), Proof: proof}, true,
},
}

for _, tc := range testCases {
