Compare commits

...

21 Commits

Author SHA1 Message Date
Sam Kleinman
e47c43fdd9 Merge branch 'master' into tychoish/adr-node-init 2021-07-30 09:29:00 -04:00
dependabot[bot]
3aec71cdd4 build(deps): Bump styfle/cancel-workflow-action from 0.9.0 to 0.9.1 (#6786) 2021-07-30 09:21:27 -04:00
M. J. Fromberger
6dd8984fef Fix and clarify breaks from select cases. (#6781)
Update those break statements inside case clauses that are intended to reach an
enclosing for loop, so that they correctly exit the loop.

The candidate files for this change were located using:

    % staticcheck -checks SA4011 ./... | cut -d: -f-2

This change is intended to preserve the intended semantics of the code, but
since the code as-written did not have its intended effect, some behaviour may
change. Specifically: Some loops may have run longer than they were supposed
to, prior to this change.

In one case I was not able to clearly determine the intended outcome. That case
has been commented but otherwise left as-written.

Fixes #6780.
2021-07-29 22:28:32 -04:00
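For readers unfamiliar with the pattern, a minimal standalone illustration (not code from this diff) of why a bare `break` in a `select` case does not exit the surrounding loop, and how a labeled break does:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

loop:
	for {
		select {
		case <-ctx.Done():
			// A bare `break` here exits only the select statement, so the
			// for loop keeps running; `break loop` actually exits the loop.
			break loop
		case <-ticker.C:
			fmt.Println("tick")
		}
	}
	fmt.Println("done")
}
```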
JayT106
9a2a7d4307 state/privval: vote timestamp fix (#6748) 2021-07-29 12:52:53 +02:00
M. J. Fromberger
8f06e0c9e7 cleanup: remove redundant error plumbing (#6778)
This is a mostly-automated fixup using Comby (https://comby.dev) to remove 
lexically-obvious redundant error checks. No functional changes are intended.

To reproduce the core change:

    # Collapse redundant error check conditionals
    % comby -in-place 'if err != nil {
       return err
    }
    return nil' 'return err' .go

    # Fold out unnecessary error temporaries
    % comby -in-place ':[spc~^\s*]err :[~:?]= :[any]
       return err' ':[spc]return :[any]' .go

Fixes #6479 and related cases.
2021-07-28 15:38:46 -04:00
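A hypothetical before/after sketch of the pattern this rewrite collapses (illustrative names, not code from this diff):

```go
package example

import "io"

// saveBefore shows the redundant form flagged by the rewrite: the trailing
// nil check plus `return nil` is equivalent to returning err directly.
func saveBefore(w io.Writer, data []byte) error {
	_, err := w.Write(data)
	if err != nil {
		return err
	}
	return nil
}

// saveAfter is the collapsed form the comby rewrite produces.
func saveAfter(w io.Writer, data []byte) error {
	_, err := w.Write(data)
	return err
}
```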
Sam Kleinman
6a94b55d12 rpc: add documentation for genesis chunked api (#6776) 2021-07-28 14:20:40 -04:00
tycho garen
4beeebce1a Merge remote-tracking branch 'origin/master' into tychoish/adr-node-init 2021-07-28 12:07:13 -04:00
tycho garen
30ebf482ab cr comments 2021-07-28 12:06:32 -04:00
tycho garen
334146fdcf add additional content 2021-07-28 10:37:10 -04:00
tycho garen
a8a212df7f update chagnge log 2021-07-28 10:37:10 -04:00
tycho garen
a911c9a8d4 more drating and updating 2021-07-28 10:37:10 -04:00
tycho garen
3c63dc3ff4 write proposal background 2021-07-28 10:37:10 -04:00
tycho garen
b5b64b4831 normalize 2021-07-28 10:37:10 -04:00
tycho garen
a4b4fdaff6 be more clear about future work 2021-07-28 10:37:10 -04:00
tycho garen
1b8c90657d add clear proposal 2021-07-28 10:37:10 -04:00
tycho garen
d2a1348c2d answer questions 2021-07-28 10:37:10 -04:00
tycho garen
ae3f1f6f84 add status 2021-07-28 10:37:10 -04:00
tycho garen
46863a30a2 avoid first person 2021-07-28 10:37:10 -04:00
tycho garen
bfa8ac334b add changlog 2021-07-28 10:37:10 -04:00
Sam Kleinman
c9fe1c2249 Apply suggestions from code review
Co-authored-by: Aleksandr Bezobchuk <alexanderbez@users.noreply.github.com>
Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2021-07-28 10:37:10 -04:00
tycho garen
c589c90ef9 adr: add sketch of node initialization work 2021-07-28 10:37:10 -04:00
21 changed files with 408 additions and 69 deletions

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.0
- uses: styfle/cancel-workflow-action@0.9.1
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}

View File

@@ -24,6 +24,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)

View File

@@ -15,11 +15,7 @@ const (
func WriteMessage(msg proto.Message, w io.Writer) error {
protoWriter := protoio.NewDelimitedWriter(w)
_, err := protoWriter.WriteMsg(msg)
if err != nil {
return err
}
return nil
return err
}
// ReadMessage reads a varint length-delimited protobuf message.

View File

@@ -204,7 +204,10 @@ func (spn *ProofNode) FlattenAunts() [][]byte {
case spn.Right != nil:
innerHashes = append(innerHashes, spn.Right.Hash)
default:
break
// FIXME(fromberger): Per the documentation above, exactly one of
// these fields should be set. If that is true, this should probably
// be a panic since it violates the invariant. If not, when can it
// be OK to have no siblings? Does this occur at the leaves?
}
spn = spn.Parent
}

View File

@@ -97,3 +97,4 @@ Note the context/background should be written in the present tense.
- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
- [ADR-057: RPC](./adr-057-RPC.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)

View File

@@ -0,0 +1,253 @@
# ADR 069: Flexible Node Initialization
## Changelog
- 2021-06-09: Initial Draft (@tychoish)
- 2021-07-21: Major Revision (@tychoish)
## Status
Proposed.
## Context
In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md),
during the 0.35 development cycle, we have attempted to reduce the total size
of the API surface area by moving most of the interface of the `node` package
into unexported functions, as well as moving the reactors to an `internal`
package. Having this coincide with the 0.35 release made a lot of sense
because these interfaces were _already_ changing as a result of the `p2p`
[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit
more about how tendermint exposes this API.
While the interfaces of the P2P layer and most of the node package are
already internalized, this change precludes several operational
patterns that are important to key users who consume tendermint as a
library: introspecting the tendermint node service and replacing
components are not supported in the latest version of the code, and
some of these use cases would require maintaining a vendored copy of
the code. Adding these features requires fairly extensive
(internal/implementation) changes to the `node` and `rpc` packages,
and this ADR describes a model for changing the way that tendermint
nodes initialize, in service of providing this kind of functionality.
We focus on node initialization because the current implementation
tightly couples the components to one another, as well as to the RPC
layer; being able to reason about the interactions of these components
will help enable these features and help define the requirements of
the `node` package.
## Alternative Approaches
### Do Nothing
The current implementation is functional and sufficient for the vast
majority of use cases (e.g., all users of the Cosmos-SDK, as well as
anyone who runs tendermint and the ABCI application in separate
processes). In the current implementation, as in previous versions,
modifying node initialization or injecting custom components requires
copying most of the `node` package, which amounts to requiring users
to maintain a vendored copy of tendermint for these kinds of use
cases.
While this is (likely) not tenable in the long term, since users do
want more modularity and the current service implementation is brittle
and difficult to maintain, it may be viable in the short and medium
term to delay implementation, which would allow more time for product
research and for building confidence and consensus around an eventual
solution.
### Generic Service Pluggability
We can imagine a system design that exports interfaces (in the Golang
sense) for all components of the system, permitting runtime dependency
injection so that users can compose tendermint nodes from arbitrary
user-supplied components.
This is an interesting proposal that may be worth pursuing, but it
requires substantially more API design and increases the surface area
of our API. While this is not a goal at the moment, eventually
providing support for some kinds of pluggability may be useful, so the
current solution does not explicitly foreclose the possibility of this
alternative.
### Abstract Dependency Based Startup and Shutdown
The proposal in this document simplifies tendermint node
initialization and makes it more abstract, but it lacks a number of
features that a daemon/service initialization framework might provide,
such as a dependency-based system that allows the authors of services
to control the initialization and shutdown order of components by
declaring dependencies between them.
Dependency-based orderings make it possible to write components
(reactors, etc.) with only limited awareness of other components or of
node initialization, and they are the state of the art for process
initialization. However, this may be too abstract and complicated for
our needs: authors of components in the current implementation of
tendermint *would* still need to know about other components, so a
dependency-based system would be unhelpfully abstract at this stage.
## Decisions
- Provide a more flexible internal framework for initializing tendermint
nodes, making the initialization process less hard-coded in the
implementation of the node objects.
- Reactors should not need to expose their interfaces *within* the
implementation of the node type, except in the context of some groups of
services.
- This refactoring should be entirely opaque to users.
- These node initialization changes should not require a
reevaluation of the `service.Service` interface or a generic
initialization orchestration framework.
- If required for the 0.35 release, add an "unsafe" way to construct a
node service with user-supplied mempool, including making the
relevant interfaces and types external/exported.
- Prioritize implementation of the p2p-based statesync reactor to obviate
the need for users to inject a custom state-sync provider.
## Detailed Design
The [current
nodeImpl](https://github.com/tendermint/tendermint/blob/master/node/node.go#L47)
includes direct references to the implementations of each of the
reactors, which should be replaced by references to `service.Service`
objects. This will require moving the construction of the [rpc
service](https://github.com/tendermint/tendermint/blob/master/node/node.go#L771)
into the constructor of
[makeNode](https://github.com/tendermint/tendermint/blob/master/node/node.go#L126).
To avoid adding complexity to the `node` package, this project will
add an implementation to the `service` package that implements
`service.Service`, is composed of a sequence of underlying
`service.Service` objects, and handles their startup and shutdown in a
consistent order.
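As an illustration only, a minimal sketch of what such a sequential composite service might look like; the `orderedService` name and constructor are hypothetical and not part of the proposal:

```go
package node

import (
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
)

// orderedService is a hypothetical composite: it starts its sub-services in
// the order given and stops them in reverse. It sketches the idea described
// above and is not the proposed implementation.
type orderedService struct {
	service.BaseService
	services []service.Service
}

func newOrderedService(logger log.Logger, svcs ...service.Service) *orderedService {
	o := &orderedService{services: svcs}
	o.BaseService = *service.NewBaseService(logger, "OrderedService", o)
	return o
}

func (o *orderedService) OnStart() error {
	for i, s := range o.services {
		if err := s.Start(); err != nil {
			// Unwind anything that already started, in reverse order.
			for j := i - 1; j >= 0; j-- {
				_ = o.services[j].Stop()
			}
			return err
		}
	}
	return nil
}

func (o *orderedService) OnStop() {
	for i := len(o.services) - 1; i >= 0; i-- {
		_ = o.services[i].Stop()
	}
}
```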
Consensus, blocksync (formerly fast sync), and statesync all depend on
each other and have significant initialization dependencies that are
presently encoded in the `node` package. As part of this change, a new
package/component will encapsulate the initialization of this area of
functionality.
In order to support replacement of a component, a new public function
will be added to the public interface of `node` with a signature that
resembles the following:
```go
func NewWithServices(conf *config.Config,
logger log.Logger,
cf proxy.ClientCreator,
gen *types.GenesisDoc,
srvs ...service.Service,
) (service.Service, error) {
```
The `service.Service` objects will be initialized in the order they
are supplied, after all pre-configured/default services have started
(and they will be shut down before those default services). If any of
the supplied services implement additional interfaces, those
interfaces allow them to replace specific default services.
`NewWithServices` will validate the input service list with the
following rules:
- no running services
- all values of `service.Service.String()` must be unique.
- for replacing default services, callers cannot supply more than
one replacement reactor for any specified type.
If callers violate any of these rules, `NewWithServices` will return
an error.
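Continuing the hypothetical sketch above (and assuming the same imports plus `fmt`), the validation might look roughly like this; the one-replacement-per-default-service rule is omitted because the replacement interfaces are not specified here:

```go
// validateServices is a hypothetical sketch of the checks NewWithServices
// might perform: no supplied service may already be running, and service
// names (service.Service.String()) must be unique.
func validateServices(svcs []service.Service) error {
	seen := make(map[string]bool, len(svcs))
	for _, s := range svcs {
		if s.IsRunning() {
			return fmt.Errorf("service %q is already running", s.String())
		}
		if seen[s.String()] {
			return fmt.Errorf("duplicate service name %q", s.String())
		}
		seen[s.String()] = true
	}
	return nil
}
```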
## Consequences
### Positive
- The node package will become easier to maintain.
- It will become easier to add additional services within tendermint
nodes.
- It will become possible to replace default components in the node
package without vendoring the tendermint repo and modifying internal
code.
- The current end-to-end (e2e) test suite will be able to prevent any
regressions, and the new functionality can be thoroughly unit tested.
- The scope of this project is very narrow, which minimizes risk.
### Negative
- This increases our reliance on the `service.Service` interface which
is probably not an interface that we want to fully commit to.
- This proposal implements a fairly minimal set of functionality and
leaves open the possibility for many additional features which are
not included in the scope of this proposal.
### Neutral
N/A
## Open Questions
- To what extent does this new initialization framework need to accommodate
the legacy p2p stack? Would it be possible to delay a great deal of this
work to the 0.36 cycle to avoid this complexity?
- Answer: _depends on timing_, and the requirement to ship pluggable
reactors in 0.35.
- Where should additional public types be exported for the 0.35
release?
Related to the general project of API stabilization, there is a
desire to deprecate the `types` package, and move these public types
into a new `pkg` hierarchy; however, the design of the `pkg`
interface is currently underspecified. If `types` is going to remain
for the 0.35 release, then we should consider the impact of using
multiple organizing modalities for this code within a single release.
## Future Work
- Improve or simplify the `service.Service` interface. There are some
pretty clear limitations with this interface as written (there is no
way to time out a slow startup or shutdown, the cycle between the
`service.BaseService` and `service.Service` implementations is
awkward, and the default panic in `OnReset` is troubling).
- As part of the refactor of `service.Service` have all services/nodes
respect the lifetime of a `context.Context` object, and avoid the
current practice of creating `context.Context` objects in p2p and
reactor code. This would be required for in-process multi-tenancy.
- Support explicit dependencies between components and allow for
parallel startup, so that different reactors can startup at the same
time, where possible.
## References
- [this
branch](https://github.com/tendermint/tendermint/tree/tychoish/scratch-node-minimize)
contains experimental work in the implementation of the node package
to unwind some of the hard dependencies between components.
- [the component
graph](https://peter.bourgon.org/go-for-industrial-programming/#the-component-graph)
as a framing for internal service construction.
## Appendix
### Dependencies
There is a relationship between the blockchain and consensus reactors,
described by the following dependency graph, that makes replacing some
of these components more difficult relative to other reactors or
components.
![consensus blockchain dependency graph](./img/consensus_blockchain.png)

Binary file not shown (new image, 672 KiB).

View File

@@ -88,6 +88,7 @@ type validatorStub struct {
Round int32
types.PrivValidator
VotingPower int64
lastVote *types.Vote
}
const testMinPower int64 = 10
@@ -121,8 +122,18 @@ func (vs *validatorStub) signVote(
BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
}
v := vote.ToProto()
err = vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v)
if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil {
return nil, fmt.Errorf("sign vote failed: %w", err)
}
// ref: signVote in FilePV, the vote should use the previous vote info when the sign data is the same.
if signDataIsEqual(vs.lastVote, v) {
v.Signature = vs.lastVote.Signature
v.Timestamp = vs.lastVote.Timestamp
}
vote.Signature = v.Signature
vote.Timestamp = v.Timestamp
return vote, err
}
@@ -139,6 +150,9 @@ func signVote(
if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err))
}
vs.lastVote = v
return v
}
@@ -876,3 +890,16 @@ func newKVStore() abci.Application {
func newPersistentKVStoreWithPath(dbDir string) abci.Application {
return kvstore.NewPersistentKVStoreApplication(dbDir)
}
func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool {
if v1 == nil || v2 == nil {
return false
}
return v1.Type == v2.Type &&
bytes.Equal(v1.BlockID.Hash, v2.BlockID.GetHash()) &&
v1.Height == v2.GetHeight() &&
v1.Round == v2.Round &&
bytes.Equal(v1.ValidatorAddress.Bytes(), v2.GetValidatorAddress()) &&
v1.ValidatorIndex == v2.GetValidatorIndex()
}

View File

@@ -2218,6 +2218,7 @@ func (cs *State) signVote(
err := cs.privValidator.SignVote(ctx, cs.state.ChainID, v)
vote.Signature = v.Signature
vote.Timestamp = v.Timestamp
return vote, err
}

View File

@@ -1939,6 +1939,30 @@ func TestStateOutputVoteStats(t *testing.T) {
}
func TestSignSameVoteTwice(t *testing.T) {
config := configSetup(t)
_, vss := randState(config, 2)
randBytes := tmrand.Bytes(tmhash.Size)
vote := signVote(vss[1],
config,
tmproto.PrecommitType,
randBytes,
types.PartSetHeader{Total: 10, Hash: randBytes},
)
vote2 := signVote(vss[1],
config,
tmproto.PrecommitType,
randBytes,
types.PartSetHeader{Total: 10, Hash: randBytes},
)
require.Equal(t, vote, vote2)
}
// subscribe subscribes test client to the given query and returns a channel with cap = 1.
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q)

View File

@@ -385,7 +385,7 @@ func (m *PeerManager) prunePeers() error {
peerID := ranked[i].ID
switch {
case m.store.Size() <= int(m.options.MaxPeers):
break
return nil
case m.dialing[peerID]:
case m.connected[peerID]:
default:

View File

@@ -142,12 +142,7 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error
// block until connected or timeout
sl.Logger.Info("SignerListener: Blocking for connection")
sl.triggerConnect()
err := sl.WaitConnection(sl.connectionAvailableCh, maxWait)
if err != nil {
return err
}
return nil
return sl.WaitConnection(sl.connectionAvailableCh, maxWait)
}
func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) {

View File

@@ -806,6 +806,7 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
/genesis:
get:
summary: Get Genesis
@@ -813,7 +814,7 @@ paths:
tags:
- Info
description: |
Get genesis.
Get the genesis document.
responses:
"200":
description: Genesis results.
@@ -827,6 +828,39 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
/genesis_chunked:
get:
summary: Get Genesis in paginated chunks
operationId: genesis_chunked
tags:
- Info
description: |
Get the genesis document in a paginated/chunked format to make it
easier to iterate through larger genesis structures.
parameters:
- in: query
name: chunkID
description: Sequence number of the chunk to download.
schema:
type: integer
default: 0
example: 1
responses:
"200":
description: Genesis results.
content:
application/json:
schema:
$ref: "#/components/schemas/GenesisChunkedResponse"
"500":
description: Error
content:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
/dump_consensus_state:
get:
summary: Get consensus state
@@ -1894,6 +1928,35 @@ components:
properties: {}
type: object
GenesisChunkedResponse:
type: object
required:
- "jsonrpc"
- "id"
- "result"
properties:
jsonrpc:
type: string
example: "2.0"
id:
type: integer
example: 0
result:
required:
- "chunk"
- "total"
- "data"
properties:
chunk:
type: integer
example: 0
total:
type: integer
example: 1
data:
type: string
example: "Z2VuZXNpcwo="
DumpConsensusResponse:
type: object
required:

View File

@@ -186,6 +186,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64,
// fetch matching heights
results = make([]int64, 0, len(filteredHeights))
heights:
for _, hBz := range filteredHeights {
h := int64FromBytes(hBz)
@@ -199,7 +200,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64,
select {
case <-ctx.Done():
break
break heights
default:
}
@@ -240,7 +241,7 @@ func (idx *BlockerIndexer) matchRange(
}
defer it.Close()
LOOP:
iter:
for ; it.Valid(); it.Next() {
var (
eventValue string
@@ -260,7 +261,7 @@ LOOP:
if _, ok := qr.AnyBound().(int64); ok {
v, err := strconv.ParseInt(eventValue, 10, 64)
if err != nil {
continue LOOP
continue iter
}
include := true
@@ -279,7 +280,7 @@ LOOP:
select {
case <-ctx.Done():
break
break iter
default:
}
@@ -372,12 +373,13 @@ func (idx *BlockerIndexer) match(
}
defer it.Close()
iterExists:
for ; it.Valid(); it.Next() {
tmpHeights[string(it.Value())] = it.Value()
select {
case <-ctx.Done():
break
break iterExists
default:
}
@@ -399,6 +401,7 @@ func (idx *BlockerIndexer) match(
}
defer it.Close()
iterContains:
for ; it.Valid(); it.Next() {
eventValue, err := parseValueFromEventKey(it.Key())
if err != nil {
@@ -411,7 +414,7 @@ func (idx *BlockerIndexer) match(
select {
case <-ctx.Done():
break
break iterContains
default:
}

View File

@@ -255,12 +255,7 @@ func verifyTimeStamp(tb string) error {
if rows.Next() {
var ts string
err = rows.Scan(&ts)
if err != nil {
return err
}
return nil
return rows.Scan(&ts)
}
return errors.New("no result")

View File

@@ -219,6 +219,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul
}
results := make([]*abci.TxResult, 0, len(filteredHashes))
hashes:
for _, h := range filteredHashes {
res, err := txi.Get(h)
if err != nil {
@@ -229,7 +230,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul
// Potentially exit early.
select {
case <-ctx.Done():
break
break hashes
default:
}
}
@@ -285,13 +286,14 @@ func (txi *TxIndex) match(
}
defer it.Close()
iterEqual:
for ; it.Valid(); it.Next() {
tmpHashes[string(it.Value())] = it.Value()
// Potentially exit early.
select {
case <-ctx.Done():
break
break iterEqual
default:
}
}
@@ -308,13 +310,14 @@ func (txi *TxIndex) match(
}
defer it.Close()
iterExists:
for ; it.Valid(); it.Next() {
tmpHashes[string(it.Value())] = it.Value()
// Potentially exit early.
select {
case <-ctx.Done():
break
break iterExists
default:
}
}
@@ -332,6 +335,7 @@ func (txi *TxIndex) match(
}
defer it.Close()
iterContains:
for ; it.Valid(); it.Next() {
value, err := parseValueFromKey(it.Key())
if err != nil {
@@ -344,7 +348,7 @@ func (txi *TxIndex) match(
// Potentially exit early.
select {
case <-ctx.Done():
break
break iterContains
default:
}
}
@@ -412,7 +416,7 @@ func (txi *TxIndex) matchRange(
}
defer it.Close()
LOOP:
iter:
for ; it.Valid(); it.Next() {
value, err := parseValueFromKey(it.Key())
if err != nil {
@@ -421,7 +425,7 @@ LOOP:
if _, ok := qr.AnyBound().(int64); ok {
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
continue LOOP
continue iter
}
include := true
@@ -448,7 +452,7 @@ LOOP:
// Potentially exit early.
select {
case <-ctx.Done():
break
break iter
default:
}
}

View File

@@ -661,10 +661,5 @@ func (store dbStore) saveConsensusParamsInfo(
return err
}
err = batch.Set(consensusParamsKey(nextHeight), bz)
if err != nil {
return err
}
return nil
return batch.Set(consensusParamsKey(nextHeight), bz)
}

View File

@@ -15,11 +15,7 @@ func Cleanup(testnet *e2e.Testnet) error {
if err != nil {
return err
}
err = cleanupDir(testnet.Dir)
if err != nil {
return err
}
return nil
return cleanupDir(testnet.Dir)
}
// cleanupDocker removes all E2E resources (with label e2e=True), regardless
@@ -37,13 +33,8 @@ func cleanupDocker() error {
return err
}
err = exec("bash", "-c", fmt.Sprintf(
return exec("bash", "-c", fmt.Sprintf(
"docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR))
if err != nil {
return err
}
return nil
}
// cleanupDir cleans up a testnet directory
@@ -74,10 +65,5 @@ func cleanupDir(dir string) error {
return err
}
err = os.RemoveAll(dir)
if err != nil {
return err
}
return nil
return os.RemoveAll(dir)
}

View File

@@ -21,10 +21,7 @@ func Wait(testnet *e2e.Testnet, blocks int64) error {
func WaitUntil(testnet *e2e.Testnet, height int64) error {
logger.Info(fmt.Sprintf("Waiting for all nodes to reach height %v...", height))
_, err := waitForAllNodes(testnet, height, waitingTime(len(testnet.Nodes)))
if err != nil {
return err
}
return nil
return err
}
// waitingTime estimates how long it should take for a node to reach the height.

View File

@@ -59,6 +59,5 @@ func Fuzz(data []byte) int {
func outputJSONIsSlice(input []byte) bool {
slice := []interface{}{}
err := json.Unmarshal(input, &slice)
return err == nil
return json.Unmarshal(input, &slice) == nil
}

View File

@@ -33,11 +33,7 @@ func (nodeKey NodeKey) SaveAs(filePath string) error {
if err != nil {
return err
}
err = ioutil.WriteFile(filePath, jsonBytes, 0600)
if err != nil {
return err
}
return nil
return ioutil.WriteFile(filePath, jsonBytes, 0600)
}
// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If