mirror of
https://github.com/tendermint/tendermint.git
synced 2026-02-12 23:01:30 +00:00
Compare commits
4 Commits
wb/abci-pr
...
alessio/go
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fff55ae7e6 | ||
|
|
d4006bd1b6 | ||
|
|
6ea2cd78b9 | ||
|
|
2f1e143e17 |
@@ -8,7 +8,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://godoc.org/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/AzefAFd)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
@@ -49,7 +49,7 @@ For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.
|
||||
|
||||
| Requirement | Notes |
|
||||
| ----------- | ---------------- |
|
||||
| Go version | Go1.13 or higher |
|
||||
| Go version | Go1.14 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ func TestByzantine(t *testing.T) {
|
||||
N := 4
|
||||
logger := consensusLogger().With("test", "byzantine")
|
||||
css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
// give the byzantine validator a normal ticker
|
||||
ticker := NewTimeoutTicker()
|
||||
@@ -86,7 +86,7 @@ func TestByzantine(t *testing.T) {
|
||||
sm.SaveState(css[i].blockExec.DB(), css[i].state) //for save height 1's validators info
|
||||
}
|
||||
|
||||
defer func() {
|
||||
t.Cleanup(func() {
|
||||
for _, r := range reactors {
|
||||
if rr, ok := r.(*ByzantineReactor); ok {
|
||||
rr.reactor.Switch.Stop()
|
||||
@@ -94,7 +94,7 @@ func TestByzantine(t *testing.T) {
|
||||
r.(*Reactor).Switch.Stop()
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
|
||||
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
// ignore new switch s, we already made ours
|
||||
|
||||
@@ -25,7 +25,7 @@ func assertMempool(txn txNotifier) mempl.Mempool {
|
||||
|
||||
func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
t.Cleanup(func() { os.RemoveAll(config.RootDir) })
|
||||
config.Consensus.CreateEmptyBlocks = false
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
@@ -44,7 +44,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
|
||||
|
||||
func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
t.Cleanup(func() { os.RemoveAll(config.RootDir) })
|
||||
config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
@@ -60,7 +60,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
|
||||
|
||||
func TestMempoolProgressInHigherRound(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
t.Cleanup(func() { os.RemoveAll(config.RootDir) })
|
||||
config.Consensus.CreateEmptyBlocks = false
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
|
||||
@@ -97,9 +97,9 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type
|
||||
func TestReactorBasic(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
@@ -127,7 +127,7 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
t.Cleanup(func() { os.RemoveAll(thisConfig.RootDir) })
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
@@ -177,7 +177,7 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
}
|
||||
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
|
||||
// wait till everyone makes the first new block with no evidence
|
||||
timeoutWaitGroup(t, nValidators, func(j int) {
|
||||
@@ -235,9 +235,9 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
|
||||
func(c *cfg.Config) {
|
||||
c.Consensus.CreateEmptyBlocks = false
|
||||
})
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
|
||||
// send a tx
|
||||
if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
|
||||
@@ -253,9 +253,9 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
|
||||
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
reactors, _, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
@@ -275,9 +275,9 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
|
||||
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
reactors, _, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
@@ -297,9 +297,9 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
|
||||
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N, func(j int) {
|
||||
@@ -326,9 +326,9 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
"consensus_voting_power_changes_test",
|
||||
newMockTickerFunc(true),
|
||||
newPersistentKVStore)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
|
||||
defer stopConsensusNet(logger, reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(logger, reactors, eventBuses) })
|
||||
|
||||
// map of active validators
|
||||
activeVals := make(map[string]struct{})
|
||||
@@ -406,11 +406,11 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
newMockTickerFunc(true),
|
||||
newPersistentKVStoreWithPath)
|
||||
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
|
||||
defer stopConsensusNet(logger, reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(logger, reactors, eventBuses) })
|
||||
|
||||
// map of active validators
|
||||
activeVals := make(map[string]struct{})
|
||||
@@ -512,14 +512,14 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
func TestReactorWithTimeoutCommit(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
// override default SkipTimeoutCommit == true for tests
|
||||
for i := 0; i < N; i++ {
|
||||
css[i].config.SkipTimeoutCommit = false
|
||||
}
|
||||
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N-1, func(j int) {
|
||||
|
||||
@@ -83,7 +83,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
|
||||
|
||||
err := cs.Start()
|
||||
require.NoError(t, err)
|
||||
defer cs.Stop()
|
||||
t.Cleanup(func() { cs.Stop() })
|
||||
|
||||
// This is just a signal that we haven't halted; its not something contained
|
||||
// in the WAL itself. Assuming the consensus state is running, replay of any
|
||||
@@ -845,7 +845,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
|
||||
// - 0x02
|
||||
// - 0x03
|
||||
config := ResetConfig("handshake_test_")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
t.Cleanup(func() { os.RemoveAll(config.RootDir) })
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
const appVersion = 0x0
|
||||
pubKey, err := privVal.GetPubKey()
|
||||
@@ -1146,7 +1146,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
|
||||
clientCreator := proxy.NewLocalClientCreator(app)
|
||||
|
||||
config := ResetConfig("handshake_test_")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
t.Cleanup(func() { os.RemoveAll(config.RootDir) })
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
pubKey, err := privVal.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
@@ -1161,7 +1161,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
t.Fatalf("Error starting proxy app connections: %v", err)
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
t.Cleanup(func() { proxyApp.Stop() })
|
||||
if err := handshaker.Handshake(proxyApp); err != nil {
|
||||
t.Fatalf("Error on abci handshake: %v", err)
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ const (
|
||||
func TestWALTruncate(t *testing.T) {
|
||||
walDir, err := ioutil.TempDir("", "wal")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(walDir)
|
||||
t.Cleanup(func() { os.RemoveAll(walDir) })
|
||||
|
||||
walFile := filepath.Join(walDir, "wal")
|
||||
|
||||
@@ -45,12 +45,12 @@ func TestWALTruncate(t *testing.T) {
|
||||
wal.SetLogger(log.TestingLogger())
|
||||
err = wal.Start()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
t.Cleanup(func() {
|
||||
wal.Stop()
|
||||
// wait for the wal to finish shutting down so we
|
||||
// can safely remove the directory
|
||||
wal.Wait()
|
||||
}()
|
||||
})
|
||||
|
||||
// 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10),
|
||||
// when headBuf is full, truncate content will Flush to the file. at this
|
||||
@@ -67,7 +67,7 @@ func TestWALTruncate(t *testing.T) {
|
||||
assert.NoError(t, err, "expected not to err on height %d", h)
|
||||
assert.True(t, found, "expected to find end height for %d", h)
|
||||
assert.NotNil(t, gr)
|
||||
defer gr.Close()
|
||||
t.Cleanup(func() { gr.Close() })
|
||||
|
||||
dec := NewWALDecoder(gr)
|
||||
msg, err := dec.Decode()
|
||||
@@ -107,19 +107,19 @@ func TestWALEncoderDecoder(t *testing.T) {
|
||||
func TestWALWrite(t *testing.T) {
|
||||
walDir, err := ioutil.TempDir("", "wal")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(walDir)
|
||||
t.Cleanup(func() { os.RemoveAll(walDir) })
|
||||
walFile := filepath.Join(walDir, "wal")
|
||||
|
||||
wal, err := NewWAL(walFile)
|
||||
require.NoError(t, err)
|
||||
err = wal.Start()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
t.Cleanup(func() {
|
||||
wal.Stop()
|
||||
// wait for the wal to finish shutting down so we
|
||||
// can safely remove the directory
|
||||
wal.Wait()
|
||||
}()
|
||||
})
|
||||
|
||||
// 1) Write returns an error if msg is too big
|
||||
msg := &BlockPartMessage{
|
||||
@@ -157,7 +157,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
|
||||
assert.NoError(t, err, "expected not to err on height %d", h)
|
||||
assert.True(t, found, "expected to find end height for %d", h)
|
||||
assert.NotNil(t, gr)
|
||||
defer gr.Close()
|
||||
t.Cleanup(func() { gr.Close() })
|
||||
|
||||
dec := NewWALDecoder(gr)
|
||||
msg, err := dec.Decode()
|
||||
@@ -170,7 +170,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
|
||||
func TestWALPeriodicSync(t *testing.T) {
|
||||
walDir, err := ioutil.TempDir("", "wal")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(walDir)
|
||||
t.Cleanup(func() { os.RemoveAll(walDir) })
|
||||
|
||||
walFile := filepath.Join(walDir, "wal")
|
||||
wal, err := NewWAL(walFile, autofile.GroupCheckDuration(1*time.Millisecond))
|
||||
@@ -187,10 +187,10 @@ func TestWALPeriodicSync(t *testing.T) {
|
||||
assert.NotZero(t, wal.Group().Buffered())
|
||||
|
||||
require.NoError(t, wal.Start())
|
||||
defer func() {
|
||||
t.Cleanup(func() {
|
||||
wal.Stop()
|
||||
wal.Wait()
|
||||
}()
|
||||
})
|
||||
|
||||
time.Sleep(walTestFlushInterval + (10 * time.Millisecond))
|
||||
|
||||
|
||||
2
go.mod
2
go.mod
@@ -1,6 +1,6 @@
|
||||
module github.com/tendermint/tendermint
|
||||
|
||||
go 1.13
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/ChainSafe/go-schnorrkel v0.0.0-20200115165343-aa45d48b5ed6
|
||||
|
||||
@@ -17,12 +17,12 @@ import (
|
||||
func TestSIGHUP(t *testing.T) {
|
||||
origDir, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
defer os.Chdir(origDir)
|
||||
t.Cleanup(func() { os.Chdir(origDir) })
|
||||
|
||||
// First, create a temporary directory and move into it
|
||||
dir, err := ioutil.TempDir("", "sighup_test")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
t.Cleanup(func() { os.RemoveAll(dir) })
|
||||
err = os.Chdir(dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestSIGHUP(t *testing.T) {
|
||||
// Move into a different temporary directory
|
||||
otherDir, err := ioutil.TempDir("", "sighup_test_other")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(otherDir)
|
||||
t.Cleanup(func() { os.RemoveAll(otherDir) })
|
||||
err = os.Chdir(otherDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -111,11 +111,11 @@ func TestRotateFile(t *testing.T) {
|
||||
// relative paths are resolved at Group creation
|
||||
origDir, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
defer os.Chdir(origDir)
|
||||
t.Cleanup(func() { os.Chdir(origDir) })
|
||||
|
||||
dir, err := ioutil.TempDir("", "rotate_test")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
t.Cleanup(func() { os.RemoveAll(dir) })
|
||||
err = os.Chdir(dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ func BenchmarkReap(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
b.Cleanup(cleanup)
|
||||
|
||||
size := 10000
|
||||
for i := 0; i < size; i++ {
|
||||
@@ -30,7 +30,7 @@ func BenchmarkCheckTx(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
b.Cleanup(cleanup)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
tx := make([]byte, 8)
|
||||
|
||||
@@ -39,7 +39,7 @@ func TestCacheAfterUpdate(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
// reAddIndices & txsInCache can have elements > numTxsToCreate
|
||||
// also assumes max index is 255 for convenience
|
||||
|
||||
@@ -94,7 +94,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
// Ensure gas calculation behaves as expected
|
||||
checkTxs(t, mempool, 1, UnknownPeerID)
|
||||
@@ -143,7 +143,7 @@ func TestMempoolFilters(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
emptyTxArr := []types.Tx{[]byte{}}
|
||||
|
||||
nopPreFilter := func(tx types.Tx) error { return nil }
|
||||
@@ -182,7 +182,7 @@ func TestMempoolUpdate(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
// 1. Adds valid txs to the cache
|
||||
{
|
||||
@@ -217,7 +217,7 @@ func TestTxsAvailable(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
mempool.EnableTxsAvailable()
|
||||
|
||||
timeoutMS := 500
|
||||
@@ -263,7 +263,7 @@ func TestSerialReap(t *testing.T) {
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
appConnCon, _ := cc.NewABCIClient()
|
||||
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
|
||||
@@ -384,7 +384,7 @@ func TestMempoolCloseWAL(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
mempool.height = 10
|
||||
mempool.InitWAL()
|
||||
|
||||
@@ -425,7 +425,7 @@ func TestMempoolMaxMsgSize(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempl, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
maxTxSize := mempl.config.MaxTxBytes
|
||||
maxMsgSize := calcMaxMsgSize(maxTxSize)
|
||||
@@ -478,7 +478,7 @@ func TestMempoolTxsBytes(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("mempool_test")
|
||||
config.Mempool.MaxTxsBytes = 10
|
||||
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
// 1. zero by default
|
||||
assert.EqualValues(t, 0, mempool.TxsBytes())
|
||||
@@ -512,7 +512,7 @@ func TestMempoolTxsBytes(t *testing.T) {
|
||||
app2 := counter.NewApplication(true)
|
||||
cc = proxy.NewLocalClientCreator(app2)
|
||||
mempool, cleanup = newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
txBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(0))
|
||||
@@ -525,7 +525,7 @@ func TestMempoolTxsBytes(t *testing.T) {
|
||||
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
|
||||
err = appConnCon.Start()
|
||||
require.Nil(t, err)
|
||||
defer appConnCon.Stop()
|
||||
t.Cleanup(func() { appConnCon.Stop() })
|
||||
res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 0, res.Code)
|
||||
@@ -546,10 +546,10 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
|
||||
sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
|
||||
app := kvstore.NewApplication()
|
||||
cc, server := newRemoteApp(t, sockPath, app)
|
||||
defer server.Stop()
|
||||
t.Cleanup(func() { server.Stop() })
|
||||
config := cfg.ResetTestRoot("mempool_test")
|
||||
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
|
||||
defer cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
// generate small number of txs
|
||||
nTxs := 10
|
||||
|
||||
@@ -36,12 +36,12 @@ const oldPrivvalContent = `{
|
||||
func TestLoadAndUpgrade(t *testing.T) {
|
||||
|
||||
oldFilePath := initTmpOldFile(t)
|
||||
defer os.Remove(oldFilePath)
|
||||
t.Cleanup(func(){os.Remove(oldFilePath)})
|
||||
newStateFile, err := ioutil.TempFile("", "priv_validator_state*.json")
|
||||
defer os.Remove(newStateFile.Name())
|
||||
t.Cleanup(func(){os.Remove(newStateFile.Name())})
|
||||
require.NoError(t, err)
|
||||
newKeyFile, err := ioutil.TempFile("", "priv_validator_key*.json")
|
||||
defer os.Remove(newKeyFile.Name())
|
||||
t.Cleanup(func(){os.Remove(newKeyFile.Name())})
|
||||
require.NoError(t, err)
|
||||
|
||||
oldPV, err := privval.LoadOldFilePV(oldFilePath)
|
||||
|
||||
@@ -61,8 +61,8 @@ func TestSignerClose(t *testing.T) {
|
||||
|
||||
func TestSignerPing(t *testing.T) {
|
||||
for _, tc := range getSignerTestCases(t) {
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
err := tc.signerClient.Ping()
|
||||
assert.NoError(t, err)
|
||||
@@ -71,8 +71,8 @@ func TestSignerPing(t *testing.T) {
|
||||
|
||||
func TestSignerGetPubKey(t *testing.T) {
|
||||
for _, tc := range getSignerTestCases(t) {
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
pubKey, err := tc.signerClient.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
@@ -97,8 +97,8 @@ func TestSignerProposal(t *testing.T) {
|
||||
want := &types.Proposal{Timestamp: ts}
|
||||
have := &types.Proposal{Timestamp: ts}
|
||||
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want))
|
||||
require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have))
|
||||
@@ -113,8 +113,8 @@ func TestSignerVote(t *testing.T) {
|
||||
want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
have := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
require.NoError(t, tc.mockPV.SignVote(tc.chainID, want))
|
||||
require.NoError(t, tc.signerClient.SignVote(tc.chainID, have))
|
||||
@@ -129,8 +129,8 @@ func TestSignerVoteResetDeadline(t *testing.T) {
|
||||
want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
have := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
time.Sleep(testTimeoutReadWrite2o3)
|
||||
|
||||
@@ -155,8 +155,8 @@ func TestSignerVoteKeepAlive(t *testing.T) {
|
||||
want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
have := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
// Check that even if the client does not request a
|
||||
// signature for a long time. The service is still available
|
||||
@@ -180,8 +180,8 @@ func TestSignerSignProposalErrors(t *testing.T) {
|
||||
tc.signerServer.privVal = types.NewErroringMockPV()
|
||||
tc.mockPV = types.NewErroringMockPV()
|
||||
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
ts := time.Now()
|
||||
proposal := &types.Proposal{Timestamp: ts}
|
||||
@@ -205,8 +205,8 @@ func TestSignerSignVoteErrors(t *testing.T) {
|
||||
tc.signerServer.privVal = types.NewErroringMockPV()
|
||||
tc.mockPV = types.NewErroringMockPV()
|
||||
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
err := tc.signerClient.SignVote(tc.chainID, vote)
|
||||
require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())
|
||||
@@ -250,8 +250,8 @@ func TestSignerUnexpectedResponse(t *testing.T) {
|
||||
|
||||
tc.signerServer.SetRequestHandler(brokenHandler)
|
||||
|
||||
defer tc.signerServer.Stop()
|
||||
defer tc.signerClient.Close()
|
||||
t.Cleanup(func() { tc.signerServer.Stop() })
|
||||
t.Cleanup(func() { tc.signerClient.Close() })
|
||||
|
||||
ts := time.Now()
|
||||
want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
|
||||
@@ -73,7 +73,7 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) {
|
||||
|
||||
err = signerServer.Start()
|
||||
require.NoError(t, err)
|
||||
defer signerServer.Stop()
|
||||
t.Cleanup(func() { signerServer.Stop() })
|
||||
|
||||
select {
|
||||
case attempts := <-attemptCh:
|
||||
@@ -104,7 +104,7 @@ func TestRetryConnToRemoteSigner(t *testing.T) {
|
||||
signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV)
|
||||
|
||||
startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh)
|
||||
defer listenerEndpoint.Stop()
|
||||
t.Cleanup(func() { listenerEndpoint.Stop() })
|
||||
|
||||
require.NoError(t, signerServer.Start())
|
||||
assert.True(t, signerServer.IsRunning())
|
||||
@@ -120,7 +120,7 @@ func TestRetryConnToRemoteSigner(t *testing.T) {
|
||||
// let some pings pass
|
||||
require.NoError(t, signerServer2.Start())
|
||||
assert.True(t, signerServer2.IsRunning())
|
||||
defer signerServer2.Stop()
|
||||
t.Cleanup(func() { signerServer2.Stop() })
|
||||
|
||||
// give the client some time to re-establish the conn to the remote signer
|
||||
// should see sth like this in the logs:
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestEcho(t *testing.T) {
|
||||
if err := s.Start(); err != nil {
|
||||
t.Fatalf("Error starting socket server: %v", err.Error())
|
||||
}
|
||||
defer s.Stop()
|
||||
t.Cleanup(func() { s.Stop() })
|
||||
|
||||
// Start client
|
||||
cli, err := clientCreator.NewABCIClient()
|
||||
@@ -89,7 +89,7 @@ func BenchmarkEcho(b *testing.B) {
|
||||
if err := s.Start(); err != nil {
|
||||
b.Fatalf("Error starting socket server: %v", err.Error())
|
||||
}
|
||||
defer s.Stop()
|
||||
b.Cleanup(func() { s.Stop() })
|
||||
|
||||
// Start client
|
||||
cli, err := clientCreator.NewABCIClient()
|
||||
@@ -128,7 +128,7 @@ func TestInfo(t *testing.T) {
|
||||
if err := s.Start(); err != nil {
|
||||
t.Fatalf("Error starting socket server: %v", err.Error())
|
||||
}
|
||||
defer s.Stop()
|
||||
t.Cleanup(func() { s.Stop() })
|
||||
|
||||
// Start client
|
||||
cli, err := clientCreator.NewABCIClient()
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
set -euo pipefail
|
||||
|
||||
GITIAN_CACHE_DIRNAME='.gitian-builder-cache'
|
||||
GO_RELEASE='1.13.3'
|
||||
GO_RELEASE='1.14'
|
||||
GO_TARBALL="go${GO_RELEASE}.linux-amd64.tar.gz"
|
||||
GO_TARBALL_URL="https://dl.google.com/go/${GO_TARBALL}"
|
||||
|
||||
|
||||
@@ -23,11 +23,11 @@ remotes:
|
||||
- "url": "https://github.com/tendermint/tendermint.git"
|
||||
"dir": "tendermint"
|
||||
files:
|
||||
- "go1.13.3.linux-amd64.tar.gz"
|
||||
- "go1.14.linux-amd64.tar.gz"
|
||||
script: |
|
||||
set -e -o pipefail
|
||||
|
||||
GO_SRC_RELEASE=go1.13.3.linux-amd64
|
||||
GO_SRC_RELEASE=go1.14.linux-amd64
|
||||
GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz"
|
||||
# Compile go and configure the environment
|
||||
export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME""
|
||||
|
||||
@@ -23,11 +23,11 @@ remotes:
|
||||
- "url": "https://github.com/tendermint/tendermint.git"
|
||||
"dir": "tendermint"
|
||||
files:
|
||||
- "go1.13.3.linux-amd64.tar.gz"
|
||||
- "go1.14.linux-amd64.tar.gz"
|
||||
script: |
|
||||
set -e -o pipefail
|
||||
|
||||
GO_SRC_RELEASE=go1.13.3.linux-amd64
|
||||
GO_SRC_RELEASE=go1.14.linux-amd64
|
||||
GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz"
|
||||
# Compile go and configure the environment
|
||||
export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME""
|
||||
|
||||
@@ -23,11 +23,11 @@ remotes:
|
||||
- "url": "https://github.com/tendermint/tendermint.git"
|
||||
"dir": "tendermint"
|
||||
files:
|
||||
- "go1.13.3.linux-amd64.tar.gz"
|
||||
- "go1.14.linux-amd64.tar.gz"
|
||||
script: |
|
||||
set -e -o pipefail
|
||||
|
||||
GO_SRC_RELEASE=go1.13.3.linux-amd64
|
||||
GO_SRC_RELEASE=go1.14.linux-amd64
|
||||
GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz"
|
||||
# Compile go and configure the environment
|
||||
export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME""
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.13
|
||||
FROM golang:1.14
|
||||
|
||||
# Add testing deps for curl
|
||||
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list
|
||||
|
||||
289
tools/build/Makefile
Normal file
289
tools/build/Makefile
Normal file
@@ -0,0 +1,289 @@
|
||||
##
|
||||
# Extra checks, because we do not use autoconf.
|
||||
##
|
||||
|
||||
requirements_check = true
|
||||
gpg_check = false
|
||||
go_min_version = 1.14
|
||||
gpg_key = 2122CBE9
|
||||
|
||||
ifeq ($(requirements_check),true)
|
||||
ifndef GOPATH
|
||||
$(error GOPATH not set)
|
||||
else
|
||||
go_version := $(shell go version | sed "s/^.* go\([0-9\.]*\) .*$$/\1/" )
|
||||
$(info Found go version $(go_version))
|
||||
go_version_check := $(shell echo -e "$(go_min_version)\n$(go_version)" | sort -V | head -1)
|
||||
ifneq ($(go_min_version),$(go_version_check))
|
||||
$(error go version go_min_version or above is required)
|
||||
endif
|
||||
endif
|
||||
ifeq ($(gpg_check),true)
|
||||
gpg_check := $(shell gpg -K | grep '/$(gpg_key) ' | sed 's,^.*/\($(gpg_key)\) .*$$,\1,')
|
||||
ifneq ($(gpg_check),$(gpg_key))
|
||||
$(error GPG key $(gpg_key) not found.)
|
||||
else
|
||||
$(info GPG key $(gpg_key) found)
|
||||
endif
|
||||
ifndef GPG_PASSPHRASE
|
||||
$(error GPG_PASSPHRASE not set)
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
###
|
||||
# Here comes the real deal
|
||||
###
|
||||
|
||||
binaries = tendermint basecoind ethermint gaia
|
||||
build-binaries = build-tendermint build-basecoind build-ethermint build-gaia
|
||||
package-rpm = package-rpm-tendermint package-rpm-basecoind package-rpm-ethermint package-rpm-gaia
|
||||
install-rpm = install-rpm-tendermint install-rpm-basecoind install-rpm-ethermint install-rpm-gaia
|
||||
package-deb = package-deb-tendermint package-deb-basecoind package-deb-ethermint package-deb-gaia
|
||||
install-deb = install-deb-tendermint install-deb-basecoind install-deb-ethermint install-deb-gaia
|
||||
|
||||
all: $(binaries)
|
||||
build: $(build-binaries)
|
||||
package: $(package-rpm) $(package-deb)
|
||||
install: $(install-rpm) $(install-deb)
|
||||
$(binaries): %: build-% package-rpm-% package-deb-%
|
||||
|
||||
###
|
||||
# Build the binaries
|
||||
###
|
||||
|
||||
git-branch:
|
||||
$(eval GIT_BRANCH=$(shell echo $${GIT_BRANCH:-master}))
|
||||
|
||||
gopath-setup:
|
||||
test -d $(GOPATH) || mkdir -p $(GOPATH)
|
||||
test -d $(GOPATH)/bin || mkdir -p $(GOPATH)/bin
|
||||
test -d $(GOPATH)/src || mkdir -p $(GOPATH)/src
|
||||
|
||||
# Fetch the tendermint sources, check out $(GIT_BRANCH), build with the
# project's own Makefile and install the binary into $(GOPATH)/bin.
build-tendermint: git-branch gopath-setup
	@echo "*** Building tendermint"
	go get -d -u github.com/tendermint/tendermint/cmd/tendermint
	cd $(GOPATH)/src/github.com/tendermint/tendermint && git checkout "$(GIT_BRANCH)" && git pull
# PATH is extended so the project's "tools" target binaries are found by "build".
	export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint tools build
	cp $(GOPATH)/src/github.com/tendermint/tendermint/build/tendermint $(GOPATH)/bin
	@echo "*** Built tendermint"
|
||||
|
||||
# Fetch the ethermint sources, check out $(GIT_BRANCH), build with the
# project's own Makefile and install the binary into $(GOPATH)/bin.
build-ethermint: git-branch gopath-setup
	@echo "*** Building ethermint"
	go get -d -u github.com/tendermint/ethermint/cmd/ethermint
	cd $(GOPATH)/src/github.com/tendermint/ethermint && git checkout "$(GIT_BRANCH)" && git pull
	export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint build
	cp $(GOPATH)/src/github.com/tendermint/ethermint/build/ethermint $(GOPATH)/bin
	@echo "*** Built ethermint"
|
||||
|
||||
# Fetch the gaia sources, check out $(GIT_BRANCH) and install via the
# project's own Makefile (gaia's "install" target puts the binary in
# $(GOPATH)/bin directly, so no cp step is needed).
build-gaia: git-branch gopath-setup
	@echo "*** Building gaia"
# Fixed: a stray extra "go" argument made this go get fetch a nonexistent
# package "go" and fail every time, with the failure hidden by the || echo.
# The || echo guard is kept since "go get -d" can still exit non-zero here.
	go get -d -u github.com/cosmos/gaia || echo "Workaround for go downloads."
	cd $(GOPATH)/src/github.com/cosmos/gaia && git checkout "$(GIT_BRANCH)" && git pull
	export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia install
	@echo "*** Built gaia"
|
||||
|
||||
# Fetch the cosmos-sdk sources, check out $(GIT_BRANCH), build the basecoind
# example binary and install it into $(GOPATH)/bin.
build-basecoind: git-branch gopath-setup
	@echo "*** Building basecoind from cosmos-sdk"
	go get -d -u github.com/cosmos/cosmos-sdk/examples/basecoin/cmd/basecoind
	cd $(GOPATH)/src/github.com/cosmos/cosmos-sdk && git checkout "$(GIT_BRANCH)" && git pull
	export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk tools build
	cp $(GOPATH)/src/github.com/cosmos/cosmos-sdk/build/basecoind $(GOPATH)/bin/basecoind
	@echo "*** Built basecoind from cosmos-sdk"
|
||||
|
||||
###
|
||||
# Prepare package files
|
||||
###
|
||||
|
||||
# set app_version
# Derive <app>_version by running the built binary's "version" command and
# stripping the "ethermint: " prefix (if any), a leading "v" (if any),
# anything after the first "-", and all whitespace. Fails fast if the binary
# is missing or BUILD_NUMBER is unset.
version-%:
	@echo "Checking if binary exists"
	test -f $(GOPATH)/bin/$*
	@echo "BUILD_NUMBER is $(BUILD_NUMBER)"
	test -n "$(BUILD_NUMBER)"
	$(eval $*_version=$(shell $(GOPATH)/bin/$* version | head -1 | cut -d- -f1 | sed 's/^\(ethermint:\s*\|\)\(v\|\)//' | tr -d '\t ' ))
|
||||
|
||||
# set build_folder
# Compute the per-app staging directory, e.g. BUILD/tendermint-0.19.0-42.
folder-%: version-%
	$(eval build_folder=BUILD/$*-$($*_version)-$(BUILD_NUMBER))
|
||||
|
||||
# clean up folder structure for package files
# $(call prepare-files,<app>): recreate $(build_folder) from scratch, copy
# the app's packaging skeleton (./<app>/*) into it, and install the built
# binary under usr/bin.
prepare-files = rm -rf $(build_folder) && mkdir -p $(build_folder) && cp -r ./$(1)/* $(build_folder) && mkdir -p $(build_folder)/usr/bin && cp $(GOPATH)/bin/$(1) $(build_folder)/usr/bin
|
||||
|
||||
##
## Package customizations for the different applications
##

# Per-app extra staging steps, invoked as $(call prepare-<app>) after
# prepare-files. tendermint and gaia need nothing extra; ethermint ships a
# default genesis.json and keystore; basecoind installs a second copy of
# the binary into usr/bin.
prepare-tendermint =
prepare-ethermint = mkdir -p $(build_folder)/etc/ethermint && \
	cp $(GOPATH)/src/github.com/tendermint/ethermint/setup/genesis.json $(build_folder)/etc/ethermint/genesis.json && \
	cp -r $(GOPATH)/src/github.com/tendermint/ethermint/setup/keystore $(build_folder)/etc/ethermint
prepare-gaia =
prepare-basecoind = cp $(GOPATH)/bin/basecoind $(build_folder)/usr/bin
|
||||
|
||||
###
# Package the binary for CentOS/RedHat (RPM) and Debian/Ubuntu (DEB)
###

# Depends on rpmbuild, sorry, this can only be built on CentOS/RedHat machines.
# Stages $(build_folder), generates a spec from spectemplates, builds the RPM
# under ./RPMS, GPG-signs it and verifies the signature.
package-rpm-%: folder-%
	@echo "*** Packaging RPM $* version $($*_version)"

	$(call prepare-files,$*)
	$(call prepare-$*)

# The DEBIAN control dir from the skeleton is meaningless inside an RPM.
	rm -rf $(build_folder)/DEBIAN
	mkdir -p $(build_folder)/usr/share/licenses/$*
	cp ./LICENSE $(build_folder)/usr/share/licenses/$*/LICENSE
	chmod -Rf a+rX,u+w,g-w,o-w $(build_folder)

# NOTE(review): {SPECS,tmp} relies on brace expansion, a bash-ism; under
# make's default /bin/sh this creates a literal "{SPECS,tmp}" directory.
# Confirm SHELL is set to bash earlier in this Makefile.
	mkdir -p {SPECS,tmp}

	./generate-spec $* spectemplates SPECS
	sed -i "s/@VERSION@/$($*_version)/" SPECS/$*.spec
	sed -i "s/@BUILD_NUMBER@/$(BUILD_NUMBER)/" SPECS/$*.spec
	sed -i "s/@PACKAGE_NAME@/$*/" SPECS/$*.spec

	rpmbuild -bb SPECS/$*.spec --define "_topdir `pwd`" --define "_tmppath `pwd`/tmp"
	./sign RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm "$(gpg_key)" "`which gpg`"
# Verification is advisory only: the || echo keeps the build going on failure.
	rpm -Kv RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm || echo "rpm returns non-zero exist for some reason. ($?)"
	@echo "*** Packaged RPM $* version $($*_version)"
|
||||
|
||||
# Hand-rolled .deb assembly: stages $(build_folder), fills in the DEBIAN
# control/changelog templates, builds data.tar.xz + control.tar.gz, signs a
# _gpgbuilder manifest (dpkg-sig style) and ar-archives the four members
# into the final .deb, which is moved into ./RPMS alongside the RPMs.
package-deb-%: folder-%
	@echo "*** Packaging DEB $* version $($*_version)-$(BUILD_NUMBER)"

	$(call prepare-files,$*)
	$(call prepare-$*)

	mkdir -p $(build_folder)/usr/share/doc/$*
	cp $(build_folder)/DEBIAN/copyright $(build_folder)/usr/share/doc/$*
	chmod -Rf a+rX,u+w,g-w,o-w $(build_folder)

# Fill in the packaging templates shipped in the DEBIAN skeleton.
	sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/changelog
	sed -i "s/@STABILITY@/stable/" $(build_folder)/DEBIAN/changelog
	sed -i "s/@DATETIMESTAMP@/`date +%a,\ %d\ %b\ %Y\ %T\ %z`/" $(build_folder)/DEBIAN/changelog
	sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/control

	gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.gz
	gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.amd64.gz
# Installed-Size must be patched after all payload files are in place.
	sed -i "s/@INSTALLEDSIZE@/`du -ks $(build_folder) | cut -f 1`/" $(build_folder)/DEBIAN/control

# Payload and control archives; DEBIAN/ is metadata, not payload.
	cd $(build_folder) && tar --owner=root --group=root -cvJf ../../tmp/data.tar.xz --exclude DEBIAN *
	cd $(build_folder)/DEBIAN && tar --owner=root --group=root -cvzf ../../../tmp/control.tar.gz *
	echo "2.0" > tmp/debian-binary

# Build and sign the _gpgbuilder manifest: checksums and sizes of the three
# archive members are substituted into the _gpg template, then clearsigned.
	cp ./_gpg tmp/
	cd tmp && sed -i "s/@DATETIMESTAMP@/`date +%a\ %b\ %d\ %T\ %Y`/" _gpg
	cd tmp && sed -i "s/@BINMD5@/`md5sum debian-binary | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@BINSHA1@/`sha1sum debian-binary | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@BINSIZE@/`stat -c %s debian-binary | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@CONMD5@/`md5sum control.tar.gz | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@CONSHA1@/`sha1sum control.tar.gz | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@CONSIZE@/`stat -c %s control.tar.gz | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@DATMD5@/`md5sum data.tar.xz | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@DATSHA1@/`sha1sum data.tar.xz | cut -d\ -f1`/" _gpg
	cd tmp && sed -i "s/@DATSIZE@/`stat -c %s data.tar.xz | cut -d\ -f1`/" _gpg
	gpg --batch --passphrase "$(GPG_PASSPHRASE)" --clearsign tmp/_gpg
	mv tmp/_gpg.asc tmp/_gpgbuilder
# Member order matters in a .deb: debian-binary, control, data, signature.
	ar r tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder
	mv tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb RPMS/
	rm tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder tmp/_gpg
	@echo "*** Packaged DEB $* version $($*_version)-$(BUILD_NUMBER)"
|
||||
|
||||
# Mirror the CentOS repo from S3, drop in the new RPM, regenerate and re-sign
# the repodata, then sync the repo back to S3 world-readable.
install-rpm-%: version-%
#Make sure your host has the IAM role to read/write the S3 bucket OR that you set up ~/.boto
	@echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm to AWS $(DEVOPS_PATH)CentOS repository"
	aws s3 sync s3://tendermint-packages/$(DEVOPS_PATH)centos/ tmp/s3/ --delete
	mkdir -p tmp/s3/7/os/x86_64/Packages
	cp RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm tmp/s3/7/os/x86_64/Packages
	cp ./RPM-GPG-KEY-Tendermint tmp/s3/7/os/x86_64/
	cp ./tendermint.repo tmp/s3/7/os/x86_64/
# Stale metadata must go before createrepo --update regenerates it.
	rm -f tmp/s3/7/os/x86_64/repodata/*.bz2 tmp/s3/7/os/x86_64/repodata/*.gz tmp/s3/7/os/x86_64/repodata/repomd.xml.asc
	createrepo tmp/s3/7/os/x86_64/Packages -u https://tendermint-packages.interblock.io/$(DEVOPS_PATH)centos/7/os/x86_64/Packages -o tmp/s3/7/os/x86_64 --update -S --repo Tendermint --content tendermint --content basecoind --content ethermint
	gpg --batch --passphrase "$(GPG_PASSPHRASE)" --detach-sign -a tmp/s3/7/os/x86_64/repodata/repomd.xml
	aws s3 sync tmp/s3/ s3://tendermint-packages/$(DEVOPS_PATH)centos/ --delete --acl public-read
	@echo "*** Uploaded $* to AWS $(DEVOPS_PATH)CentOS repository"
|
||||
|
||||
# Mirror the Debian repo from S3, add the new .deb to pool/, regenerate the
# Packages index plus the main- and stable-level Release/Release.gpg/InRelease
# files (hand-rolled apt repo metadata), then sync back to S3 world-readable.
install-deb-%: version-%
	@echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS $(DEVOPS_PATH)Debian repository"
# NOTE(review): this pre-sync existence test runs against the possibly
# stale/absent local mirror and is repeated after the sync below; the first
# occurrence looks redundant — confirm before removing.
	@echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded"
	test ! -f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb
	aws s3 sync s3://tendermint-packages/$(DEVOPS_PATH)debian/ tmp/debian-s3/ --delete
# Refuse to overwrite a version+build that is already published.
	@echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded"
	test ! -f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb
	cp ./tendermint.list tmp/debian-s3/
	mkdir -p tmp/debian-s3/pool tmp/debian-s3/dists/stable/main/binary-amd64
	cp RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-s3/pool
	cp ./Release_amd64 tmp/debian-s3/dists/stable/main/binary-amd64/Release

#Packages / Packages.gz

# Append this package's stanza (checksums, size, control fields) to the
# repo-wide Packages index and regenerate Packages.gz.
	echo > tmp/Package
	echo "Filename: pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb" >> tmp/Package
	echo "MD5sum: `md5sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package
	echo "SHA1: `sha1sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package
	echo "SHA256: `sha256sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package
	echo "Size: `stat -c %s RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package
	cat BUILD/$*-$($*_version)-$(BUILD_NUMBER)/DEBIAN/control >> tmp/Package

	cat tmp/Package >> tmp/debian-s3/dists/stable/main/binary-amd64/Packages
	rm -f tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz
	gzip -c tmp/debian-s3/dists/stable/main/binary-amd64/Packages > tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz
	rm -f tmp/Package

#main / Release / InRelease / Release.gpg

	cp ./Release tmp/debian-s3/dists/stable/main/Release
	rm -f tmp/debian-s3/dists/stable/main/InRelease
	rm -f tmp/debian-s3/dists/stable/main/Release.gpg

# Append "<checksum> <size> <path>" lines for every file (except Release
# itself) under each checksum heading, as apt's Release format requires.
	echo "MD5Sum:" >> tmp/debian-s3/dists/stable/main/Release
	cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release
	echo "SHA1:" >> tmp/debian-s3/dists/stable/main/Release
	cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release
	echo "SHA256:" >> tmp/debian-s3/dists/stable/main/Release
	cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release

# Detached signature (Release.gpg) plus inline-signed copy (InRelease).
	gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/main/Release
	mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/Release.gpg
	gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/main/Release
	mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/InRelease

#stable / Release / InRelease / Release.gpg

# Same procedure one level up, for the dists/stable Release files.
	cp ./Release tmp/debian-s3/dists/stable/Release
	rm -f tmp/debian-s3/dists/stable/InRelease
	rm -f tmp/debian-s3/dists/stable/Release.gpg

	echo "MD5Sum:" >> tmp/debian-s3/dists/stable/Release
	cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release
	echo "SHA1:" >> tmp/debian-s3/dists/stable/Release
	cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release
	echo "SHA256:" >> tmp/debian-s3/dists/stable/Release
	cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release

	gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/Release
	mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/Release.gpg
	gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/Release
	mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/InRelease

	aws s3 sync tmp/debian-s3/ s3://tendermint-packages/$(DEVOPS_PATH)debian/ --delete --acl public-read
	@echo "*** Uploaded $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS $(DEVOPS_PATH)Debian repository"
|
||||
|
||||
# Remove intermediate build artifacts, keeping the finished packages (RPMS/)
# and the staging tree (BUILD/).
# Fixed: "rm -rf {BUILDROOT,...}" relied on brace expansion, a bash-ism;
# under make's default /bin/sh the braces are literal and nothing matching
# the real directories was removed.
mostlyclean:
	rm -rf BUILDROOT SOURCES SPECS SRPMS tmp
|
||||
|
||||
# Remove everything mostlyclean does, plus the staging tree and the
# finished packages.
# Fixed: "rm -rf {BUILD,RPMS}" relied on bash brace expansion; under make's
# default /bin/sh the braces are literal, so BUILD and RPMS were never removed.
clean: mostlyclean
	rm -rf BUILD RPMS
|
||||
|
||||
# In addition to clean, remove the fetched source trees and installed
# binaries from $(GOPATH).
distclean: clean
	rm -rf $(GOPATH)/src/github.com/tendermint/tendermint
	rm -rf $(GOPATH)/src/github.com/cosmos/cosmos-sdk
	rm -rf $(GOPATH)/src/github.com/tendermint/ethermint
# Fixed: the gaia source tree (created by build-gaia) was never removed,
# even though its binary below was.
	rm -rf $(GOPATH)/src/github.com/cosmos/gaia
	rm -rf $(GOPATH)/bin/tendermint
	rm -rf $(GOPATH)/bin/basecoind
	rm -rf $(GOPATH)/bin/ethermint
	rm -rf $(GOPATH)/bin/gaia
|
||||
|
||||
# Declare all always-run targets phony so they execute even if a file or
# directory with the same name exists (previously only "clean" was listed).
.PHONY : all build package install git-branch gopath-setup mostlyclean clean distclean
|
||||
|
||||
Reference in New Issue
Block a user