From a4606f1c5ef3f9b97f14f35ee83dbb7a064d5e9c Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 22 Mar 2015 19:20:54 -0700 Subject: [PATCH 01/18] Make pool tests faster --- blockchain/pool.go | 5 ++++- blockchain/pool_test.go | 5 +++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/blockchain/pool.go b/blockchain/pool.go index 528c66548..3de2c2969 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -12,7 +12,6 @@ import ( const ( maxOutstandingRequestsPerPeer = 10 eventsChannelCapacity = 100 - requestTimeoutSeconds = 10 maxTries = 3 requestIntervalMS = 500 requestBatchSize = 50 @@ -21,6 +20,10 @@ const ( maxPeersPerRequest = 1 ) +var ( + requestTimeoutSeconds = time.Duration(10) +) + type BlockRequest struct { Height uint PeerId string diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go index 4376f3ac0..a2578aa13 100644 --- a/blockchain/pool_test.go +++ b/blockchain/pool_test.go @@ -3,6 +3,7 @@ package blockchain import ( "math/rand" "testing" + "time" . "github.com/tendermint/tendermint/common" "github.com/tendermint/tendermint/types" @@ -74,6 +75,10 @@ func TestBasic(t *testing.T) { } func TestTimeout(t *testing.T) { + origRequestTimeoutSeconds := requestTimeoutSeconds + requestTimeoutSeconds = time.Duration(0) + defer func() { requestTimeoutSeconds = origRequestTimeoutSeconds }() + peers := makePeers(100, 0, 1000) start := uint(42) timeoutsCh := make(chan string, 10) From 9703d34b65682930d5d8f267a94eecde8e742545 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 24 Mar 2015 11:02:30 -0700 Subject: [PATCH 02/18] fixed pool, using locks now. --- blockchain/pool.go | 571 +++++++++++++++++++++------------------- blockchain/pool_test.go | 91 ++++--- 2 files changed, 347 insertions(+), 315 deletions(-) diff --git a/blockchain/pool.go b/blockchain/pool.go index 3de2c2969..3e3313bfb 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -1,7 +1,7 @@ package blockchain import ( - "math/rand" + "sync" "sync/atomic" "time" @@ -11,347 +11,368 @@ import ( const ( maxOutstandingRequestsPerPeer = 10 - eventsChannelCapacity = 100 + inputsChannelCapacity = 100 maxTries = 3 requestIntervalMS = 500 requestBatchSize = 50 maxPendingRequests = 50 maxTotalRequests = 100 - maxPeersPerRequest = 1 + maxRequestsPerPeer = 20 ) var ( - requestTimeoutSeconds = time.Duration(10) + requestTimeoutSeconds = time.Duration(1) ) -type BlockRequest struct { - Height uint - PeerId string -} - type BlockPool struct { - peers map[string]*bpPeer - blockInfos map[uint]*bpBlockInfo - height uint // the lowest key in blockInfos. - started int32 // atomic - stopped int32 // atomic - numPending int32 - numTotal int32 - eventsCh chan interface{} // internal events. - requestsCh chan<- BlockRequest // output of new requests to make. - timeoutsCh chan<- string // output of peers that timed out. - blocksCh chan<- *types.Block // output of ordered blocks. - repeater *RepeatTimer // for requesting more bocks. - quit chan struct{} + // block requests + requestsMtx sync.Mutex + requests map[uint]*bpRequest + height uint // the lowest key in requests. 
+ numPending int32 + numTotal int32 + + // peers + peersMtx sync.Mutex + peers map[string]*bpPeer + + requestsCh chan<- BlockRequest + timeoutsCh chan<- string + repeater *RepeatTimer + + running int32 // atomic } -func NewBlockPool(start uint, timeoutsCh chan<- string, requestsCh chan<- BlockRequest, blocksCh chan<- *types.Block) *BlockPool { +func NewBlockPool(start uint, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool { return &BlockPool{ - peers: make(map[string]*bpPeer), - blockInfos: make(map[uint]*bpBlockInfo), + peers: make(map[string]*bpPeer), + + requests: make(map[uint]*bpRequest), height: start, - started: 0, - stopped: 0, numPending: 0, numTotal: 0, - quit: make(chan struct{}), - eventsCh: make(chan interface{}, eventsChannelCapacity), requestsCh: requestsCh, timeoutsCh: timeoutsCh, - blocksCh: blocksCh, repeater: NewRepeatTimer("", requestIntervalMS*time.Millisecond), + + running: 0, } } func (bp *BlockPool) Start() { - if atomic.CompareAndSwapInt32(&bp.started, 0, 1) { + if atomic.CompareAndSwapInt32(&bp.running, 0, 1) { log.Info("Starting BlockPool") go bp.run() } } func (bp *BlockPool) Stop() { - if atomic.CompareAndSwapInt32(&bp.stopped, 0, 1) { + if atomic.CompareAndSwapInt32(&bp.running, 1, 0) { log.Info("Stopping BlockPool") - close(bp.quit) - close(bp.eventsCh) - close(bp.requestsCh) - close(bp.timeoutsCh) - close(bp.blocksCh) bp.repeater.Stop() } } -// AddBlock should be called when a block is received. -func (bp *BlockPool) AddBlock(block *types.Block, peerId string) { - bp.eventsCh <- bpBlockResponse{block, peerId} +func (bp *BlockPool) IsRunning() bool { + return atomic.LoadInt32(&bp.running) == 1 } -func (bp *BlockPool) SetPeerStatus(peerId string, height uint) { - bp.eventsCh <- bpPeerStatus{peerId, height} -} - -// Runs in a goroutine and processes messages. +// Run spawns requests as needed. func (bp *BlockPool) run() { -FOR_LOOP: +RUN_LOOP: for { - select { - case msg := <-bp.eventsCh: - bp.handleEvent(msg) - case <-bp.repeater.Ch: - bp.makeMoreBlockInfos() - bp.requestBlocksFromRandomPeers(10) - case <-bp.quit: - break FOR_LOOP + if atomic.LoadInt32(&bp.running) == 0 { + break RUN_LOOP } - } -} - -func (bp *BlockPool) handleEvent(event_ interface{}) { - switch event := event_.(type) { - case bpBlockResponse: - peer := bp.peers[event.peerId] - blockInfo := bp.blockInfos[event.block.Height] - if blockInfo == nil { - // block was unwanted. - if peer != nil { - peer.bad++ - } + height, numPending, numTotal := bp.GetStatus() + log.Debug("BlockPool.run", "height", height, "numPending", numPending, + "numTotal", numTotal) + if numPending >= maxPendingRequests { + // sleep for a bit. + time.Sleep(requestIntervalMS * time.Millisecond) + } else if numTotal >= maxTotalRequests { + // sleep for a bit. + time.Sleep(requestIntervalMS * time.Millisecond) } else { - // block was wanted. - if peer != nil { - peer.good++ - } - delete(peer.requests, event.block.Height) - if blockInfo.block == nil { - // peer is the first to give it to us. - blockInfo.block = event.block - blockInfo.blockBy = peer.id - bp.numPending-- - if event.block.Height == bp.height { - go bp.pushBlocksFromStart() - } - } - } - case bpPeerStatus: // updated or new status from peer - // request blocks if possible. - peer := bp.peers[event.peerId] - if peer == nil { - peer = bpNewPeer(event.peerId, event.height) - bp.peers[peer.id] = peer - } - bp.requestBlocksFromPeer(peer) - case bpRequestTimeout: // unconditional timeout for each peer's request. 
- peer := bp.peers[event.peerId] - if peer == nil { - // cleanup was already handled. - return - } - height := event.height - request := peer.requests[height] - if request == nil || request.block != nil { - // the request was fulfilled by some peer or this peer. - return - } - - // A request for peer timed out. - peer.bad++ - if request.tries < maxTries { - log.Warn("Timeout: Trying again.", "tries", request.tries, "peerId", peer.id) - // try again. - select { - case bp.requestsCh <- BlockRequest{height, peer.id}: - request.startAndTimeoutTo(bp.eventsCh) // also bumps request.tries - default: - // The request cannot be made because requestCh is full. - // Just delete the request. - delete(peer.requests, height) - } - } else { - log.Warn("Timeout: Deleting request") - // delete the request. - delete(peer.requests, height) - blockInfo := bp.blockInfos[height] - if blockInfo != nil { - delete(blockInfo.requests, peer.id) - } - select { - case bp.timeoutsCh <- peer.id: - default: - } - + // request for more blocks. + height := bp.nextHeight() + bp.makeRequest(height) } } } -// NOTE: This function is sufficient, but we should find pending blocks -// and sample the peers in one go rather than the current O(n^2) impl. -func (bp *BlockPool) requestBlocksFromRandomPeers(maxPeers int) { - chosen := bp.pickAvailablePeers(maxPeers) - log.Debug("requestBlocksFromRandomPeers", "chosen", len(chosen)) - for _, peer := range chosen { - bp.requestBlocksFromPeer(peer) +func (bp *BlockPool) GetStatus() (uint, int32, int32) { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + return bp.height, bp.numPending, bp.numTotal +} + +// We need to see the second block's Validation to validate the first block. +// So we peek two blocks at a time. +func (bp *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + if r := bp.requests[bp.height]; r != nil { + first = r.block + } + if r := bp.requests[bp.height+1]; r != nil { + second = r.block + } + return +} + +// Pop the first block at bp.height +// It must have been validated by 'second'.Validation from PeekTwoBlocks(). +func (bp *BlockPool) PopRequest() { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + if r := bp.requests[bp.height]; r == nil || r.block == nil { + panic("PopRequest() requires a valid block") + } + + delete(bp.requests, bp.height) + bp.height++ + bp.numTotal-- +} + +// Invalidates the block at bp.height. +// Remove the peer and request from others. 
+func (bp *BlockPool) RedoRequest(height uint) { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + request := bp.requests[height] + if request.block == nil { + panic("Expected block to be non-nil") + } + bp.removePeer(request.peerId) + request.block = nil + request.peerId = "" + bp.numPending++ + + go requestRoutine(bp, height) +} + +func (bp *BlockPool) hasBlock(height uint) bool { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + request := bp.requests[height] + return request != nil && request.block != nil +} + +func (bp *BlockPool) setPeerForRequest(height uint, peerId string) { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + request := bp.requests[height] + if request == nil { + return + } + request.peerId = peerId +} + +func (bp *BlockPool) AddBlock(block *types.Block, peerId string) { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + request := bp.requests[block.Height] + if request == nil { + return + } + if request.peerId != peerId { + return + } + if request.block != nil { + return + } + request.block = block + bp.numPending-- +} + +func (bp *BlockPool) getPeer(peerId string) *bpPeer { + bp.peersMtx.Lock() // Lock + defer bp.peersMtx.Unlock() + + peer := bp.peers[peerId] + return peer +} + +// Sets the peer's blockchain height. +func (bp *BlockPool) SetPeerHeight(peerId string, height uint) { + bp.peersMtx.Lock() // Lock + defer bp.peersMtx.Unlock() + + peer := bp.peers[peerId] + if peer != nil { + peer.height = height + } else { + peer = &bpPeer{ + height: height, + id: peerId, + numRequests: 0, + } + bp.peers[peerId] = peer } } -func (bp *BlockPool) requestBlocksFromPeer(peer *bpPeer) { - // If peer is available and can provide something... - for height := bp.height; peer.available(); height++ { - blockInfo := bp.blockInfos[height] - if blockInfo == nil { - // We're out of range. - return - } - needsMorePeers := blockInfo.needsMorePeers() - alreadyAskedPeer := blockInfo.requests[peer.id] != nil - if needsMorePeers && !alreadyAskedPeer { - select { - case bp.requestsCh <- BlockRequest{height, peer.id}: - // Create a new request and start the timer. - request := &bpBlockRequest{ - height: height, - peer: peer, - } - blockInfo.requests[peer.id] = request - peer.requests[height] = request - request.startAndTimeoutTo(bp.eventsCh) // also bumps request.tries - default: - // The request cannot be made because requestCh is full. - // Just stop. - return - } - } - } +func (bp *BlockPool) RemovePeer(peerId string) { + bp.peersMtx.Lock() // Lock + defer bp.peersMtx.Unlock() + + delete(bp.peers, peerId) } -func (bp *BlockPool) makeMoreBlockInfos() { - // make more requests if necessary. - for i := 0; i < requestBatchSize; i++ { - //log.Debug("Confused?", - // "numPending", bp.numPending, "maxPendingRequests", maxPendingRequests, "numtotal", bp.numTotal, "maxTotalRequests", maxTotalRequests) - if bp.numPending < maxPendingRequests && bp.numTotal < maxTotalRequests { - // Make a request for the next block height - requestHeight := bp.height + uint(bp.numTotal) - log.Debug("New blockInfo", "height", requestHeight) - blockInfo := bpNewBlockInfo(requestHeight) - bp.blockInfos[requestHeight] = blockInfo - bp.numPending++ - bp.numTotal++ - } else { - break - } - } -} +// Pick an available peer with at least the given minHeight. +// If no peers are available, returns nil. 
+func (bp *BlockPool) pickIncrAvailablePeer(minHeight uint) *bpPeer { + bp.peersMtx.Lock() + defer bp.peersMtx.Unlock() -func (bp *BlockPool) pickAvailablePeers(choose int) []*bpPeer { - available := []*bpPeer{} for _, peer := range bp.peers { - if peer.available() { - available = append(available, peer) + if peer.numRequests >= maxRequestsPerPeer { + continue + } + if peer.height < minHeight { + continue + } + peer.numRequests++ + return peer + } + + return nil +} + +func (bp *BlockPool) decrPeer(peerId string) { + bp.peersMtx.Lock() + defer bp.peersMtx.Unlock() + + peer := bp.peers[peerId] + if peer == nil { + return + } + peer.numRequests-- +} + +func (bp *BlockPool) nextHeight() uint { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + return bp.height + uint(bp.numTotal) +} + +func (bp *BlockPool) makeRequest(height uint) { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + request := &bpRequest{ + height: height, + peerId: "", + block: nil, + } + bp.requests[height] = request + + nextHeight := bp.height + uint(bp.numTotal) + if nextHeight == height { + bp.numTotal++ + bp.numPending++ + } + + go requestRoutine(bp, height) +} + +func (bp *BlockPool) sendRequest(height uint, peerId string) { + if atomic.LoadInt32(&bp.running) == 0 { + return + } + bp.requestsCh <- BlockRequest{height, peerId} +} + +func (bp *BlockPool) sendTimeout(peerId string) { + if atomic.LoadInt32(&bp.running) == 0 { + return + } + bp.timeoutsCh <- peerId +} + +func (bp *BlockPool) debug() string { + bp.requestsMtx.Lock() // Lock + defer bp.requestsMtx.Unlock() + + str := "" + for h := bp.height; h < bp.height+uint(bp.numTotal); h++ { + if bp.requests[h] == nil { + str += Fmt("H(%v):X ", h) + } else { + str += Fmt("H(%v):", h) + str += Fmt("B?(%v) ", bp.requests[h].block != nil) } } - perm := rand.Perm(MinInt(choose, len(available))) - chosen := make([]*bpPeer, len(perm)) - for i, idx := range perm { - chosen[i] = available[idx] - } - return chosen -} - -// blocking -func (bp *BlockPool) pushBlocksFromStart() { - for height := bp.height; ; height++ { - // push block to blocksCh. - blockInfo := bp.blockInfos[height] - if blockInfo == nil || blockInfo.block == nil { - break - } - bp.numTotal-- - bp.height++ - delete(bp.blockInfos, height) - bp.blocksCh <- blockInfo.block - } -} - -//----------------------------------------------------------------------------- - -type bpBlockInfo struct { - height uint - requests map[string]*bpBlockRequest - block *types.Block // first block received - blockBy string // peerId of source -} - -func bpNewBlockInfo(height uint) *bpBlockInfo { - return &bpBlockInfo{ - height: height, - requests: make(map[string]*bpBlockRequest), - } -} - -func (blockInfo *bpBlockInfo) needsMorePeers() bool { - return len(blockInfo.requests) < maxPeersPerRequest -} - -//------------------------------------- - -type bpBlockRequest struct { - peer *bpPeer - height uint - block *types.Block - tries int -} - -// bump tries++ and set timeout. -// NOTE: the timer is unconditional. -func (request *bpBlockRequest) startAndTimeoutTo(eventsCh chan<- interface{}) { - request.tries++ - time.AfterFunc(requestTimeoutSeconds*time.Second, func() { - eventsCh <- bpRequestTimeout{ - peerId: request.peer.id, - height: request.height, - } - }) + return str } //------------------------------------- type bpPeer struct { - id string - height uint - requests map[uint]*bpBlockRequest - // Count good/bad events from peer. 
- good uint - bad uint + id string + height uint + numRequests int32 } -func bpNewPeer(peerId string, height uint) *bpPeer { - return &bpPeer{ - id: peerId, - height: height, - requests: make(map[uint]*bpBlockRequest), - } -} - -func (peer *bpPeer) available() bool { - return len(peer.requests) < maxOutstandingRequestsPerPeer +type bpRequest struct { + height uint + peerId string + block *types.Block } //------------------------------------- -// bp.eventsCh messages -type bpBlockResponse struct { - block *types.Block - peerId string +// Responsible for making more requests as necessary +// Returns when a block is found (e.g. AddBlock() is called) +func requestRoutine(bp *BlockPool, height uint) { + for { + var peer *bpPeer = nil + PICK_LOOP: + for { + if !bp.IsRunning() { + return + } + peer = bp.pickIncrAvailablePeer(height) + if peer == nil { + time.Sleep(requestIntervalMS * time.Millisecond) + continue PICK_LOOP + } + break PICK_LOOP + } + + bp.setPeerForRequest(height, peer.id) + + for try := 0; try < maxTries; try++ { + bp.sendRequest(height, peer.id) + time.Sleep(requestTimeoutSeconds * time.Second) + if bp.hasBlock(height) { + bp.decrPeer(peer.id) + return + } + bpHeight, _, _ := bp.GetStatus() + if height < bpHeight { + bp.decrPeer(peer.id) + return + } + } + + bp.RemovePeer(peer.id) + bp.sendTimeout(peer.id) + } } -type bpPeerStatus struct { - peerId string - height uint // blockchain tip of peer -} +//------------------------------------- -type bpRequestTimeout struct { - peerId string - height uint +type BlockRequest struct { + Height uint + PeerId string } diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go index a2578aa13..c07a11d85 100644 --- a/blockchain/pool_test.go +++ b/blockchain/pool_test.go @@ -25,26 +25,34 @@ func makePeers(numPeers int, minHeight, maxHeight uint) map[string]testPeer { } func TestBasic(t *testing.T) { - // 100 peers anywhere at height 0 to 1000. - peers := makePeers(100, 0, 1000) - + peers := makePeers(10, 0, 1000) start := uint(42) - maxHeight := uint(300) timeoutsCh := make(chan string, 100) requestsCh := make(chan BlockRequest, 100) - blocksCh := make(chan *types.Block, 100) - - pool := NewBlockPool(start, timeoutsCh, requestsCh, blocksCh) + pool := NewBlockPool(start, requestsCh, timeoutsCh) pool.Start() // Introduce each peer. go func() { for _, peer := range peers { - pool.SetPeerStatus(peer.id, peer.height) + pool.SetPeerHeight(peer.id, peer.height) } }() - lastSeenBlock := uint(41) + // Start a goroutine to pull blocks + go func() { + for { + if !pool.IsRunning() { + return + } + first, second := pool.PeekTwoBlocks() + if first != nil && second != nil { + pool.PopRequest() + } else { + time.Sleep(1 * time.Second) + } + } + }() // Pull from channels for { @@ -53,21 +61,15 @@ func TestBasic(t *testing.T) { t.Errorf("timeout: %v", peerId) case request := <-requestsCh: log.Debug("TEST: Pulled new BlockRequest", "request", request) - // After a while, pretend like we got a block from the peer. + if request.Height == 300 { + return // Done! + } + // Request desired, pretend like we got the block immediately. go func() { block := &types.Block{Header: &types.Header{Height: request.Height}} pool.AddBlock(block, request.PeerId) log.Debug("TEST: Added block", "block", request.Height, "peer", request.PeerId) }() - case block := <-blocksCh: - log.Debug("TEST: Pulled new Block", "height", block.Height) - if block.Height != lastSeenBlock+1 { - t.Fatalf("Wrong order of blocks seen. 
Expected: %v Got: %v", lastSeenBlock+1, block.Height) - } - lastSeenBlock++ - if block.Height == maxHeight { - return // Done! - } } } @@ -75,43 +77,52 @@ func TestBasic(t *testing.T) { } func TestTimeout(t *testing.T) { - origRequestTimeoutSeconds := requestTimeoutSeconds - requestTimeoutSeconds = time.Duration(0) - defer func() { requestTimeoutSeconds = origRequestTimeoutSeconds }() - - peers := makePeers(100, 0, 1000) + peers := makePeers(10, 0, 1000) start := uint(42) - timeoutsCh := make(chan string, 10) - requestsCh := make(chan BlockRequest, 10) - blocksCh := make(chan *types.Block, 100) - - pool := NewBlockPool(start, timeoutsCh, requestsCh, blocksCh) + timeoutsCh := make(chan string, 100) + requestsCh := make(chan BlockRequest, 100) + pool := NewBlockPool(start, requestsCh, timeoutsCh) pool.Start() // Introduce each peer. go func() { for _, peer := range peers { - pool.SetPeerStatus(peer.id, peer.height) + pool.SetPeerHeight(peer.id, peer.height) + } + }() + + // Start a goroutine to pull blocks + go func() { + for { + if !pool.IsRunning() { + return + } + first, second := pool.PeekTwoBlocks() + if first != nil && second != nil { + pool.PopRequest() + } else { + time.Sleep(1 * time.Second) + } } }() // Pull from channels + counter := 0 + timedOut := map[string]struct{}{} for { select { case peerId := <-timeoutsCh: - // Timed out. Done! - if peers[peerId].id != peerId { - t.Errorf("Unexpected peer from timeoutsCh") + log.Debug("Timeout", "peerId", peerId) + if _, ok := timedOut[peerId]; !ok { + counter++ + if counter == len(peers) { + return // Done! + } } - return - case _ = <-requestsCh: - // Don't do anything, let it time out. - case _ = <-blocksCh: - t.Errorf("Got block when none expected") - return + case request := <-requestsCh: + log.Debug("TEST: Pulled new BlockRequest", "request", request) } } pool.Stop() - } From 0237d284cc673cd5a37d3022c920b84a868b986c Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 24 Mar 2015 12:00:27 -0700 Subject: [PATCH 03/18] Channel bytes are spelled fully, "XXXChannel" --- consensus/pol.go | 5 +++++ consensus/reactor.go | 42 +++++++++++++++++++++--------------------- consensus/state.go | 4 ++-- mempool/reactor.go | 8 ++++---- p2p/connection.go | 26 +++++++++++++++++++------- p2p/peer_set.go | 6 ++++++ p2p/pex_reactor.go | 8 ++++---- p2p/switch.go | 4 ++-- state/state_test.go | 6 +++--- types/block.go | 4 ++++ 10 files changed, 70 insertions(+), 43 deletions(-) diff --git a/consensus/pol.go b/consensus/pol.go index c87b4ee5d..06784d588 100644 --- a/consensus/pol.go +++ b/consensus/pol.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/tendermint/tendermint/account" + "github.com/tendermint/tendermint/binary" . 
"github.com/tendermint/tendermint/common" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -94,3 +95,7 @@ func (pol *POL) StringShort() string { Fingerprint(pol.BlockHash), pol.BlockParts) } } + +func (pol *POL) MakePartSet() *types.PartSet { + return types.NewPartSetFromData(binary.BinaryBytes(pol)) +} diff --git a/consensus/reactor.go b/consensus/reactor.go index 83c244995..6e6f33662 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -17,9 +17,9 @@ import ( ) const ( - StateCh = byte(0x20) - DataCh = byte(0x21) - VoteCh = byte(0x22) + StateChannel = byte(0x20) + DataChannel = byte(0x21) + VoteChannel = byte(0x22) peerStateKey = "ConsensusReactor.peerState" @@ -75,15 +75,15 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor { // TODO optimize return []*p2p.ChannelDescriptor{ &p2p.ChannelDescriptor{ - Id: StateCh, + Id: StateChannel, Priority: 5, }, &p2p.ChannelDescriptor{ - Id: DataCh, + Id: DataChannel, Priority: 5, }, &p2p.ChannelDescriptor{ - Id: VoteCh, + Id: VoteChannel, Priority: 5, }, } @@ -122,7 +122,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte log.Debug("Receive", "channel", chId, "peer", peer, "msg", msg_, "bytes", msgBytes) switch chId { - case StateCh: + case StateChannel: switch msg := msg_.(type) { case *NewRoundStepMessage: ps.ApplyNewRoundStepMessage(msg, rs) @@ -134,7 +134,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte // Ignore unknown message } - case DataCh: + case DataChannel: switch msg := msg_.(type) { case *Proposal: ps.SetHasProposal(msg) @@ -155,7 +155,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte // Ignore unknown message } - case VoteCh: + case VoteChannel: switch msg := msg_.(type) { case *VoteMessage: vote := msg.Vote @@ -192,7 +192,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte Type: vote.Type, Index: index, } - conR.sw.Broadcast(StateCh, msg) + conR.sw.Broadcast(StateChannel, msg) } default: @@ -252,10 +252,10 @@ func (conR *ConsensusReactor) broadcastNewRoundStepRoutine() { nrsMsg, csMsg := makeRoundStepMessages(rs) if nrsMsg != nil { - conR.sw.Broadcast(StateCh, nrsMsg) + conR.sw.Broadcast(StateChannel, nrsMsg) } if csMsg != nil { - conR.sw.Broadcast(StateCh, csMsg) + conR.sw.Broadcast(StateChannel, csMsg) } } } @@ -264,10 +264,10 @@ func (conR *ConsensusReactor) sendNewRoundStepRoutine(peer *p2p.Peer) { rs := conR.conS.GetRoundState() nrsMsg, csMsg := makeRoundStepMessages(rs) if nrsMsg != nil { - peer.Send(StateCh, nrsMsg) + peer.Send(StateChannel, nrsMsg) } if csMsg != nil { - peer.Send(StateCh, nrsMsg) + peer.Send(StateChannel, nrsMsg) } } @@ -296,7 +296,7 @@ OUTER_LOOP: Type: partTypeProposalBlock, Part: part, } - peer.Send(DataCh, msg) + peer.Send(DataChannel, msg) ps.SetHasProposalBlockPart(rs.Height, rs.Round, index) continue OUTER_LOOP } @@ -306,7 +306,7 @@ OUTER_LOOP: if 0 < prs.Height && prs.Height < rs.Height { //log.Debug("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockBitArray", prs.ProposalBlockBitArray) if index, ok := prs.ProposalBlockBitArray.Not().PickRandom(); ok { - // Ensure that the peer's PartSetHeaeder is correct + // Ensure that the peer's PartSetHeader is correct blockMeta := conR.blockStore.LoadBlockMeta(prs.Height) if !blockMeta.Parts.Equals(prs.ProposalBlockParts) { log.Debug("Peer ProposalBlockParts mismatch, sleeping", @@ -329,7 +329,7 @@ OUTER_LOOP: Type: 
partTypeProposalBlock, Part: part, } - peer.Send(DataCh, msg) + peer.Send(DataChannel, msg) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) continue OUTER_LOOP } else { @@ -349,7 +349,7 @@ OUTER_LOOP: // Send proposal? if rs.Proposal != nil && !prs.Proposal { msg := p2p.TypedMessage{msgTypeProposal, rs.Proposal} - peer.Send(DataCh, msg) + peer.Send(DataChannel, msg) ps.SetHasProposal(rs.Proposal) continue OUTER_LOOP } @@ -363,7 +363,7 @@ OUTER_LOOP: Type: partTypeProposalPOL, Part: rs.ProposalPOLParts.GetPart(index), } - peer.Send(DataCh, msg) + peer.Send(DataChannel, msg) ps.SetHasProposalPOLPart(rs.Height, rs.Round, index) continue OUTER_LOOP } @@ -397,7 +397,7 @@ OUTER_LOOP: vote := voteSet.GetByIndex(index) // NOTE: vote may be a commit. msg := &VoteMessage{index, vote} - peer.Send(VoteCh, msg) + peer.Send(VoteChannel, msg) ps.SetHasVote(vote, index) return true } @@ -421,7 +421,7 @@ OUTER_LOOP: Signature: commit.Signature, } msg := &VoteMessage{index, vote} - peer.Send(VoteCh, msg) + peer.Send(VoteChannel, msg) ps.SetHasVote(vote, index) return true } diff --git a/consensus/state.go b/consensus/state.go index d44dd7b5a..5fb4268d8 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -641,12 +641,12 @@ func (cs *ConsensusState) RunActionPropose(height uint, round uint) { return } - blockParts = types.NewPartSetFromData(binary.BinaryBytes(block)) + blockParts = block.MakePartSet() pol = cs.LockedPOL // If exists, is a PoUnlock. } if pol != nil { - polParts = types.NewPartSetFromData(binary.BinaryBytes(pol)) + polParts = pol.MakePartSet() } // Make proposal diff --git a/mempool/reactor.go b/mempool/reactor.go index 5bed4e18b..e16cf9332 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -11,7 +11,7 @@ import ( ) var ( - MempoolCh = byte(0x30) + MempoolChannel = byte(0x30) ) // MempoolReactor handles mempool tx broadcasting amongst peers. @@ -52,7 +52,7 @@ func (memR *MempoolReactor) Stop() { func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { return []*p2p.ChannelDescriptor{ &p2p.ChannelDescriptor{ - Id: MempoolCh, + Id: MempoolChannel, Priority: 5, }, } @@ -92,7 +92,7 @@ func (memR *MempoolReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) { if peer.Key == src.Key { continue } - peer.TrySend(MempoolCh, msg) + peer.TrySend(MempoolChannel, msg) } default: @@ -106,7 +106,7 @@ func (memR *MempoolReactor) BroadcastTx(tx types.Tx) error { return err } msg := &TxMessage{Tx: tx} - memR.sw.Broadcast(MempoolCh, msg) + memR.sw.Broadcast(MempoolChannel, msg) return nil } diff --git a/p2p/connection.go b/p2p/connection.go index 89086bc2a..578dea1b3 100644 --- a/p2p/connection.go +++ b/p2p/connection.go @@ -50,8 +50,9 @@ There are two methods for sending messages: func (m MConnection) TrySend(chId byte, msg interface{}) bool {} `Send(chId, msg)` is a blocking call that waits until `msg` is successfully queued -for the channel with the given id byte `chId`. The message `msg` is serialized -using the `tendermint/binary` submodule's `WriteBinary()` reflection routine. +for the channel with the given id byte `chId`, or until the request times out. +The message `msg` is serialized using the `tendermint/binary` submodule's +`WriteBinary()` reflection routine. `TrySend(chId, msg)` is a nonblocking call that returns false if the channel's queue is full. 
@@ -437,8 +438,19 @@ FOR_LOOP: //----------------------------------------------------------------------------- type ChannelDescriptor struct { - Id byte - Priority uint + Id byte + Priority uint + SendQueueCapacity uint + RecvBufferCapacity uint +} + +func (chDesc *ChannelDescriptor) FillDefaults() { + if chDesc.SendQueueCapacity == 0 { + chDesc.SendQueueCapacity = defaultSendQueueCapacity + } + if chDesc.RecvBufferCapacity == 0 { + chDesc.RecvBufferCapacity = defaultRecvBufferCapacity + } } // TODO: lowercase. @@ -448,7 +460,7 @@ type Channel struct { desc *ChannelDescriptor id byte sendQueue chan []byte - sendQueueSize uint32 + sendQueueSize uint32 // atomic. recving []byte sending []byte priority uint @@ -463,8 +475,8 @@ func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel { conn: conn, desc: desc, id: desc.Id, - sendQueue: make(chan []byte, defaultSendQueueCapacity), - recving: make([]byte, 0, defaultRecvBufferCapacity), + sendQueue: make(chan []byte, desc.SendQueueCapacity), + recving: make([]byte, 0, desc.RecvBufferCapacity), priority: desc.Priority, } } diff --git a/p2p/peer_set.go b/p2p/peer_set.go index b4230ffa3..f365cd8ea 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -55,6 +55,12 @@ func (ps *PeerSet) Has(peerKey string) bool { return ok } +func (ps *PeerSet) Get(peerKey string) *Peer { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return ps.lookup[peerKey].peer +} + func (ps *PeerSet) Remove(peer *Peer) { ps.mtx.Lock() defer ps.mtx.Unlock() diff --git a/p2p/pex_reactor.go b/p2p/pex_reactor.go index 90be9b24c..de742645f 100644 --- a/p2p/pex_reactor.go +++ b/p2p/pex_reactor.go @@ -14,7 +14,7 @@ import ( var pexErrInvalidMessage = errors.New("Invalid PEX message") const ( - PexCh = byte(0x00) + PexChannel = byte(0x00) ensurePeersPeriodSeconds = 30 minNumOutboundPeers = 10 maxNumPeers = 50 @@ -62,7 +62,7 @@ func (pexR *PEXReactor) Stop() { func (pexR *PEXReactor) GetChannels() []*ChannelDescriptor { return []*ChannelDescriptor{ &ChannelDescriptor{ - Id: PexCh, + Id: PexChannel, Priority: 1, }, } @@ -122,11 +122,11 @@ func (pexR *PEXReactor) Receive(chId byte, src *Peer, msgBytes []byte) { // Asks peer for more addresses. func (pexR *PEXReactor) RequestPEX(peer *Peer) { - peer.Send(PexCh, &pexRequestMessage{}) + peer.Send(PexChannel, &pexRequestMessage{}) } func (pexR *PEXReactor) SendAddrs(peer *Peer, addrs []*NetAddress) { - peer.Send(PexCh, &pexAddrsMessage{Addrs: addrs}) + peer.Send(PexChannel, &pexAddrsMessage{Addrs: addrs}) } // Ensures that sufficient peers are connected. (continuous) diff --git a/p2p/switch.go b/p2p/switch.go index 635e0ecaa..12267fa10 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -133,7 +133,7 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er // Send handshake msg := &pexHandshakeMessage{ChainId: sw.chainId} - peer.Send(PexCh, msg) + peer.Send(PexChannel, msg) return peer, nil } @@ -164,7 +164,7 @@ func (sw *Switch) IsDialing(addr *NetAddress) bool { return sw.dialing.Has(addr.String()) } -// Broadcast runs a go routine for each attemptted send, which will block +// Broadcast runs a go routine for each attempted send, which will block // trying to send for defaultSendTimeoutSeconds. 
Returns a channel // which receives success values for each attempted send (false if times out) func (sw *Switch) Broadcast(chId byte, msg interface{}) chan bool { diff --git a/state/state_test.go b/state/state_test.go index da7960a77..71efd80f2 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -91,7 +91,7 @@ func TestGenesisSaveLoad(t *testing.T) { // Make complete block and blockParts block := makeBlock(t, s0, nil, nil) - blockParts := types.NewPartSetFromData(binary.BinaryBytes(block)) + blockParts := block.MakePartSet() // Now append the block to s0. err := s0.AppendBlock(block, blockParts.Header()) @@ -338,7 +338,7 @@ func TestAddValidator(t *testing.T) { // Make complete block and blockParts block0 := makeBlock(t, s0, nil, []types.Tx{bondTx}) - block0Parts := types.NewPartSetFromData(binary.BinaryBytes(block0)) + block0Parts := block0.MakePartSet() // Sanity check if s0.BondedValidators.Size() != 1 { @@ -379,7 +379,7 @@ func TestAddValidator(t *testing.T) { }, }, nil, ) - block1Parts := types.NewPartSetFromData(binary.BinaryBytes(block1)) + block1Parts := block1.MakePartSet() err = s0.AppendBlock(block1, block1Parts.Header()) if err != nil { t.Error("Error appending secondary block:", err) diff --git a/types/block.go b/types/block.go index d56f4e467..11dfb3b9d 100644 --- a/types/block.go +++ b/types/block.go @@ -66,6 +66,10 @@ func (b *Block) Hash() []byte { return merkle.HashFromHashes(hashes) } +func (b *Block) MakePartSet() *PartSet { + return NewPartSetFromData(binary.BinaryBytes(b)) +} + // Convenience. // A nil block never hashes to anything. // Nothing hashes to a nil hash. From 612f8bab9d829d5eeabfe52af47327cb2deee0e9 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 24 Mar 2015 17:54:09 -0700 Subject: [PATCH 04/18] Fixed RepeatTimer race condition --- common/repeat_timer.go | 61 ++++++---- types/store.go | 247 ----------------------------------------- 2 files changed, 41 insertions(+), 267 deletions(-) delete mode 100644 types/store.go diff --git a/common/repeat_timer.go b/common/repeat_timer.go index de9b71fae..e2a5e1834 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -1,44 +1,65 @@ package common import "time" +import "sync" /* RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period. It's good for keeping connections alive. */ type RepeatTimer struct { - Name string - Ch chan struct{} - quit chan struct{} - dur time.Duration - timer *time.Timer + Ch chan time.Time + + mtx sync.Mutex + name string + ticker *time.Ticker + quit chan struct{} + dur time.Duration } func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { - var ch = make(chan struct{}) - var quit = make(chan struct{}) - var t = &RepeatTimer{Name: name, Ch: ch, dur: dur, quit: quit} - t.timer = time.AfterFunc(dur, t.fireRoutine) + var t = &RepeatTimer{ + Ch: make(chan time.Time), + ticker: time.NewTicker(dur), + quit: make(chan struct{}), + name: name, + dur: dur, + } + go t.fireRoutine(t.ticker) return t } -func (t *RepeatTimer) fireRoutine() { - select { - case t.Ch <- struct{}{}: - t.timer.Reset(t.dur) - case <-t.quit: - // do nothing - default: - t.timer.Reset(t.dur) +func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { + for { + select { + case t_ := <-ticker.C: + t.Ch <- t_ + case <-t.quit: + return + } } } // Wait the duration again before firing. 
func (t *RepeatTimer) Reset() { - t.timer.Reset(t.dur) + t.mtx.Lock() // Lock + defer t.mtx.Unlock() + + if t.ticker != nil { + t.ticker.Stop() + } + t.ticker = time.NewTicker(t.dur) + go t.fireRoutine(t.ticker) } func (t *RepeatTimer) Stop() bool { - close(t.quit) - return t.timer.Stop() + t.mtx.Lock() // Lock + defer t.mtx.Unlock() + + exists := t.ticker != nil + if exists { + t.ticker.Stop() + t.ticker = nil + } + return exists } diff --git a/types/store.go b/types/store.go deleted file mode 100644 index 3afc8cb58..000000000 --- a/types/store.go +++ /dev/null @@ -1,247 +0,0 @@ -package types - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - - "github.com/tendermint/tendermint/binary" - . "github.com/tendermint/tendermint/common" - dbm "github.com/tendermint/tendermint/db" -) - -/* -Simple low level store for blocks. - -There are three types of information stored: - - BlockMeta: Meta information about each block - - Block part: Parts of each block, aggregated w/ PartSet - - Validation: The Validation part of each block, for gossiping commit votes - -Currently the commit signatures are duplicated in the Block parts as -well as the Validation. In the future this may change, perhaps by moving -the Validation data outside the Block. -*/ -type BlockStore struct { - height uint - db dbm.DB -} - -func NewBlockStore(db dbm.DB) *BlockStore { - bsjson := LoadBlockStoreStateJSON(db) - return &BlockStore{ - height: bsjson.Height, - db: db, - } -} - -// Height() returns the last known contiguous block height. -func (bs *BlockStore) Height() uint { - return bs.height -} - -func (bs *BlockStore) GetReader(key []byte) io.Reader { - bytez := bs.db.Get(key) - if bytez == nil { - return nil - } - return bytes.NewReader(bytez) -} - -func (bs *BlockStore) LoadBlock(height uint) *Block { - var n int64 - var err error - r := bs.GetReader(calcBlockMetaKey(height)) - if r == nil { - panic(Fmt("Block does not exist at height %v", height)) - } - meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta) - if err != nil { - panic(Fmt("Error reading block meta: %v", err)) - } - bytez := []byte{} - for i := uint(0); i < meta.Parts.Total; i++ { - part := bs.LoadBlockPart(height, i) - bytez = append(bytez, part.Bytes...) - } - block := binary.ReadBinary(&Block{}, bytes.NewReader(bytez), &n, &err).(*Block) - if err != nil { - panic(Fmt("Error reading block: %v", err)) - } - return block -} - -func (bs *BlockStore) LoadBlockPart(height uint, index uint) *Part { - var n int64 - var err error - r := bs.GetReader(calcBlockPartKey(height, index)) - if r == nil { - panic(Fmt("BlockPart does not exist for height %v index %v", height, index)) - } - part := binary.ReadBinary(&Part{}, r, &n, &err).(*Part) - if err != nil { - panic(Fmt("Error reading block part: %v", err)) - } - return part -} - -func (bs *BlockStore) LoadBlockMeta(height uint) *BlockMeta { - var n int64 - var err error - r := bs.GetReader(calcBlockMetaKey(height)) - if r == nil { - panic(Fmt("BlockMeta does not exist for height %v", height)) - } - meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta) - if err != nil { - panic(Fmt("Error reading block meta: %v", err)) - } - return meta -} - -// NOTE: the Commit-vote heights are for the block at `height-1` -// Since these are included in the subsequent block, the height -// is off by 1. 
-func (bs *BlockStore) LoadBlockValidation(height uint) *Validation { - var n int64 - var err error - r := bs.GetReader(calcBlockValidationKey(height)) - if r == nil { - panic(Fmt("BlockValidation does not exist for height %v", height)) - } - validation := binary.ReadBinary(&Validation{}, r, &n, &err).(*Validation) - if err != nil { - panic(Fmt("Error reading validation: %v", err)) - } - return validation -} - -// NOTE: the Commit-vote heights are for the block at `height` -func (bs *BlockStore) LoadSeenValidation(height uint) *Validation { - var n int64 - var err error - r := bs.GetReader(calcSeenValidationKey(height)) - if r == nil { - panic(Fmt("SeenValidation does not exist for height %v", height)) - } - validation := binary.ReadBinary(&Validation{}, r, &n, &err).(*Validation) - if err != nil { - panic(Fmt("Error reading validation: %v", err)) - } - return validation -} - -// blockParts: Must be parts of the block -// seenValidation: The +2/3 commits that were seen which finalized the height. -// If all the nodes restart after committing a block, -// we need this to reload the commits to catch-up nodes to the -// most recent height. Otherwise they'd stall at H-1. -// Also good to have to debug consensus issues & punish wrong-signers -// whose commits weren't included in the block. -func (bs *BlockStore) SaveBlock(block *Block, blockParts *PartSet, seenValidation *Validation) { - height := block.Height - if height != bs.height+1 { - panic(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height)) - } - if !blockParts.IsComplete() { - panic(Fmt("BlockStore can only save complete block part sets")) - } - - // Save block meta - meta := makeBlockMeta(block, blockParts) - metaBytes := binary.BinaryBytes(meta) - bs.db.Set(calcBlockMetaKey(height), metaBytes) - - // Save block parts - for i := uint(0); i < blockParts.Total(); i++ { - bs.saveBlockPart(height, i, blockParts.GetPart(i)) - } - - // Save block validation (duplicate and separate from the Block) - blockValidationBytes := binary.BinaryBytes(block.Validation) - bs.db.Set(calcBlockValidationKey(height), blockValidationBytes) - - // Save seen validation (seen +2/3 commits) - seenValidationBytes := binary.BinaryBytes(seenValidation) - bs.db.Set(calcSeenValidationKey(height), seenValidationBytes) - - // Save new BlockStoreStateJSON descriptor - BlockStoreStateJSON{Height: height}.Save(bs.db) - - // Done! - bs.height = height -} - -func (bs *BlockStore) saveBlockPart(height uint, index uint, part *Part) { - if height != bs.height+1 { - panic(Fmt("BlockStore can only save contiguous blocks. 
Wanted %v, got %v", bs.height+1, height)) - } - partBytes := binary.BinaryBytes(part) - bs.db.Set(calcBlockPartKey(height, index), partBytes) -} - -//----------------------------------------------------------------------------- - -type BlockMeta struct { - Hash []byte // The block hash - Header *Header // The block's Header - Parts PartSetHeader // The PartSetHeader, for transfer -} - -func makeBlockMeta(block *Block, blockParts *PartSet) *BlockMeta { - return &BlockMeta{ - Hash: block.Hash(), - Header: block.Header, - Parts: blockParts.Header(), - } -} - -//----------------------------------------------------------------------------- - -func calcBlockMetaKey(height uint) []byte { - return []byte(fmt.Sprintf("H:%v", height)) -} - -func calcBlockPartKey(height uint, partIndex uint) []byte { - return []byte(fmt.Sprintf("P:%v:%v", height, partIndex)) -} - -func calcBlockValidationKey(height uint) []byte { - return []byte(fmt.Sprintf("V:%v", height)) -} - -func calcSeenValidationKey(height uint) []byte { - return []byte(fmt.Sprintf("SV:%v", height)) -} - -//----------------------------------------------------------------------------- - -var blockStoreKey = []byte("blockStore") - -type BlockStoreStateJSON struct { - Height uint -} - -func (bsj BlockStoreStateJSON) Save(db dbm.DB) { - bytes, err := json.Marshal(bsj) - if err != nil { - panic(Fmt("Could not marshal state bytes: %v", err)) - } - db.Set(blockStoreKey, bytes) -} - -func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { - bytes := db.Get(blockStoreKey) - if bytes == nil { - return BlockStoreStateJSON{ - Height: 0, - } - } - bsj := BlockStoreStateJSON{} - err := json.Unmarshal(bytes, &bsj) - if err != nil { - panic(Fmt("Could not unmarshal bytes: %X", bytes)) - } - return bsj -} From 08a83aa9fbb08d85543710a5c88c7cc7d58bf230 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 00:15:18 -0700 Subject: [PATCH 05/18] Reactors can be stopped or started at any time. --- blockchain/pool.go | 2 +- blockchain/reactor.go | 226 ++++++++++++++++++++++++++++++++++++++++++ blockchain/store.go | 24 +---- consensus/reactor.go | 5 +- consensus/state.go | 5 +- consensus/test.go | 4 +- daemon/daemon.go | 23 +++-- p2p/connection.go | 1 + p2p/peer_set.go | 1 + p2p/switch.go | 123 ++++++++++++----------- rpc/rpc.go | 6 +- state/state_test.go | 1 - types/block_meta.go | 15 +++ 13 files changed, 340 insertions(+), 96 deletions(-) create mode 100644 blockchain/reactor.go create mode 100644 types/block_meta.go diff --git a/blockchain/pool.go b/blockchain/pool.go index 3e3313bfb..099594c12 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -149,7 +149,7 @@ func (bp *BlockPool) RedoRequest(height uint) { if request.block == nil { panic("Expected block to be non-nil") } - bp.removePeer(request.peerId) + bp.RemovePeer(request.peerId) // Lock on peersMtx. request.block = nil request.peerId = "" bp.numPending++ diff --git a/blockchain/reactor.go b/blockchain/reactor.go new file mode 100644 index 000000000..1aba782ee --- /dev/null +++ b/blockchain/reactor.go @@ -0,0 +1,226 @@ +package blockchain + +import ( + "bytes" + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/tendermint/tendermint/binary" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +const ( + BlockchainChannel = byte(0x40) + defaultChannelCapacity = 100 + defaultSleepIntervalMS = 500 +) + +// BlockchainReactor handles long-term catchup syncing. 
+type BlockchainReactor struct { + sw *p2p.Switch + store *BlockStore + pool *BlockPool + requestsCh chan BlockRequest + timeoutsCh chan string + lastBlock *types.Block + quit chan struct{} + started uint32 + stopped uint32 +} + +func NewBlockchainReactor(store *BlockStore) *BlockchainReactor { + requestsCh := make(chan BlockRequest, defaultChannelCapacity) + timeoutsCh := make(chan string, defaultChannelCapacity) + pool := NewBlockPool( + store.Height()+1, + requestsCh, + timeoutsCh, + ) + bcR := &BlockchainReactor{ + store: store, + pool: pool, + requestsCh: requestsCh, + timeoutsCh: timeoutsCh, + quit: make(chan struct{}), + started: 0, + stopped: 0, + } + return bcR +} + +// Implements Reactor +func (bcR *BlockchainReactor) Start(sw *p2p.Switch) { + if atomic.CompareAndSwapUint32(&bcR.started, 0, 1) { + log.Info("Starting BlockchainReactor") + bcR.sw = sw + bcR.pool.Start() + go bcR.poolRoutine() + } +} + +// Implements Reactor +func (bcR *BlockchainReactor) Stop() { + if atomic.CompareAndSwapUint32(&bcR.stopped, 0, 1) { + log.Info("Stopping BlockchainReactor") + close(bcR.quit) + bcR.pool.Stop() + } +} + +// Implements Reactor +func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ + &p2p.ChannelDescriptor{ + Id: BlockchainChannel, + Priority: 5, + SendQueueCapacity: 20, // Queue 20 blocks to send to a peer. + }, + } +} + +// Implements Reactor +func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) { + // Send peer our state. + peer.Send(BlockchainChannel, PeerStatusMessage{bcR.store.Height()}) +} + +// Implements Reactor +func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) { + // Remove peer from the pool. + bcR.pool.RemovePeer(peer.Key) +} + +// Implements Reactor +func (bcR *BlockchainReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) { + _, msg_, err := DecodeMessage(msgBytes) + if err != nil { + log.Warn("Error decoding message", "error", err) + return + } + log.Info("BlockchainReactor received message", "msg", msg_) + + switch msg := msg_.(type) { + case BlockRequestMessage: + log.Debug("Got BlockRequest", "msg", msg) + // Got a request for a block. Respond with block if we have it. + block := bcR.store.LoadBlock(msg.Height) + if block != nil { + msg := BlockResponseMessage{Block: block} + queued := src.TrySend(BlockchainChannel, msg) + if !queued { + // queue is full, just ignore. + } + } else { + // TODO peer is asking for things we don't have. + } + case BlockResponseMessage: + log.Debug("Got BlockResponse", "msg", msg) + // Got a block. + bcR.pool.AddBlock(msg.Block, src.Key) + case PeerStatusMessage: + log.Debug("Got PeerStatus", "msg", msg) + // Got a peer status. + bcR.pool.SetPeerHeight(src.Key, msg.Height) + default: + // Ignore unknown message + } +} + +func (bcR *BlockchainReactor) poolRoutine() { +FOR_LOOP: + for { + select { + case request := <-bcR.requestsCh: // chan BlockRequest + peer := bcR.sw.Peers().Get(request.PeerId) + if peer == nil { + // We can't fulfill the request. + continue FOR_LOOP + } + msg := BlockRequestMessage{request.Height} + queued := peer.TrySend(BlockchainChannel, msg) + if !queued { + // We couldn't queue the request. + time.Sleep(defaultSleepIntervalMS * time.Millisecond) + continue FOR_LOOP + } + case peerId := <-bcR.timeoutsCh: // chan string + // Peer timed out. 
+ peer := bcR.sw.Peers().Get(peerId) + bcR.sw.StopPeerForError(peer, errors.New("BlockchainReactor Timeout")) + case <-bcR.quit: + break FOR_LOOP + } + } +} + +func (bcR *BlockchainReactor) BroadcastStatus() error { + bcR.sw.Broadcast(BlockchainChannel, PeerStatusMessage{bcR.store.Height()}) + return nil +} + +//----------------------------------------------------------------------------- +// Messages + +const ( + msgTypeUnknown = byte(0x00) + msgTypeBlockRequest = byte(0x10) + msgTypeBlockResponse = byte(0x11) + msgTypePeerStatus = byte(0x20) +) + +// TODO: check for unnecessary extra bytes at the end. +func DecodeMessage(bz []byte) (msgType byte, msg interface{}, err error) { + n := new(int64) + msgType = bz[0] + r := bytes.NewReader(bz) + switch msgType { + case msgTypeBlockRequest: + msg = binary.ReadBinary(BlockRequestMessage{}, r, n, &err) + case msgTypeBlockResponse: + msg = binary.ReadBinary(BlockResponseMessage{}, r, n, &err) + case msgTypePeerStatus: + msg = binary.ReadBinary(PeerStatusMessage{}, r, n, &err) + default: + msg = nil + } + return +} + +//------------------------------------- + +type BlockRequestMessage struct { + Height uint +} + +func (m BlockRequestMessage) TypeByte() byte { return msgTypeBlockRequest } + +func (m BlockRequestMessage) String() string { + return fmt.Sprintf("[BlockRequestMessage %v]", m.Height) +} + +//------------------------------------- + +type BlockResponseMessage struct { + Block *types.Block +} + +func (m BlockResponseMessage) TypeByte() byte { return msgTypeBlockResponse } + +func (m BlockResponseMessage) String() string { + return fmt.Sprintf("[BlockResponseMessage %v]", m.Block.Height) +} + +//------------------------------------- + +type PeerStatusMessage struct { + Height uint +} + +func (m PeerStatusMessage) TypeByte() byte { return msgTypePeerStatus } + +func (m PeerStatusMessage) String() string { + return fmt.Sprintf("[PeerStatusMessage %v]", m.Height) +} diff --git a/blockchain/store.go b/blockchain/store.go index 8938273cb..f9d54cd23 100644 --- a/blockchain/store.go +++ b/blockchain/store.go @@ -57,7 +57,7 @@ func (bs *BlockStore) LoadBlock(height uint) *types.Block { if r == nil { panic(Fmt("Block does not exist at height %v", height)) } - meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta) + meta := binary.ReadBinary(&types.BlockMeta{}, r, &n, &err).(*types.BlockMeta) if err != nil { panic(Fmt("Error reading block meta: %v", err)) } @@ -87,14 +87,14 @@ func (bs *BlockStore) LoadBlockPart(height uint, index uint) *types.Part { return part } -func (bs *BlockStore) LoadBlockMeta(height uint) *BlockMeta { +func (bs *BlockStore) LoadBlockMeta(height uint) *types.BlockMeta { var n int64 var err error r := bs.GetReader(calcBlockMetaKey(height)) if r == nil { panic(Fmt("BlockMeta does not exist for height %v", height)) } - meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta) + meta := binary.ReadBinary(&types.BlockMeta{}, r, &n, &err).(*types.BlockMeta) if err != nil { panic(Fmt("Error reading block meta: %v", err)) } @@ -150,7 +150,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s } // Save block meta - meta := makeBlockMeta(block, blockParts) + meta := types.NewBlockMeta(block, blockParts) metaBytes := binary.BinaryBytes(meta) bs.db.Set(calcBlockMetaKey(height), metaBytes) @@ -184,22 +184,6 @@ func (bs *BlockStore) saveBlockPart(height uint, index uint, part *types.Part) { //----------------------------------------------------------------------------- -type BlockMeta struct 
{ - Hash []byte // The block hash - Header *types.Header // The block's Header - Parts types.PartSetHeader // The PartSetHeader, for transfer -} - -func makeBlockMeta(block *types.Block, blockParts *types.PartSet) *BlockMeta { - return &BlockMeta{ - Hash: block.Hash(), - Header: block.Header, - Parts: blockParts.Header(), - } -} - -//----------------------------------------------------------------------------- - func calcBlockMetaKey(height uint) []byte { return []byte(fmt.Sprintf("H:%v", height)) } diff --git a/consensus/reactor.go b/consensus/reactor.go index 6e6f33662..7abd75ec1 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -9,6 +9,7 @@ import ( "time" "github.com/tendermint/tendermint/binary" + bc "github.com/tendermint/tendermint/blockchain" . "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/p2p" @@ -34,11 +35,11 @@ type ConsensusReactor struct { stopped uint32 quit chan struct{} - blockStore *types.BlockStore + blockStore *bc.BlockStore conS *ConsensusState } -func NewConsensusReactor(consensusState *ConsensusState, blockStore *types.BlockStore) *ConsensusReactor { +func NewConsensusReactor(consensusState *ConsensusState, blockStore *bc.BlockStore) *ConsensusReactor { conR := &ConsensusReactor{ blockStore: blockStore, quit: make(chan struct{}), diff --git a/consensus/state.go b/consensus/state.go index 5fb4268d8..ad8b79cf7 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -62,6 +62,7 @@ import ( "github.com/tendermint/tendermint/account" "github.com/tendermint/tendermint/binary" + bc "github.com/tendermint/tendermint/blockchain" . "github.com/tendermint/tendermint/common" "github.com/tendermint/tendermint/config" . "github.com/tendermint/tendermint/consensus/types" @@ -234,7 +235,7 @@ type ConsensusState struct { stopped uint32 quit chan struct{} - blockStore *types.BlockStore + blockStore *bc.BlockStore mempoolReactor *mempl.MempoolReactor runActionCh chan RoundAction newStepCh chan *RoundState @@ -247,7 +248,7 @@ type ConsensusState struct { lastCommitVoteHeight uint // Last called commitVoteBlock() or saveCommitVoteBlock() on. } -func NewConsensusState(state *sm.State, blockStore *types.BlockStore, mempoolReactor *mempl.MempoolReactor) *ConsensusState { +func NewConsensusState(state *sm.State, blockStore *bc.BlockStore, mempoolReactor *mempl.MempoolReactor) *ConsensusState { cs := &ConsensusState{ quit: make(chan struct{}), blockStore: blockStore, diff --git a/consensus/test.go b/consensus/test.go index e86c6a075..397befa0d 100644 --- a/consensus/test.go +++ b/consensus/test.go @@ -3,15 +3,15 @@ package consensus import ( "sort" + bc "github.com/tendermint/tendermint/blockchain" dbm "github.com/tendermint/tendermint/db" mempl "github.com/tendermint/tendermint/mempool" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) func randConsensusState() (*ConsensusState, []*sm.PrivValidator) { state, _, privValidators := sm.RandGenesisState(20, false, 1000, 10, false, 1000) - blockStore := types.NewBlockStore(dbm.NewMemDB()) + blockStore := bc.NewBlockStore(dbm.NewMemDB()) mempool := mempl.NewMempool(state) mempoolReactor := mempl.NewMempoolReactor(mempool) cs := NewConsensusState(state, blockStore, mempoolReactor) diff --git a/daemon/daemon.go b/daemon/daemon.go index ccb2932de..89e5de697 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -4,6 +4,7 @@ import ( "os" "os/signal" + bc "github.com/tendermint/tendermint/blockchain" . 
"github.com/tendermint/tendermint/common" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" @@ -12,15 +13,15 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/rpc" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) type Node struct { lz []p2p.Listener sw *p2p.Switch book *p2p.AddrBook + blockStore *bc.BlockStore pexReactor *p2p.PEXReactor - blockStore *types.BlockStore + bcReactor *bc.BlockchainReactor mempoolReactor *mempl.MempoolReactor consensusState *consensus.ConsensusState consensusReactor *consensus.ConsensusReactor @@ -30,7 +31,7 @@ type Node struct { func NewNode() *Node { // Get BlockStore blockStoreDB := dbm.GetDB("blockstore") - blockStore := types.NewBlockStore(blockStoreDB) + blockStore := bc.NewBlockStore(blockStoreDB) // Get State stateDB := dbm.GetDB("state") @@ -53,6 +54,9 @@ func NewNode() *Node { book := p2p.NewAddrBook(config.App().GetString("AddrBookFile")) pexReactor := p2p.NewPEXReactor(book) + // Get BlockchainReactor + bcReactor := bc.NewBlockchainReactor(blockStore) + // Get MempoolReactor mempool := mempl.NewMempool(state.Copy()) mempoolReactor := mempl.NewMempoolReactor(mempool) @@ -64,14 +68,19 @@ func NewNode() *Node { consensusReactor.SetPrivValidator(privValidator) } - sw := p2p.NewSwitch([]p2p.Reactor{pexReactor, mempoolReactor, consensusReactor}) + sw := p2p.NewSwitch() sw.SetChainId(state.Hash(), config.App().GetString("Network")) + sw.AddReactor("PEX", pexReactor) + //sw.AddReactor("BLOCKCHAIN", bcReactor) + sw.AddReactor("MEMPOOL", mempoolReactor) + sw.AddReactor("CONSENSUS", consensusReactor) return &Node{ sw: sw, book: book, - pexReactor: pexReactor, blockStore: blockStore, + pexReactor: pexReactor, + bcReactor: bcReactor, mempoolReactor: mempoolReactor, consensusState: consensusState, consensusReactor: consensusReactor, @@ -85,13 +94,13 @@ func (n *Node) Start() { go n.inboundConnectionRoutine(l) } n.book.Start() - n.sw.Start() + n.sw.StartAll() } func (n *Node) Stop() { log.Info("Stopping Node") // TODO: gracefully disconnect from peers. - n.sw.Stop() + n.sw.StopAll() n.book.Stop() } diff --git a/p2p/connection.go b/p2p/connection.go index 578dea1b3..0e26480f1 100644 --- a/p2p/connection.go +++ b/p2p/connection.go @@ -468,6 +468,7 @@ type Channel struct { } func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel { + desc.FillDefaults() if desc.Priority <= 0 { panic("Channel default priority must be a postive integer") } diff --git a/p2p/peer_set.go b/p2p/peer_set.go index f365cd8ea..23f49c513 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -7,6 +7,7 @@ import ( // IPeerSet has a (immutable) subset of the methods of PeerSet. type IPeerSet interface { Has(key string) bool + Get(key string) *Peer List() []*Peer Size() int } diff --git a/p2p/switch.go b/p2p/switch.go index 12267fa10..27c61cb46 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -29,89 +29,100 @@ or more `Channels`. So while sending outgoing messages is typically performed o incoming messages are received on the reactor. 
*/ type Switch struct { - reactors []Reactor + chainId string + reactors map[string]Reactor chDescs []*ChannelDescriptor reactorsByCh map[byte]Reactor peers *PeerSet dialing *CMap - listeners *CMap // listenerName -> chan interface{} - quit chan struct{} - started uint32 - stopped uint32 - chainId string + listeners *CMap // listenerName -> chan interface{} + running uint32 // atomic } var ( - ErrSwitchStopped = errors.New("Switch already stopped") ErrSwitchDuplicatePeer = errors.New("Duplicate peer") + ErrSwitchStopped = errors.New("Switch stopped") ) const ( peerDialTimeoutSeconds = 3 ) -func NewSwitch(reactors []Reactor) *Switch { - - // Validate the reactors. no two reactors can share the same channel. - chDescs := []*ChannelDescriptor{} - reactorsByCh := make(map[byte]Reactor) - for _, reactor := range reactors { - reactorChannels := reactor.GetChannels() - for _, chDesc := range reactorChannels { - chId := chDesc.Id - if reactorsByCh[chId] != nil { - panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chId, reactorsByCh[chId], reactor)) - } - chDescs = append(chDescs, chDesc) - reactorsByCh[chId] = reactor - } - } +func NewSwitch() *Switch { sw := &Switch{ - reactors: reactors, - chDescs: chDescs, - reactorsByCh: reactorsByCh, + chainId: "", + reactors: make(map[string]Reactor), + chDescs: make([]*ChannelDescriptor, 0), + reactorsByCh: make(map[byte]Reactor), peers: NewPeerSet(), dialing: NewCMap(), listeners: NewCMap(), - quit: make(chan struct{}), - stopped: 0, + running: 0, } return sw } -func (sw *Switch) Start() { - if atomic.CompareAndSwapUint32(&sw.started, 0, 1) { - log.Info("Starting Switch") - for _, reactor := range sw.reactors { - reactor.Start(sw) +func (sw *Switch) SetChainId(hash []byte, network string) { + sw.chainId = hex.EncodeToString(hash) + "-" + network +} + +func (sw *Switch) AddReactor(name string, reactor Reactor) { + // Validate the reactor. + // No two reactors can share the same channel. + reactorChannels := reactor.GetChannels() + for _, chDesc := range reactorChannels { + chId := chDesc.Id + if sw.reactorsByCh[chId] != nil { + panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chId, sw.reactorsByCh[chId], reactor)) } + sw.chDescs = append(sw.chDescs, chDesc) + sw.reactorsByCh[chId] = reactor + } + sw.reactors[name] = reactor + time.Sleep(1 * time.Second) +} + +func (sw *Switch) StartReactor(name string) { + atomic.StoreUint32(&sw.running, 1) + sw.reactors[name].Start(sw) +} + +// Convenience function +func (sw *Switch) StartAll() { + atomic.StoreUint32(&sw.running, 1) + for _, reactor := range sw.reactors { + reactor.Start(sw) } } -func (sw *Switch) Stop() { - if atomic.CompareAndSwapUint32(&sw.stopped, 0, 1) { - log.Info("Stopping Switch") - close(sw.quit) - // Stop each peer. - for _, peer := range sw.peers.List() { - peer.stop() - } - sw.peers = NewPeerSet() - // Stop all reactors. - for _, reactor := range sw.reactors { - reactor.Stop() - } +func (sw *Switch) StopReactor(name string) { + sw.reactors[name].Stop() +} + +// Convenience function +// Not goroutine safe +func (sw *Switch) StopAll() { + atomic.StoreUint32(&sw.running, 0) + // Stop each peer. + for _, peer := range sw.peers.List() { + peer.stop() + } + sw.peers = NewPeerSet() + // Stop all reactors. 
+ for _, reactor := range sw.reactors { + reactor.Stop() } } -func (sw *Switch) Reactors() []Reactor { +// Not goroutine safe +func (sw *Switch) Reactors() map[string]Reactor { return sw.reactors } func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, error) { - if atomic.LoadUint32(&sw.stopped) == 1 { + if atomic.LoadUint32(&sw.running) == 0 { return nil, ErrSwitchStopped } @@ -125,12 +136,12 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er return nil, ErrSwitchDuplicatePeer } - // Start the peer - go peer.start() - // Notify listeners. sw.doAddPeer(peer) + // Start the peer + go peer.start() + // Send handshake msg := &pexHandshakeMessage{ChainId: sw.chainId} peer.Send(PexChannel, msg) @@ -139,7 +150,7 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er } func (sw *Switch) DialPeerWithAddress(addr *NetAddress) (*Peer, error) { - if atomic.LoadUint32(&sw.stopped) == 1 { + if atomic.LoadUint32(&sw.running) == 0 { return nil, ErrSwitchStopped } @@ -168,7 +179,7 @@ func (sw *Switch) IsDialing(addr *NetAddress) bool { // trying to send for defaultSendTimeoutSeconds. Returns a channel // which receives success values for each attempted send (false if times out) func (sw *Switch) Broadcast(chId byte, msg interface{}) chan bool { - if atomic.LoadUint32(&sw.stopped) == 1 { + if atomic.LoadUint32(&sw.running) == 0 { return nil } successChan := make(chan bool, len(sw.peers.List())) @@ -223,16 +234,12 @@ func (sw *Switch) StopPeerGracefully(peer *Peer) { sw.doRemovePeer(peer, nil) } -func (sw *Switch) SetChainId(hash []byte, network string) { - sw.chainId = hex.EncodeToString(hash) + "-" + network -} - func (sw *Switch) IsListening() bool { return sw.listeners.Size() > 0 } func (sw *Switch) doAddPeer(peer *Peer) { - for _, reactor := range sw.reactors { + for name, reactor := range sw.reactors { reactor.AddPeer(peer) } } diff --git a/rpc/rpc.go b/rpc/rpc.go index 94631249c..8cf905d3a 100644 --- a/rpc/rpc.go +++ b/rpc/rpc.go @@ -1,18 +1,18 @@ package rpc import ( + bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/consensus" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) -var blockStore *types.BlockStore +var blockStore *bc.BlockStore var consensusState *consensus.ConsensusState var mempoolReactor *mempl.MempoolReactor var p2pSwitch *p2p.Switch -func SetRPCBlockStore(bs *types.BlockStore) { +func SetRPCBlockStore(bs *bc.BlockStore) { blockStore = bs } diff --git a/state/state_test.go b/state/state_test.go index 71efd80f2..0d2c963bb 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -2,7 +2,6 @@ package state import ( "github.com/tendermint/tendermint/account" - "github.com/tendermint/tendermint/binary" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" diff --git a/types/block_meta.go b/types/block_meta.go new file mode 100644 index 000000000..3e9ba8f91 --- /dev/null +++ b/types/block_meta.go @@ -0,0 +1,15 @@ +package types + +type BlockMeta struct { + Hash []byte // The block hash + Header *Header // The block's Header + Parts PartSetHeader // The PartSetHeader, for transfer +} + +func NewBlockMeta(block *Block, blockParts *PartSet) *BlockMeta { + return &BlockMeta{ + Hash: block.Hash(), + Header: block.Header, + Parts: blockParts.Header(), + } +} From aed4bbf0f0e236b28b76d9d456e91cb01de60103 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 
2015 02:36:59 -0700 Subject: [PATCH 06/18] Fix switch tests --- daemon/daemon.go | 4 +-- p2p/pex_reactor.go | 5 ++-- p2p/switch.go | 63 +++++++++++++++++++--------------------------- p2p/switch_test.go | 52 ++++++++++++++++++-------------------- 4 files changed, 56 insertions(+), 68 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 89e5de697..a4b573bee 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -94,13 +94,13 @@ func (n *Node) Start() { go n.inboundConnectionRoutine(l) } n.book.Start() - n.sw.StartAll() + n.sw.StartReactors() } func (n *Node) Stop() { log.Info("Stopping Node") // TODO: gracefully disconnect from peers. - n.sw.StopAll() + n.sw.Stop() n.book.Stop() } diff --git a/p2p/pex_reactor.go b/p2p/pex_reactor.go index de742645f..926859316 100644 --- a/p2p/pex_reactor.go +++ b/p2p/pex_reactor.go @@ -62,8 +62,9 @@ func (pexR *PEXReactor) Stop() { func (pexR *PEXReactor) GetChannels() []*ChannelDescriptor { return []*ChannelDescriptor{ &ChannelDescriptor{ - Id: PexChannel, - Priority: 1, + Id: PexChannel, + Priority: 1, + SendQueueCapacity: 10, }, } } diff --git a/p2p/switch.go b/p2p/switch.go index 27c61cb46..5216f77dd 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "net" - "sync/atomic" "time" . "github.com/tendermint/tendermint/common" @@ -35,13 +34,11 @@ type Switch struct { reactorsByCh map[byte]Reactor peers *PeerSet dialing *CMap - listeners *CMap // listenerName -> chan interface{} - running uint32 // atomic + listeners *CMap // listenerName -> chan interface{} } var ( ErrSwitchDuplicatePeer = errors.New("Duplicate peer") - ErrSwitchStopped = errors.New("Switch stopped") ) const ( @@ -58,17 +55,18 @@ func NewSwitch() *Switch { peers: NewPeerSet(), dialing: NewCMap(), listeners: NewCMap(), - running: 0, } return sw } +// Not goroutine safe. func (sw *Switch) SetChainId(hash []byte, network string) { sw.chainId = hex.EncodeToString(hash) + "-" + network } -func (sw *Switch) AddReactor(name string, reactor Reactor) { +// Not goroutine safe. +func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { // Validate the reactor. // No two reactors can share the same channel. reactorChannels := reactor.GetChannels() @@ -81,51 +79,49 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) { sw.reactorsByCh[chId] = reactor } sw.reactors[name] = reactor - time.Sleep(1 * time.Second) + return reactor } -func (sw *Switch) StartReactor(name string) { - atomic.StoreUint32(&sw.running, 1) - sw.reactors[name].Start(sw) +func (sw *Switch) Reactor(name string) Reactor { + return sw.reactors[name] } // Convenience function -func (sw *Switch) StartAll() { - atomic.StoreUint32(&sw.running, 1) +func (sw *Switch) StartReactors() { for _, reactor := range sw.reactors { reactor.Start(sw) } } -func (sw *Switch) StopReactor(name string) { - sw.reactors[name].Stop() -} - // Convenience function -// Not goroutine safe -func (sw *Switch) StopAll() { - atomic.StoreUint32(&sw.running, 0) - // Stop each peer. - for _, peer := range sw.peers.List() { - peer.stop() - } - sw.peers = NewPeerSet() +func (sw *Switch) StopReactors() { // Stop all reactors. for _, reactor := range sw.reactors { reactor.Stop() } } -// Not goroutine safe +// Convenience function +func (sw *Switch) StopPeers() { + // Stop each peer. + for _, peer := range sw.peers.List() { + peer.stop() + } + sw.peers = NewPeerSet() +} + +// Convenience function +func (sw *Switch) Stop() { + sw.StopPeers() + sw.StopReactors() +} + +// Not goroutine safe to modify. 
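The Switch now keeps a named registry of reactors instead of a constructor-time slice: each reactor is added individually, checked for channel-id collisions, and can be looked up, started and stopped by name. Below is a heavily simplified, self-contained sketch of that shape; the types are stand-ins (the real Reactor.Start takes the *Switch, and the PEX channel id used here is invented, while 0x40 matches BlockchainChannel in the patch).

package main

import "fmt"

// Simplified stand-ins; the real types live in the p2p package.
type ChannelDescriptor struct{ Id byte }

type Reactor interface {
    GetChannels() []*ChannelDescriptor
    Start()
    Stop()
}

type Switch struct {
    reactors     map[string]Reactor
    reactorsByCh map[byte]Reactor
}

func NewSwitch() *Switch {
    return &Switch{
        reactors:     make(map[string]Reactor),
        reactorsByCh: make(map[byte]Reactor),
    }
}

// AddReactor registers a reactor under a name, refusing to let two
// reactors claim the same channel id, and returns it so calls can be
// chained with Start().
func (sw *Switch) AddReactor(name string, r Reactor) Reactor {
    for _, chDesc := range r.GetChannels() {
        if sw.reactorsByCh[chDesc.Id] != nil {
            panic(fmt.Sprintf("channel %X claimed by two reactors", chDesc.Id))
        }
        sw.reactorsByCh[chDesc.Id] = r
    }
    sw.reactors[name] = r
    return r
}

func (sw *Switch) Reactor(name string) Reactor { return sw.reactors[name] }

func (sw *Switch) StopReactors() {
    for _, r := range sw.reactors {
        r.Stop()
    }
}

// A do-nothing reactor for the example.
type noopReactor struct{ chs []*ChannelDescriptor }

func (r *noopReactor) GetChannels() []*ChannelDescriptor { return r.chs }
func (r *noopReactor) Start()                            {}
func (r *noopReactor) Stop()                             {}

func main() {
    sw := NewSwitch()
    sw.AddReactor("PEX", &noopReactor{[]*ChannelDescriptor{{Id: 0x00}}}).Start()
    sw.AddReactor("BLOCKCHAIN", &noopReactor{[]*ChannelDescriptor{{Id: 0x40}}}).Start()
    fmt.Println(sw.Reactor("BLOCKCHAIN") != nil) // look up a reactor by name
    sw.StopReactors()
}

Registering by name is what later lets one reactor find another (for example, the blockchain reactor fetching "CONSENSUS") without the Switch knowing anything about either.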
func (sw *Switch) Reactors() map[string]Reactor { return sw.reactors } func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, error) { - if atomic.LoadUint32(&sw.running) == 0 { - return nil, ErrSwitchStopped - } - peer := newPeer(conn, outbound, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError) // Add the peer to .peers @@ -150,10 +146,6 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er } func (sw *Switch) DialPeerWithAddress(addr *NetAddress) (*Peer, error) { - if atomic.LoadUint32(&sw.running) == 0 { - return nil, ErrSwitchStopped - } - log.Debug("Dialing address", "address", addr) sw.dialing.Set(addr.String(), addr) conn, err := addr.DialTimeout(peerDialTimeoutSeconds * time.Second) @@ -179,9 +171,6 @@ func (sw *Switch) IsDialing(addr *NetAddress) bool { // trying to send for defaultSendTimeoutSeconds. Returns a channel // which receives success values for each attempted send (false if times out) func (sw *Switch) Broadcast(chId byte, msg interface{}) chan bool { - if atomic.LoadUint32(&sw.running) == 0 { - return nil - } successChan := make(chan bool, len(sw.peers.List())) log.Debug("Broadcast", "channel", chId, "msg", msg) for _, peer := range sw.peers.List() { @@ -239,7 +228,7 @@ func (sw *Switch) IsListening() bool { } func (sw *Switch) doAddPeer(peer *Peer) { - for name, reactor := range sw.reactors { + for _, reactor := range sw.reactors { reactor.AddPeer(peer) } } diff --git a/p2p/switch_test.go b/p2p/switch_test.go index a260df27a..f486230f5 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -68,12 +68,12 @@ func (tr *TestReactor) Receive(chId byte, peer *Peer, msgBytes []byte) { //----------------------------------------------------------------------------- -// convenience method for creating two switches connected to each other. -func makeSwitchPair(t testing.TB, reactorsGenerator func() []Reactor) (*Switch, *Switch) { +// convenience method for creating bar switches connected to each other. +func makeSwitchPair(t testing.TB, initSwitch func(*Switch) *Switch) (*Switch, *Switch) { - // Create two switches that will be interconnected. - s1 := NewSwitch(reactorsGenerator()) - s2 := NewSwitch(reactorsGenerator()) + // Create bar switches that will be interconnected. 
+ s1 := initSwitch(NewSwitch()) + s2 := initSwitch(NewSwitch()) // Create a listener for s1 l := NewDefaultListener("tcp", ":8001", true) @@ -104,18 +104,17 @@ func makeSwitchPair(t testing.TB, reactorsGenerator func() []Reactor) (*Switch, } func TestSwitches(t *testing.T) { - s1, s2 := makeSwitchPair(t, func() []Reactor { - // Make two reactors of two channels each - reactors := make([]Reactor, 2) - reactors[0] = NewTestReactor([]*ChannelDescriptor{ + s1, s2 := makeSwitchPair(t, func(sw *Switch) *Switch { + // Make bar reactors of bar channels each + sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{ &ChannelDescriptor{Id: byte(0x00), Priority: 10}, &ChannelDescriptor{Id: byte(0x01), Priority: 10}, - }, true) - reactors[1] = NewTestReactor([]*ChannelDescriptor{ + }, true)).Start(sw) // Start the reactor + sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{ &ChannelDescriptor{Id: byte(0x02), Priority: 10}, &ChannelDescriptor{Id: byte(0x03), Priority: 10}, - }, true) - return reactors + }, true)).Start(sw) // Start the reactor + return sw }) defer s1.Stop() defer s2.Stop() @@ -129,8 +128,8 @@ func TestSwitches(t *testing.T) { } ch0Msg := "channel zero" - ch1Msg := "channel one" - ch2Msg := "channel two" + ch1Msg := "channel foo" + ch2Msg := "channel bar" s1.Broadcast(byte(0x00), ch0Msg) s1.Broadcast(byte(0x01), ch1Msg) @@ -140,7 +139,7 @@ func TestSwitches(t *testing.T) { time.Sleep(5000 * time.Millisecond) // Check message on ch0 - ch0Msgs := s2.Reactors()[0].(*TestReactor).msgsReceived[byte(0x00)] + ch0Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x00)] if len(ch0Msgs) != 2 { t.Errorf("Expected to have received 1 message in ch0") } @@ -149,7 +148,7 @@ func TestSwitches(t *testing.T) { } // Check message on ch1 - ch1Msgs := s2.Reactors()[0].(*TestReactor).msgsReceived[byte(0x01)] + ch1Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x01)] if len(ch1Msgs) != 1 { t.Errorf("Expected to have received 1 message in ch1") } @@ -158,7 +157,7 @@ func TestSwitches(t *testing.T) { } // Check message on ch2 - ch2Msgs := s2.Reactors()[1].(*TestReactor).msgsReceived[byte(0x02)] + ch2Msgs := s2.Reactor("bar").(*TestReactor).msgsReceived[byte(0x02)] if len(ch2Msgs) != 1 { t.Errorf("Expected to have received 1 message in ch2") } @@ -172,18 +171,17 @@ func BenchmarkSwitches(b *testing.B) { b.StopTimer() - s1, s2 := makeSwitchPair(b, func() []Reactor { - // Make two reactors of two channels each - reactors := make([]Reactor, 2) - reactors[0] = NewTestReactor([]*ChannelDescriptor{ + s1, s2 := makeSwitchPair(b, func(sw *Switch) *Switch { + // Make bar reactors of bar channels each + sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{ &ChannelDescriptor{Id: byte(0x00), Priority: 10}, &ChannelDescriptor{Id: byte(0x01), Priority: 10}, - }, false) - reactors[1] = NewTestReactor([]*ChannelDescriptor{ + }, false)) + sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{ &ChannelDescriptor{Id: byte(0x02), Priority: 10}, &ChannelDescriptor{Id: byte(0x03), Priority: 10}, - }, false) - return reactors + }, false)) + return sw }) defer s1.Stop() defer s2.Stop() @@ -194,7 +192,7 @@ func BenchmarkSwitches(b *testing.B) { numSuccess, numFailure := 0, 0 - // Send random message from one channel to another + // Send random message from foo channel to another for i := 0; i < b.N; i++ { chId := byte(i % 4) successChan := s1.Broadcast(chId, "test data") From cebfae60c7d1c30f1a3764eccb27b93fd9c4fa97 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 11:33:39 -0700 
Subject: [PATCH 07/18] BlockchainReactor syncs first before ConsensusReactor. --- blockchain/pool.go | 214 ++++++++++++++++++++--------------------- blockchain/reactor.go | 60 +++++++++++- consensus/reactor.go | 2 + consensus/state.go | 2 + consensus/vote_set.go | 2 +- daemon/daemon.go | 10 +- p2p/peer_set.go | 7 +- state/validator_set.go | 47 +++++++++ types/block.go | 4 +- 9 files changed, 232 insertions(+), 116 deletions(-) diff --git a/blockchain/pool.go b/blockchain/pool.go index 099594c12..192165bf7 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -60,32 +60,32 @@ func NewBlockPool(start uint, requestsCh chan<- BlockRequest, timeoutsCh chan<- } } -func (bp *BlockPool) Start() { - if atomic.CompareAndSwapInt32(&bp.running, 0, 1) { +func (pool *BlockPool) Start() { + if atomic.CompareAndSwapInt32(&pool.running, 0, 1) { log.Info("Starting BlockPool") - go bp.run() + go pool.run() } } -func (bp *BlockPool) Stop() { - if atomic.CompareAndSwapInt32(&bp.running, 1, 0) { +func (pool *BlockPool) Stop() { + if atomic.CompareAndSwapInt32(&pool.running, 1, 0) { log.Info("Stopping BlockPool") - bp.repeater.Stop() + pool.repeater.Stop() } } -func (bp *BlockPool) IsRunning() bool { - return atomic.LoadInt32(&bp.running) == 1 +func (pool *BlockPool) IsRunning() bool { + return atomic.LoadInt32(&pool.running) == 1 } // Run spawns requests as needed. -func (bp *BlockPool) run() { +func (pool *BlockPool) run() { RUN_LOOP: for { - if atomic.LoadInt32(&bp.running) == 0 { + if atomic.LoadInt32(&pool.running) == 0 { break RUN_LOOP } - height, numPending, numTotal := bp.GetStatus() + height, numPending, numTotal := pool.GetStatus() log.Debug("BlockPool.run", "height", height, "numPending", numPending, "numTotal", numTotal) if numPending >= maxPendingRequests { @@ -96,91 +96,91 @@ RUN_LOOP: time.Sleep(requestIntervalMS * time.Millisecond) } else { // request for more blocks. - height := bp.nextHeight() - bp.makeRequest(height) + height := pool.nextHeight() + pool.makeRequest(height) } } } -func (bp *BlockPool) GetStatus() (uint, int32, int32) { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) GetStatus() (uint, int32, int32) { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - return bp.height, bp.numPending, bp.numTotal + return pool.height, pool.numPending, pool.numTotal } // We need to see the second block's Validation to validate the first block. // So we peek two blocks at a time. -func (bp *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - if r := bp.requests[bp.height]; r != nil { + if r := pool.requests[pool.height]; r != nil { first = r.block } - if r := bp.requests[bp.height+1]; r != nil { + if r := pool.requests[pool.height+1]; r != nil { second = r.block } return } -// Pop the first block at bp.height +// Pop the first block at pool.height // It must have been validated by 'second'.Validation from PeekTwoBlocks(). 
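The pool's concurrency model is two independent mutexes, requestsMtx over the height-indexed request map and its counters, and peersMtx over peer bookkeeping, with each exported method taking only the lock it needs. A pared-down sketch of that layout follows; it keeps just two representative accessors, reduces the structs to the fields those accessors touch, and its guard conditions in AddBlock are the sketch's own simplification rather than a copy of the patch.

package main

import (
    "fmt"
    "sync"
)

type Block struct{ Height uint }

type bpRequest struct {
    peerId string
    block  *Block
}

type bpPeer struct {
    id     string
    height uint
}

// BlockPool guards its two concerns with separate locks so block
// handling never waits on peer bookkeeping, and vice versa.
type BlockPool struct {
    requestsMtx sync.Mutex
    requests    map[uint]*bpRequest
    height      uint // lowest height not yet popped
    numPending  int32

    peersMtx sync.Mutex
    peers    map[string]*bpPeer
}

// AddBlock files a received block under its request, ignoring blocks
// that were never requested, already filled, or came from a peer we
// did not ask (the sketch's guard, chosen for illustration).
func (pool *BlockPool) AddBlock(block *Block, peerId string) {
    pool.requestsMtx.Lock()
    defer pool.requestsMtx.Unlock()
    request := pool.requests[block.Height]
    if request == nil || request.block != nil || request.peerId != peerId {
        return
    }
    request.block = block
    pool.numPending--
}

// SetPeerHeight records a peer's reported height, creating it if new.
func (pool *BlockPool) SetPeerHeight(peerId string, height uint) {
    pool.peersMtx.Lock()
    defer pool.peersMtx.Unlock()
    peer := pool.peers[peerId]
    if peer == nil {
        peer = &bpPeer{id: peerId}
        pool.peers[peerId] = peer
    }
    peer.height = height
}

func main() {
    pool := &BlockPool{
        requests:   map[uint]*bpRequest{42: {peerId: "peer1"}},
        peers:      make(map[string]*bpPeer),
        height:     42,
        numPending: 1,
    }
    pool.SetPeerHeight("peer1", 100)
    pool.AddBlock(&Block{Height: 42}, "peer1")
    fmt.Println("pending:", pool.numPending) // 0
}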
-func (bp *BlockPool) PopRequest() { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) PopRequest() { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - if r := bp.requests[bp.height]; r == nil || r.block == nil { + if r := pool.requests[pool.height]; r == nil || r.block == nil { panic("PopRequest() requires a valid block") } - delete(bp.requests, bp.height) - bp.height++ - bp.numTotal-- + delete(pool.requests, pool.height) + pool.height++ + pool.numTotal-- } -// Invalidates the block at bp.height. +// Invalidates the block at pool.height. // Remove the peer and request from others. -func (bp *BlockPool) RedoRequest(height uint) { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) RedoRequest(height uint) { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - request := bp.requests[height] + request := pool.requests[height] if request.block == nil { panic("Expected block to be non-nil") } - bp.RemovePeer(request.peerId) // Lock on peersMtx. + pool.RemovePeer(request.peerId) // Lock on peersMtx. request.block = nil request.peerId = "" - bp.numPending++ + pool.numPending++ - go requestRoutine(bp, height) + go requestRoutine(pool, height) } -func (bp *BlockPool) hasBlock(height uint) bool { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) hasBlock(height uint) bool { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - request := bp.requests[height] + request := pool.requests[height] return request != nil && request.block != nil } -func (bp *BlockPool) setPeerForRequest(height uint, peerId string) { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) setPeerForRequest(height uint, peerId string) { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - request := bp.requests[height] + request := pool.requests[height] if request == nil { return } request.peerId = peerId } -func (bp *BlockPool) AddBlock(block *types.Block, peerId string) { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) AddBlock(block *types.Block, peerId string) { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - request := bp.requests[block.Height] + request := pool.requests[block.Height] if request == nil { return } @@ -191,23 +191,23 @@ func (bp *BlockPool) AddBlock(block *types.Block, peerId string) { return } request.block = block - bp.numPending-- + pool.numPending-- } -func (bp *BlockPool) getPeer(peerId string) *bpPeer { - bp.peersMtx.Lock() // Lock - defer bp.peersMtx.Unlock() +func (pool *BlockPool) getPeer(peerId string) *bpPeer { + pool.peersMtx.Lock() // Lock + defer pool.peersMtx.Unlock() - peer := bp.peers[peerId] + peer := pool.peers[peerId] return peer } // Sets the peer's blockchain height. 
-func (bp *BlockPool) SetPeerHeight(peerId string, height uint) { - bp.peersMtx.Lock() // Lock - defer bp.peersMtx.Unlock() +func (pool *BlockPool) SetPeerHeight(peerId string, height uint) { + pool.peersMtx.Lock() // Lock + defer pool.peersMtx.Unlock() - peer := bp.peers[peerId] + peer := pool.peers[peerId] if peer != nil { peer.height = height } else { @@ -216,24 +216,24 @@ func (bp *BlockPool) SetPeerHeight(peerId string, height uint) { id: peerId, numRequests: 0, } - bp.peers[peerId] = peer + pool.peers[peerId] = peer } } -func (bp *BlockPool) RemovePeer(peerId string) { - bp.peersMtx.Lock() // Lock - defer bp.peersMtx.Unlock() +func (pool *BlockPool) RemovePeer(peerId string) { + pool.peersMtx.Lock() // Lock + defer pool.peersMtx.Unlock() - delete(bp.peers, peerId) + delete(pool.peers, peerId) } // Pick an available peer with at least the given minHeight. // If no peers are available, returns nil. -func (bp *BlockPool) pickIncrAvailablePeer(minHeight uint) *bpPeer { - bp.peersMtx.Lock() - defer bp.peersMtx.Unlock() +func (pool *BlockPool) pickIncrAvailablePeer(minHeight uint) *bpPeer { + pool.peersMtx.Lock() + defer pool.peersMtx.Unlock() - for _, peer := range bp.peers { + for _, peer := range pool.peers { if peer.numRequests >= maxRequestsPerPeer { continue } @@ -247,69 +247,69 @@ func (bp *BlockPool) pickIncrAvailablePeer(minHeight uint) *bpPeer { return nil } -func (bp *BlockPool) decrPeer(peerId string) { - bp.peersMtx.Lock() - defer bp.peersMtx.Unlock() +func (pool *BlockPool) decrPeer(peerId string) { + pool.peersMtx.Lock() + defer pool.peersMtx.Unlock() - peer := bp.peers[peerId] + peer := pool.peers[peerId] if peer == nil { return } peer.numRequests-- } -func (bp *BlockPool) nextHeight() uint { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) nextHeight() uint { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() - return bp.height + uint(bp.numTotal) + return pool.height + uint(pool.numTotal) } -func (bp *BlockPool) makeRequest(height uint) { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) makeRequest(height uint) { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() request := &bpRequest{ height: height, peerId: "", block: nil, } - bp.requests[height] = request + pool.requests[height] = request - nextHeight := bp.height + uint(bp.numTotal) + nextHeight := pool.height + uint(pool.numTotal) if nextHeight == height { - bp.numTotal++ - bp.numPending++ + pool.numTotal++ + pool.numPending++ } - go requestRoutine(bp, height) + go requestRoutine(pool, height) } -func (bp *BlockPool) sendRequest(height uint, peerId string) { - if atomic.LoadInt32(&bp.running) == 0 { +func (pool *BlockPool) sendRequest(height uint, peerId string) { + if atomic.LoadInt32(&pool.running) == 0 { return } - bp.requestsCh <- BlockRequest{height, peerId} + pool.requestsCh <- BlockRequest{height, peerId} } -func (bp *BlockPool) sendTimeout(peerId string) { - if atomic.LoadInt32(&bp.running) == 0 { +func (pool *BlockPool) sendTimeout(peerId string) { + if atomic.LoadInt32(&pool.running) == 0 { return } - bp.timeoutsCh <- peerId + pool.timeoutsCh <- peerId } -func (bp *BlockPool) debug() string { - bp.requestsMtx.Lock() // Lock - defer bp.requestsMtx.Unlock() +func (pool *BlockPool) debug() string { + pool.requestsMtx.Lock() // Lock + defer pool.requestsMtx.Unlock() str := "" - for h := bp.height; h < bp.height+uint(bp.numTotal); h++ { - if bp.requests[h] == nil { + for h := pool.height; h < 
pool.height+uint(pool.numTotal); h++ { + if pool.requests[h] == nil { str += Fmt("H(%v):X ", h) } else { str += Fmt("H(%v):", h) - str += Fmt("B?(%v) ", bp.requests[h].block != nil) + str += Fmt("B?(%v) ", pool.requests[h].block != nil) } } return str @@ -333,15 +333,15 @@ type bpRequest struct { // Responsible for making more requests as necessary // Returns when a block is found (e.g. AddBlock() is called) -func requestRoutine(bp *BlockPool, height uint) { +func requestRoutine(pool *BlockPool, height uint) { for { var peer *bpPeer = nil PICK_LOOP: for { - if !bp.IsRunning() { + if !pool.IsRunning() { return } - peer = bp.pickIncrAvailablePeer(height) + peer = pool.pickIncrAvailablePeer(height) if peer == nil { time.Sleep(requestIntervalMS * time.Millisecond) continue PICK_LOOP @@ -349,24 +349,24 @@ func requestRoutine(bp *BlockPool, height uint) { break PICK_LOOP } - bp.setPeerForRequest(height, peer.id) + pool.setPeerForRequest(height, peer.id) for try := 0; try < maxTries; try++ { - bp.sendRequest(height, peer.id) + pool.sendRequest(height, peer.id) time.Sleep(requestTimeoutSeconds * time.Second) - if bp.hasBlock(height) { - bp.decrPeer(peer.id) + if pool.hasBlock(height) { + pool.decrPeer(peer.id) return } - bpHeight, _, _ := bp.GetStatus() + bpHeight, _, _ := pool.GetStatus() if height < bpHeight { - bp.decrPeer(peer.id) + pool.decrPeer(peer.id) return } } - bp.RemovePeer(peer.id) - bp.sendTimeout(peer.id) + pool.RemovePeer(peer.id) + pool.sendTimeout(peer.id) } } diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 1aba782ee..e5976877e 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -8,7 +8,9 @@ import ( "time" "github.com/tendermint/tendermint/binary" + . "github.com/tendermint/tendermint/common" "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -16,11 +18,17 @@ const ( BlockchainChannel = byte(0x40) defaultChannelCapacity = 100 defaultSleepIntervalMS = 500 + trySyncIntervalMS = 100 + + // stop syncing when last block's time is + // within this much of the system time. + stopSyncingDurationMinutes = 10 ) // BlockchainReactor handles long-term catchup syncing. type BlockchainReactor struct { sw *p2p.Switch + state *sm.State store *BlockStore pool *BlockPool requestsCh chan BlockRequest @@ -31,7 +39,10 @@ type BlockchainReactor struct { stopped uint32 } -func NewBlockchainReactor(store *BlockStore) *BlockchainReactor { +func NewBlockchainReactor(state *sm.State, store *BlockStore) *BlockchainReactor { + if state.LastBlockHeight != store.Height() { + panic(Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())) + } requestsCh := make(chan BlockRequest, defaultChannelCapacity) timeoutsCh := make(chan string, defaultChannelCapacity) pool := NewBlockPool( @@ -40,6 +51,7 @@ func NewBlockchainReactor(store *BlockStore) *BlockchainReactor { timeoutsCh, ) bcR := &BlockchainReactor{ + state: state, store: store, pool: pool, requestsCh: requestsCh, @@ -129,7 +141,11 @@ func (bcR *BlockchainReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) } } +// Handle messages from the poolReactor telling the reactor what to do. func (bcR *BlockchainReactor) poolRoutine() { + + trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond) + FOR_LOOP: for { select { @@ -150,6 +166,48 @@ FOR_LOOP: // Peer timed out. 
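Each in-flight height is driven by its own requestRoutine goroutine: pick an available peer, send the request, sleep through the timeout, and retry up to maxTries before dropping that peer and reporting it on the timeouts channel. The inner retry loop in isolation looks like the toy below; peer selection and the pool bookkeeping are omitted, and the short timeout is only there to keep the example fast.

package main

import (
    "fmt"
    "time"
)

const (
    maxTries       = 3
    requestTimeout = 50 * time.Millisecond // stands in for requestTimeoutSeconds
)

// requestWithRetry fires a request, waits out the timeout, and checks
// whether the block showed up, giving up after maxTries attempts.
func requestWithRetry(send func(), haveBlock func() bool) bool {
    for try := 0; try < maxTries; try++ {
        send()
        time.Sleep(requestTimeout)
        if haveBlock() {
            return true
        }
    }
    return false // caller removes the peer and sends on timeoutsCh
}

func main() {
    tries := 0
    arrived := false
    ok := requestWithRetry(
        func() {
            tries++
            if tries == 2 {
                arrived = true // pretend the block arrives on the 2nd request
            }
        },
        func() bool { return arrived },
    )
    fmt.Println("got block:", ok, "after", tries, "requests")
}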
peer := bcR.sw.Peers().Get(peerId) bcR.sw.StopPeerForError(peer, errors.New("BlockchainReactor Timeout")) + case _ = <-trySyncTicker.C: // chan time + var lastValidatedBlock *types.Block + SYNC_LOOP: + for i := 0; i < 10; i++ { + // See if there are any blocks to sync. + first, second := bcR.pool.PeekTwoBlocks() + if first == nil || second == nil { + // We need both to sync the first block. + break SYNC_LOOP + } + firstParts := first.MakePartSet().Header() + // Finally, verify the first block using the second's validation. + err := bcR.state.BondedValidators.VerifyValidation( + first.Hash(), firstParts, first.Height, second.Validation) + if err != nil { + bcR.pool.RedoRequest(first.Height) + break SYNC_LOOP + } else { + bcR.pool.PopRequest() + err := bcR.state.AppendBlock(first, firstParts) + if err != nil { + // TODO This is bad, are we zombie? + panic(Fmt("Failed to process committed block: %v", err)) + } + lastValidatedBlock = first + } + } + // We're done syncing for now (will do again shortly) + // See if we want to stop syncing and turn on the + // consensus reactor. + // TODO: use other heuristics too besides blocktime. + // It's not a security concern, as it only needs to happen + // upon node sync, and there's also a second (slower) + // method of syncing in the consensus reactor. + if lastValidatedBlock != nil && time.Now().Sub(lastValidatedBlock.Time) < stopSyncingDurationMinutes*time.Minute { + go func() { + bcR.sw.Reactor("BLOCKCHAIN").Stop() + bcR.sw.Reactor("CONSENSUS").Start(bcR.sw) + }() + break FOR_LOOP + } + continue FOR_LOOP case <-bcR.quit: break FOR_LOOP } diff --git a/consensus/reactor.go b/consensus/reactor.go index 7abd75ec1..bbc39c89c 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -29,6 +29,8 @@ const ( //----------------------------------------------------------------------------- +// The reactor's underlying ConsensusState may change state at any time. +// We atomically copy the RoundState struct before using it. type ConsensusReactor struct { sw *p2p.Switch started uint32 diff --git a/consensus/state.go b/consensus/state.go index ad8b79cf7..e2738ca98 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -467,6 +467,8 @@ func (cs *ConsensusState) updateToState(state *sm.State) { // Reset fields based on state. validators := state.BondedValidators height := state.LastBlockHeight + 1 // next desired block height + + // RoundState fields cs.Height = height cs.Round = 0 cs.Step = RoundStepNewHeight diff --git a/consensus/vote_set.go b/consensus/vote_set.go index c640fc51b..973d4b7a1 100644 --- a/consensus/vote_set.go +++ b/consensus/vote_set.go @@ -34,7 +34,7 @@ type VoteSet struct { maj23Exists bool } -// Constructs a new VoteSet struct used to accumulate votes for each round. +// Constructs a new VoteSet struct used to accumulate votes for given height/round. 
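The SYNC_LOOP above encodes the fast-sync invariant: a block at height H is only applied once block H+1 has arrived, because H+1 carries the Validation (the +2/3 commit) for H. The runnable toy below boils that control flow down; the real code calls BondedValidators.VerifyValidation, AppendBlock, SaveBlock and state.Save, which are faked here with a string match and a callback, and RedoRequest in the patch also drops the offending peer before refetching.

package main

import (
    "errors"
    "fmt"
)

// Toy stand-ins: a block plus the commit info ("validation") that the
// *next* block carries for it.
type Block struct {
    Height     uint
    Validation string // in the real code: the commit for the previous block
}

// pool holds downloaded blocks keyed by height; height is the next
// block to commit.
type pool struct {
    height uint
    blocks map[uint]*Block
}

func (p *pool) peekTwo() (first, second *Block) {
    return p.blocks[p.height], p.blocks[p.height+1]
}

func (p *pool) pop() { delete(p.blocks, p.height); p.height++ }

// redo discards a bad block; the patch also removes the peer that sent
// it and re-requests the height from someone else.
func (p *pool) redo(h uint) { delete(p.blocks, h) }

// verify pretends to be VerifyValidation: the first block is valid iff
// the second block's Validation names it.
func verify(first, second *Block) error {
    if second.Validation != fmt.Sprintf("commit-%d", first.Height) {
        return errors.New("validation does not commit this block")
    }
    return nil
}

// trySync commits as many contiguous blocks as are ready.
func trySync(p *pool, apply func(*Block)) {
    for i := 0; i < 10; i++ {
        first, second := p.peekTwo()
        if first == nil || second == nil {
            break // need both: second's Validation commits first
        }
        if err := verify(first, second); err != nil {
            p.redo(first.Height)
            break
        }
        p.pop()
        apply(first)
    }
}

func main() {
    p := &pool{height: 1, blocks: map[uint]*Block{
        1: {Height: 1, Validation: ""},
        2: {Height: 2, Validation: "commit-1"},
        3: {Height: 3, Validation: "commit-2"},
    }}
    trySync(p, func(b *Block) { fmt.Println("applied block", b.Height) })
    // Block 3 stays pending until block 4 arrives carrying its commit.
}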
func NewVoteSet(height uint, round uint, type_ byte, valSet *sm.ValidatorSet) *VoteSet { if height == 0 { panic("Cannot make VoteSet for height == 0, doesn't make sense.") diff --git a/daemon/daemon.go b/daemon/daemon.go index a4b573bee..fae9c9792 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -55,7 +55,7 @@ func NewNode() *Node { pexReactor := p2p.NewPEXReactor(book) // Get BlockchainReactor - bcReactor := bc.NewBlockchainReactor(blockStore) + bcReactor := bc.NewBlockchainReactor(state, blockStore) // Get MempoolReactor mempool := mempl.NewMempool(state.Copy()) @@ -70,10 +70,10 @@ func NewNode() *Node { sw := p2p.NewSwitch() sw.SetChainId(state.Hash(), config.App().GetString("Network")) - sw.AddReactor("PEX", pexReactor) - //sw.AddReactor("BLOCKCHAIN", bcReactor) - sw.AddReactor("MEMPOOL", mempoolReactor) - sw.AddReactor("CONSENSUS", consensusReactor) + sw.AddReactor("PEX", pexReactor).Start(sw) + sw.AddReactor("BLOCKCHAIN", bcReactor).Start(sw) + sw.AddReactor("MEMPOOL", mempoolReactor).Start(sw) + sw.AddReactor("CONSENSUS", consensusReactor) // Do not start yet. return &Node{ sw: sw, diff --git a/p2p/peer_set.go b/p2p/peer_set.go index 23f49c513..effad6dcc 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -59,7 +59,12 @@ func (ps *PeerSet) Has(peerKey string) bool { func (ps *PeerSet) Get(peerKey string) *Peer { ps.mtx.Lock() defer ps.mtx.Unlock() - return ps.lookup[peerKey].peer + item, ok := ps.lookup[peerKey] + if ok { + return item.peer + } else { + return nil + } } func (ps *PeerSet) Remove(peer *Peer) { diff --git a/state/validator_set.go b/state/validator_set.go index 50f76f423..09589a9db 100644 --- a/state/validator_set.go +++ b/state/validator_set.go @@ -2,12 +2,15 @@ package state import ( "bytes" + "errors" "fmt" "sort" "strings" + "github.com/tendermint/tendermint/account" . "github.com/tendermint/tendermint/common" "github.com/tendermint/tendermint/merkle" + "github.com/tendermint/tendermint/types" ) // ValidatorSet represent a set of *Validator at a given height. @@ -198,6 +201,50 @@ func (valSet *ValidatorSet) Iterate(fn func(index uint, val *Validator) bool) { } } +// Verify that +2/3 of the set had signed the given signBytes +func (valSet *ValidatorSet) VerifyValidation(hash []byte, parts types.PartSetHeader, height uint, v *types.Validation) error { + if valSet.Size() != uint(len(v.Commits)) { + return errors.New(Fmt("Invalid validation -- wrong set size: %v vs %v", + valSet.Size(), len(v.Commits))) + } + + talliedVotingPower := uint64(0) + seenValidators := map[string]struct{}{} + + for idx, commit := range v.Commits { + // may be zero, in which case skip. 
+ if commit.Signature.IsZero() { + continue + } + _, val := valSet.GetByIndex(uint(idx)) + commitSignBytes := account.SignBytes(&types.Vote{ + Height: height, Round: commit.Round, Type: types.VoteTypeCommit, + BlockHash: hash, + BlockParts: parts, + }) + + // Validate + if _, seen := seenValidators[string(val.Address)]; seen { + return Errorf("Duplicate validator for commit %v for Validation %v", commit, v) + } + + if !val.PubKey.VerifyBytes(commitSignBytes, commit.Signature) { + return Errorf("Invalid signature for commit %v for Validation %v", commit, v) + } + + // Tally + seenValidators[string(val.Address)] = struct{}{} + talliedVotingPower += val.VotingPower + } + + if talliedVotingPower > valSet.TotalVotingPower()*2/3 { + return nil + } else { + return Errorf("insufficient voting power %v, needed %v", + talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) + } +} + func (valSet *ValidatorSet) String() string { return valSet.StringIndented("") } diff --git a/types/block.go b/types/block.go index 11dfb3b9d..176ce4385 100644 --- a/types/block.go +++ b/types/block.go @@ -39,7 +39,9 @@ func (b *Block) ValidateBasic(lastBlockHeight uint, lastBlockHash []byte, if !b.LastBlockParts.Equals(lastBlockParts) { return errors.New("Wrong Block.Header.LastBlockParts") } - /* TODO: Determine bounds. + /* TODO: Determine bounds + See blockchain/reactor "stopSyncingDurationMinutes" + if !b.Time.After(lastBlockTime) { return errors.New("Invalid Block.Header.Time") } From 94c3a5176082ed8e7590732f7b089ec8d9b6c4c5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 12:13:32 -0700 Subject: [PATCH 08/18] bug fix in daemon -- network name only, no chain hash --- daemon/daemon.go | 2 +- p2p/pex_reactor.go | 8 ++++---- p2p/switch.go | 11 +++++------ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index fae9c9792..ddc310762 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -69,7 +69,7 @@ func NewNode() *Node { } sw := p2p.NewSwitch() - sw.SetChainId(state.Hash(), config.App().GetString("Network")) + sw.SetNetwork(config.App().GetString("Network")) sw.AddReactor("PEX", pexReactor).Start(sw) sw.AddReactor("BLOCKCHAIN", bcReactor).Start(sw) sw.AddReactor("MEMPOOL", mempoolReactor).Start(sw) diff --git a/p2p/pex_reactor.go b/p2p/pex_reactor.go index 926859316..439b4cf9b 100644 --- a/p2p/pex_reactor.go +++ b/p2p/pex_reactor.go @@ -98,9 +98,9 @@ func (pexR *PEXReactor) Receive(chId byte, src *Peer, msgBytes []byte) { switch msg.(type) { case *pexHandshakeMessage: - chainId := msg.(*pexHandshakeMessage).ChainId - if chainId != pexR.sw.chainId { - err := fmt.Sprintf("Peer is on a different chain/network. Got %s, expected %s", chainId, pexR.sw.chainId) + network := msg.(*pexHandshakeMessage).Network + if network != pexR.sw.network { + err := fmt.Sprintf("Peer is on a different chain/network. Got %s, expected %s", network, pexR.sw.network) pexR.sw.StopPeerForError(src, err) } case *pexRequestMessage: @@ -238,7 +238,7 @@ func DecodeMessage(bz []byte) (msg interface{}, err error) { A pexHandshakeMessage contains the peer's chainId */ type pexHandshakeMessage struct { - ChainId string + Network string } func (m *pexHandshakeMessage) TypeByte() byte { return msgTypeHandshake } diff --git a/p2p/switch.go b/p2p/switch.go index 5216f77dd..10da29f36 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -1,7 +1,6 @@ package p2p import ( - "encoding/hex" "errors" "fmt" "net" @@ -28,7 +27,7 @@ or more `Channels`. 
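VerifyValidation is, at its core, an accounting check: walk the commits positionally against the validator set, skip absent signatures, reject duplicates and bad signatures, and require the signers to hold strictly more than two thirds of the total voting power. The sketch below shows just that arithmetic, with signature verification reduced to a boolean (the real code rebuilds the vote's sign-bytes and checks the validator's public-key signature).

package main

import (
    "errors"
    "fmt"
)

type validator struct {
    address     string
    votingPower uint64
}

// commit is a stand-in for a per-validator commit: present mirrors a
// non-zero signature, sigOK mirrors the signature check.
type commit struct {
    present bool
    sigOK   bool
}

// verifyValidation mirrors the tally: commits[i] belongs to vals[i],
// absent signatures are skipped, and strictly more than 2/3 of the
// total voting power must have signed correctly.
func verifyValidation(vals []validator, commits []commit) error {
    if len(vals) != len(commits) {
        return errors.New("wrong validation set size")
    }
    total, tallied := uint64(0), uint64(0)
    for _, v := range vals {
        total += v.votingPower
    }
    seen := map[string]struct{}{}
    for i, c := range commits {
        if !c.present {
            continue // this validator did not sign; that is allowed
        }
        val := vals[i]
        if _, dup := seen[val.address]; dup {
            return errors.New("duplicate validator in validation")
        }
        if !c.sigOK {
            return errors.New("invalid signature in validation")
        }
        seen[val.address] = struct{}{}
        tallied += val.votingPower
    }
    if tallied > total*2/3 {
        return nil
    }
    return fmt.Errorf("insufficient voting power %v, needed more than %v", tallied, total*2/3)
}

func main() {
    vals := []validator{{"a", 10}, {"b", 10}, {"c", 10}}
    // Two of three signed: 20 of 30 is not strictly more than 2/3 (20),
    // so this validation is rejected.
    fmt.Println(verifyValidation(vals, []commit{{true, true}, {true, true}, {false, false}}))
    // All three signed: accepted.
    fmt.Println(verifyValidation(vals, []commit{{true, true}, {true, true}, {true, true}}))
}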
So while sending outgoing messages is typically performed o incoming messages are received on the reactor. */ type Switch struct { - chainId string + network string reactors map[string]Reactor chDescs []*ChannelDescriptor reactorsByCh map[byte]Reactor @@ -48,7 +47,7 @@ const ( func NewSwitch() *Switch { sw := &Switch{ - chainId: "", + network: "", reactors: make(map[string]Reactor), chDescs: make([]*ChannelDescriptor, 0), reactorsByCh: make(map[byte]Reactor), @@ -61,8 +60,8 @@ func NewSwitch() *Switch { } // Not goroutine safe. -func (sw *Switch) SetChainId(hash []byte, network string) { - sw.chainId = hex.EncodeToString(hash) + "-" + network +func (sw *Switch) SetNetwork(network string) { + sw.network = network } // Not goroutine safe. @@ -139,7 +138,7 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er go peer.start() // Send handshake - msg := &pexHandshakeMessage{ChainId: sw.chainId} + msg := &pexHandshakeMessage{Network: sw.network} peer.Send(PexChannel, msg) return peer, nil From 938eda979bf71f36542453f49a9d9d69f1d76bc5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 12:21:52 -0700 Subject: [PATCH 09/18] fix bug, peer may be nil --- blockchain/reactor.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/blockchain/reactor.go b/blockchain/reactor.go index e5976877e..f80abbe34 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -112,11 +112,10 @@ func (bcR *BlockchainReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) log.Warn("Error decoding message", "error", err) return } - log.Info("BlockchainReactor received message", "msg", msg_) + log.Debug("BlockchainReactor received message", "msg", msg_) switch msg := msg_.(type) { case BlockRequestMessage: - log.Debug("Got BlockRequest", "msg", msg) // Got a request for a block. Respond with block if we have it. block := bcR.store.LoadBlock(msg.Height) if block != nil { @@ -129,11 +128,9 @@ func (bcR *BlockchainReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) // TODO peer is asking for things we don't have. } case BlockResponseMessage: - log.Debug("Got BlockResponse", "msg", msg) // Got a block. bcR.pool.AddBlock(msg.Block, src.Key) case PeerStatusMessage: - log.Debug("Got PeerStatus", "msg", msg) // Got a peer status. bcR.pool.SetPeerHeight(src.Key, msg.Height) default: @@ -165,7 +162,9 @@ FOR_LOOP: case peerId := <-bcR.timeoutsCh: // chan string // Peer timed out. peer := bcR.sw.Peers().Get(peerId) - bcR.sw.StopPeerForError(peer, errors.New("BlockchainReactor Timeout")) + if peer != nil { + bcR.sw.StopPeerForError(peer, errors.New("BlockchainReactor Timeout")) + } case _ = <-trySyncTicker.C: // chan time var lastValidatedBlock *types.Block SYNC_LOOP: From 788f9bfb933b3f3882d7d25075e973da2f8daa30 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 13:01:28 -0700 Subject: [PATCH 10/18] started/stopped -> running; contiguous vs fast forward ConsensusState updates. 
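Patch 10's started/stopped to running consolidation is more than a rename: with a single word, Start and Stop become the two directions of one compare-and-swap and IsRunning is a plain atomic load. The pattern in isolation, as a small sketch:

package main

import (
    "fmt"
    "sync/atomic"
)

// service shows the single running flag: flipped 0->1 by Start and
// 1->0 by Stop, each transition happening at most once at a time.
type service struct {
    running uint32 // atomic
}

func (s *service) Start() bool {
    if atomic.CompareAndSwapUint32(&s.running, 0, 1) {
        // ... spawn goroutines, open resources ...
        return true
    }
    return false // already running
}

func (s *service) Stop() bool {
    if atomic.CompareAndSwapUint32(&s.running, 1, 0) {
        // ... signal goroutines to exit, release resources ...
        return true
    }
    return false // never started, or already stopped
}

func (s *service) IsRunning() bool {
    return atomic.LoadUint32(&s.running) == 1
}

func main() {
    var s service
    fmt.Println(s.Start(), s.IsRunning()) // true true
    fmt.Println(s.Start())                // false: second Start is a no-op
    fmt.Println(s.Stop(), s.IsRunning())  // true false
}

One behavioural difference from the two-flag version: because Stop clears the same word that Start sets, a component can be stopped and started again, whereas each of the old one-shot flags could only ever flip once.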
--- blockchain/reactor.go | 20 ++++++++++++-------- consensus/reactor.go | 30 ++++++++++++++++++++++-------- consensus/state.go | 8 ++++---- daemon/daemon.go | 2 +- p2p/peer.go | 19 +++++++++---------- 5 files changed, 48 insertions(+), 31 deletions(-) diff --git a/blockchain/reactor.go b/blockchain/reactor.go index f80abbe34..56ae6a241 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -35,8 +35,7 @@ type BlockchainReactor struct { timeoutsCh chan string lastBlock *types.Block quit chan struct{} - started uint32 - stopped uint32 + running uint32 } func NewBlockchainReactor(state *sm.State, store *BlockStore) *BlockchainReactor { @@ -57,15 +56,14 @@ func NewBlockchainReactor(state *sm.State, store *BlockStore) *BlockchainReactor requestsCh: requestsCh, timeoutsCh: timeoutsCh, quit: make(chan struct{}), - started: 0, - stopped: 0, + running: uint32(0), } return bcR } // Implements Reactor func (bcR *BlockchainReactor) Start(sw *p2p.Switch) { - if atomic.CompareAndSwapUint32(&bcR.started, 0, 1) { + if atomic.CompareAndSwapUint32(&bcR.running, 0, 1) { log.Info("Starting BlockchainReactor") bcR.sw = sw bcR.pool.Start() @@ -75,7 +73,7 @@ func (bcR *BlockchainReactor) Start(sw *p2p.Switch) { // Implements Reactor func (bcR *BlockchainReactor) Stop() { - if atomic.CompareAndSwapUint32(&bcR.stopped, 0, 1) { + if atomic.CompareAndSwapUint32(&bcR.running, 1, 0) { log.Info("Stopping BlockchainReactor") close(bcR.quit) bcR.pool.Stop() @@ -201,8 +199,14 @@ FOR_LOOP: // method of syncing in the consensus reactor. if lastValidatedBlock != nil && time.Now().Sub(lastValidatedBlock.Time) < stopSyncingDurationMinutes*time.Minute { go func() { - bcR.sw.Reactor("BLOCKCHAIN").Stop() - bcR.sw.Reactor("CONSENSUS").Start(bcR.sw) + log.Info("Stopping blockpool syncing, turning on consensus...") + //bcR.sw.Reactor("BLOCKCHAIN").Stop() + trySyncTicker.Stop() // Just stop the block requests. Still serve blocks to others. + conR := bcR.sw.Reactor("CONSENSUS") + conR.Start(bcR.sw) + for _, peer := range bcR.sw.Peers().List() { + conR.AddPeer(peer) + } }() break FOR_LOOP } diff --git a/consensus/reactor.go b/consensus/reactor.go index bbc39c89c..30ebde879 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -33,8 +33,7 @@ const ( // We atomically copy the RoundState struct before using it. 
type ConsensusReactor struct { sw *p2p.Switch - started uint32 - stopped uint32 + running uint32 quit chan struct{} blockStore *bc.BlockStore @@ -52,7 +51,7 @@ func NewConsensusReactor(consensusState *ConsensusState, blockStore *bc.BlockSto // Implements Reactor func (conR *ConsensusReactor) Start(sw *p2p.Switch) { - if atomic.CompareAndSwapUint32(&conR.started, 0, 1) { + if atomic.CompareAndSwapUint32(&conR.running, 0, 1) { log.Info("Starting ConsensusReactor") conR.sw = sw conR.conS.Start() @@ -62,15 +61,15 @@ func (conR *ConsensusReactor) Start(sw *p2p.Switch) { // Implements Reactor func (conR *ConsensusReactor) Stop() { - if atomic.CompareAndSwapUint32(&conR.stopped, 0, 1) { + if atomic.CompareAndSwapUint32(&conR.running, 1, 0) { log.Info("Stopping ConsensusReactor") conR.conS.Stop() close(conR.quit) } } -func (conR *ConsensusReactor) IsStopped() bool { - return atomic.LoadUint32(&conR.stopped) == 1 +func (conR *ConsensusReactor) IsRunning() bool { + return atomic.LoadUint32(&conR.running) == 0 } // Implements Reactor @@ -94,6 +93,10 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor { // Implements Reactor func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) { + if !conR.IsRunning() { + return + } + // Create peerState for peer peerState := NewPeerState(peer) peer.Data.Set(peerStateKey, peerState) @@ -108,11 +111,18 @@ func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) { // Implements Reactor func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) { + if !conR.IsRunning() { + return + } + //peer.Data.Get(peerStateKey).(*PeerState).Disconnect() } // Implements Reactor func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte) { + if !conR.IsRunning() { + return + } // Get round state rs := conR.conS.GetRoundState() @@ -215,6 +225,10 @@ func (conR *ConsensusReactor) SetPrivValidator(priv *sm.PrivValidator) { conR.conS.SetPrivValidator(priv) } +func (conR *ConsensusReactor) UpdateToState(state *sm.State) { + conR.conS.updateToState(state, false) +} + //-------------------------------------- func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) { @@ -279,7 +293,7 @@ func (conR *ConsensusReactor) gossipDataRoutine(peer *p2p.Peer, ps *PeerState) { OUTER_LOOP: for { // Manage disconnects from self or peer. - if peer.IsStopped() || conR.IsStopped() { + if !peer.IsRunning() || !conR.IsRunning() { log.Info(Fmt("Stopping gossipDataRoutine for %v.", peer)) return } @@ -382,7 +396,7 @@ func (conR *ConsensusReactor) gossipVotesRoutine(peer *p2p.Peer, ps *PeerState) OUTER_LOOP: for { // Manage disconnects from self or peer. - if peer.IsStopped() || conR.IsStopped() { + if !peer.IsRunning() || !conR.IsRunning() { log.Info(Fmt("Stopping gossipVotesRoutine for %v.", peer)) return } diff --git a/consensus/state.go b/consensus/state.go index e2738ca98..683612496 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -256,7 +256,7 @@ func NewConsensusState(state *sm.State, blockStore *bc.BlockStore, mempoolReacto runActionCh: make(chan RoundAction, 1), newStepCh: make(chan *RoundState, 1), } - cs.updateToState(state) + cs.updateToState(state, true) return cs } @@ -457,9 +457,9 @@ ACTION_LOOP: // If calculated round is greater than 0 (based on BlockTime or calculated StartTime) // then also sets up the appropriate round, and cs.Step becomes RoundStepNewRound. // Otherwise the round is 0 and cs.Step becomes RoundStepNewHeight. 
-func (cs *ConsensusState) updateToState(state *sm.State) { +func (cs *ConsensusState) updateToState(state *sm.State, contiguous bool) { // Sanity check state. - if cs.Height > 0 && cs.Height != state.LastBlockHeight { + if contiguous && cs.Height > 0 && cs.Height != state.LastBlockHeight { panic(Fmt("updateToState() expected state height of %v but found %v", cs.Height, state.LastBlockHeight)) } @@ -859,7 +859,7 @@ func (cs *ConsensusState) TryFinalizeCommit(height uint) bool { // We have the block, so save/stage/sign-commit-vote. cs.saveCommitVoteBlock(cs.ProposalBlock, cs.ProposalBlockParts, cs.Commits) // Increment height. - cs.updateToState(cs.stagedState) + cs.updateToState(cs.stagedState, true) // cs.Step is now RoundStepNewHeight or RoundStepNewRound cs.newStepCh <- cs.getRoundState() return true diff --git a/daemon/daemon.go b/daemon/daemon.go index ddc310762..1b029f9d7 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -94,7 +94,7 @@ func (n *Node) Start() { go n.inboundConnectionRoutine(l) } n.book.Start() - n.sw.StartReactors() + //n.sw.StartReactors() } func (n *Node) Stop() { diff --git a/p2p/peer.go b/p2p/peer.go index 68137a63a..173297eb0 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -13,8 +13,7 @@ import ( type Peer struct { outbound bool mconn *MConnection - started uint32 - stopped uint32 + running uint32 Key string Data *CMap // User data. @@ -37,7 +36,7 @@ func newPeer(conn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDesc p = &Peer{ outbound: outbound, mconn: mconn, - stopped: 0, + running: 0, Key: mconn.RemoteAddress.String(), Data: NewCMap(), } @@ -45,21 +44,21 @@ func newPeer(conn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDesc } func (p *Peer) start() { - if atomic.CompareAndSwapUint32(&p.started, 0, 1) { + if atomic.CompareAndSwapUint32(&p.running, 0, 1) { log.Debug("Starting Peer", "peer", p) p.mconn.Start() } } func (p *Peer) stop() { - if atomic.CompareAndSwapUint32(&p.stopped, 0, 1) { + if atomic.CompareAndSwapUint32(&p.running, 1, 0) { log.Debug("Stopping Peer", "peer", p) p.mconn.Stop() } } -func (p *Peer) IsStopped() bool { - return atomic.LoadUint32(&p.stopped) == 1 +func (p *Peer) IsRunning() bool { + return atomic.LoadUint32(&p.running) == 1 } func (p *Peer) Connection() *MConnection { @@ -71,21 +70,21 @@ func (p *Peer) IsOutbound() bool { } func (p *Peer) Send(chId byte, msg interface{}) bool { - if atomic.LoadUint32(&p.stopped) == 1 { + if atomic.LoadUint32(&p.running) == 0 { return false } return p.mconn.Send(chId, msg) } func (p *Peer) TrySend(chId byte, msg interface{}) bool { - if atomic.LoadUint32(&p.stopped) == 1 { + if atomic.LoadUint32(&p.running) == 0 { return false } return p.mconn.TrySend(chId, msg) } func (p *Peer) CanSend(chId byte) bool { - if atomic.LoadUint32(&p.stopped) == 1 { + if atomic.LoadUint32(&p.running) == 0 { return false } return p.mconn.CanSend(chId) From a2b8318aaccc343b41da9a4b140135dcd25c2a03 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 13:17:45 -0700 Subject: [PATCH 11/18] allow BlockchainReactor to reset ConsensusReactor state --- blockchain/pool.go | 3 +++ blockchain/reactor.go | 6 ++++++ consensus/reactor.go | 1 + 3 files changed, 10 insertions(+) diff --git a/blockchain/pool.go b/blockchain/pool.go index 192165bf7..577701519 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -339,16 +339,19 @@ func requestRoutine(pool *BlockPool, height uint) { PICK_LOOP: for { if !pool.IsRunning() { + log.Debug("BlockPool not running. 
Stopping requestRoutine", "height", height) return } peer = pool.pickIncrAvailablePeer(height) if peer == nil { + log.Debug("No peers available", "height", height) time.Sleep(requestIntervalMS * time.Millisecond) continue PICK_LOOP } break PICK_LOOP } + log.Debug("Selected peer for request", "height", height, "peerId", peer.id) pool.setPeerForRequest(height, peer.id) for try := 0; try < maxTries; try++ { diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 56ae6a241..a3e93e989 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -25,6 +25,10 @@ const ( stopSyncingDurationMinutes = 10 ) +type stateResetter interface { + ResetToState(*sm.State) +} + // BlockchainReactor handles long-term catchup syncing. type BlockchainReactor struct { sw *p2p.Switch @@ -93,6 +97,7 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { // Implements Reactor func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) { + log.Debug("BlockchainReactor AddPeer", "peer", peer) // Send peer our state. peer.Send(BlockchainChannel, PeerStatusMessage{bcR.store.Height()}) } @@ -203,6 +208,7 @@ FOR_LOOP: //bcR.sw.Reactor("BLOCKCHAIN").Stop() trySyncTicker.Stop() // Just stop the block requests. Still serve blocks to others. conR := bcR.sw.Reactor("CONSENSUS") + conR.(stateResetter).ResetToState(bcR.state) conR.Start(bcR.sw) for _, peer := range bcR.sw.Peers().List() { conR.AddPeer(peer) diff --git a/consensus/reactor.go b/consensus/reactor.go index 30ebde879..56cadcf9d 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -225,6 +225,7 @@ func (conR *ConsensusReactor) SetPrivValidator(priv *sm.PrivValidator) { conR.conS.SetPrivValidator(priv) } +// Fast forward to some state. func (conR *ConsensusReactor) UpdateToState(state *sm.State) { conR.conS.updateToState(state, false) } From bd6d9d646df967b6d94441837cf702b5ea1d7573 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 13:22:46 -0700 Subject: [PATCH 12/18] start peer before AddPeer() on reactors. --- consensus/reactor.go | 4 ++-- p2p/switch.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/reactor.go b/consensus/reactor.go index 56cadcf9d..8d992bba7 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -225,8 +225,8 @@ func (conR *ConsensusReactor) SetPrivValidator(priv *sm.PrivValidator) { conR.conS.SetPrivValidator(priv) } -// Fast forward to some state. -func (conR *ConsensusReactor) UpdateToState(state *sm.State) { +// Reset to some state. +func (conR *ConsensusReactor) ResetToState(state *sm.State) { conR.conS.updateToState(state, false) } diff --git a/p2p/switch.go b/p2p/switch.go index 10da29f36..105561314 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -131,12 +131,12 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er return nil, ErrSwitchDuplicatePeer } - // Notify listeners. - sw.doAddPeer(peer) - // Start the peer go peer.start() + // Notify listeners. + sw.doAddPeer(peer) + // Send handshake msg := &pexHandshakeMessage{Network: sw.network} peer.Send(PexChannel, msg) From 7171823fc6d667904cfdc926a51f2848a804a04d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Mar 2015 17:16:49 -0700 Subject: [PATCH 13/18] Fix blockpool bugs and clean up log messages. 
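Patch 11's stateResetter is a small but useful trick: rather than importing the consensus package directly (which would cycle back, since consensus already imports blockchain), the blockchain reactor declares the one method it needs locally and type-asserts the reactor it fetched by name. A self-contained illustration of that shape, with all types reduced to stand-ins:

package main

import "fmt"

// State is a stand-in for sm.State.
type State struct{ LastBlockHeight uint }

// Reactor is a stand-in for p2p.Reactor, which knows nothing about
// consensus state.
type Reactor interface{ Name() string }

// stateResetter is the narrow capability the blockchain reactor needs
// from the consensus reactor at hand-off time.
type stateResetter interface {
    ResetToState(*State)
}

type consensusReactor struct{ nextHeight uint }

func (c *consensusReactor) Name() string { return "CONSENSUS" }

// ResetToState fast-forwards the reactor past the blocks that
// fast-sync already committed.
func (c *consensusReactor) ResetToState(s *State) { c.nextHeight = s.LastBlockHeight + 1 }

func main() {
    var r Reactor = &consensusReactor{}
    synced := &State{LastBlockHeight: 100}
    // The blockchain reactor only holds a Reactor, so it narrows it via
    // a type assertion before handing over.
    if resetter, ok := r.(stateResetter); ok {
        resetter.ResetToState(synced)
    }
    fmt.Println(r.(*consensusReactor).nextHeight) // 101
}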
--- blockchain/pool.go | 21 ++++++--------- blockchain/reactor.go | 61 +++++++++++++++++++++++-------------------- consensus/reactor.go | 2 +- p2p/addrbook.go | 2 +- p2p/connection.go | 1 + p2p/pex_reactor.go | 10 ++++--- p2p/switch.go | 2 +- 7 files changed, 51 insertions(+), 48 deletions(-) diff --git a/blockchain/pool.go b/blockchain/pool.go index 577701519..0bd620463 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -10,14 +10,12 @@ import ( ) const ( - maxOutstandingRequestsPerPeer = 10 - inputsChannelCapacity = 100 - maxTries = 3 - requestIntervalMS = 500 - requestBatchSize = 50 - maxPendingRequests = 50 - maxTotalRequests = 100 - maxRequestsPerPeer = 20 + maxTries = 3 + inputsChannelCapacity = 200 + requestIntervalMS = 500 + maxPendingRequests = 200 + maxTotalRequests = 300 + maxRequestsPerPeer = 300 ) var ( @@ -85,9 +83,7 @@ RUN_LOOP: if atomic.LoadInt32(&pool.running) == 0 { break RUN_LOOP } - height, numPending, numTotal := pool.GetStatus() - log.Debug("BlockPool.run", "height", height, "numPending", numPending, - "numTotal", numTotal) + _, numPending, numTotal := pool.GetStatus() if numPending >= maxPendingRequests { // sleep for a bit. time.Sleep(requestIntervalMS * time.Millisecond) @@ -344,14 +340,13 @@ func requestRoutine(pool *BlockPool, height uint) { } peer = pool.pickIncrAvailablePeer(height) if peer == nil { - log.Debug("No peers available", "height", height) + //log.Debug("No peers available", "height", height) time.Sleep(requestIntervalMS * time.Millisecond) continue PICK_LOOP } break PICK_LOOP } - log.Debug("Selected peer for request", "height", height, "peerId", peer.id) pool.setPeerForRequest(height, peer.id) for try := 0; try < maxTries; try++ { diff --git a/blockchain/reactor.go b/blockchain/reactor.go index a3e93e989..b0ae92455 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -90,16 +90,15 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { &p2p.ChannelDescriptor{ Id: BlockchainChannel, Priority: 5, - SendQueueCapacity: 20, // Queue 20 blocks to send to a peer. + SendQueueCapacity: 100, }, } } // Implements Reactor func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) { - log.Debug("BlockchainReactor AddPeer", "peer", peer) // Send peer our state. - peer.Send(BlockchainChannel, PeerStatusMessage{bcR.store.Height()}) + peer.Send(BlockchainChannel, bcPeerStatusMessage{bcR.store.Height()}) } // Implements Reactor @@ -115,14 +114,15 @@ func (bcR *BlockchainReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) log.Warn("Error decoding message", "error", err) return } - log.Debug("BlockchainReactor received message", "msg", msg_) + + log.Info("Received message", "msg", msg_) switch msg := msg_.(type) { - case BlockRequestMessage: + case bcBlockRequestMessage: // Got a request for a block. Respond with block if we have it. block := bcR.store.LoadBlock(msg.Height) if block != nil { - msg := BlockResponseMessage{Block: block} + msg := bcBlockResponseMessage{Block: block} queued := src.TrySend(BlockchainChannel, msg) if !queued { // queue is full, just ignore. @@ -130,10 +130,10 @@ func (bcR *BlockchainReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) } else { // TODO peer is asking for things we don't have. } - case BlockResponseMessage: + case bcBlockResponseMessage: // Got a block. bcR.pool.AddBlock(msg.Block, src.Key) - case PeerStatusMessage: + case bcPeerStatusMessage: // Got a peer status. 
bcR.pool.SetPeerHeight(src.Key, msg.Height) default: @@ -155,7 +155,7 @@ FOR_LOOP: // We can't fulfill the request. continue FOR_LOOP } - msg := BlockRequestMessage{request.Height} + msg := bcBlockRequestMessage{request.Height} queued := peer.TrySend(BlockchainChannel, msg) if !queued { // We couldn't queue the request. @@ -174,24 +174,29 @@ FOR_LOOP: for i := 0; i < 10; i++ { // See if there are any blocks to sync. first, second := bcR.pool.PeekTwoBlocks() + //log.Debug("TrySync peeked", "first", first, "second", second) if first == nil || second == nil { // We need both to sync the first block. break SYNC_LOOP } - firstParts := first.MakePartSet().Header() + firstParts := first.MakePartSet() + firstPartsHeader := firstParts.Header() // Finally, verify the first block using the second's validation. err := bcR.state.BondedValidators.VerifyValidation( - first.Hash(), firstParts, first.Height, second.Validation) + first.Hash(), firstPartsHeader, first.Height, second.Validation) if err != nil { + log.Debug("error in validation", "error", err) bcR.pool.RedoRequest(first.Height) break SYNC_LOOP } else { bcR.pool.PopRequest() - err := bcR.state.AppendBlock(first, firstParts) + err := bcR.state.AppendBlock(first, firstPartsHeader) if err != nil { // TODO This is bad, are we zombie? panic(Fmt("Failed to process committed block: %v", err)) } + bcR.store.SaveBlock(first, firstParts, second.Validation) + bcR.state.Save() lastValidatedBlock = first } } @@ -224,7 +229,7 @@ FOR_LOOP: } func (bcR *BlockchainReactor) BroadcastStatus() error { - bcR.sw.Broadcast(BlockchainChannel, PeerStatusMessage{bcR.store.Height()}) + bcR.sw.Broadcast(BlockchainChannel, bcPeerStatusMessage{bcR.store.Height()}) return nil } @@ -245,11 +250,11 @@ func DecodeMessage(bz []byte) (msgType byte, msg interface{}, err error) { r := bytes.NewReader(bz) switch msgType { case msgTypeBlockRequest: - msg = binary.ReadBinary(BlockRequestMessage{}, r, n, &err) + msg = binary.ReadBinary(bcBlockRequestMessage{}, r, n, &err) case msgTypeBlockResponse: - msg = binary.ReadBinary(BlockResponseMessage{}, r, n, &err) + msg = binary.ReadBinary(bcBlockResponseMessage{}, r, n, &err) case msgTypePeerStatus: - msg = binary.ReadBinary(PeerStatusMessage{}, r, n, &err) + msg = binary.ReadBinary(bcPeerStatusMessage{}, r, n, &err) default: msg = nil } @@ -258,36 +263,36 @@ func DecodeMessage(bz []byte) (msgType byte, msg interface{}, err error) { //------------------------------------- -type BlockRequestMessage struct { +type bcBlockRequestMessage struct { Height uint } -func (m BlockRequestMessage) TypeByte() byte { return msgTypeBlockRequest } +func (m bcBlockRequestMessage) TypeByte() byte { return msgTypeBlockRequest } -func (m BlockRequestMessage) String() string { - return fmt.Sprintf("[BlockRequestMessage %v]", m.Height) +func (m bcBlockRequestMessage) String() string { + return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height) } //------------------------------------- -type BlockResponseMessage struct { +type bcBlockResponseMessage struct { Block *types.Block } -func (m BlockResponseMessage) TypeByte() byte { return msgTypeBlockResponse } +func (m bcBlockResponseMessage) TypeByte() byte { return msgTypeBlockResponse } -func (m BlockResponseMessage) String() string { - return fmt.Sprintf("[BlockResponseMessage %v]", m.Block.Height) +func (m bcBlockResponseMessage) String() string { + return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height) } //------------------------------------- -type PeerStatusMessage struct { +type 
bcPeerStatusMessage struct { Height uint } -func (m PeerStatusMessage) TypeByte() byte { return msgTypePeerStatus } +func (m bcPeerStatusMessage) TypeByte() byte { return msgTypePeerStatus } -func (m PeerStatusMessage) String() string { - return fmt.Sprintf("[PeerStatusMessage %v]", m.Height) +func (m bcPeerStatusMessage) String() string { + return fmt.Sprintf("[bcPeerStatusMessage %v]", m.Height) } diff --git a/consensus/reactor.go b/consensus/reactor.go index 8d992bba7..a3028b6d5 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -69,7 +69,7 @@ func (conR *ConsensusReactor) Stop() { } func (conR *ConsensusReactor) IsRunning() bool { - return atomic.LoadUint32(&conR.running) == 0 + return atomic.LoadUint32(&conR.running) == 1 } // Implements Reactor diff --git a/p2p/addrbook.go b/p2p/addrbook.go index 43cad607c..893cf1c9e 100644 --- a/p2p/addrbook.go +++ b/p2p/addrbook.go @@ -381,7 +381,7 @@ out: for { select { case <-dumpAddressTicker.C: - log.Debug("Saving book to file", "size", a.Size()) + log.Debug("Saving AddrBook to file", "size", a.Size()) a.saveToFile(a.filePath) case <-a.quit: break out diff --git a/p2p/connection.go b/p2p/connection.go index 0e26480f1..c75538365 100644 --- a/p2p/connection.go +++ b/p2p/connection.go @@ -417,6 +417,7 @@ FOR_LOOP: } msgBytes := channel.recvMsgPacket(pkt) if msgBytes != nil { + log.Debug("Received bytes", "chId", pkt.ChannelId, "msgBytes", msgBytes) c.onReceive(pkt.ChannelId, msgBytes) } default: diff --git a/p2p/pex_reactor.go b/p2p/pex_reactor.go index 439b4cf9b..5ba238d31 100644 --- a/p2p/pex_reactor.go +++ b/p2p/pex_reactor.go @@ -176,10 +176,12 @@ func (pexR *PEXReactor) ensurePeers() { alreadyDialing := pexR.sw.IsDialing(try) alreadyConnected := pexR.sw.Peers().Has(try.String()) if alreadySelected || alreadyDialing || alreadyConnected { - log.Debug("Cannot dial address", "addr", try, - "alreadySelected", alreadySelected, - "alreadyDialing", alreadyDialing, - "alreadyConnected", alreadyConnected) + /* + log.Debug("Cannot dial address", "addr", try, + "alreadySelected", alreadySelected, + "alreadyDialing", alreadyDialing, + "alreadyConnected", alreadyConnected) + */ continue } else { log.Debug("Will dial address", "addr", try) diff --git a/p2p/switch.go b/p2p/switch.go index 105561314..1eb513089 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -132,7 +132,7 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er } // Start the peer - go peer.start() + peer.start() // Notify listeners. 
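// peer.start() has already been called above, so reactors can
// Send() to the peer right away from their AddPeer() handlers.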
sw.doAddPeer(peer) From bd767c1fab2123aa94b637723f3fb278f295c08c Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 26 Mar 2015 00:35:16 -0700 Subject: [PATCH 14/18] Make fast_sync a command-line flag --- blockchain/reactor.go | 56 ++++++++++++++++++++++++------------------- config/config.go | 4 ++++ daemon/daemon.go | 10 +++++--- 3 files changed, 42 insertions(+), 28 deletions(-) diff --git a/blockchain/reactor.go b/blockchain/reactor.go index b0ae92455..6d65708f2 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -35,6 +35,7 @@ type BlockchainReactor struct { state *sm.State store *BlockStore pool *BlockPool + sync bool requestsCh chan BlockRequest timeoutsCh chan string lastBlock *types.Block @@ -42,7 +43,7 @@ type BlockchainReactor struct { running uint32 } -func NewBlockchainReactor(state *sm.State, store *BlockStore) *BlockchainReactor { +func NewBlockchainReactor(state *sm.State, store *BlockStore, sync bool) *BlockchainReactor { if state.LastBlockHeight != store.Height() { panic(Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())) } @@ -57,6 +58,7 @@ func NewBlockchainReactor(state *sm.State, store *BlockStore) *BlockchainReactor state: state, store: store, pool: pool, + sync: sync, requestsCh: requestsCh, timeoutsCh: timeoutsCh, quit: make(chan struct{}), @@ -71,7 +73,9 @@ func (bcR *BlockchainReactor) Start(sw *p2p.Switch) { log.Info("Starting BlockchainReactor") bcR.sw = sw bcR.pool.Start() - go bcR.poolRoutine() + if bcR.sync { + go bcR.poolRoutine() + } } } @@ -169,7 +173,7 @@ FOR_LOOP: bcR.sw.StopPeerForError(peer, errors.New("BlockchainReactor Timeout")) } case _ = <-trySyncTicker.C: // chan time - var lastValidatedBlock *types.Block + //var lastValidatedBlock *types.Block SYNC_LOOP: for i := 0; i < 10; i++ { // See if there are any blocks to sync. @@ -197,30 +201,32 @@ FOR_LOOP: } bcR.store.SaveBlock(first, firstParts, second.Validation) bcR.state.Save() - lastValidatedBlock = first + //lastValidatedBlock = first } } - // We're done syncing for now (will do again shortly) - // See if we want to stop syncing and turn on the - // consensus reactor. - // TODO: use other heuristics too besides blocktime. - // It's not a security concern, as it only needs to happen - // upon node sync, and there's also a second (slower) - // method of syncing in the consensus reactor. - if lastValidatedBlock != nil && time.Now().Sub(lastValidatedBlock.Time) < stopSyncingDurationMinutes*time.Minute { - go func() { - log.Info("Stopping blockpool syncing, turning on consensus...") - //bcR.sw.Reactor("BLOCKCHAIN").Stop() - trySyncTicker.Stop() // Just stop the block requests. Still serve blocks to others. - conR := bcR.sw.Reactor("CONSENSUS") - conR.(stateResetter).ResetToState(bcR.state) - conR.Start(bcR.sw) - for _, peer := range bcR.sw.Peers().List() { - conR.AddPeer(peer) - } - }() - break FOR_LOOP - } + /* + // We're done syncing for now (will do again shortly) + // See if we want to stop syncing and turn on the + // consensus reactor. + // TODO: use other heuristics too besides blocktime. + // It's not a security concern, as it only needs to happen + // upon node sync, and there's also a second (slower) + // method of syncing in the consensus reactor. + + if lastValidatedBlock != nil && time.Now().Sub(lastValidatedBlock.Time) < stopSyncingDurationMinutes*time.Minute { + go func() { + log.Info("Stopping blockpool syncing, turning on consensus...") + trySyncTicker.Stop() // Just stop the block requests. Still serve blocks to others. 
+ conR := bcR.sw.Reactor("CONSENSUS") + conR.(stateResetter).ResetToState(bcR.state) + conR.Start(bcR.sw) + for _, peer := range bcR.sw.Peers().List() { + conR.AddPeer(peer) + } + }() + break FOR_LOOP + } + */ continue FOR_LOOP case <-bcR.quit: break FOR_LOOP diff --git a/config/config.go b/config/config.go index 0abb40d03..48973a162 100644 --- a/config/config.go +++ b/config/config.go @@ -104,6 +104,8 @@ func initDefaults(rootDir string) { app.SetDefault("GenesisFile", rootDir+"/genesis.json") app.SetDefault("AddrBookFile", rootDir+"/addrbook.json") app.SetDefault("PrivValidatorfile", rootDir+"/priv_validator.json") + + app.SetDefault("FastSync", false) } func Init(rootDir string) { @@ -161,6 +163,7 @@ func ParseFlags(args []string) { flags.BoolVar(&printHelp, "help", false, "Print this help message.") flags.String("listen_addr", app.GetString("ListenAddr"), "Listen address. (0.0.0.0:0 means any interface, any port)") flags.String("seed_node", app.GetString("SeedNode"), "Address of seed node") + flags.Bool("fast_sync", app.GetBool("FastSync"), "Fast blockchain syncing") flags.String("rpc_http_listen_addr", app.GetString("RPC.HTTP.ListenAddr"), "RPC listen address. Port required") flags.Parse(args) if printHelp { @@ -171,6 +174,7 @@ func ParseFlags(args []string) { // Merge parsed flag values onto app. app.BindPFlag("ListenAddr", flags.Lookup("listen_addr")) app.BindPFlag("SeedNode", flags.Lookup("seed_node")) + app.BindPFlag("FastSync", flags.Lookup("fast_sync")) app.BindPFlag("RPC.HTTP.ListenAddr", flags.Lookup("rpc_http_listen_addr")) // Confused? diff --git a/daemon/daemon.go b/daemon/daemon.go index 1b029f9d7..dc43e2fa1 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -55,7 +55,7 @@ func NewNode() *Node { pexReactor := p2p.NewPEXReactor(book) // Get BlockchainReactor - bcReactor := bc.NewBlockchainReactor(state, blockStore) + bcReactor := bc.NewBlockchainReactor(state, blockStore, config.App().GetBool("FastSync")) // Get MempoolReactor mempool := mempl.NewMempool(state.Copy()) @@ -71,9 +71,13 @@ func NewNode() *Node { sw := p2p.NewSwitch() sw.SetNetwork(config.App().GetString("Network")) sw.AddReactor("PEX", pexReactor).Start(sw) - sw.AddReactor("BLOCKCHAIN", bcReactor).Start(sw) sw.AddReactor("MEMPOOL", mempoolReactor).Start(sw) - sw.AddReactor("CONSENSUS", consensusReactor) // Do not start yet. + sw.AddReactor("BLOCKCHAIN", bcReactor).Start(sw) + if !config.App().GetBool("FastSync") { + sw.AddReactor("CONSENSUS", consensusReactor).Start(sw) + } else { + sw.AddReactor("CONSENSUS", consensusReactor) + } return &Node{ sw: sw, From fd0646fc4f6f33082b4523cbed140c080095882a Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 26 Mar 2015 10:58:20 -0700 Subject: [PATCH 15/18] Compare blockhashes in stageBlock() --- consensus/state.go | 3 ++- state/state.go | 1 + types/block.go | 23 ++++++++++++++++++----- vm/test/fake_app_state.go | 2 +- 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 683612496..84488731f 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1015,7 +1015,8 @@ func (cs *ConsensusState) stageBlock(block *types.Block, blockParts *types.PartS } // Already staged? 
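// Compare by block hash instead of pointer identity; a nil/empty hash means
// the block is incomplete (see Block.Hash) and is never treated as already staged.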
- if cs.stagedBlock == block { + blockHash := block.Hash() + if cs.stagedBlock != nil && len(blockHash) != 0 && bytes.Equal(cs.stagedBlock.Hash(), blockHash) { return nil } diff --git a/state/state.go b/state/state.go index a3f3586eb..0fcd64f7b 100644 --- a/state/state.go +++ b/state/state.go @@ -633,6 +633,7 @@ func (s *State) AppendBlock(block *types.Block, blockPartsHeader types.PartSetHe return nil } +// Mutates the block in place and updates it with new state hash. func (s *State) SetBlockStateHash(block *types.Block) error { sCopy := s.Copy() err := sCopy.appendBlock(block, types.PartSetHeader{}) diff --git a/types/block.go b/types/block.go index 176ce4385..c00e90ccb 100644 --- a/types/block.go +++ b/types/block.go @@ -55,16 +55,24 @@ func (b *Block) ValidateBasic(lastBlockHeight uint, lastBlockHash []byte, return nil } +// Computes and returns the block hash. +// If the block is incomplete (e.g. missing Header.StateHash) +// then the hash is nil, to prevent the usage of that hash. func (b *Block) Hash() []byte { if b.Header == nil || b.Validation == nil || b.Data == nil { return nil } - hashes := [][]byte{ - b.Header.Hash(), - b.Validation.Hash(), - b.Data.Hash(), + hashHeader := b.Header.Hash() + hashValidation := b.Validation.Hash() + hashData := b.Data.Hash() + + // If hashHeader is nil, required fields are missing. + if len(hashHeader) == 0 { + return nil } - // Merkle hash from sub-hashes. + + // Merkle hash from subhashes. + hashes := [][]byte{hashHeader, hashValidation, hashData} return merkle.HashFromHashes(hashes) } @@ -125,7 +133,12 @@ type Header struct { StateHash []byte } +// NOTE: hash is nil if required fields are missing. func (h *Header) Hash() []byte { + if len(h.StateHash) == 0 { + return nil + } + buf := new(bytes.Buffer) hasher, n, err := sha256.New(), new(int64), new(error) binary.WriteBinary(h, buf, n, err) diff --git a/vm/test/fake_app_state.go b/vm/test/fake_app_state.go index f31ed80e9..be11467ea 100644 --- a/vm/test/fake_app_state.go +++ b/vm/test/fake_app_state.go @@ -1,4 +1,4 @@ -package main +package vm import ( "fmt" From 79304b0dd37a84627bb73a9884398b240615ae0d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 28 Mar 2015 23:44:07 -0700 Subject: [PATCH 16/18] Create BlockCache and TxCache for performance. --- account/account.go | 10 +- binary/reflect_test.go | 36 +- blockchain/reactor.go | 2 +- common/int.go | 11 + common/word.go | 78 ++++ consensus/state.go | 2 +- mempool/mempool.go | 7 +- state/block_cache.go | 196 +++++++++++ state/common.go | 18 + state/execution.go | 593 +++++++++++++++++++++++++++++++ state/state.go | 724 +++++--------------------------------- state/state_test.go | 23 +- state/tx_cache.go | 193 ++++++++++ state/vm_app_state.go | 265 -------------- vm/common.go | 35 -- vm/gas.go | 1 - vm/native.go | 13 +- vm/stack.go | 19 +- vm/test/fake_app_state.go | 56 ++- vm/types.go | 47 +-- vm/vm.go | 123 +++---- 21 files changed, 1345 insertions(+), 1107 deletions(-) create mode 100644 common/word.go create mode 100644 state/block_cache.go create mode 100644 state/common.go create mode 100644 state/execution.go create mode 100644 state/tx_cache.go delete mode 100644 state/vm_app_state.go delete mode 100644 vm/common.go diff --git a/account/account.go b/account/account.go index 5c8bfda30..520afd70c 100644 --- a/account/account.go +++ b/account/account.go @@ -38,13 +38,13 @@ type Account struct { StorageRoot []byte // VM storage merkle root. 
} -func (account *Account) Copy() *Account { - accountCopy := *account - return &accountCopy +func (acc *Account) Copy() *Account { + accCopy := *acc + return &accCopy } -func (account *Account) String() string { - return fmt.Sprintf("Account{%X:%v C:%v S:%X}", account.Address, account.PubKey, len(account.Code), account.StorageRoot) +func (acc *Account) String() string { + return fmt.Sprintf("Account{%X:%v C:%v S:%X}", acc.Address, acc.PubKey, len(acc.Code), acc.StorageRoot) } func AccountEncoder(o interface{}, w io.Writer, n *int64, err *error) { diff --git a/binary/reflect_test.go b/binary/reflect_test.go index d8055f53a..ac5423685 100644 --- a/binary/reflect_test.go +++ b/binary/reflect_test.go @@ -2,6 +2,7 @@ package binary import ( "bytes" + "fmt" "reflect" "testing" "time" @@ -58,6 +59,35 @@ var _ = RegisterInterface( ConcreteType{&Viper{}}, ) +func TestAnimalInterface(t *testing.T) { + var foo Animal + + // Type of pointer to Animal + rt := reflect.TypeOf(&foo) + fmt.Printf("rt: %v\n", rt) + + // Type of Animal itself. + // NOTE: normally this is acquired through other means + // like introspecting on method signatures, or struct fields. + rte := rt.Elem() + fmt.Printf("rte: %v\n", rte) + + // Get a new pointer to the interface + // NOTE: calling .Interface() is to get the actual value, + // instead of reflection values. + ptr := reflect.New(rte).Interface() + fmt.Printf("ptr: %v", ptr) + + // Make a binary byteslice that represents a snake. + snakeBytes := BinaryBytes(Snake([]byte("snake"))) + snakeReader := bytes.NewReader(snakeBytes) + + // Now you can read it. + n, err := new(int64), new(error) + it := *ReadBinary(ptr, snakeReader, n, err).(*Animal) + fmt.Println(it, reflect.TypeOf(it)) +} + //------------------------------------- type Constructor func() interface{} @@ -287,9 +317,9 @@ func validateComplexArray(o interface{}, t *testing.T) { var testCases = []TestCase{} func init() { - //testCases = append(testCases, TestCase{constructBasic, instantiateBasic, validateBasic}) - //testCases = append(testCases, TestCase{constructComplex, instantiateComplex, validateComplex}) - //testCases = append(testCases, TestCase{constructComplex2, instantiateComplex2, validateComplex2}) + testCases = append(testCases, TestCase{constructBasic, instantiateBasic, validateBasic}) + testCases = append(testCases, TestCase{constructComplex, instantiateComplex, validateComplex}) + testCases = append(testCases, TestCase{constructComplex2, instantiateComplex2, validateComplex2}) testCases = append(testCases, TestCase{constructComplexArray, instantiateComplexArray, validateComplexArray}) } diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 6d65708f2..7e776ab07 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -194,7 +194,7 @@ FOR_LOOP: break SYNC_LOOP } else { bcR.pool.PopRequest() - err := bcR.state.AppendBlock(first, firstPartsHeader) + err := sm.ExecBlock(bcR.state, first, firstPartsHeader) if err != nil { // TODO This is bad, are we zombie? 
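// ExecBlock may have partially mutated bcR.state before failing,
// so there is no clean way to recover here; crash instead.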
panic(Fmt("Failed to process committed block: %v", err)) diff --git a/common/int.go b/common/int.go index 6ca602193..f1c376d75 100644 --- a/common/int.go +++ b/common/int.go @@ -1,6 +1,7 @@ package common import ( + "encoding/binary" "sort" ) @@ -18,3 +19,13 @@ func SearchUint64s(a []uint64, x uint64) int { } func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } + +//----------------------------------------------------------------------------- + +func PutUint64(dest []byte, i uint64) { + binary.LittleEndian.PutUint64(dest, i) +} + +func GetUint64(src []byte) uint64 { + return binary.LittleEndian.Uint64(src) +} diff --git a/common/word.go b/common/word.go new file mode 100644 index 000000000..d67730e13 --- /dev/null +++ b/common/word.go @@ -0,0 +1,78 @@ +package common + +import ( + "bytes" + "encoding/binary" + "sort" +) + +var ( + Zero256 = Word256{0} + One256 = Word256{1} +) + +type Word256 [32]byte + +func (w Word256) String() string { return string(w[:]) } +func (w Word256) Copy() Word256 { return w } +func (w Word256) Bytes() []byte { return w[:] } // copied. +func (w Word256) Prefix(n int) []byte { return w[:n] } +func (w Word256) IsZero() bool { + accum := byte(0) + for _, byt := range w { + accum |= byt + } + return accum == 0 +} +func (w Word256) Compare(other Word256) int { + return bytes.Compare(w[:], other[:]) +} + +func Uint64ToWord256(i uint64) Word256 { + word := Word256{} + PutUint64(word[:], i) + return word +} + +func RightPadWord256(bz []byte) (word Word256) { + copy(word[:], bz) + return +} + +func LeftPadWord256(bz []byte) (word Word256) { + copy(word[32-len(bz):], bz) + return +} + +func Uint64FromWord256(word Word256) uint64 { + return binary.LittleEndian.Uint64(word[:]) +} + +//------------------------------------- + +type Tuple256 struct { + First Word256 + Second Word256 +} + +func (tuple Tuple256) Compare(other Tuple256) int { + firstCompare := tuple.First.Compare(other.First) + if firstCompare == 0 { + return tuple.Second.Compare(other.Second) + } else { + return firstCompare + } +} + +func Tuple256Split(t Tuple256) (Word256, Word256) { + return t.First, t.Second +} + +type Tuple256Slice []Tuple256 + +func (p Tuple256Slice) Len() int { return len(p) } +func (p Tuple256Slice) Less(i, j int) bool { + return p[i].Compare(p[j]) < 0 +} +func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Tuple256Slice) Sort() { sort.Sort(p) } diff --git a/consensus/state.go b/consensus/state.go index 84488731f..8756df612 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1025,7 +1025,7 @@ func (cs *ConsensusState) stageBlock(block *types.Block, blockParts *types.PartS // Commit block onto the copied state. // NOTE: Basic validation is done in state.AppendBlock(). 
- err := stateCopy.AppendBlock(block, blockParts.Header()) + err := sm.ExecBlock(stateCopy, block, blockParts.Header()) if err != nil { return err } else { diff --git a/mempool/mempool.go b/mempool/mempool.go index e955dfacf..28e3ab708 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -19,12 +19,14 @@ import ( type Mempool struct { mtx sync.Mutex state *sm.State + cache *sm.BlockCache txs []types.Tx } func NewMempool(state *sm.State) *Mempool { return &Mempool{ state: state, + cache: sm.NewBlockCache(state), } } @@ -36,7 +38,7 @@ func (mem *Mempool) GetState() *sm.State { func (mem *Mempool) AddTx(tx types.Tx) (err error) { mem.mtx.Lock() defer mem.mtx.Unlock() - err = mem.state.ExecTx(tx, false) + err = sm.ExecTx(mem.cache, tx, false) if err != nil { log.Debug("AddTx() error", "tx", tx, "error", err) return err @@ -62,6 +64,7 @@ func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) { mem.mtx.Lock() defer mem.mtx.Unlock() mem.state = state.Copy() + mem.cache = sm.NewBlockCache(mem.state) // First, create a lookup map of txns in new block. blockTxsMap := make(map[string]struct{}) @@ -86,7 +89,7 @@ func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) { // Next, filter all txs that aren't valid given new state. validTxs := []types.Tx{} for _, tx := range txs { - err := mem.state.ExecTx(tx, false) + err := sm.ExecTx(mem.cache, tx, false) if err == nil { log.Debug("Filter in, valid", "tx", tx) validTxs = append(validTxs, tx) diff --git a/state/block_cache.go b/state/block_cache.go new file mode 100644 index 000000000..4da8e7ce7 --- /dev/null +++ b/state/block_cache.go @@ -0,0 +1,196 @@ +package state + +import ( + "sort" + + ac "github.com/tendermint/tendermint/account" + "github.com/tendermint/tendermint/binary" + . 
"github.com/tendermint/tendermint/common" + dbm "github.com/tendermint/tendermint/db" + "github.com/tendermint/tendermint/merkle" +) + +func makeStorage(db dbm.DB, root []byte) merkle.Tree { + storage := merkle.NewIAVLTree( + binary.BasicCodec, + binary.BasicCodec, + 1024, + db, + ) + storage.Load(root) + return storage +} + +type BlockCache struct { + db dbm.DB + backend *State + accounts map[string]accountInfo + storages map[Tuple256]Word256 +} + +func NewBlockCache(backend *State) *BlockCache { + return &BlockCache{ + db: backend.DB, + backend: backend, + accounts: make(map[string]accountInfo), + storages: make(map[Tuple256]Word256), + } +} + +func (cache *BlockCache) State() *State { + return cache.backend +} + +//------------------------------------- +// BlockCache.account + +func (cache *BlockCache) GetAccount(addr []byte) *ac.Account { + acc, storage, removed := unpack(cache.accounts[string(addr)]) + if removed { + return nil + } else if acc != nil { + return acc + } else { + acc = cache.backend.GetAccount(addr) + storage = makeStorage(cache.db, acc.StorageRoot) + cache.accounts[string(addr)] = accountInfo{acc, storage, false} + return acc + } +} + +func (cache *BlockCache) UpdateAccount(acc *ac.Account) { + addr := acc.Address + // SANITY CHECK + _, storage, removed := unpack(cache.accounts[string(addr)]) + if removed { + panic("UpdateAccount on a removed account") + } + // SANITY CHECK END + cache.accounts[string(addr)] = accountInfo{acc, storage, false} +} + +func (cache *BlockCache) RemoveAccount(addr []byte) { + // SANITY CHECK + _, _, removed := unpack(cache.accounts[string(addr)]) + if removed { + panic("RemoveAccount on a removed account") + } + // SANITY CHECK END + cache.accounts[string(addr)] = accountInfo{nil, nil, true} +} + +// BlockCache.account +//------------------------------------- +// BlockCache.storage + +func (cache *BlockCache) GetStorage(addr Word256, key Word256) (value Word256) { + // Check cache + value, ok := cache.storages[Tuple256{addr, key}] + if ok { + return value + } + + // Get or load storage + _, storage, removed := unpack(cache.accounts[string(addr.Prefix(20))]) + if removed { + panic("GetStorage() on removed account") + } + + // Load and set cache + _, val_ := storage.Get(key.Bytes()) + value = Zero256 + if val_ != nil { + value = RightPadWord256(val_.([]byte)) + } + cache.storages[Tuple256{addr, key}] = value + return value +} + +// NOTE: Set value to zero to removed from the trie. +func (cache *BlockCache) SetStorage(addr Word256, key Word256, value Word256) { + _, _, removed := unpack(cache.accounts[string(addr.Prefix(20))]) + if removed { + panic("SetStorage() on a removed account") + } + cache.storages[Tuple256{addr, key}] = value +} + +// BlockCache.storage +//------------------------------------- + +// CONTRACT the updates are in deterministic order. +func (cache *BlockCache) Sync() { + + // Determine order for storage updates + // The address comes first so it'll be grouped. + storageKeys := make([]Tuple256, 0, len(cache.storages)) + for keyTuple := range cache.storages { + storageKeys = append(storageKeys, keyTuple) + } + Tuple256Slice(storageKeys).Sort() + + // Update storage for all account/key. + // Later we'll iterate over all the users and save storage + update storage root. 
+ var ( + curAddr Word256 + curAcc *ac.Account + curAccRemoved bool + curStorage merkle.Tree + ) + for _, storageKey := range storageKeys { + addr, key := Tuple256Split(storageKey) + if addr != curAddr || curAcc == nil { + acc, storage, removed := unpack(cache.accounts[string(addr.Prefix(20))]) + curAddr = addr + curAcc = acc + curAccRemoved = removed + curStorage = storage + } + if curAccRemoved { + continue + } + value := cache.storages[storageKey] + if value.IsZero() { + curStorage.Remove(key.Bytes()) + } else { + curStorage.Set(key.Bytes(), value.Bytes()) + } + } + + // Determine order for accounts + addrStrs := []string{} + for addrStr := range cache.accounts { + addrStrs = append(addrStrs, addrStr) + } + sort.Strings(addrStrs) + + // Update or delete accounts. + for _, addrStr := range addrStrs { + acc, storage, removed := unpack(cache.accounts[addrStr]) + if removed { + removed := cache.backend.RemoveAccount(acc.Address) + if !removed { + panic(Fmt("Could not remove account to be removed: %X", acc.Address)) + } + } else { + if acc == nil { + panic(Fmt("Account should not be nil for addr: %X", acc.Address)) + } + acc.StorageRoot = storage.Save() + cache.backend.UpdateAccount(acc) + } + } + +} + +//----------------------------------------------------------------------------- + +type accountInfo struct { + account *ac.Account + storage merkle.Tree + removed bool +} + +func unpack(accInfo accountInfo) (*ac.Account, merkle.Tree, bool) { + return accInfo.account, accInfo.storage, accInfo.removed +} diff --git a/state/common.go b/state/common.go new file mode 100644 index 000000000..342d35779 --- /dev/null +++ b/state/common.go @@ -0,0 +1,18 @@ +package state + +import ( + ac "github.com/tendermint/tendermint/account" + . "github.com/tendermint/tendermint/common" + "github.com/tendermint/tendermint/vm" +) + +type AccountGetter interface { + GetAccount(addr []byte) *ac.Account +} + +type VMAccountState interface { + GetAccount(addr Word256) *vm.Account + UpdateAccount(acc *vm.Account) + RemoveAccount(acc *vm.Account) + CreateAccount(creator *vm.Account) *vm.Account +} diff --git a/state/execution.go b/state/execution.go new file mode 100644 index 000000000..d7b3b78f7 --- /dev/null +++ b/state/execution.go @@ -0,0 +1,593 @@ +package state + +import ( + "bytes" + "errors" + + "github.com/tendermint/tendermint/account" + . "github.com/tendermint/tendermint/common" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/vm" +) + +// NOTE: If an error occurs during block execution, state will be left +// at an invalid state. Copy the state before calling ExecBlock! +func ExecBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeader) error { + err := execBlock(s, block, blockPartsHeader) + if err != nil { + return err + } + // State.Hash should match block.StateHash + stateHash := s.Hash() + if !bytes.Equal(stateHash, block.StateHash) { + return Errorf("Invalid state hash. Expected %X, got %X", + stateHash, block.StateHash) + } + return nil +} + +// executes transactions of a block, does not check block.StateHash +// NOTE: If an error occurs during block execution, state will be left +// at an invalid state. Copy the state before calling execBlock! +func execBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeader) error { + // Basic block validation. + err := block.ValidateBasic(s.LastBlockHeight, s.LastBlockHash, s.LastBlockParts, s.LastBlockTime) + if err != nil { + return err + } + + // Validate block Validation. 
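// block.Validation carries commits for the previous block (height-1), signed by
// the validator set that was bonded then (LastBondedValidators). More than 2/3 of
// that set's voting power must have signed; the block at height 1 carries no commits.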
+ if block.Height == 1 { + if len(block.Validation.Commits) != 0 { + return errors.New("Block at height 1 (first block) should have no Validation commits") + } + } else { + if uint(len(block.Validation.Commits)) != s.LastBondedValidators.Size() { + return errors.New(Fmt("Invalid block validation size. Expected %v, got %v", + s.LastBondedValidators.Size(), len(block.Validation.Commits))) + } + var sumVotingPower uint64 + s.LastBondedValidators.Iterate(func(index uint, val *Validator) bool { + commit := block.Validation.Commits[index] + if commit.IsZero() { + return false + } else { + vote := &types.Vote{ + Height: block.Height - 1, + Round: commit.Round, + Type: types.VoteTypeCommit, + BlockHash: block.LastBlockHash, + BlockParts: block.LastBlockParts, + } + if val.PubKey.VerifyBytes(account.SignBytes(vote), commit.Signature) { + sumVotingPower += val.VotingPower + return false + } else { + log.Warn(Fmt("Invalid validation signature.\nval: %v\nvote: %v", val, vote)) + err = errors.New("Invalid validation signature") + return true + } + } + }) + if err != nil { + return err + } + if sumVotingPower <= s.LastBondedValidators.TotalVotingPower()*2/3 { + return errors.New("Insufficient validation voting power") + } + } + + // Update Validator.LastCommitHeight as necessary. + for i, commit := range block.Validation.Commits { + if commit.IsZero() { + continue + } + _, val := s.LastBondedValidators.GetByIndex(uint(i)) + if val == nil { + panic(Fmt("Failed to fetch validator at index %v", i)) + } + if _, val_ := s.BondedValidators.GetByAddress(val.Address); val_ != nil { + val_.LastCommitHeight = block.Height - 1 + updated := s.BondedValidators.Update(val_) + if !updated { + panic("Failed to update bonded validator LastCommitHeight") + } + } else if _, val_ := s.UnbondingValidators.GetByAddress(val.Address); val_ != nil { + val_.LastCommitHeight = block.Height - 1 + updated := s.UnbondingValidators.Update(val_) + if !updated { + panic("Failed to update unbonding validator LastCommitHeight") + } + } else { + panic("Could not find validator") + } + } + + // Remember LastBondedValidators + s.LastBondedValidators = s.BondedValidators.Copy() + + // Create BlockCache to cache changes to state. + blockCache := NewBlockCache(s) + + // Commit each tx + for _, tx := range block.Data.Txs { + err := ExecTx(blockCache, tx, true) + if err != nil { + return InvalidTxError{tx, err} + } + } + + // Now sync the BlockCache to the backend. + blockCache.Sync() + + // If any unbonding periods are over, + // reward account with bonded coins. + toRelease := []*Validator{} + s.UnbondingValidators.Iterate(func(index uint, val *Validator) bool { + if val.UnbondHeight+unbondingPeriodBlocks < block.Height { + toRelease = append(toRelease, val) + } + return false + }) + for _, val := range toRelease { + s.releaseValidator(val) + } + + // If any validators haven't signed in a while, + // unbond them, they have timed out. 
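// "A while" is measured in blocks: a validator whose last activity (the later of
// its bond height and last commit height) is more than validatorTimeoutBlocks
// behind this block's height is unbonded below.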
+ toTimeout := []*Validator{} + s.BondedValidators.Iterate(func(index uint, val *Validator) bool { + lastActivityHeight := MaxUint(val.BondHeight, val.LastCommitHeight) + if lastActivityHeight+validatorTimeoutBlocks < block.Height { + log.Info("Validator timeout", "validator", val, "height", block.Height) + toTimeout = append(toTimeout, val) + } + return false + }) + for _, val := range toTimeout { + s.unbondValidator(val) + } + + // Increment validator AccumPowers + s.BondedValidators.IncrementAccum(1) + + s.LastBlockHeight = block.Height + s.LastBlockHash = block.Hash() + s.LastBlockParts = blockPartsHeader + s.LastBlockTime = block.Time + return nil +} + +// The accounts from the TxInputs must either already have +// account.PubKey.(type) != PubKeyNil, (it must be known), +// or it must be specified in the TxInput. If redeclared, +// the TxInput is modified and input.PubKey set to PubKeyNil. +func getOrMakeAccounts(state AccountGetter, ins []*types.TxInput, outs []*types.TxOutput) (map[string]*account.Account, error) { + accounts := map[string]*account.Account{} + for _, in := range ins { + // Account shouldn't be duplicated + if _, ok := accounts[string(in.Address)]; ok { + return nil, types.ErrTxDuplicateAddress + } + acc := state.GetAccount(in.Address) + if acc == nil { + return nil, types.ErrTxInvalidAddress + } + // PubKey should be present in either "account" or "in" + if err := checkInputPubKey(acc, in); err != nil { + return nil, err + } + accounts[string(in.Address)] = acc + } + for _, out := range outs { + // Account shouldn't be duplicated + if _, ok := accounts[string(out.Address)]; ok { + return nil, types.ErrTxDuplicateAddress + } + acc := state.GetAccount(out.Address) + // output account may be nil (new) + if acc == nil { + acc = &account.Account{ + Address: out.Address, + PubKey: account.PubKeyNil{}, + Sequence: 0, + Balance: 0, + } + } + accounts[string(out.Address)] = acc + } + return accounts, nil +} + +func checkInputPubKey(acc *account.Account, in *types.TxInput) error { + if _, isNil := acc.PubKey.(account.PubKeyNil); isNil { + if _, isNil := in.PubKey.(account.PubKeyNil); isNil { + return types.ErrTxUnknownPubKey + } + if !bytes.Equal(in.PubKey.Address(), acc.Address) { + return types.ErrTxInvalidPubKey + } + acc.PubKey = in.PubKey + } else { + in.PubKey = account.PubKeyNil{} + } + return nil +} + +func validateInputs(accounts map[string]*account.Account, signBytes []byte, ins []*types.TxInput) (total uint64, err error) { + for _, in := range ins { + acc := accounts[string(in.Address)] + if acc == nil { + panic("validateInputs() expects account in accounts") + } + err = validateInput(acc, signBytes, in) + if err != nil { + return + } + // Good. 
Add amount to total + total += in.Amount + } + return total, nil +} + +func validateInput(acc *account.Account, signBytes []byte, in *types.TxInput) (err error) { + // Check TxInput basic + if err := in.ValidateBasic(); err != nil { + return err + } + // Check signatures + if !acc.PubKey.VerifyBytes(signBytes, in.Signature) { + return types.ErrTxInvalidSignature + } + // Check sequences + if acc.Sequence+1 != in.Sequence { + return types.ErrTxInvalidSequence{ + Got: uint64(in.Sequence), + Expected: uint64(acc.Sequence + 1), + } + } + // Check amount + if acc.Balance < in.Amount { + return types.ErrTxInsufficientFunds + } + return nil +} + +func validateOutputs(outs []*types.TxOutput) (total uint64, err error) { + for _, out := range outs { + // Check TxOutput basic + if err := out.ValidateBasic(); err != nil { + return 0, err + } + // Good. Add amount to total + total += out.Amount + } + return total, nil +} + +func adjustByInputs(accounts map[string]*account.Account, ins []*types.TxInput) { + for _, in := range ins { + acc := accounts[string(in.Address)] + if acc == nil { + panic("adjustByInputs() expects account in accounts") + } + if acc.Balance < in.Amount { + panic("adjustByInputs() expects sufficient funds") + } + acc.Balance -= in.Amount + acc.Sequence += 1 + } +} + +func adjustByOutputs(accounts map[string]*account.Account, outs []*types.TxOutput) { + for _, out := range outs { + acc := accounts[string(out.Address)] + if acc == nil { + panic("adjustByOutputs() expects account in accounts") + } + acc.Balance += out.Amount + } +} + +// If the tx is invalid, an error will be returned. +// Unlike ExecBlock(), state will not be altered. +func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool) error { + + // TODO: do something with fees + fees := uint64(0) + _s := blockCache.State() // hack to access validators. + + // Exec tx + switch tx := tx_.(type) { + case *types.SendTx: + accounts, err := getOrMakeAccounts(blockCache, tx.Inputs, tx.Outputs) + if err != nil { + return err + } + signBytes := account.SignBytes(tx) + inTotal, err := validateInputs(accounts, signBytes, tx.Inputs) + if err != nil { + return err + } + outTotal, err := validateOutputs(tx.Outputs) + if err != nil { + return err + } + if outTotal > inTotal { + return types.ErrTxInsufficientFunds + } + fee := inTotal - outTotal + fees += fee + + // Good! 
Adjust accounts + adjustByInputs(accounts, tx.Inputs) + adjustByOutputs(accounts, tx.Outputs) + for _, acc := range accounts { + blockCache.UpdateAccount(acc) + } + return nil + + case *types.CallTx: + var inAcc, outAcc *account.Account + + // Validate input + inAcc = blockCache.GetAccount(tx.Input.Address) + if inAcc == nil { + log.Debug(Fmt("Can't find in account %X", tx.Input.Address)) + return types.ErrTxInvalidAddress + } + // pubKey should be present in either "inAcc" or "tx.Input" + if err := checkInputPubKey(inAcc, tx.Input); err != nil { + log.Debug(Fmt("Can't find pubkey for %X", tx.Input.Address)) + return err + } + signBytes := account.SignBytes(tx) + err := validateInput(inAcc, signBytes, tx.Input) + if err != nil { + log.Debug(Fmt("validateInput failed on %X:", tx.Input.Address)) + return err + } + if tx.Input.Amount < tx.Fee { + log.Debug(Fmt("Sender did not send enough to cover the fee %X", tx.Input.Address)) + return types.ErrTxInsufficientFunds + } + + createAccount := len(tx.Address) == 0 + if !createAccount { + // Validate output + if len(tx.Address) != 20 { + log.Debug(Fmt("Destination address is not 20 bytes %X", tx.Address)) + return types.ErrTxInvalidAddress + } + // this may be nil if we are still in mempool and contract was created in same block as this tx + // but that's fine, because the account will be created properly when the create tx runs in the block + // and then this won't return nil. otherwise, we take their fee + outAcc = blockCache.GetAccount(tx.Address) + } + + log.Debug(Fmt("Out account: %v", outAcc)) + + // Good! + value := tx.Input.Amount - tx.Fee + inAcc.Sequence += 1 + + if runCall { + + var ( + gas uint64 = tx.GasLimit + err error = nil + caller *vm.Account = toVMAccount(inAcc) + callee *vm.Account = nil + code []byte = nil + txCache = NewTxCache(blockCache) + params = vm.Params{ + BlockHeight: uint64(_s.LastBlockHeight), + BlockHash: RightPadWord256(_s.LastBlockHash), + BlockTime: _s.LastBlockTime.Unix(), + GasLimit: 10000000, + } + ) + + // Maybe create a new callee account if + // this transaction is creating a new contract. + if !createAccount { + if outAcc == nil { + // take fees (sorry pal) + inAcc.Balance -= tx.Fee + blockCache.UpdateAccount(inAcc) + log.Debug(Fmt("Cannot find destination address %X. Deducting fee from caller", tx.Address)) + return types.ErrTxInvalidAddress + + } + callee = toVMAccount(outAcc) + code = callee.Code + log.Debug(Fmt("Calling contract %X with code %X", callee.Address, callee.Code)) + } else { + callee = txCache.CreateAccount(caller) + log.Debug(Fmt("Created new account %X", callee.Address)) + code = tx.Data + } + log.Debug(Fmt("Code for this contract: %X", code)) + + txCache.UpdateAccount(caller) // because we adjusted by input above, and bumped nonce maybe. + txCache.UpdateAccount(callee) // because we adjusted by input above. + vmach := vm.NewVM(txCache, params, caller.Address) + // NOTE: Call() transfers the value from caller to callee iff call succeeds. + ret, err := vmach.Call(caller, callee, code, tx.Data, value, &gas) + if err != nil { + // Failure. Charge the gas fee. The 'value' was otherwise not transferred. + log.Debug(Fmt("Error on execution: %v", err)) + inAcc.Balance -= tx.Fee + blockCache.UpdateAccount(inAcc) + // Throw away 'txCache' which holds incomplete updates (don't sync it). + } else { + log.Debug("Successful execution") + // Success + if createAccount { + callee.Code = ret + } + + txCache.Sync() + } + // Create a receipt from the ret and whether errored. 
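// (No receipt object is built yet; the return value and any error are only logged below.)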
+ log.Info("VM call complete", "caller", caller, "callee", callee, "return", ret, "err", err) + } else { + // The mempool does not call txs until + // the proposer determines the order of txs. + // So mempool will skip the actual .Call(), + // and only deduct from the caller's balance. + inAcc.Balance -= value + if createAccount { + inAcc.Sequence += 1 + } + blockCache.UpdateAccount(inAcc) + } + + return nil + + case *types.BondTx: + valInfo := blockCache.State().GetValidatorInfo(tx.PubKey.Address()) + if valInfo != nil { + // TODO: In the future, check that the validator wasn't destroyed, + // add funds, merge UnbondTo outputs, and unbond validator. + return errors.New("Adding coins to existing validators not yet supported") + } + accounts, err := getOrMakeAccounts(blockCache, tx.Inputs, nil) + if err != nil { + return err + } + signBytes := account.SignBytes(tx) + inTotal, err := validateInputs(accounts, signBytes, tx.Inputs) + if err != nil { + return err + } + if err := tx.PubKey.ValidateBasic(); err != nil { + return err + } + outTotal, err := validateOutputs(tx.UnbondTo) + if err != nil { + return err + } + if outTotal > inTotal { + return types.ErrTxInsufficientFunds + } + fee := inTotal - outTotal + fees += fee + + // Good! Adjust accounts + adjustByInputs(accounts, tx.Inputs) + for _, acc := range accounts { + blockCache.UpdateAccount(acc) + } + // Add ValidatorInfo + _s.SetValidatorInfo(&ValidatorInfo{ + Address: tx.PubKey.Address(), + PubKey: tx.PubKey, + UnbondTo: tx.UnbondTo, + FirstBondHeight: _s.LastBlockHeight + 1, + FirstBondAmount: outTotal, + }) + // Add Validator + added := _s.BondedValidators.Add(&Validator{ + Address: tx.PubKey.Address(), + PubKey: tx.PubKey, + BondHeight: _s.LastBlockHeight + 1, + VotingPower: outTotal, + Accum: 0, + }) + if !added { + panic("Failed to add validator") + } + return nil + + case *types.UnbondTx: + // The validator must be active + _, val := _s.BondedValidators.GetByAddress(tx.Address) + if val == nil { + return types.ErrTxInvalidAddress + } + + // Verify the signature + signBytes := account.SignBytes(tx) + if !val.PubKey.VerifyBytes(signBytes, tx.Signature) { + return types.ErrTxInvalidSignature + } + + // tx.Height must be greater than val.LastCommitHeight + if tx.Height <= val.LastCommitHeight { + return errors.New("Invalid unbond height") + } + + // Good! + _s.unbondValidator(val) + return nil + + case *types.RebondTx: + // The validator must be inactive + _, val := _s.UnbondingValidators.GetByAddress(tx.Address) + if val == nil { + return types.ErrTxInvalidAddress + } + + // Verify the signature + signBytes := account.SignBytes(tx) + if !val.PubKey.VerifyBytes(signBytes, tx.Signature) { + return types.ErrTxInvalidSignature + } + + // tx.Height must be equal to the next height + if tx.Height != _s.LastBlockHeight+1 { + return errors.New(Fmt("Invalid rebond height. Expected %v, got %v", _s.LastBlockHeight+1, tx.Height)) + } + + // Good! 
+ _s.rebondValidator(val) + return nil + + case *types.DupeoutTx: + // Verify the signatures + _, accused := _s.BondedValidators.GetByAddress(tx.Address) + if accused == nil { + _, accused = _s.UnbondingValidators.GetByAddress(tx.Address) + if accused == nil { + return types.ErrTxInvalidAddress + } + } + voteASignBytes := account.SignBytes(&tx.VoteA) + voteBSignBytes := account.SignBytes(&tx.VoteB) + if !accused.PubKey.VerifyBytes(voteASignBytes, tx.VoteA.Signature) || + !accused.PubKey.VerifyBytes(voteBSignBytes, tx.VoteB.Signature) { + return types.ErrTxInvalidSignature + } + + // Verify equivocation + // TODO: in the future, just require one vote from a previous height that + // doesn't exist on this chain. + if tx.VoteA.Height != tx.VoteB.Height { + return errors.New("DupeoutTx heights don't match") + } + if tx.VoteA.Type == types.VoteTypeCommit && tx.VoteA.Round < tx.VoteB.Round { + // Check special case (not an error, validator must be slashed!) + // Validators should not sign another vote after committing. + } else if tx.VoteB.Type == types.VoteTypeCommit && tx.VoteB.Round < tx.VoteA.Round { + // We need to check both orderings of the votes + } else { + if tx.VoteA.Round != tx.VoteB.Round { + return errors.New("DupeoutTx rounds don't match") + } + if tx.VoteA.Type != tx.VoteB.Type { + return errors.New("DupeoutTx types don't match") + } + if bytes.Equal(tx.VoteA.BlockHash, tx.VoteB.BlockHash) { + return errors.New("DupeoutTx blockhashes shouldn't match") + } + } + + // Good! (Bad validator!) + _s.destroyValidator(accused) + return nil + + default: + panic("Unknown Tx type") + } +} diff --git a/state/state.go b/state/state.go index 0fcd64f7b..700202b87 100644 --- a/state/state.go +++ b/state/state.go @@ -2,17 +2,14 @@ package state import ( "bytes" - "errors" "fmt" "time" "github.com/tendermint/tendermint/account" "github.com/tendermint/tendermint/binary" - . "github.com/tendermint/tendermint/common" dbm "github.com/tendermint/tendermint/db" "github.com/tendermint/tendermint/merkle" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/vm" ) var ( @@ -25,17 +22,6 @@ var ( //----------------------------------------------------------------------------- -type InvalidTxError struct { - Tx types.Tx - Reason error -} - -func (txErr InvalidTxError) Error() string { - return fmt.Sprintf("Invalid tx: [%v] reason: [%v]", txErr.Tx, txErr.Reason) -} - -//----------------------------------------------------------------------------- - // NOTE: not goroutine-safe. type State struct { DB dbm.DB @@ -78,7 +64,6 @@ func LoadState(db dbm.DB) *State { return s } -// Save this state into the db. func (s *State) Save() { s.accounts.Save() s.validatorInfos.Save() @@ -98,6 +83,9 @@ func (s *State) Save() { s.DB.Set(stateKey, buf.Bytes()) } +// CONTRACT: +// Copy() is a cheap way to take a snapshot, +// as if State were copied by value. func (s *State) Copy() *State { return &State{ DB: s.DB, @@ -113,437 +101,81 @@ func (s *State) Copy() *State { } } -// The accounts from the TxInputs must either already have -// account.PubKey.(type) != PubKeyNil, (it must be known), -// or it must be specified in the TxInput. If redeclared, -// the TxInput is modified and input.PubKey set to PubKeyNil. 
-func (s *State) GetOrMakeAccounts(ins []*types.TxInput, outs []*types.TxOutput) (map[string]*account.Account, error) { - accounts := map[string]*account.Account{} - for _, in := range ins { - // Account shouldn't be duplicated - if _, ok := accounts[string(in.Address)]; ok { - return nil, types.ErrTxDuplicateAddress - } - acc := s.GetAccount(in.Address) - if acc == nil { - return nil, types.ErrTxInvalidAddress - } - // PubKey should be present in either "account" or "in" - if err := checkInputPubKey(acc, in); err != nil { - return nil, err - } - accounts[string(in.Address)] = acc +// Returns a hash that represents the state data, excluding Last* +func (s *State) Hash() []byte { + hashables := []merkle.Hashable{ + s.BondedValidators, + s.UnbondingValidators, + s.accounts, + s.validatorInfos, } - for _, out := range outs { - // Account shouldn't be duplicated - if _, ok := accounts[string(out.Address)]; ok { - return nil, types.ErrTxDuplicateAddress - } - acc := s.GetAccount(out.Address) - // output account may be nil (new) - if acc == nil { - acc = &account.Account{ - Address: out.Address, - PubKey: account.PubKeyNil{}, - Sequence: 0, - Balance: 0, - } - } - accounts[string(out.Address)] = acc - } - return accounts, nil + return merkle.HashFromHashables(hashables) } -func checkInputPubKey(acc *account.Account, in *types.TxInput) error { - if _, isNil := acc.PubKey.(account.PubKeyNil); isNil { - if _, isNil := in.PubKey.(account.PubKeyNil); isNil { - return types.ErrTxUnknownPubKey - } - if !bytes.Equal(in.PubKey.Address(), acc.Address) { - return types.ErrTxInvalidPubKey - } - acc.PubKey = in.PubKey - } else { - in.PubKey = account.PubKeyNil{} - } - return nil -} - -func (s *State) ValidateInputs(accounts map[string]*account.Account, signBytes []byte, ins []*types.TxInput) (total uint64, err error) { - for _, in := range ins { - acc := accounts[string(in.Address)] - if acc == nil { - panic("ValidateInputs() expects account in accounts") - } - err = s.ValidateInput(acc, signBytes, in) - if err != nil { - return - } - // Good. Add amount to total - total += in.Amount - } - return total, nil -} - -func (s *State) ValidateInput(acc *account.Account, signBytes []byte, in *types.TxInput) (err error) { - // Check TxInput basic - if err := in.ValidateBasic(); err != nil { +// Mutates the block in place and updates it with new state hash. +func (s *State) SetBlockStateHash(block *types.Block) error { + sCopy := s.Copy() + err := execBlock(sCopy, block, types.PartSetHeader{}) + if err != nil { return err } - // Check signatures - if !acc.PubKey.VerifyBytes(signBytes, in.Signature) { - return types.ErrTxInvalidSignature - } - // Check sequences - if acc.Sequence+1 != in.Sequence { - return types.ErrTxInvalidSequence{ - Got: uint64(in.Sequence), - Expected: uint64(acc.Sequence + 1), - } - } - // Check amount - if acc.Balance < in.Amount { - return types.ErrTxInsufficientFunds - } + // Set block.StateHash + block.StateHash = sCopy.Hash() return nil } -func (s *State) ValidateOutputs(outs []*types.TxOutput) (total uint64, err error) { - for _, out := range outs { - // Check TxOutput basic - if err := out.ValidateBasic(); err != nil { - return 0, err - } - // Good. Add amount to total - total += out.Amount +//------------------------------------- +// State.accounts + +// The returned Account is a copy, so mutating it +// has no side effects. 
+// Implements Statelike +func (s *State) GetAccount(address []byte) *account.Account { + _, acc := s.accounts.Get(address) + if acc == nil { + return nil } - return total, nil + return acc.(*account.Account).Copy() } -func (s *State) AdjustByInputs(accounts map[string]*account.Account, ins []*types.TxInput) { - for _, in := range ins { - acc := accounts[string(in.Address)] - if acc == nil { - panic("AdjustByInputs() expects account in accounts") - } - if acc.Balance < in.Amount { - panic("AdjustByInputs() expects sufficient funds") - } - acc.Balance -= in.Amount - acc.Sequence += 1 - } +// The account is copied before setting, so mutating it +// afterwards has no side effects. +// Implements Statelike +func (s *State) UpdateAccount(account *account.Account) bool { + return s.accounts.Set(account.Address, account.Copy()) } -func (s *State) AdjustByOutputs(accounts map[string]*account.Account, outs []*types.TxOutput) { - for _, out := range outs { - acc := accounts[string(out.Address)] - if acc == nil { - panic("AdjustByOutputs() expects account in accounts") - } - acc.Balance += out.Amount - } +// Implements Statelike +func (s *State) RemoveAccount(address []byte) bool { + _, removed := s.accounts.Remove(address) + return removed } -// If the tx is invalid, an error will be returned. -// Unlike AppendBlock(), state will not be altered. -func (s *State) ExecTx(tx_ types.Tx, runCall bool) error { +// The returned Account is a copy, so mutating it +// has no side effects. +func (s *State) GetAccounts() merkle.Tree { + return s.accounts.Copy() +} - // TODO: do something with fees - fees := uint64(0) +// State.accounts +//------------------------------------- +// State.validators - // Exec tx - switch tx := tx_.(type) { - case *types.SendTx: - accounts, err := s.GetOrMakeAccounts(tx.Inputs, tx.Outputs) - if err != nil { - return err - } - signBytes := account.SignBytes(tx) - inTotal, err := s.ValidateInputs(accounts, signBytes, tx.Inputs) - if err != nil { - return err - } - outTotal, err := s.ValidateOutputs(tx.Outputs) - if err != nil { - return err - } - if outTotal > inTotal { - return types.ErrTxInsufficientFunds - } - fee := inTotal - outTotal - fees += fee - - // Good! Adjust accounts - s.AdjustByInputs(accounts, tx.Inputs) - s.AdjustByOutputs(accounts, tx.Outputs) - s.UpdateAccounts(accounts) +// The returned ValidatorInfo is a copy, so mutating it +// has no side effects. 
+func (s *State) GetValidatorInfo(address []byte) *ValidatorInfo { + _, valInfo := s.validatorInfos.Get(address) + if valInfo == nil { return nil - - case *types.CallTx: - var inAcc, outAcc *account.Account - - // Validate input - inAcc = s.GetAccount(tx.Input.Address) - if inAcc == nil { - log.Debug(Fmt("Can't find in account %X", tx.Input.Address)) - return types.ErrTxInvalidAddress - } - // pubKey should be present in either "inAcc" or "tx.Input" - if err := checkInputPubKey(inAcc, tx.Input); err != nil { - log.Debug(Fmt("Can't find pubkey for %X", tx.Input.Address)) - return err - } - signBytes := account.SignBytes(tx) - err := s.ValidateInput(inAcc, signBytes, tx.Input) - if err != nil { - log.Debug(Fmt("ValidateInput failed on %X:", tx.Input.Address)) - return err - } - if tx.Input.Amount < tx.Fee { - log.Debug(Fmt("Sender did not send enough to cover the fee %X", tx.Input.Address)) - return types.ErrTxInsufficientFunds - } - - createAccount := len(tx.Address) == 0 - if !createAccount { - // Validate output - if len(tx.Address) != 20 { - log.Debug(Fmt("Destination address is not 20 bytes %X", tx.Address)) - return types.ErrTxInvalidAddress - } - // this may be nil if we are still in mempool and contract was created in same block as this tx - // but that's fine, because the account will be created properly when the create tx runs in the block - // and then this won't return nil. otherwise, we take their fee - outAcc = s.GetAccount(tx.Address) - } - - log.Debug(Fmt("Out account: %v", outAcc)) - - // Good! - value := tx.Input.Amount - tx.Fee - inAcc.Sequence += 1 - - if runCall { - - var ( - gas uint64 = tx.GasLimit - err error = nil - caller *vm.Account = toVMAccount(inAcc) - callee *vm.Account = nil - code []byte = nil - appState = NewVMAppState(s) // TODO: confusing. - params = vm.Params{ - BlockHeight: uint64(s.LastBlockHeight), - BlockHash: vm.BytesToWord(s.LastBlockHash), - BlockTime: s.LastBlockTime.Unix(), - GasLimit: 10000000, - } - ) - - // Maybe create a new callee account if - // this transaction is creating a new contract. - if !createAccount { - if outAcc == nil { - // take fees (sorry pal) - inAcc.Balance -= tx.Fee - s.UpdateAccount(inAcc) - log.Debug(Fmt("Cannot find destination address %X. Deducting fee from caller", tx.Address)) - return types.ErrTxInvalidAddress - - } - callee = toVMAccount(outAcc) - code = callee.Code - log.Debug(Fmt("Calling contract %X with code %X", callee.Address.Address(), callee.Code)) - } else { - callee, err = appState.CreateAccount(caller) - if err != nil { - log.Debug(Fmt("Error creating account")) - return err - } - log.Debug(Fmt("Created new account %X", callee.Address.Address())) - code = tx.Data - } - log.Debug(Fmt("Code for this contract: %X", code)) - - appState.UpdateAccount(caller) // because we adjusted by input above, and bumped nonce maybe. - appState.UpdateAccount(callee) // because we adjusted by input above. - vmach := vm.NewVM(appState, params, caller.Address) - // NOTE: Call() transfers the value from caller to callee iff call succeeds. - ret, err := vmach.Call(caller, callee, code, tx.Data, value, &gas) - if err != nil { - // Failure. Charge the gas fee. The 'value' was otherwise not transferred. - log.Debug(Fmt("Error on execution: %v", err)) - inAcc.Balance -= tx.Fee - s.UpdateAccount(inAcc) - // Throw away 'appState' which holds incomplete updates (don't sync it). 
- } else { - log.Debug("Successful execution") - // Success - if createAccount { - callee.Code = ret - } - - appState.Sync() - } - // Create a receipt from the ret and whether errored. - log.Info("VM call complete", "caller", caller, "callee", callee, "return", ret, "err", err) - } else { - // The mempool does not call txs until - // the proposer determines the order of txs. - // So mempool will skip the actual .Call(), - // and only deduct from the caller's balance. - inAcc.Balance -= value - if createAccount { - inAcc.Sequence += 1 - } - s.UpdateAccount(inAcc) - } - - return nil - - case *types.BondTx: - valInfo := s.GetValidatorInfo(tx.PubKey.Address()) - if valInfo != nil { - // TODO: In the future, check that the validator wasn't destroyed, - // add funds, merge UnbondTo outputs, and unbond validator. - return errors.New("Adding coins to existing validators not yet supported") - } - accounts, err := s.GetOrMakeAccounts(tx.Inputs, nil) - if err != nil { - return err - } - signBytes := account.SignBytes(tx) - inTotal, err := s.ValidateInputs(accounts, signBytes, tx.Inputs) - if err != nil { - return err - } - if err := tx.PubKey.ValidateBasic(); err != nil { - return err - } - outTotal, err := s.ValidateOutputs(tx.UnbondTo) - if err != nil { - return err - } - if outTotal > inTotal { - return types.ErrTxInsufficientFunds - } - fee := inTotal - outTotal - fees += fee - - // Good! Adjust accounts - s.AdjustByInputs(accounts, tx.Inputs) - s.UpdateAccounts(accounts) - // Add ValidatorInfo - s.SetValidatorInfo(&ValidatorInfo{ - Address: tx.PubKey.Address(), - PubKey: tx.PubKey, - UnbondTo: tx.UnbondTo, - FirstBondHeight: s.LastBlockHeight + 1, - FirstBondAmount: outTotal, - }) - // Add Validator - added := s.BondedValidators.Add(&Validator{ - Address: tx.PubKey.Address(), - PubKey: tx.PubKey, - BondHeight: s.LastBlockHeight + 1, - VotingPower: outTotal, - Accum: 0, - }) - if !added { - panic("Failed to add validator") - } - return nil - - case *types.UnbondTx: - // The validator must be active - _, val := s.BondedValidators.GetByAddress(tx.Address) - if val == nil { - return types.ErrTxInvalidAddress - } - - // Verify the signature - signBytes := account.SignBytes(tx) - if !val.PubKey.VerifyBytes(signBytes, tx.Signature) { - return types.ErrTxInvalidSignature - } - - // tx.Height must be greater than val.LastCommitHeight - if tx.Height <= val.LastCommitHeight { - return errors.New("Invalid unbond height") - } - - // Good! - s.unbondValidator(val) - return nil - - case *types.RebondTx: - // The validator must be inactive - _, val := s.UnbondingValidators.GetByAddress(tx.Address) - if val == nil { - return types.ErrTxInvalidAddress - } - - // Verify the signature - signBytes := account.SignBytes(tx) - if !val.PubKey.VerifyBytes(signBytes, tx.Signature) { - return types.ErrTxInvalidSignature - } - - // tx.Height must be equal to the next height - if tx.Height != s.LastBlockHeight+1 { - return errors.New(Fmt("Invalid rebond height. Expected %v, got %v", s.LastBlockHeight+1, tx.Height)) - } - - // Good! 
- s.rebondValidator(val) - return nil - - case *types.DupeoutTx: - // Verify the signatures - _, accused := s.BondedValidators.GetByAddress(tx.Address) - if accused == nil { - _, accused = s.UnbondingValidators.GetByAddress(tx.Address) - if accused == nil { - return types.ErrTxInvalidAddress - } - } - voteASignBytes := account.SignBytes(&tx.VoteA) - voteBSignBytes := account.SignBytes(&tx.VoteB) - if !accused.PubKey.VerifyBytes(voteASignBytes, tx.VoteA.Signature) || - !accused.PubKey.VerifyBytes(voteBSignBytes, tx.VoteB.Signature) { - return types.ErrTxInvalidSignature - } - - // Verify equivocation - // TODO: in the future, just require one vote from a previous height that - // doesn't exist on this chain. - if tx.VoteA.Height != tx.VoteB.Height { - return errors.New("DupeoutTx heights don't match") - } - if tx.VoteA.Type == types.VoteTypeCommit && tx.VoteA.Round < tx.VoteB.Round { - // Check special case (not an error, validator must be slashed!) - // Validators should not sign another vote after committing. - } else if tx.VoteB.Type == types.VoteTypeCommit && tx.VoteB.Round < tx.VoteA.Round { - // We need to check both orderings of the votes - } else { - if tx.VoteA.Round != tx.VoteB.Round { - return errors.New("DupeoutTx rounds don't match") - } - if tx.VoteA.Type != tx.VoteB.Type { - return errors.New("DupeoutTx types don't match") - } - if bytes.Equal(tx.VoteA.BlockHash, tx.VoteB.BlockHash) { - return errors.New("DupeoutTx blockhashes shouldn't match") - } - } - - // Good! (Bad validator!) - s.destroyValidator(accused) - return nil - - default: - panic("Unknown Tx type") } + return valInfo.(*ValidatorInfo).Copy() +} + +// Returns false if new, true if updated. +// The valInfo is copied before setting, so mutating it +// afterwards has no side effects. +func (s *State) SetValidatorInfo(valInfo *ValidatorInfo) (updated bool) { + return s.validatorInfos.Set(valInfo.Address, valInfo.Copy()) } func (s *State) unbondValidator(val *Validator) { @@ -582,12 +214,14 @@ func (s *State) releaseValidator(val *Validator) { s.SetValidatorInfo(valInfo) // Send coins back to UnbondTo outputs - accounts, err := s.GetOrMakeAccounts(nil, valInfo.UnbondTo) + accounts, err := getOrMakeAccounts(s, nil, valInfo.UnbondTo) if err != nil { panic("Couldn't get or make unbondTo accounts") } - s.AdjustByOutputs(accounts, valInfo.UnbondTo) - s.UpdateAccounts(accounts) + adjustByOutputs(accounts, valInfo.UnbondTo) + for _, acc := range accounts { + s.UpdateAccount(acc) + } // Remove validator from UnbondingValidators _, removed := s.UnbondingValidators.Remove(val.Address) @@ -617,220 +251,26 @@ func (s *State) destroyValidator(val *Validator) { } -// NOTE: If an error occurs during block execution, state will be left -// at an invalid state. Copy the state before calling AppendBlock! -func (s *State) AppendBlock(block *types.Block, blockPartsHeader types.PartSetHeader) error { - err := s.appendBlock(block, blockPartsHeader) - if err != nil { - return err - } - // State.Hash should match block.StateHash - stateHash := s.Hash() - if !bytes.Equal(stateHash, block.StateHash) { - return Errorf("Invalid state hash. Expected %X, got %X", - stateHash, block.StateHash) - } - return nil +// State.validators +//------------------------------------- +// State.storage + +func (s *State) LoadStorage(hash []byte) (storage merkle.Tree) { + storage = merkle.NewIAVLTree(binary.BasicCodec, binary.BasicCodec, 1024, s.DB) + storage.Load(hash) + return storage } -// Mutates the block in place and updates it with new state hash. 
-func (s *State) SetBlockStateHash(block *types.Block) error { - sCopy := s.Copy() - err := sCopy.appendBlock(block, types.PartSetHeader{}) - if err != nil { - return err - } - // Set block.StateHash - block.StateHash = sCopy.Hash() - return nil +// State.storage +//------------------------------------- + +//----------------------------------------------------------------------------- + +type InvalidTxError struct { + Tx types.Tx + Reason error } -// Appends the block, does not check block.StateHash -// NOTE: If an error occurs during block execution, state will be left -// at an invalid state. Copy the state before calling appendBlock! -func (s *State) appendBlock(block *types.Block, blockPartsHeader types.PartSetHeader) error { - // Basic block validation. - err := block.ValidateBasic(s.LastBlockHeight, s.LastBlockHash, s.LastBlockParts, s.LastBlockTime) - if err != nil { - return err - } - - // Validate block Validation. - if block.Height == 1 { - if len(block.Validation.Commits) != 0 { - return errors.New("Block at height 1 (first block) should have no Validation commits") - } - } else { - if uint(len(block.Validation.Commits)) != s.LastBondedValidators.Size() { - return errors.New(Fmt("Invalid block validation size. Expected %v, got %v", - s.LastBondedValidators.Size(), len(block.Validation.Commits))) - } - var sumVotingPower uint64 - s.LastBondedValidators.Iterate(func(index uint, val *Validator) bool { - commit := block.Validation.Commits[index] - if commit.IsZero() { - return false - } else { - vote := &types.Vote{ - Height: block.Height - 1, - Round: commit.Round, - Type: types.VoteTypeCommit, - BlockHash: block.LastBlockHash, - BlockParts: block.LastBlockParts, - } - if val.PubKey.VerifyBytes(account.SignBytes(vote), commit.Signature) { - sumVotingPower += val.VotingPower - return false - } else { - log.Warn(Fmt("Invalid validation signature.\nval: %v\nvote: %v", val, vote)) - err = errors.New("Invalid validation signature") - return true - } - } - }) - if err != nil { - return err - } - if sumVotingPower <= s.LastBondedValidators.TotalVotingPower()*2/3 { - return errors.New("Insufficient validation voting power") - } - } - - // Update Validator.LastCommitHeight as necessary. - for i, commit := range block.Validation.Commits { - if commit.IsZero() { - continue - } - _, val := s.LastBondedValidators.GetByIndex(uint(i)) - if val == nil { - panic(Fmt("Failed to fetch validator at index %v", i)) - } - if _, val_ := s.BondedValidators.GetByAddress(val.Address); val_ != nil { - val_.LastCommitHeight = block.Height - 1 - updated := s.BondedValidators.Update(val_) - if !updated { - panic("Failed to update bonded validator LastCommitHeight") - } - } else if _, val_ := s.UnbondingValidators.GetByAddress(val.Address); val_ != nil { - val_.LastCommitHeight = block.Height - 1 - updated := s.UnbondingValidators.Update(val_) - if !updated { - panic("Failed to update unbonding validator LastCommitHeight") - } - } else { - panic("Could not find validator") - } - } - - // Remember LastBondedValidators - s.LastBondedValidators = s.BondedValidators.Copy() - - // Commit each tx - for _, tx := range block.Data.Txs { - err := s.ExecTx(tx, true) - if err != nil { - return InvalidTxError{tx, err} - } - } - - // If any unbonding periods are over, - // reward account with bonded coins. 
- toRelease := []*Validator{} - s.UnbondingValidators.Iterate(func(index uint, val *Validator) bool { - if val.UnbondHeight+unbondingPeriodBlocks < block.Height { - toRelease = append(toRelease, val) - } - return false - }) - for _, val := range toRelease { - s.releaseValidator(val) - } - - // If any validators haven't signed in a while, - // unbond them, they have timed out. - toTimeout := []*Validator{} - s.BondedValidators.Iterate(func(index uint, val *Validator) bool { - lastActivityHeight := MaxUint(val.BondHeight, val.LastCommitHeight) - if lastActivityHeight+validatorTimeoutBlocks < block.Height { - log.Info("Validator timeout", "validator", val, "height", block.Height) - toTimeout = append(toTimeout, val) - } - return false - }) - for _, val := range toTimeout { - s.unbondValidator(val) - } - - // Increment validator AccumPowers - s.BondedValidators.IncrementAccum(1) - - s.LastBlockHeight = block.Height - s.LastBlockHash = block.Hash() - s.LastBlockParts = blockPartsHeader - s.LastBlockTime = block.Time - return nil -} - -// The returned Account is a copy, so mutating it -// has no side effects. -func (s *State) GetAccount(address []byte) *account.Account { - _, acc := s.accounts.Get(address) - if acc == nil { - return nil - } - return acc.(*account.Account).Copy() -} - -// The returned Account is a copy, so mutating it -// has no side effects. -func (s *State) GetAccounts() merkle.Tree { - return s.accounts.Copy() -} - -// The account is copied before setting, so mutating it -// afterwards has no side effects. -func (s *State) UpdateAccount(account *account.Account) { - s.accounts.Set(account.Address, account.Copy()) -} - -// The accounts are copied before setting, so mutating it -// afterwards has no side effects. -func (s *State) UpdateAccounts(accounts map[string]*account.Account) { - for _, acc := range accounts { - s.accounts.Set(acc.Address, acc.Copy()) - } -} - -func (s *State) RemoveAccount(address []byte) bool { - _, removed := s.accounts.Remove(address) - return removed -} - -// The returned ValidatorInfo is a copy, so mutating it -// has no side effects. -func (s *State) GetValidatorInfo(address []byte) *ValidatorInfo { - _, valInfo := s.validatorInfos.Get(address) - if valInfo == nil { - return nil - } - return valInfo.(*ValidatorInfo).Copy() -} - -// Returns false if new, true if updated. -// The valInfo is copied before setting, so mutating it -// afterwards has no side effects. 
-func (s *State) SetValidatorInfo(valInfo *ValidatorInfo) (updated bool) { - return s.validatorInfos.Set(valInfo.Address, valInfo.Copy()) -} - -// Returns a hash that represents the state data, -// excluding Last* -func (s *State) Hash() []byte { - hashables := []merkle.Hashable{ - s.BondedValidators, - s.UnbondingValidators, - s.accounts, - s.validatorInfos, - } - return merkle.HashFromHashables(hashables) +func (txErr InvalidTxError) Error() string { + return fmt.Sprintf("Invalid tx: [%v] reason: [%v]", txErr.Tx, txErr.Reason) } diff --git a/state/state_test.go b/state/state_test.go index 0d2c963bb..5f2990652 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -10,6 +10,17 @@ import ( "time" ) +func execTxWithState(state *State, tx types.Tx, runCall bool) error { + cache := NewBlockCache(state) + err := ExecTx(cache, tx, runCall) + if err != nil { + return err + } else { + cache.Sync() + return nil + } +} + func TestCopyState(t *testing.T) { // Generate a random state s0, privAccounts, _ := RandGenesisState(10, true, 1000, 5, true, 1000) @@ -93,7 +104,7 @@ func TestGenesisSaveLoad(t *testing.T) { blockParts := block.MakePartSet() // Now append the block to s0. - err := s0.AppendBlock(block, blockParts.Header()) + err := ExecBlock(s0, block, blockParts.Header()) if err != nil { t.Error("Error appending initial block:", err) } @@ -182,7 +193,7 @@ func TestTxSequence(t *testing.T) { tx := makeSendTx(sequence) tx.Inputs[0].Signature = privAccounts[0].Sign(tx) stateCopy := state.Copy() - err := stateCopy.ExecTx(tx, true) + err := execTxWithState(stateCopy, tx, true) if i == 1 { // Sequence is good. if err != nil { @@ -241,7 +252,7 @@ func TestTxs(t *testing.T) { } tx.Inputs[0].Signature = privAccounts[0].Sign(tx) - err := state.ExecTx(tx, true) + err := execTxWithState(state, tx, true) if err != nil { t.Errorf("Got error in executing send transaction, %v", err) } @@ -278,7 +289,7 @@ func TestTxs(t *testing.T) { }, } tx.Inputs[0].Signature = privAccounts[0].Sign(tx) - err := state.ExecTx(tx, true) + err := execTxWithState(state, tx, true) if err != nil { t.Errorf("Got error in executing bond transaction, %v", err) } @@ -345,7 +356,7 @@ func TestAddValidator(t *testing.T) { } // Now append the block to s0. - err := s0.AppendBlock(block0, block0Parts.Header()) + err := ExecBlock(s0, block0, block0Parts.Header()) if err != nil { t.Error("Error appending initial block:", err) } @@ -379,7 +390,7 @@ func TestAddValidator(t *testing.T) { }, nil, ) block1Parts := block1.MakePartSet() - err = s0.AppendBlock(block1, block1Parts.Header()) + err = ExecBlock(s0, block1, block1Parts.Header()) if err != nil { t.Error("Error appending secondary block:", err) } diff --git a/state/tx_cache.go b/state/tx_cache.go new file mode 100644 index 000000000..1e7fd7139 --- /dev/null +++ b/state/tx_cache.go @@ -0,0 +1,193 @@ +package state + +import ( + ac "github.com/tendermint/tendermint/account" + . 
"github.com/tendermint/tendermint/common" + "github.com/tendermint/tendermint/vm" + "github.com/tendermint/tendermint/vm/sha3" +) + +type TxCache struct { + backend *BlockCache + accounts map[Word256]vmAccountInfo + storages map[Tuple256]Word256 + logs []*vm.Log +} + +func NewTxCache(backend *BlockCache) *TxCache { + return &TxCache{ + backend: backend, + accounts: make(map[Word256]vmAccountInfo), + storages: make(map[Tuple256]Word256), + logs: make([]*vm.Log, 0), + } +} + +//------------------------------------- +// TxCache.account + +func (cache *TxCache) GetAccount(addr Word256) *vm.Account { + acc, removed := vmUnpack(cache.accounts[addr]) + if removed { + return nil + } else { + return acc + } +} + +func (cache *TxCache) UpdateAccount(acc *vm.Account) { + addr := acc.Address + // SANITY CHECK + _, removed := vmUnpack(cache.accounts[addr]) + if removed { + panic("UpdateAccount on a removed account") + } + // SANITY CHECK END + cache.accounts[addr] = vmAccountInfo{acc, false} +} + +func (cache *TxCache) RemoveAccount(acc *vm.Account) { + addr := acc.Address + // SANITY CHECK + _, removed := vmUnpack(cache.accounts[addr]) + if removed { + panic("RemoveAccount on a removed account") + } + // SANITY CHECK END + cache.accounts[addr] = vmAccountInfo{acc, true} +} + +// Creates a 20 byte address and bumps the creator's nonce. +func (cache *TxCache) CreateAccount(creator *vm.Account) *vm.Account { + + // Generate an address + nonce := creator.Nonce + creator.Nonce += 1 + + addr := RightPadWord256(NewContractAddress(creator.Address.Prefix(20), nonce)) + + // Create account from address. + account, removed := vmUnpack(cache.accounts[addr]) + if removed || account == nil { + account = &vm.Account{ + Address: addr, + Balance: 0, + Code: nil, + Nonce: 0, + StorageRoot: Zero256, + } + cache.accounts[addr] = vmAccountInfo{account, false} + return account + } else { + panic(Fmt("Could not create account, address already exists: %X", addr)) + } +} + +// TxCache.account +//------------------------------------- +// TxCache.storage + +func (cache *TxCache) GetStorage(addr Word256, key Word256) Word256 { + // Check cache + value, ok := cache.storages[Tuple256{addr, key}] + if ok { + return value + } + + // Load and set cache + value = cache.backend.GetStorage(addr, key) + cache.storages[Tuple256{addr, key}] = value + return value +} + +// NOTE: Set value to zero to removed from the trie. +func (cache *TxCache) SetStorage(addr Word256, key Word256, value Word256) { + _, removed := vmUnpack(cache.accounts[addr]) + if removed { + panic("SetStorage() on a removed account") + } + cache.storages[Tuple256{addr, key}] = value +} + +// TxCache.storage +//------------------------------------- + +// These updates do not have to be in deterministic order, +// the backend is responsible for ordering updates. +func (cache *TxCache) Sync() { + + // Remove or update storage + for addrKey, value := range cache.storages { + addr, key := Tuple256Split(addrKey) + cache.backend.SetStorage(addr, key, value) + } + + // Remove or update accounts + for addr, accInfo := range cache.accounts { + acc, removed := vmUnpack(accInfo) + if removed { + cache.backend.RemoveAccount(addr.Prefix(20)) + } else { + cache.backend.UpdateAccount(toStateAccount(acc)) + } + } + + // TODO support logs, add them to the cache somehow. 
+} + +func (cache *TxCache) AddLog(log *vm.Log) { + cache.logs = append(cache.logs, log) +} + +//----------------------------------------------------------------------------- + +// Convenience function to return address of new contract +func NewContractAddress(caller []byte, nonce uint64) []byte { + temp := make([]byte, 32+8) + copy(temp, caller) + PutUint64(temp[32:], nonce) + return sha3.Sha3(temp)[:20] +} + +// Converts backend.Account to vm.Account struct. +func toVMAccount(acc *ac.Account) *vm.Account { + return &vm.Account{ + Address: RightPadWord256(acc.Address), + Balance: acc.Balance, + Code: acc.Code, // This is crazy. + Nonce: uint64(acc.Sequence), + StorageRoot: RightPadWord256(acc.StorageRoot), + Other: acc.PubKey, + } +} + +// Converts vm.Account to backend.Account struct. +func toStateAccount(acc *vm.Account) *ac.Account { + pubKey, ok := acc.Other.(ac.PubKey) + if !ok { + pubKey = ac.PubKeyNil{} + } + var storageRoot []byte + if acc.StorageRoot.IsZero() { + storageRoot = nil + } else { + storageRoot = acc.StorageRoot.Bytes() + } + return &ac.Account{ + Address: acc.Address.Prefix(20), + PubKey: pubKey, + Balance: acc.Balance, + Code: acc.Code, + Sequence: uint(acc.Nonce), + StorageRoot: storageRoot, + } +} + +type vmAccountInfo struct { + account *vm.Account + removed bool +} + +func vmUnpack(accInfo vmAccountInfo) (*vm.Account, bool) { + return accInfo.account, accInfo.removed +} diff --git a/state/vm_app_state.go b/state/vm_app_state.go deleted file mode 100644 index 1005fc3dd..000000000 --- a/state/vm_app_state.go +++ /dev/null @@ -1,265 +0,0 @@ -package state - -import ( - "bytes" - "sort" - - ac "github.com/tendermint/tendermint/account" - "github.com/tendermint/tendermint/binary" - . "github.com/tendermint/tendermint/common" - "github.com/tendermint/tendermint/merkle" - "github.com/tendermint/tendermint/vm" - "github.com/tendermint/tendermint/vm/sha3" -) - -// Converts state.Account to vm.Account struct. -func toVMAccount(acc *ac.Account) *vm.Account { - return &vm.Account{ - Address: vm.BytesToWord(acc.Address), - Balance: acc.Balance, - Code: acc.Code, // This is crazy. - Nonce: uint64(acc.Sequence), - StorageRoot: vm.BytesToWord(acc.StorageRoot), - Other: acc.PubKey, - } -} - -// Converts vm.Account to state.Account struct. 
-func toStateAccount(acc *vm.Account) *ac.Account { - pubKey, ok := acc.Other.(ac.PubKey) - if !ok { - pubKey = ac.PubKeyNil{} - } - var storageRoot []byte - if acc.StorageRoot.IsZero() { - storageRoot = nil - } else { - storageRoot = acc.StorageRoot.Bytes() - } - return &ac.Account{ - Address: acc.Address.Address(), - PubKey: pubKey, - Balance: acc.Balance, - Code: acc.Code, - Sequence: uint(acc.Nonce), - StorageRoot: storageRoot, - } -} - -//----------------------------------------------------------------------------- - -type AccountInfo struct { - account *vm.Account - deleted bool -} - -type VMAppState struct { - state *State - - accounts map[string]AccountInfo - storage map[string]vm.Word - logs []*vm.Log -} - -func NewVMAppState(state *State) *VMAppState { - return &VMAppState{ - state: state, - accounts: make(map[string]AccountInfo), - storage: make(map[string]vm.Word), - logs: make([]*vm.Log, 0), - } -} - -func unpack(accInfo AccountInfo) (*vm.Account, bool) { - return accInfo.account, accInfo.deleted -} - -func (vas *VMAppState) GetAccount(addr vm.Word) (*vm.Account, error) { - account, deleted := unpack(vas.accounts[addr.String()]) - if deleted { - return nil, Errorf("Account was deleted: %X", addr) - } else if account != nil { - return account, nil - } else { - acc := vas.state.GetAccount(addr.Address()) - if acc == nil { - return nil, Errorf("Invalid account addr: %X", addr) - } - return toVMAccount(acc), nil - } -} - -func (vas *VMAppState) UpdateAccount(account *vm.Account) error { - accountInfo, ok := vas.accounts[account.Address.String()] - if !ok { - vas.accounts[account.Address.String()] = AccountInfo{account, false} - return nil - } - account, deleted := unpack(accountInfo) - if deleted { - return Errorf("Account was deleted: %X", account.Address) - } else { - vas.accounts[account.Address.String()] = AccountInfo{account, false} - return nil - } -} - -func (vas *VMAppState) DeleteAccount(account *vm.Account) error { - accountInfo, ok := vas.accounts[account.Address.String()] - if !ok { - vas.accounts[account.Address.String()] = AccountInfo{account, true} - return nil - } - account, deleted := unpack(accountInfo) - if deleted { - return Errorf("Account was already deleted: %X", account.Address) - } else { - vas.accounts[account.Address.String()] = AccountInfo{account, true} - return nil - } -} - -// Creates a 20 byte address and bumps the creator's nonce. -func (vas *VMAppState) CreateAccount(creator *vm.Account) (*vm.Account, error) { - - // Generate an address - nonce := creator.Nonce - creator.Nonce += 1 - - addr := vm.RightPadWord(NewContractAddress(creator.Address.Address(), nonce)) - - // Create account from address. 
- account, deleted := unpack(vas.accounts[addr.String()]) - if deleted || account == nil { - account = &vm.Account{ - Address: addr, - Balance: 0, - Code: nil, - Nonce: 0, - StorageRoot: vm.Zero, - } - vas.accounts[addr.String()] = AccountInfo{account, false} - return account, nil - } else { - panic(Fmt("Could not create account, address already exists: %X", addr)) - // return nil, Errorf("Account already exists: %X", addr) - } -} - -func (vas *VMAppState) GetStorage(addr vm.Word, key vm.Word) (vm.Word, error) { - account, deleted := unpack(vas.accounts[addr.String()]) - if account == nil { - return vm.Zero, Errorf("Invalid account addr: %X", addr) - } else if deleted { - return vm.Zero, Errorf("Account was deleted: %X", addr) - } - - value, ok := vas.storage[addr.String()+key.String()] - if ok { - return value, nil - } else { - return vm.Zero, nil - } -} - -// NOTE: Set value to zero to delete from the trie. -func (vas *VMAppState) SetStorage(addr vm.Word, key vm.Word, value vm.Word) (bool, error) { - account, deleted := unpack(vas.accounts[addr.String()]) - if account == nil { - return false, Errorf("Invalid account addr: %X", addr) - } else if deleted { - return false, Errorf("Account was deleted: %X", addr) - } - - _, ok := vas.storage[addr.String()+key.String()] - vas.storage[addr.String()+key.String()] = value - return ok, nil -} - -// CONTRACT the updates are in deterministic order. -func (vas *VMAppState) Sync() { - - // Determine order for accounts - addrStrs := []string{} - for addrStr := range vas.accounts { - addrStrs = append(addrStrs, addrStr) - } - sort.Strings(addrStrs) - - // Update or delete accounts. - for _, addrStr := range addrStrs { - account, deleted := unpack(vas.accounts[addrStr]) - if deleted { - removed := vas.state.RemoveAccount(account.Address.Address()) - if !removed { - panic(Fmt("Could not remove account to be deleted: %X", account.Address)) - } - } else { - if account == nil { - panic(Fmt("Account should not be nil for addr: %X", account.Address)) - } - vas.state.UpdateAccount(toStateAccount(account)) - } - } - - // Determine order for storage updates - // The address comes first so it'll be grouped. - storageKeyStrs := []string{} - for keyStr := range vas.storage { - storageKeyStrs = append(storageKeyStrs, keyStr) - } - sort.Strings(storageKeyStrs) - - // Update storage for all account/key. - storage := merkle.NewIAVLTree( - binary.BasicCodec, // TODO change - binary.BasicCodec, // TODO change - 1024, // TODO change. - vas.state.DB, - ) - var currentAccount *vm.Account - var deleted bool - for _, storageKey := range storageKeyStrs { - value := vas.storage[storageKey] - addrKeyBytes := []byte(storageKey) - addr := addrKeyBytes[:32] - key := addrKeyBytes[32:] - if currentAccount == nil || !bytes.Equal(currentAccount.Address[:], addr) { - currentAccount, deleted = unpack(vas.accounts[string(addr)]) - if deleted { - continue - } - var storageRoot []byte - if currentAccount.StorageRoot.IsZero() { - storageRoot = nil - } else { - storageRoot = currentAccount.StorageRoot.Bytes() - } - storage.Load(storageRoot) - } - if value.IsZero() { - _, removed := storage.Remove(key) - if !removed { - panic(Fmt("Storage could not be removed for addr: %X @ %X", addr, key)) - } - } else { - storage.Set(key, value) - } - } - - // TODO support logs, add them to the state somehow. 
-} - -func (vas *VMAppState) AddLog(log *vm.Log) { - vas.logs = append(vas.logs, log) -} - -//----------------------------------------------------------------------------- - -// Convenience function to return address of new contract -func NewContractAddress(caller []byte, nonce uint64) []byte { - temp := make([]byte, 32+8) - copy(temp, caller) - vm.PutUint64(temp[32:], nonce) - return sha3.Sha3(temp)[:20] -} diff --git a/vm/common.go b/vm/common.go deleted file mode 100644 index cf72ed6f9..000000000 --- a/vm/common.go +++ /dev/null @@ -1,35 +0,0 @@ -package vm - -import ( - "encoding/binary" -) - -func Uint64ToWord(i uint64) Word { - word := Word{} - PutUint64(word[:], i) - return word -} - -func BytesToWord(bz []byte) Word { - word := Word{} - copy(word[:], bz) - return word -} - -func LeftPadWord(bz []byte) (word Word) { - copy(word[32-len(bz):], bz) - return -} - -func RightPadWord(bz []byte) (word Word) { - copy(word[:], bz) - return -} - -func GetUint64(word Word) uint64 { - return binary.LittleEndian.Uint64(word[:]) -} - -func PutUint64(dest []byte, i uint64) { - binary.LittleEndian.PutUint64(dest, i) -} diff --git a/vm/gas.go b/vm/gas.go index 40a7b9a06..ebe5573a3 100644 --- a/vm/gas.go +++ b/vm/gas.go @@ -3,7 +3,6 @@ package vm const ( GasSha3 uint64 = 1 GasGetAccount uint64 = 1 - GasStorageCreate uint64 = 1 GasStorageUpdate uint64 = 1 GasStackOp uint64 = 1 diff --git a/vm/native.go b/vm/native.go index 467e0a022..ad9f3f3ae 100644 --- a/vm/native.go +++ b/vm/native.go @@ -3,19 +3,18 @@ package vm import ( "code.google.com/p/go.crypto/ripemd160" "crypto/sha256" + . "github.com/tendermint/tendermint/common" "github.com/tendermint/tendermint/vm/secp256k1" "github.com/tendermint/tendermint/vm/sha3" - - . "github.com/tendermint/tendermint/common" ) -var nativeContracts = make(map[Word]NativeContract) +var nativeContracts = make(map[Word256]NativeContract) func init() { - nativeContracts[Uint64ToWord(1)] = ecrecoverFunc - nativeContracts[Uint64ToWord(2)] = sha256Func - nativeContracts[Uint64ToWord(3)] = ripemd160Func - nativeContracts[Uint64ToWord(4)] = identityFunc + nativeContracts[Uint64ToWord256(1)] = ecrecoverFunc + nativeContracts[Uint64ToWord256(2)] = sha256Func + nativeContracts[Uint64ToWord256(3)] = ripemd160Func + nativeContracts[Uint64ToWord256(4)] = identityFunc } //----------------------------------------------------------------------------- diff --git a/vm/stack.go b/vm/stack.go index 4ac05ba9a..6b74643ac 100644 --- a/vm/stack.go +++ b/vm/stack.go @@ -2,11 +2,12 @@ package vm import ( "fmt" + . 
"github.com/tendermint/tendermint/common" ) // Not goroutine safe type Stack struct { - data []Word + data []Word256 ptr int gas *uint64 @@ -15,7 +16,7 @@ type Stack struct { func NewStack(capacity int, gas *uint64, err *error) *Stack { return &Stack{ - data: make([]Word, capacity), + data: make([]Word256, capacity), ptr: 0, gas: gas, err: err, @@ -36,7 +37,7 @@ func (st *Stack) setErr(err error) { } } -func (st *Stack) Push(d Word) { +func (st *Stack) Push(d Word256) { st.useGas(GasStackOp) if st.ptr == cap(st.data) { st.setErr(ErrDataStackOverflow) @@ -50,18 +51,18 @@ func (st *Stack) PushBytes(bz []byte) { if len(bz) != 32 { panic("Invalid bytes size: expected 32") } - st.Push(BytesToWord(bz)) + st.Push(RightPadWord256(bz)) } func (st *Stack) Push64(i uint64) { - st.Push(Uint64ToWord(i)) + st.Push(Uint64ToWord256(i)) } -func (st *Stack) Pop() Word { +func (st *Stack) Pop() Word256 { st.useGas(GasStackOp) if st.ptr == 0 { st.setErr(ErrDataStackUnderflow) - return Zero + return Zero256 } st.ptr-- return st.data[st.ptr] @@ -72,7 +73,7 @@ func (st *Stack) PopBytes() []byte { } func (st *Stack) Pop64() uint64 { - return GetUint64(st.Pop()) + return GetUint64(st.Pop().Bytes()) } func (st *Stack) Len() int { @@ -100,7 +101,7 @@ func (st *Stack) Dup(n int) { } // Not an opcode, costs no gas. -func (st *Stack) Peek() Word { +func (st *Stack) Peek() Word256 { return st.data[st.ptr-1] } diff --git a/vm/test/fake_app_state.go b/vm/test/fake_app_state.go index be11467ea..876a77cba 100644 --- a/vm/test/fake_app_state.go +++ b/vm/test/fake_app_state.go @@ -10,41 +10,39 @@ import ( type FakeAppState struct { accounts map[string]*Account - storage map[string]Word + storage map[string]Word256 logs []*Log } -func (fas *FakeAppState) GetAccount(addr Word) (*Account, error) { +func (fas *FakeAppState) GetAccount(addr Word256) *Account { account := fas.accounts[addr.String()] if account != nil { - return account, nil + return account } else { - return nil, Errorf("Invalid account addr: %v", addr) + panic(Fmt("Invalid account addr: %X", addr)) } } -func (fas *FakeAppState) UpdateAccount(account *Account) error { +func (fas *FakeAppState) UpdateAccount(account *Account) { _, ok := fas.accounts[account.Address.String()] if !ok { - return Errorf("Invalid account addr: %v", account.Address.String()) + panic(Fmt("Invalid account addr: %X", account.Address)) } else { // Nothing to do - return nil } } -func (fas *FakeAppState) DeleteAccount(account *Account) error { +func (fas *FakeAppState) RemoveAccount(account *Account) { _, ok := fas.accounts[account.Address.String()] if !ok { - return Errorf("Invalid account addr: %v", account.Address.String()) + panic(Fmt("Invalid account addr: %X", account.Address)) } else { - // Delete account + // Remove account delete(fas.accounts, account.Address.String()) - return nil } } -func (fas *FakeAppState) CreateAccount(creator *Account) (*Account, error) { +func (fas *FakeAppState) CreateAccount(creator *Account) *Account { addr := createAddress(creator) account := fas.accounts[addr.String()] if account == nil { @@ -53,36 +51,34 @@ func (fas *FakeAppState) CreateAccount(creator *Account) (*Account, error) { Balance: 0, Code: nil, Nonce: 0, - StorageRoot: Zero, - }, nil + StorageRoot: Zero256, + } } else { - return nil, Errorf("Invalid account addr: %v", addr) + panic(Fmt("Invalid account addr: %X", addr)) } } -func (fas *FakeAppState) GetStorage(addr Word, key Word) (Word, error) { +func (fas *FakeAppState) GetStorage(addr Word256, key Word256) Word256 { _, ok := 
fas.accounts[addr.String()] if !ok { - return Zero, Errorf("Invalid account addr: %v", addr) + panic(Fmt("Invalid account addr: %X", addr)) } value, ok := fas.storage[addr.String()+key.String()] if ok { - return value, nil + return value } else { - return Zero, nil + return Zero256 } } -func (fas *FakeAppState) SetStorage(addr Word, key Word, value Word) (bool, error) { +func (fas *FakeAppState) SetStorage(addr Word256, key Word256, value Word256) { _, ok := fas.accounts[addr.String()] if !ok { - return false, Errorf("Invalid account addr: %v", addr) + panic(Fmt("Invalid account addr: %X", addr)) } - _, ok = fas.storage[addr.String()+key.String()] fas.storage[addr.String()+key.String()] = value - return ok, nil } func (fas *FakeAppState) AddLog(log *Log) { @@ -92,23 +88,23 @@ func (fas *FakeAppState) AddLog(log *Log) { func main() { appState := &FakeAppState{ accounts: make(map[string]*Account), - storage: make(map[string]Word), + storage: make(map[string]Word256), logs: nil, } params := Params{ BlockHeight: 0, - BlockHash: Zero, + BlockHash: Zero256, BlockTime: 0, GasLimit: 0, } - ourVm := NewVM(appState, params, Zero) + ourVm := NewVM(appState, params, Zero256) // Create accounts account1 := &Account{ - Address: Uint64ToWord(100), + Address: Uint64ToWord256(100), } account2 := &Account{ - Address: Uint64ToWord(101), + Address: Uint64ToWord256(101), } var gas uint64 = 1000 @@ -117,11 +113,11 @@ func main() { } // Creates a 20 byte address and bumps the nonce. -func createAddress(creator *Account) Word { +func createAddress(creator *Account) Word256 { nonce := creator.Nonce creator.Nonce += 1 temp := make([]byte, 32+8) copy(temp, creator.Address[:]) PutUint64(temp[32:], nonce) - return RightPadWord(sha3.Sha3(temp)[:20]) + return RightPadWord256(sha3.Sha3(temp)[:20]) } diff --git a/vm/types.go b/vm/types.go index 968538ef1..443b72c63 100644 --- a/vm/types.go +++ b/vm/types.go @@ -1,44 +1,25 @@ package vm -import () +import ( + . "github.com/tendermint/tendermint/common" +) const ( defaultDataStackCapacity = 10 ) -var ( - Zero = Word{0} - One = Word{1} -) - -type Word [32]byte - -func (w Word) String() string { return string(w[:]) } -func (w Word) Copy() Word { return w } -func (w Word) Bytes() []byte { return w[:] } // copied. -func (w Word) Address() []byte { return w[:20] } -func (w Word) IsZero() bool { - accum := byte(0) - for _, byt := range w { - accum |= byt - } - return accum == 0 -} - -//----------------------------------------------------------------------------- - type Account struct { - Address Word + Address Word256 Balance uint64 Code []byte Nonce uint64 - StorageRoot Word + StorageRoot Word256 Other interface{} // For holding all other data. } type Log struct { - Address Word - Topics []Word + Address Word256 + Topics []Word256 Data []byte Height uint64 } @@ -46,14 +27,14 @@ type Log struct { type AppState interface { // Accounts - GetAccount(addr Word) (*Account, error) - UpdateAccount(*Account) error - DeleteAccount(*Account) error - CreateAccount(*Account) (*Account, error) + GetAccount(addr Word256) *Account + UpdateAccount(*Account) + RemoveAccount(*Account) + CreateAccount(*Account) *Account // Storage - GetStorage(Word, Word) (Word, error) - SetStorage(Word, Word, Word) (bool, error) // Setting to Zero is deleting. + GetStorage(Word256, Word256) Word256 + SetStorage(Word256, Word256, Word256) // Setting to Zero is deleting. 
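	// Note: the (value, error) returns are gone from this interface. In the
	// implementations in this patch series, TxCache.GetAccount returns nil for
	// an unknown or removed address (which vm.go maps to ErrUnknownAddress),
	// while the tests' FakeAppState simply panics on addresses it has not seen.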
// Logs AddLog(*Log) @@ -61,7 +42,7 @@ type AppState interface { type Params struct { BlockHeight uint64 - BlockHash Word + BlockHash Word256 BlockTime int64 GasLimit uint64 } diff --git a/vm/vm.go b/vm/vm.go index aa4c3d62e..8d5c15096 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -10,6 +10,7 @@ import ( ) var ( + ErrUnknownAddress = errors.New("Unknown address") ErrInsufficientBalance = errors.New("Insufficient balance") ErrInvalidJumpDest = errors.New("Invalid jump dest") ErrInsufficientGas = errors.New("Insuffient gas") @@ -32,12 +33,12 @@ const ( type VM struct { appState AppState params Params - origin Word + origin Word256 callDepth int } -func NewVM(appState AppState, params Params, origin Word) *VM { +func NewVM(appState AppState, params Params, origin Word256) *VM { return &VM{ appState: appState, params: params, @@ -114,7 +115,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case DIV: // 0x04 x, y := stack.Pop64(), stack.Pop64() if y == 0 { // TODO - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" %v / %v = %v (TODO)\n", x, y, 0) } else { stack.Push64(x / y) @@ -124,7 +125,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case SDIV: // 0x05 x, y := int64(stack.Pop64()), int64(stack.Pop64()) if y == 0 { // TODO - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" %v / %v = %v (TODO)\n", x, y, 0) } else { stack.Push64(uint64(x / y)) @@ -134,7 +135,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case MOD: // 0x06 x, y := stack.Pop64(), stack.Pop64() if y == 0 { // TODO - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) } else { stack.Push64(x % y) @@ -144,7 +145,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case SMOD: // 0x07 x, y := int64(stack.Pop64()), int64(stack.Pop64()) if y == 0 { // TODO - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) } else { stack.Push64(uint64(x % y)) @@ -154,7 +155,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case ADDMOD: // 0x08 x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64() if z == 0 { // TODO - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) } else { stack.Push64(x % y) @@ -164,7 +165,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case MULMOD: // 0x09 x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64() if z == 0 { // TODO - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) } else { stack.Push64(x % y) @@ -187,7 +188,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if x < y { stack.Push64(1) } else { - stack.Push(Zero) + stack.Push(Zero256) } fmt.Printf(" %v < %v = %v\n", x, y, x < y) @@ -196,7 +197,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if x > y { stack.Push64(1) } else { - stack.Push(Zero) + stack.Push(Zero256) } fmt.Printf(" %v > %v = %v\n", x, y, x > y) @@ -205,7 +206,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if x < y { stack.Push64(1) } else { - stack.Push(Zero) + stack.Push(Zero256) } fmt.Printf(" %v < %v = %v\n", x, y, x < y) @@ -214,7 +215,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if x > y { stack.Push64(1) } else { - stack.Push(Zero) + stack.Push(Zero256) } fmt.Printf(" 
%v > %v = %v\n", x, y, x > y) @@ -223,7 +224,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if x > y { stack.Push64(1) } else { - stack.Push(Zero) + stack.Push(Zero256) } fmt.Printf(" %v == %v = %v\n", x, y, x == y) @@ -232,7 +233,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if x == 0 { stack.Push64(1) } else { - stack.Push(Zero) + stack.Push(Zero256) } fmt.Printf(" %v == 0 = %v\n", x, x == 0) @@ -287,11 +288,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if ok = useGas(gas, GasGetAccount); !ok { return nil, firstErr(err, ErrInsufficientGas) } - account, err_ := vm.appState.GetAccount(addr) // TODO ensure that 20byte lengths are supported. - if err_ != nil { - return nil, firstErr(err, err_) + acc := vm.appState.GetAccount(addr) // TODO ensure that 20byte lengths are supported. + if acc == nil { + return nil, firstErr(err, ErrUnknownAddress) } - balance := account.Balance + balance := acc.Balance stack.Push64(balance) fmt.Printf(" => %v (%X)\n", balance, addr) @@ -313,7 +314,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if !ok { return nil, firstErr(err, ErrInputOutOfBounds) } - stack.Push(RightPadWord(data)) + stack.Push(RightPadWord256(data)) fmt.Printf(" => 0x%X\n", data) case CALLDATASIZE: // 0x36 @@ -357,7 +358,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga fmt.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) case GASPRICE_DEPRECATED: // 0x3A - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" => %X (GASPRICE IS DEPRECATED)\n") case EXTCODESIZE: // 0x3B @@ -365,11 +366,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if ok = useGas(gas, GasGetAccount); !ok { return nil, firstErr(err, ErrInsufficientGas) } - account, err_ := vm.appState.GetAccount(addr) - if err_ != nil { - return nil, firstErr(err, err_) + acc := vm.appState.GetAccount(addr) + if acc == nil { + return nil, firstErr(err, ErrUnknownAddress) } - code := account.Code + code := acc.Code l := uint64(len(code)) stack.Push64(l) fmt.Printf(" => %d\n", l) @@ -379,11 +380,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if ok = useGas(gas, GasGetAccount); !ok { return nil, firstErr(err, ErrInsufficientGas) } - account, err_ := vm.appState.GetAccount(addr) - if err_ != nil { - return nil, firstErr(err, err_) + acc := vm.appState.GetAccount(addr) + if acc == nil { + return nil, firstErr(err, ErrUnknownAddress) } - code := account.Code + code := acc.Code memOff := stack.Pop64() codeOff := stack.Pop64() length := stack.Pop64() @@ -399,11 +400,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga fmt.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) case BLOCKHASH: // 0x40 - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) case COINBASE: // 0x41 - stack.Push(Zero) + stack.Push(Zero256) fmt.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) case TIMESTAMP: // 0x42 @@ -430,7 +431,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if !ok { return nil, firstErr(err, ErrMemoryOutOfBounds) } - stack.Push(RightPadWord(data)) + stack.Push(RightPadWord256(data)) fmt.Printf(" => 0x%X\n", data) case MSTORE: // 0x52 @@ -452,21 +453,14 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga 
case SLOAD: // 0x54 loc := stack.Pop() - data, _ := vm.appState.GetStorage(callee.Address, loc) + data := vm.appState.GetStorage(callee.Address, loc) stack.Push(data) fmt.Printf(" {0x%X : 0x%X}\n", loc, data) case SSTORE: // 0x55 loc, data := stack.Pop(), stack.Pop() - updated, err_ := vm.appState.SetStorage(callee.Address, loc, data) - if err = firstErr(err, err_); err != nil { - return nil, err - } - if updated { - useGas(gas, GasStorageUpdate) - } else { - useGas(gas, GasStorageCreate) - } + vm.appState.SetStorage(callee.Address, loc, data) + useGas(gas, GasStorageUpdate) fmt.Printf(" {0x%X : 0x%X}\n", loc, data) case JUMP: // 0x56 @@ -501,7 +495,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if !ok { return nil, firstErr(err, ErrCodeOutOfBounds) } - res := RightPadWord(codeSegment) + res := RightPadWord256(codeSegment) stack.Push(res) pc += a fmt.Printf(" => 0x%X\n", res) @@ -518,7 +512,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case LOG0, LOG1, LOG2, LOG3, LOG4: n := int(op - LOG0) - topics := make([]Word, n) + topics := make([]Word256, n) offset, size := stack.Pop64(), stack.Pop64() for i := 0; i < n; i++ { topics[i] = stack.Pop() @@ -551,19 +545,14 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga // TODO charge for gas to create account _ the code length * GasCreateByte - newAccount, err := vm.appState.CreateAccount(callee) - if err != nil { - stack.Push(Zero) - fmt.Printf(" (*) 0x0 %v\n", err) + newAccount := vm.appState.CreateAccount(callee) + // Run the input to get the contract code. + ret, err_ := vm.Call(callee, newAccount, input, input, contractValue, gas) + if err_ != nil { + stack.Push(Zero256) } else { - // Run the input to get the contract code. - ret, err_ := vm.Call(callee, newAccount, input, input, contractValue, gas) - if err_ != nil { - stack.Push(Zero) - } else { - newAccount.Code = ret // Set the code - stack.Push(newAccount.Address) - } + newAccount.Code = ret // Set the code + stack.Push(newAccount.Address) } case CALL, CALLCODE: // 0xF1, 0xF2 @@ -598,22 +587,22 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if ok = useGas(gas, GasGetAccount); !ok { return nil, firstErr(err, ErrInsufficientGas) } - account, err_ := vm.appState.GetAccount(addr) - if err = firstErr(err, err_); err != nil { - return nil, err + acc := vm.appState.GetAccount(addr) + if acc == nil { + return nil, firstErr(err, ErrUnknownAddress) } if op == CALLCODE { - ret, err = vm.Call(callee, callee, account.Code, args, value, gas) + ret, err = vm.Call(callee, callee, acc.Code, args, value, gas) } else { - ret, err = vm.Call(callee, account, account.Code, args, value, gas) + ret, err = vm.Call(callee, acc, acc.Code, args, value, gas) } } // Push result if err != nil { - stack.Push(Zero) + stack.Push(Zero256) } else { - stack.Push(One) + stack.Push(One256) dest, ok := subslice(memory, retOffset, retSize, false) if !ok { return nil, firstErr(err, ErrMemoryOutOfBounds) @@ -640,15 +629,15 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if ok = useGas(gas, GasGetAccount); !ok { return nil, firstErr(err, ErrInsufficientGas) } - // TODO if the receiver is Zero, then make it the fee. - receiver, err_ := vm.appState.GetAccount(addr) - if err = firstErr(err, err_); err != nil { - return nil, err + // TODO if the receiver is Zero256, then make it the fee. 
+ receiver := vm.appState.GetAccount(addr) + if receiver == nil { + return nil, firstErr(err, ErrUnknownAddress) } balance := callee.Balance receiver.Balance += balance vm.appState.UpdateAccount(receiver) - vm.appState.DeleteAccount(callee) + vm.appState.RemoveAccount(callee) fmt.Printf(" => (%X) %v\n", addr[:4], balance) fallthrough From 4be97fa9d40c031628af4ce285f91dd950f5dc41 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 29 Mar 2015 00:07:10 -0700 Subject: [PATCH 17/18] Transcribe vm changes from vm_fixes by Ethan --- vm/test/fake_app_state.go | 29 ----- vm/test/vm_test.go | 99 +++++++++++++++ vm/vm.go | 250 +++++++++++++++++++++++--------------- 3 files changed, 253 insertions(+), 125 deletions(-) create mode 100644 vm/test/vm_test.go diff --git a/vm/test/fake_app_state.go b/vm/test/fake_app_state.go index 876a77cba..3d6f5ca22 100644 --- a/vm/test/fake_app_state.go +++ b/vm/test/fake_app_state.go @@ -1,8 +1,6 @@ package vm import ( - "fmt" - . "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/vm" "github.com/tendermint/tendermint/vm/sha3" @@ -85,33 +83,6 @@ func (fas *FakeAppState) AddLog(log *Log) { fas.logs = append(fas.logs, log) } -func main() { - appState := &FakeAppState{ - accounts: make(map[string]*Account), - storage: make(map[string]Word256), - logs: nil, - } - params := Params{ - BlockHeight: 0, - BlockHash: Zero256, - BlockTime: 0, - GasLimit: 0, - } - ourVm := NewVM(appState, params, Zero256) - - // Create accounts - account1 := &Account{ - Address: Uint64ToWord256(100), - } - account2 := &Account{ - Address: Uint64ToWord256(101), - } - - var gas uint64 = 1000 - output, err := ourVm.Call(account1, account2, []byte{0x5B, 0x60, 0x00, 0x56}, []byte{}, 0, &gas) - fmt.Printf("Output: %v Error: %v\n", output, err) -} - // Creates a 20 byte address and bumps the nonce. func createAddress(creator *Account) Word256 { nonce := creator.Nonce diff --git a/vm/test/vm_test.go b/vm/test/vm_test.go new file mode 100644 index 000000000..ee3fe7e57 --- /dev/null +++ b/vm/test/vm_test.go @@ -0,0 +1,99 @@ +package vm + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "strings" + "testing" + "time" + + . "github.com/tendermint/tendermint/common" + . "github.com/tendermint/tendermint/vm" +) + +func newAppState() *FakeAppState { + return &FakeAppState{ + accounts: make(map[string]*Account), + storage: make(map[string]Word256), + logs: nil, + } +} + +func newParams() Params { + return Params{ + BlockHeight: 0, + BlockHash: Zero256, + BlockTime: 0, + GasLimit: 0, + } +} + +func makeBytes(n int) []byte { + b := make([]byte, n) + rand.Read(b) + return b +} + +func TestVM(t *testing.T) { + ourVm := NewVM(newAppState(), newParams(), Zero256) + + // Create accounts + account1 := &Account{ + Address: Uint64ToWord256(100), + } + account2 := &Account{ + Address: Uint64ToWord256(101), + } + + var gas uint64 = 1000 + N := []byte{0xff, 0xff} + // Loop N times + code := []byte{0x60, 0x00, 0x60, 0x20, 0x52, 0x5B, byte(0x60 + len(N) - 1)} + for i := 0; i < len(N); i++ { + code = append(code, N[i]) + } + code = append(code, []byte{0x60, 0x20, 0x51, 0x12, 0x15, 0x60, byte(0x1b + len(N)), 0x57, 0x60, 0x01, 0x60, 0x20, 0x51, 0x01, 0x60, 0x20, 0x52, 0x60, 0x05, 0x56, 0x5B}...) 
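	// Decoded as EVM-style opcodes, the bytecode built above is roughly:
	//   PUSH1 0x00; PUSH1 0x20; MSTORE      // counter at mem[0x20] = 0
	//   JUMPDEST                            // loop top (pc 0x05)
	//   PUSH2 0xffff                        // N
	//   PUSH1 0x20; MLOAD; SLT; ISZERO      // done = !(counter < N)
	//   PUSH1 0x1d; JUMPI                   // exit once the counter reaches N
	//   PUSH1 0x01; PUSH1 0x20; MLOAD; ADD
	//   PUSH1 0x20; MSTORE                  // counter++
	//   PUSH1 0x05; JUMP                    // back to loop top
	//   JUMPDEST                            // exit (pc 0x1d)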
+ start := time.Now() + output, err := ourVm.Call(account1, account2, code, []byte{}, 0, &gas) + fmt.Printf("Output: %v Error: %v\n", output, err) + fmt.Println("Call took:", time.Since(start)) +} + +func TestSubcurrency(t *testing.T) { + st := newAppState() + // Create accounts + account1 := &Account{ + Address: RightPadWord256(makeBytes(20)), + } + account2 := &Account{ + Address: RightPadWord256(makeBytes(20)), + } + st.accounts[account1.Address.String()] = account1 + st.accounts[account2.Address.String()] = account2 + + ourVm := NewVM(st, newParams(), Zero256) + + var gas uint64 = 1000 + code_parts := []string{"620f42403355", + "7c0100000000000000000000000000000000000000000000000000000000", + "600035046315cf268481141561004657", + "6004356040526040515460605260206060f35b63693200ce81141561008757", + "60043560805260243560a052335460c0523360e05260a05160c05112151561008657", + "60a05160c0510360e0515560a0516080515401608051555b5b505b6000f3"} + code, _ := hex.DecodeString(strings.Join(code_parts, "")) + fmt.Printf("Code: %x\n", code) + data, _ := hex.DecodeString("693200CE0000000000000000000000004B4363CDE27C2EB05E66357DB05BC5C88F850C1A0000000000000000000000000000000000000000000000000000000000000005") + output, err := ourVm.Call(account1, account2, code, data, 0, &gas) + fmt.Printf("Output: %v Error: %v\n", output, err) + +} + +/* + // infinite loop + code := []byte{0x5B, 0x60, 0x00, 0x56} + // mstore + code := []byte{0x60, 0x00, 0x60, 0x20} + // mstore, mload + code := []byte{0x60, 0x01, 0x60, 0x20, 0x52, 0x60, 0x20, 0x51} +*/ diff --git a/vm/vm.go b/vm/vm.go index 8d5c15096..52f3d0234 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -3,7 +3,7 @@ package vm import ( "errors" "fmt" - "math" + "math/big" . "github.com/tendermint/tendermint/common" "github.com/tendermint/tendermint/vm/sha3" @@ -24,12 +24,21 @@ var ( ErrInvalidContract = errors.New("Invalid contract") ) +type Debug bool + const ( - dataStackCapacity = 1024 - callStackCapacity = 100 // TODO ensure usage. - memoryCapacity = 1024 * 1024 // 1 MB + dataStackCapacity = 1024 + callStackCapacity = 100 // TODO ensure usage. + memoryCapacity = 1024 * 1024 // 1 MB + dbg Debug = true ) +func (d Debug) Printf(s string, a ...interface{}) { + if d { + fmt.Printf(s, a...) + } +} + type VM struct { appState AppState params Params @@ -74,7 +83,7 @@ func (vm *VM) Call(caller, callee *Account, code, input []byte, value uint64, ga // Just like Call() but does not transfer 'value' or modify the callDepth. 
func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, gas *uint64) (output []byte, err error) { - fmt.Printf("(%d) (%X) %X (code=%d) gas: %v (d) %X\n", vm.callDepth, caller.Address[:4], callee.Address, len(callee.Code), *gas, input) + dbg.Printf("(%d) (%X) %X (code=%d) gas: %v (d) %X\n", vm.callDepth, caller.Address[:4], callee.Address, len(callee.Code), *gas, input) var ( pc uint64 = 0 @@ -90,7 +99,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga } var op = codeGetOp(code, pc) - fmt.Printf("(pc) %-3d (op) %-14s (st) %-4d ", pc, op.String(), stack.Len()) + dbg.Printf("(pc) %-3d (op) %-14s (st) %-4d ", pc, op.String(), stack.Len()) switch op { @@ -98,90 +107,123 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga return nil, nil case ADD: // 0x01 - x, y := stack.Pop64(), stack.Pop64() - stack.Push64(x + y) - fmt.Printf(" %v + %v = %v\n", x, y, x+y) + //x, y := stack.Pop64(), stack.Pop64() + //stack.Push64(x + y) + x, y := stack.Pop(), stack.Pop() + xb := new(big.Int).SetBytes(flip(x[:])) + yb := new(big.Int).SetBytes(flip(y[:])) + sum := new(big.Int).Add(xb, yb) + stack.Push(RightPadWord256(flip(sum.Bytes()))) + dbg.Printf(" %v + %v = %v\n", xb, yb, sum) case MUL: // 0x02 - x, y := stack.Pop64(), stack.Pop64() - stack.Push64(x * y) - fmt.Printf(" %v * %v = %v\n", x, y, x*y) + //x, y := stack.Pop64(), stack.Pop64() + //stack.Push64(x * y) + x, y := stack.Pop(), stack.Pop() + xb := new(big.Int).SetBytes(flip(x[:])) + yb := new(big.Int).SetBytes(flip(y[:])) + prod := new(big.Int).Mul(xb, yb) + stack.Push(RightPadWord256(flip(prod.Bytes()))) + dbg.Printf(" %v * %v = %v\n", xb, yb, prod) case SUB: // 0x03 - x, y := stack.Pop64(), stack.Pop64() - stack.Push64(x - y) - fmt.Printf(" %v - %v = %v\n", x, y, x-y) + //x, y := stack.Pop64(), stack.Pop64() + //stack.Push64(x - y) + x, y := stack.Pop(), stack.Pop() + xb := new(big.Int).SetBytes(flip(x[:])) + yb := new(big.Int).SetBytes(flip(y[:])) + diff := new(big.Int).Sub(xb, yb) + stack.Push(RightPadWord256(flip(diff.Bytes()))) + dbg.Printf(" %v - %v = %v\n", xb, yb, diff) case DIV: // 0x04 - x, y := stack.Pop64(), stack.Pop64() - if y == 0 { // TODO + //x, y := stack.Pop64(), stack.Pop64() + //stack.Push64(x / y) + x, y := stack.Pop(), stack.Pop() + if y.IsZero() { // TODO stack.Push(Zero256) - fmt.Printf(" %v / %v = %v (TODO)\n", x, y, 0) + dbg.Printf(" %x / %x = %v (TODO)\n", x, y, 0) } else { - stack.Push64(x / y) - fmt.Printf(" %v / %v = %v\n", x, y, x/y) + xb := new(big.Int).SetBytes(flip(x[:])) + yb := new(big.Int).SetBytes(flip(y[:])) + div := new(big.Int).Div(xb, yb) + stack.Push(RightPadWord256(flip(div.Bytes()))) + dbg.Printf(" %v / %v = %v\n", xb, yb, div) } case SDIV: // 0x05 + // TODO ... big? 
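		// Only the ops above (ADD, MUL, SUB, DIV) and, below, MOD and EXP are
		// moved to math/big in this patch; SDIV, SMOD, ADDMOD and MULMOD keep
		// the 64-bit shortcut for now. The big.Int round trip relies on flip
		// (not shown in this hunk); its use alongside SetBytes/Bytes suggests it
		// reverses the 32-byte word, i.e. stack words are held
		// least-significant-byte first and flip produces the big-endian slice
		// that math/big expects.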
x, y := int64(stack.Pop64()), int64(stack.Pop64()) if y == 0 { // TODO stack.Push(Zero256) - fmt.Printf(" %v / %v = %v (TODO)\n", x, y, 0) + dbg.Printf(" %v / %v = %v (TODO)\n", x, y, 0) } else { stack.Push64(uint64(x / y)) - fmt.Printf(" %v / %v = %v\n", x, y, x/y) + dbg.Printf(" %v / %v = %v\n", x, y, x/y) } case MOD: // 0x06 - x, y := stack.Pop64(), stack.Pop64() - if y == 0 { // TODO + //x, y := stack.Pop64(), stack.Pop64() + x, y := stack.Pop(), stack.Pop() + if y.IsZero() { // TODO stack.Push(Zero256) - fmt.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) + dbg.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) } else { - stack.Push64(x % y) - fmt.Printf(" %v %% %v = %v\n", x, y, x%y) + xb := new(big.Int).SetBytes(flip(x[:])) + yb := new(big.Int).SetBytes(flip(y[:])) + mod := new(big.Int).Mod(xb, yb) + stack.Push(RightPadWord256(flip(mod.Bytes()))) + dbg.Printf(" %v %% %v = %v\n", xb, yb, mod) } case SMOD: // 0x07 + // TODO ... big? x, y := int64(stack.Pop64()), int64(stack.Pop64()) if y == 0 { // TODO stack.Push(Zero256) - fmt.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) + dbg.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) } else { stack.Push64(uint64(x % y)) - fmt.Printf(" %v %% %v = %v\n", x, y, x%y) + dbg.Printf(" %v %% %v = %v\n", x, y, x%y) } case ADDMOD: // 0x08 + // TODO ... big? x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64() if z == 0 { // TODO stack.Push(Zero256) - fmt.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) + dbg.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) } else { - stack.Push64(x % y) - fmt.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x+y)%z) + stack.Push64((x + y) % z) + dbg.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x+y)%z) } case MULMOD: // 0x09 + // TODO ... big? x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64() if z == 0 { // TODO stack.Push(Zero256) - fmt.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) + dbg.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) } else { - stack.Push64(x % y) - fmt.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x*y)%z) + stack.Push64((x * y) % z) + dbg.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x*y)%z) } case EXP: // 0x0A - x, y := stack.Pop64(), stack.Pop64() - stack.Push64(ExpUint64(x, y)) - fmt.Printf(" %v ** %v = %v\n", x, y, uint64(math.Pow(float64(x), float64(y)))) + //x, y := stack.Pop64(), stack.Pop64() + //stack.Push64(ExpUint64(x, y)) + x, y := stack.Pop(), stack.Pop() + xb := new(big.Int).SetBytes(flip(x[:])) + yb := new(big.Int).SetBytes(flip(y[:])) + pow := new(big.Int).Exp(xb, yb, big.NewInt(0)) + stack.Push(RightPadWord256(flip(pow.Bytes()))) + dbg.Printf(" %v ** %v = %v\n", xb, yb, pow) case SIGNEXTEND: // 0x0B x, y := stack.Pop64(), stack.Pop64() res := (y << uint(x)) >> x stack.Push64(res) - fmt.Printf(" (%v << %v) >> %v = %v\n", y, x, x, res) + dbg.Printf(" (%v << %v) >> %v = %v\n", y, x, x, res) case LT: // 0x10 x, y := stack.Pop64(), stack.Pop64() @@ -190,7 +232,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga } else { stack.Push(Zero256) } - fmt.Printf(" %v < %v = %v\n", x, y, x < y) + dbg.Printf(" %v < %v = %v\n", x, y, x < y) case GT: // 0x11 x, y := stack.Pop64(), stack.Pop64() @@ -199,7 +241,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga } else { stack.Push(Zero256) } - fmt.Printf(" %v > %v = %v\n", x, y, x > y) + dbg.Printf(" %v > %v = %v\n", x, y, x > y) case SLT: // 0x12 x, y := int64(stack.Pop64()), int64(stack.Pop64()) @@ -208,7 +250,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value 
uint64, ga } else { stack.Push(Zero256) } - fmt.Printf(" %v < %v = %v\n", x, y, x < y) + dbg.Printf(" %v < %v = %v\n", x, y, x < y) case SGT: // 0x13 x, y := int64(stack.Pop64()), int64(stack.Pop64()) @@ -217,16 +259,16 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga } else { stack.Push(Zero256) } - fmt.Printf(" %v > %v = %v\n", x, y, x > y) + dbg.Printf(" %v > %v = %v\n", x, y, x > y) case EQ: // 0x14 x, y := stack.Pop64(), stack.Pop64() - if x > y { + if x == y { stack.Push64(1) } else { stack.Push(Zero256) } - fmt.Printf(" %v == %v = %v\n", x, y, x == y) + dbg.Printf(" %v == %v = %v\n", x, y, x == y) case ISZERO: // 0x15 x := stack.Pop64() @@ -235,27 +277,27 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga } else { stack.Push(Zero256) } - fmt.Printf(" %v == 0 = %v\n", x, x == 0) + dbg.Printf(" %v == 0 = %v\n", x, x == 0) case AND: // 0x16 x, y := stack.Pop64(), stack.Pop64() stack.Push64(x & y) - fmt.Printf(" %v & %v = %v\n", x, y, x&y) + dbg.Printf(" %v & %v = %v\n", x, y, x&y) case OR: // 0x17 x, y := stack.Pop64(), stack.Pop64() stack.Push64(x | y) - fmt.Printf(" %v | %v = %v\n", x, y, x|y) + dbg.Printf(" %v | %v = %v\n", x, y, x|y) case XOR: // 0x18 x, y := stack.Pop64(), stack.Pop64() stack.Push64(x ^ y) - fmt.Printf(" %v ^ %v = %v\n", x, y, x^y) + dbg.Printf(" %v ^ %v = %v\n", x, y, x^y) case NOT: // 0x19 x := stack.Pop64() stack.Push64(^x) - fmt.Printf(" !%v = %v\n", x, ^x) + dbg.Printf(" !%v = %v\n", x, ^x) case BYTE: // 0x1A idx, val := stack.Pop64(), stack.Pop() @@ -264,7 +306,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga res = val[idx] } stack.Push64(uint64(res)) - fmt.Printf(" => 0x%X\n", res) + dbg.Printf(" => 0x%X\n", res) case SHA3: // 0x20 if ok = useGas(gas, GasSha3); !ok { @@ -277,11 +319,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga } data = sha3.Sha3(data) stack.PushBytes(data) - fmt.Printf(" => (%v) %X\n", size, data) + dbg.Printf(" => (%v) %X\n", size, data) case ADDRESS: // 0x30 stack.Push(callee.Address) - fmt.Printf(" => %X\n", callee.Address) + dbg.Printf(" => %X\n", callee.Address) case BALANCE: // 0x31 addr := stack.Pop() @@ -294,19 +336,19 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga } balance := acc.Balance stack.Push64(balance) - fmt.Printf(" => %v (%X)\n", balance, addr) + dbg.Printf(" => %v (%X)\n", balance, addr) case ORIGIN: // 0x32 stack.Push(vm.origin) - fmt.Printf(" => %X\n", vm.origin) + dbg.Printf(" => %X\n", vm.origin) case CALLER: // 0x33 stack.Push(caller.Address) - fmt.Printf(" => %X\n", caller.Address) + dbg.Printf(" => %X\n", caller.Address) case CALLVALUE: // 0x34 stack.Push64(value) - fmt.Printf(" => %v\n", value) + dbg.Printf(" => %v\n", value) case CALLDATALOAD: // 0x35 offset := stack.Pop64() @@ -315,11 +357,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga return nil, firstErr(err, ErrInputOutOfBounds) } stack.Push(RightPadWord256(data)) - fmt.Printf(" => 0x%X\n", data) + dbg.Printf(" => 0x%X\n", data) case CALLDATASIZE: // 0x36 stack.Push64(uint64(len(input))) - fmt.Printf(" => %d\n", len(input)) + dbg.Printf(" => %d\n", len(input)) case CALLDATACOPY: // 0x37 memOff := stack.Pop64() @@ -334,18 +376,17 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga return nil, firstErr(err, ErrMemoryOutOfBounds) } copy(dest, data) - fmt.Printf(" => [%v, %v, %v] %X\n", memOff, inputOff, 
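Note: CALLDATALOAD, CALLDATACOPY and the copy opcodes that follow all lean on an unexported subslice helper whose body is not shown in these hunks. The sketch below is only a guess at the bounds check it performs, and it drops the fourth bool argument, whose meaning is not visible here:

    package main

    import "fmt"

    // boundsSlice guesses at the subslice(data, offset, length, ...) shape:
    // hand back the requested range with an ok flag instead of panicking, so
    // an opcode can map a miss to ErrInputOutOfBounds / ErrMemoryOutOfBounds.
    func boundsSlice(data []byte, offset, length uint64) ([]byte, bool) {
        if length == 0 {
            return nil, true
        }
        if offset > uint64(len(data)) || offset+length > uint64(len(data)) {
            return nil, false
        }
        return data[offset : offset+length], true
    }

    func main() {
        input := []byte{0xAA, 0xBB, 0xCC, 0xDD}
        data, ok := boundsSlice(input, 1, 2)
        fmt.Printf("%X %v\n", data, ok) // BBCC true
        _, ok = boundsSlice(input, 3, 2)
        fmt.Println(ok) // false: the opcode would return an out-of-bounds error
    }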
length, data) + dbg.Printf(" => [%v, %v, %v] %X\n", memOff, inputOff, length, data) case CODESIZE: // 0x38 l := uint64(len(code)) stack.Push64(l) - fmt.Printf(" => %d\n", l) + dbg.Printf(" => %d\n", l) case CODECOPY: // 0x39 memOff := stack.Pop64() codeOff := stack.Pop64() length := stack.Pop64() - fmt.Println("CODECOPY: codeOff, length, codelength", codeOff, length, len(code)) data, ok := subslice(code, codeOff, length, false) if !ok { return nil, firstErr(err, ErrCodeOutOfBounds) @@ -355,11 +396,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga return nil, firstErr(err, ErrMemoryOutOfBounds) } copy(dest, data) - fmt.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) + dbg.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) case GASPRICE_DEPRECATED: // 0x3A stack.Push(Zero256) - fmt.Printf(" => %X (GASPRICE IS DEPRECATED)\n") + dbg.Printf(" => %X (GASPRICE IS DEPRECATED)\n") case EXTCODESIZE: // 0x3B addr := stack.Pop() @@ -373,7 +414,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga code := acc.Code l := uint64(len(code)) stack.Push64(l) - fmt.Printf(" => %d\n", l) + dbg.Printf(" => %d\n", l) case EXTCODECOPY: // 0x3C addr := stack.Pop() @@ -397,33 +438,33 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga return nil, firstErr(err, ErrMemoryOutOfBounds) } copy(dest, data) - fmt.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) + dbg.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) case BLOCKHASH: // 0x40 stack.Push(Zero256) - fmt.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) + dbg.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) case COINBASE: // 0x41 stack.Push(Zero256) - fmt.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) + dbg.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) case TIMESTAMP: // 0x42 time := vm.params.BlockTime stack.Push64(uint64(time)) - fmt.Printf(" => 0x%X\n", time) + dbg.Printf(" => 0x%X\n", time) case BLOCKHEIGHT: // 0x43 number := uint64(vm.params.BlockHeight) stack.Push64(number) - fmt.Printf(" => 0x%X\n", number) + dbg.Printf(" => 0x%X\n", number) case GASLIMIT: // 0x45 stack.Push64(vm.params.GasLimit) - fmt.Printf(" => %v\n", vm.params.GasLimit) + dbg.Printf(" => %v\n", vm.params.GasLimit) case POP: // 0x50 stack.Pop() - fmt.Printf(" => %v\n", vm.params.GasLimit) + dbg.Printf(" => %v\n", vm.params.GasLimit) case MLOAD: // 0x51 offset := stack.Pop64() @@ -432,16 +473,16 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga return nil, firstErr(err, ErrMemoryOutOfBounds) } stack.Push(RightPadWord256(data)) - fmt.Printf(" => 0x%X\n", data) + dbg.Printf(" => 0x%X\n", data) case MSTORE: // 0x52 offset, data := stack.Pop64(), stack.Pop() - dest, ok := subslice(memory, offset, 32, true) + dest, ok := subslice(memory, offset, 32, false) if !ok { return nil, firstErr(err, ErrMemoryOutOfBounds) } - copy(dest, data[:]) - fmt.Printf(" => 0x%X\n", data) + copy(dest, flip(data[:])) + dbg.Printf(" => 0x%X\n", data) case MSTORE8: // 0x53 offset, val := stack.Pop64(), byte(stack.Pop64()&0xFF) @@ -449,19 +490,21 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga return nil, firstErr(err, ErrMemoryOutOfBounds) } memory[offset] = val - fmt.Printf(" => [%v] 0x%X\n", offset, val) + dbg.Printf(" => [%v] 0x%X\n", offset, val) case SLOAD: // 0x54 loc := stack.Pop() data := vm.appState.GetStorage(callee.Address, loc) - 
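Note: every hunk in this vm.go patch swaps fmt.Printf for dbg.Printf, but the dbg identifier itself is defined elsewhere and is not part of these hunks. One plausible shape, stated purely as an assumption, is a Printf-compatible wrapper gated by a flag, so the opcode trace can be silenced without deleting the call sites:

    package main

    import (
        "fmt"
        "os"
    )

    // debugWriter is a hypothetical stand-in for whatever backs dbg in this
    // patch; the real definition in the repository may differ.
    type debugWriter struct {
        enabled bool
    }

    func (d debugWriter) Printf(format string, args ...interface{}) {
        if d.enabled {
            fmt.Fprintf(os.Stderr, format, args...)
        }
    }

    var dbg = debugWriter{enabled: os.Getenv("VM_DEBUG") != ""}

    func main() {
        dbg.enabled = true // force the trace on for this demo
        dbg.Printf("(pc) %-3d (op) %-14s (st) %-4d\n", 0, "STOP", 0)
    }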
stack.Push(data) - fmt.Printf(" {0x%X : 0x%X}\n", loc, data) + stack.Push(flipWord(data)) + dbg.Printf(" {0x%X : 0x%X}\n", loc, data) case SSTORE: // 0x55 loc, data := stack.Pop(), stack.Pop() + loc = flipWord(loc) + data = flipWord(data) vm.appState.SetStorage(callee.Address, loc, data) useGas(gas, GasStorageUpdate) - fmt.Printf(" {0x%X : 0x%X}\n", loc, data) + dbg.Printf(" {0x%X : 0x%X}\n", loc, data) case JUMP: // 0x56 err = jump(code, stack.Pop64(), &pc) @@ -473,7 +516,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga err = jump(code, pos, &pc) continue } - fmt.Printf(" ~> false\n") + dbg.Printf(" ~> false\n") case PC: // 0x58 stack.Push64(pc) @@ -483,10 +526,10 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga case GAS: // 0x5A stack.Push64(*gas) - fmt.Printf(" => %X\n", *gas) + dbg.Printf(" => %X\n", *gas) case JUMPDEST: // 0x5B - fmt.Printf("\n") + dbg.Printf("\n") // Do nothing case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32: @@ -498,17 +541,17 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga res := RightPadWord256(codeSegment) stack.Push(res) pc += a - fmt.Printf(" => 0x%X\n", res) + dbg.Printf(" => 0x%X\n", res) case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16: n := int(op - DUP1 + 1) stack.Dup(n) - fmt.Printf(" => [%d] 0x%X\n", n, stack.Peek().Bytes()) + dbg.Printf(" => [%d] 0x%X\n", n, stack.Peek().Bytes()) case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16: n := int(op - SWAP1 + 2) stack.Swap(n) - fmt.Printf(" => [%d]\n", n) + dbg.Printf(" => [%d]\n", n) case LOG0, LOG1, LOG2, LOG3, LOG4: n := int(op - LOG0) @@ -528,7 +571,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga vm.params.BlockHeight, } vm.appState.AddLog(log) - fmt.Printf(" => %v\n", log) + dbg.Printf(" => %v\n", log) case CREATE: // 0xF0 contractValue := stack.Pop64() @@ -560,7 +603,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga addr, value := stack.Pop(), stack.Pop64() inOffset, inSize := stack.Pop64(), stack.Pop64() // inputs retOffset, retSize := stack.Pop64(), stack.Pop64() // outputs - fmt.Printf(" => %X\n", addr) + dbg.Printf(" => %X\n", addr) // Get the arguments from the memory args, ok := subslice(memory, inOffset, inSize, false) @@ -613,7 +656,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga // Handle remaining gas. 
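Note: SLOAD and SSTORE above now pass keys and values through flipWord before touching appState, using the helper added at the bottom of this patch. A small standalone check of that helper's one important property — flipping twice is the identity — with a local stand-in for the 32-byte word type:

    package main

    import "fmt"

    // word is a local stand-in for Word256.
    type word [32]byte

    // flipWord follows the helper added below: reverse the 32 bytes of a
    // word by swapping pairs from both ends.
    func flipWord(in word) (out word) {
        for i := 0; i < 16; i++ {
            out[i] = in[31-i]
            out[31-i] = in[i]
        }
        return
    }

    func main() {
        var w word
        w[0], w[31] = 0x01, 0xFF
        fw := flipWord(w)
        fmt.Println(flipWord(fw) == w)     // true: a double flip is a no-op
        fmt.Printf("%X %X\n", w[0], fw[0]) // 1 FF
    }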
*gas += gasLimit - fmt.Printf("resume %X (%v)\n", callee.Address, gas) + dbg.Printf("resume %X (%v)\n", callee.Address, gas) case RETURN: // 0xF3 offset, size := stack.Pop64(), stack.Pop64() @@ -621,7 +664,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if !ok { return nil, firstErr(err, ErrMemoryOutOfBounds) } - fmt.Printf(" => [%v, %v] (%d) 0x%X\n", offset, size, len(ret), ret) + dbg.Printf(" => [%v, %v] (%d) 0x%X\n", offset, size, len(ret), ret) return ret, nil case SUICIDE: // 0xFF @@ -629,7 +672,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga if ok = useGas(gas, GasGetAccount); !ok { return nil, firstErr(err, ErrInsufficientGas) } - // TODO if the receiver is Zero256, then make it the fee. + // TODO if the receiver is , then make it the fee. receiver := vm.appState.GetAccount(addr) if receiver == nil { return nil, firstErr(err, ErrUnknownAddress) @@ -638,11 +681,11 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga receiver.Balance += balance vm.appState.UpdateAccount(receiver) vm.appState.RemoveAccount(callee) - fmt.Printf(" => (%X) %v\n", addr[:4], balance) + dbg.Printf(" => (%X) %v\n", addr[:4], balance) fallthrough default: - fmt.Printf("(pc) %-3v Invalid opcode %X\n", pc, op) + dbg.Printf("(pc) %-3v Invalid opcode %X\n", pc, op) panic(fmt.Errorf("Invalid opcode %X", op)) } @@ -677,10 +720,10 @@ func codeGetOp(code []byte, n uint64) OpCode { func jump(code []byte, to uint64, pc *uint64) (err error) { dest := codeGetOp(code, to) if dest != JUMPDEST { - fmt.Printf(" ~> %v invalid jump dest %v\n", to, dest) + dbg.Printf(" ~> %v invalid jump dest %v\n", to, dest) return ErrInvalidJumpDest } - fmt.Printf(" ~> %v\n", to) + dbg.Printf(" ~> %v\n", to) *pc = to return nil } @@ -713,10 +756,25 @@ func transfer(from, to *Account, amount uint64) error { } func flip(in []byte) []byte { + l2 := len(in) / 2 flipped := make([]byte, len(in)) - for i := 0; i < len(flipped)/2; i++ { + // copy the middle bit (if its even it will get overwritten) + if len(in) != 0 { + flipped[l2] = in[l2] + } + for i := 0; i < l2; i++ { flipped[i] = in[len(in)-1-i] flipped[len(in)-1-i] = in[i] } return flipped } + +func flipWord(in Word256) Word256 { + word := Word256{} + // copy the middle bit (if its even it will get overwritten) + for i := 0; i < 16; i++ { + word[i] = in[len(in)-1-i] + word[len(in)-1-i] = in[i] + } + return word +} From 42e2e11364b2ec7aa3c5921593f067bf26db3f79 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 29 Mar 2015 17:47:12 -0700 Subject: [PATCH 18/18] =?UTF-8?q?add=20dirty=20bit=20to=20BlockCache=20acc?= =?UTF-8?q?ounts=20and=20storages.=20construct=20account=E2=80=99s=20stora?= =?UTF-8?q?ge=20lazily.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- state/block_cache.go | 69 ++++++++++++++++++++++++++++++-------------- state/tx_cache.go | 6 ++-- 2 files changed, 49 insertions(+), 26 deletions(-) diff --git a/state/block_cache.go b/state/block_cache.go index 4da8e7ce7..9af3b4edc 100644 --- a/state/block_cache.go +++ b/state/block_cache.go @@ -1,6 +1,7 @@ package state import ( + "bytes" "sort" ac "github.com/tendermint/tendermint/account" @@ -25,7 +26,7 @@ type BlockCache struct { db dbm.DB backend *State accounts map[string]accountInfo - storages map[Tuple256]Word256 + storages map[Tuple256]storageInfo } func NewBlockCache(backend *State) *BlockCache { @@ -33,7 +34,7 @@ func NewBlockCache(backend *State) *BlockCache { db: backend.DB, 
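Note: the reworked flip above seeds the middle byte before the swap loop so odd lengths round-trip correctly (for even lengths that byte is simply overwritten). A quick standalone check of both parities, with the body reproduced from the hunk:

    package main

    import "fmt"

    // flip swaps bytes pairwise from both ends, copying the middle byte
    // first so odd-length inputs are handled too.
    func flip(in []byte) []byte {
        l2 := len(in) / 2
        flipped := make([]byte, len(in))
        // copy the middle byte (for even lengths it gets overwritten)
        if len(in) != 0 {
            flipped[l2] = in[l2]
        }
        for i := 0; i < l2; i++ {
            flipped[i] = in[len(in)-1-i]
            flipped[len(in)-1-i] = in[i]
        }
        return flipped
    }

    func main() {
        fmt.Printf("%X\n", flip([]byte{1, 2, 3}))    // 030201
        fmt.Printf("%X\n", flip([]byte{1, 2, 3, 4})) // 04030201
        fmt.Printf("%X\n", flip(nil))                // empty
    }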
backend: backend, accounts: make(map[string]accountInfo), - storages: make(map[Tuple256]Word256), + storages: make(map[Tuple256]storageInfo), } } @@ -45,15 +46,14 @@ func (cache *BlockCache) State() *State { // BlockCache.account func (cache *BlockCache) GetAccount(addr []byte) *ac.Account { - acc, storage, removed := unpack(cache.accounts[string(addr)]) + acc, _, removed, _ := cache.accounts[string(addr)].unpack() if removed { return nil } else if acc != nil { return acc } else { acc = cache.backend.GetAccount(addr) - storage = makeStorage(cache.db, acc.StorageRoot) - cache.accounts[string(addr)] = accountInfo{acc, storage, false} + cache.accounts[string(addr)] = accountInfo{acc, nil, false, false} return acc } } @@ -61,22 +61,22 @@ func (cache *BlockCache) GetAccount(addr []byte) *ac.Account { func (cache *BlockCache) UpdateAccount(acc *ac.Account) { addr := acc.Address // SANITY CHECK - _, storage, removed := unpack(cache.accounts[string(addr)]) + _, storage, removed, _ := cache.accounts[string(addr)].unpack() if removed { panic("UpdateAccount on a removed account") } // SANITY CHECK END - cache.accounts[string(addr)] = accountInfo{acc, storage, false} + cache.accounts[string(addr)] = accountInfo{acc, storage, false, true} } func (cache *BlockCache) RemoveAccount(addr []byte) { // SANITY CHECK - _, _, removed := unpack(cache.accounts[string(addr)]) + _, _, removed, _ := cache.accounts[string(addr)].unpack() if removed { panic("RemoveAccount on a removed account") } // SANITY CHECK END - cache.accounts[string(addr)] = accountInfo{nil, nil, true} + cache.accounts[string(addr)] = accountInfo{nil, nil, true, false} } // BlockCache.account @@ -85,16 +85,20 @@ func (cache *BlockCache) RemoveAccount(addr []byte) { func (cache *BlockCache) GetStorage(addr Word256, key Word256) (value Word256) { // Check cache - value, ok := cache.storages[Tuple256{addr, key}] + info, ok := cache.storages[Tuple256{addr, key}] if ok { - return value + return info.value } // Get or load storage - _, storage, removed := unpack(cache.accounts[string(addr.Prefix(20))]) + acc, storage, removed, dirty := cache.accounts[string(addr.Prefix(20))].unpack() if removed { panic("GetStorage() on removed account") } + if storage == nil { + storage = makeStorage(cache.db, acc.StorageRoot) + cache.accounts[string(addr.Prefix(20))] = accountInfo{acc, storage, false, dirty} + } // Load and set cache _, val_ := storage.Get(key.Bytes()) @@ -102,17 +106,17 @@ func (cache *BlockCache) GetStorage(addr Word256, key Word256) (value Word256) { if val_ != nil { value = RightPadWord256(val_.([]byte)) } - cache.storages[Tuple256{addr, key}] = value + cache.storages[Tuple256{addr, key}] = storageInfo{value, false} return value } // NOTE: Set value to zero to removed from the trie. 
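Note: GetStorage above no longer builds the account's storage trie up front; GetAccount leaves the storage field nil, and the trie is only constructed from acc.StorageRoot on the first storage access. A standalone sketch of that lazy-load shape, with a plain map standing in for merkle.Tree and makeStorage:

    package main

    import "fmt"

    // entry mirrors accountInfo's storage/dirty fields; the trie is a toy map.
    type entry struct {
        storage map[string]string
        dirty   bool
    }

    type cache struct {
        entries map[string]entry
        loads   int // counts how often the "trie" is built
    }

    // load stands in for makeStorage(cache.db, acc.StorageRoot).
    func (c *cache) load(addr string) map[string]string {
        c.loads++
        return map[string]string{"key": "value-for-" + addr}
    }

    func (c *cache) getStorage(addr, key string) string {
        e := c.entries[addr]
        if e.storage == nil {
            e.storage = c.load(addr) // built lazily, only when first needed
            c.entries[addr] = e
        }
        return e.storage[key]
    }

    func main() {
        c := &cache{entries: map[string]entry{"acct": {}}}
        c.getStorage("acct", "key")
        c.getStorage("acct", "key")
        fmt.Println(c.loads) // 1: the second read reuses the loaded trie
    }

Accounts whose storage is never touched skip the trie load entirely, which is the point of constructing it lazily.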
func (cache *BlockCache) SetStorage(addr Word256, key Word256, value Word256) { - _, _, removed := unpack(cache.accounts[string(addr.Prefix(20))]) + _, _, removed, _ := cache.accounts[string(addr.Prefix(20))].unpack() if removed { panic("SetStorage() on a removed account") } - cache.storages[Tuple256{addr, key}] = value + cache.storages[Tuple256{addr, key}] = storageInfo{value, true} } // BlockCache.storage @@ -140,7 +144,7 @@ func (cache *BlockCache) Sync() { for _, storageKey := range storageKeys { addr, key := Tuple256Split(storageKey) if addr != curAddr || curAcc == nil { - acc, storage, removed := unpack(cache.accounts[string(addr.Prefix(20))]) + acc, storage, removed, _ := cache.accounts[string(addr.Prefix(20))].unpack() curAddr = addr curAcc = acc curAccRemoved = removed @@ -149,7 +153,10 @@ func (cache *BlockCache) Sync() { if curAccRemoved { continue } - value := cache.storages[storageKey] + value, dirty := cache.storages[storageKey].unpack() + if !dirty { + continue + } if value.IsZero() { curStorage.Remove(key.Bytes()) } else { @@ -166,7 +173,7 @@ func (cache *BlockCache) Sync() { // Update or delete accounts. for _, addrStr := range addrStrs { - acc, storage, removed := unpack(cache.accounts[addrStr]) + acc, storage, removed, dirty := cache.accounts[addrStr].unpack() if removed { removed := cache.backend.RemoveAccount(acc.Address) if !removed { @@ -176,8 +183,16 @@ func (cache *BlockCache) Sync() { if acc == nil { panic(Fmt("Account should not be nil for addr: %X", acc.Address)) } - acc.StorageRoot = storage.Save() - cache.backend.UpdateAccount(acc) + if storage != nil { + newStorageRoot := storage.Save() + if !bytes.Equal(newStorageRoot, acc.StorageRoot) { + acc.StorageRoot = newStorageRoot + dirty = true + } + } + if dirty { + cache.backend.UpdateAccount(acc) + } } } @@ -189,8 +204,18 @@ type accountInfo struct { account *ac.Account storage merkle.Tree removed bool + dirty bool } -func unpack(accInfo accountInfo) (*ac.Account, merkle.Tree, bool) { - return accInfo.account, accInfo.storage, accInfo.removed +func (accInfo accountInfo) unpack() (*ac.Account, merkle.Tree, bool, bool) { + return accInfo.account, accInfo.storage, accInfo.removed, accInfo.dirty +} + +type storageInfo struct { + value Word256 + dirty bool +} + +func (stjInfo storageInfo) unpack() (Word256, bool) { + return stjInfo.value, stjInfo.dirty } diff --git a/state/tx_cache.go b/state/tx_cache.go index 1e7fd7139..d4188db3c 100644 --- a/state/tx_cache.go +++ b/state/tx_cache.go @@ -94,10 +94,8 @@ func (cache *TxCache) GetStorage(addr Word256, key Word256) Word256 { return value } - // Load and set cache - value = cache.backend.GetStorage(addr, key) - cache.storages[Tuple256{addr, key}] = value - return value + // Load from backend + return cache.backend.GetStorage(addr, key) } // NOTE: Set value to zero to removed from the trie.
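Note: the dirty flags introduced here let Sync skip clean entries — SetStorage marks a slot dirty, reads cached by GetStorage stay clean, and an account is only written back when it was updated or its storage root actually changed. A reduced sketch of that write-back rule over a toy key/value cache (all names are illustrative):

    package main

    import "fmt"

    // slot mirrors storageInfo: the cached value plus a dirty flag.
    type slot struct {
        value string
        dirty bool
    }

    type blockCache struct {
        slots   map[string]slot
        backend map[string]string
    }

    func (c *blockCache) getStorage(key string) string {
        if s, ok := c.slots[key]; ok {
            return s.value
        }
        v := c.backend[key]
        c.slots[key] = slot{v, false} // cached read, stays clean
        return v
    }

    func (c *blockCache) setStorage(key, value string) {
        c.slots[key] = slot{value, true} // write marks the slot dirty
    }

    // sync writes back only the dirty slots, as BlockCache.Sync now does.
    func (c *blockCache) sync() (written int) {
        for k, s := range c.slots {
            if !s.dirty {
                continue
            }
            c.backend[k] = s.value
            written++
        }
        return
    }

    func main() {
        c := &blockCache{slots: map[string]slot{}, backend: map[string]string{"a": "1", "b": "2"}}
        c.getStorage("a")       // read only
        c.setStorage("b", "22") // write
        fmt.Println(c.sync())   // 1: only the dirty slot is flushed
    }

The TxCache change at the end follows the same reasoning from the read side: since plain reads are never flushed, there is no need to cache them at all.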