diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index abe453677..d38da66c4 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -121,7 +121,7 @@ jobs:
       - run: |
           cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
         if: env.GIT_DIFF
-      - uses: codecov/codecov-action@v1.5.0
+      - uses: codecov/codecov-action@v1.5.2
        with:
          file: ./coverage.txt
        if: env.GIT_DIFF
diff --git a/.golangci.yml b/.golangci.yml
index a97ad7a88..f05cde90c 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -39,6 +39,7 @@ linters:
     # - wsl
     # - gocognit
     - nolintlint
+    - asciicheck
 
 issues:
   exclude-rules:
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 29cb54d20..8d753f004 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -64,6 +64,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - Data Storage
   - [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
   - [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
+  - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
 - Tooling
   - [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
 
@@ -118,6 +119,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
 - [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
 - [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
+- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
 
 ### BUG FIXES
 
@@ -125,3 +127,5 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
 - [blockchain/v1] \#5711 Fix deadlock (@melekes)
 - [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
+- [rpc] \#6507 Fix the RPC client not handling URLs without ports (@JayT106)
+- [statesync] \#6463 Add a reverse sync feature to fetch historical light blocks after state sync, in order to verify any evidence (@cmwaters)
diff --git a/go.mod b/go.mod
index c0acc3a78..6e7f26605 100644
--- a/go.mod
+++ b/go.mod
@@ -8,8 +8,8 @@ require (
	github.com/Masterminds/squirrel v1.5.0
	github.com/Workiva/go-datastructures v1.0.53
	github.com/adlio/schema v1.1.13
-	github.com/btcsuite/btcd v0.21.0-beta
-	github.com/btcsuite/btcutil v1.0.2
+	github.com/btcsuite/btcd v0.22.0-beta
+	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
	github.com/fortytw2/leaktest v1.3.0
	github.com/go-kit/kit v0.10.0
	github.com/gogo/protobuf v1.3.2
@@ -26,10 +26,10 @@ require (
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/ory/dockertest v3.3.5+incompatible
	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.10.0
+	github.com/prometheus/client_golang v1.11.0
	github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
	github.com/rs/cors v1.7.0
-	github.com/rs/zerolog v1.22.0
+	github.com/rs/zerolog v1.23.0
	github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
	github.com/spf13/cobra v1.1.3
diff --git a/go.sum b/go.sum
index 21a6d4272..9c1fb0e19 100644
--- a/go.sum
+++ b/go.sum
@@ -63,12 +63,12 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M=
-github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
+github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo=
+github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
 github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
-github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
+github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ=
+github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
 github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
 github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
 github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
@@ -100,6 +100,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU=
@@ -159,11 +160,13 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
 github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -283,6 +286,7 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -408,8 +412,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg=
-github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -423,8 +427,8 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y=
-github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -442,8 +446,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.22.0 h1:XrVUjV4K+izZpKXZHlPrYQiDtmdGiCylnT4i43AAWxg=
-github.com/rs/zerolog v1.22.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
+github.com/rs/zerolog v1.23.0 h1:UskrK+saS9P9Y789yNNulYKdARjPZuS35B8gJF2x60g=
+github.com/rs/zerolog v1.23.0/go.mod h1:6c7hFfxPOy7TacJc4Fcdi24/J0NKYGzjG8FWRI916Qo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -645,8 +649,8 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY=
-golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/internal/blockchain/v0/reactor.go b/internal/blockchain/v0/reactor.go
index 8dec338c3..aa8208914 100644
--- a/internal/blockchain/v0/reactor.go
+++ b/internal/blockchain/v0/reactor.go
@@ -85,9 +85,10 @@ type Reactor struct {
	consReactor consensusReactor
	fastSync    bool
 
-	blockchainCh *p2p.Channel
-	peerUpdates  *p2p.PeerUpdates
-	closeCh      chan struct{}
+	blockchainCh  *p2p.Channel
+	peerUpdates   *p2p.PeerUpdates
+	peerUpdatesCh chan p2p.Envelope
+	closeCh       chan struct{}
 
	requestsCh <-chan BlockRequest
	errorsCh   <-chan peerError
@@ -122,17 +123,18 @@ func NewReactor(
	errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
 
	r := &Reactor{
-		initialState: state,
-		blockExec:    blockExec,
-		store:        store,
-		pool:         NewBlockPool(startHeight, requestsCh, errorsCh),
-		consReactor:  consReactor,
-		fastSync:     fastSync,
-		requestsCh:   requestsCh,
-		errorsCh:     errorsCh,
-		blockchainCh: blockchainCh,
-		peerUpdates:  peerUpdates,
-		closeCh:      make(chan struct{}),
+		initialState:  state,
+		blockExec:     blockExec,
+		store:         store,
+		pool:          NewBlockPool(startHeight, requestsCh, errorsCh),
+		consReactor:   consReactor,
+		fastSync:      fastSync,
+		requestsCh:    requestsCh,
+		errorsCh:      errorsCh,
+		blockchainCh:  blockchainCh,
+		peerUpdates:   peerUpdates,
+		peerUpdatesCh: make(chan p2p.Envelope),
+		closeCh:       make(chan struct{}),
	}
 
	r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
@@ -277,9 +279,9 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
 }
 
 // processBlockchainCh initiates a blocking process where we listen for and handle
-// envelopes on the BlockchainChannel. Any error encountered during message
-// execution will result in a PeerError being sent on the BlockchainChannel. When
-// the reactor is stopped, we will catch the signal and close the p2p Channel
+// envelopes on the BlockchainChannel and peerUpdatesCh. Any error encountered during
+// message execution will result in a PeerError being sent on the BlockchainChannel.
+// When the reactor is stopped, we will catch the signal and close the p2p Channel
 // gracefully.
 func (r *Reactor) processBlockchainCh() {
	defer r.blockchainCh.Close()
@@ -295,9 +297,13 @@ func (r *Reactor) processBlockchainCh() {
			}
		}
 
+		case envelope := <-r.peerUpdatesCh:
+			r.blockchainCh.Out <- envelope
+
		case <-r.closeCh:
			r.Logger.Debug("stopped listening on blockchain channel; closing...")
			return
+
		}
	}
 }
@@ -314,7 +320,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
	switch peerUpdate.Status {
	case p2p.PeerStatusUp:
		// send a status update to the newly added peer
-		r.blockchainCh.Out <- p2p.Envelope{
+		r.peerUpdatesCh <- p2p.Envelope{
			To: peerUpdate.NodeID,
			Message: &bcproto.StatusResponse{
				Base: r.store.Base(),
@@ -548,7 +554,7 @@ FOR_LOOP:
			// TODO: Same thing for app - but we would need a way to get the hash
			// without persisting the state.
-			state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
+			state, err = r.blockExec.ApplyBlock(state, firstID, first)
			if err != nil {
				// TODO: This is bad, are we zombie?
				panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
diff --git a/internal/blockchain/v0/reactor_test.go b/internal/blockchain/v0/reactor_test.go
index f1b73ed3d..841a4d64c 100644
--- a/internal/blockchain/v0/reactor_test.go
+++ b/internal/blockchain/v0/reactor_test.go
@@ -17,6 +17,7 @@ import (
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
+	sf "github.com/tendermint/tendermint/state/test/factory"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	dbm "github.com/tendermint/tm-db"
@@ -103,11 +104,9 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
	stateStore := sm.NewStore(stateDB)
	blockStore := store.NewBlockStore(blockDB)
 
-	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+	state, err := sm.MakeGenesisState(genDoc)
	require.NoError(t, err)
-
-	db := dbm.NewMemDB()
-	stateStore = sm.NewStore(db)
+	require.NoError(t, stateStore.Save(state))
 
	blockExec := sm.NewBlockExecutor(
		stateStore,
@@ -115,8 +114,8 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
		rts.app[nodeID].Consensus(),
		mock.Mempool{},
		sm.EmptyEvidencePool{},
+		blockStore,
	)
-	require.NoError(t, stateStore.Save(state))
 
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
@@ -142,11 +141,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
			)
		}
 
-		thisBlock := makeBlock(blockHeight, state, lastCommit)
+		thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
 
-		state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
+		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		require.NoError(t, err)
 
		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
diff --git a/internal/blockchain/v0/test_util.go b/internal/blockchain/v0/test_util.go
deleted file mode 100644
index a36934366..000000000
--- a/internal/blockchain/v0/test_util.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package v0
-
-import (
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/types"
-)
-
-func makeTxs(height int64) (txs []types.Tx) {
-	for i := 0; i < 10; i++ {
-		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
-	}
-	return txs
-}
-
-func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
-	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
-	return block
-}
diff --git a/internal/blockchain/v2/processor_context.go b/internal/blockchain/v2/processor_context.go
index 6a0466550..7385bcc6e 100644
--- a/internal/blockchain/v2/processor_context.go
+++ b/internal/blockchain/v2/processor_context.go
@@ -30,7 +30,7 @@ func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContex
 }
 
 func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
-	newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
+	newState, err := pc.applier.ApplyBlock(pc.state, blockID, block)
	pc.state = newState
	return err
 }
diff --git a/internal/blockchain/v2/reactor.go b/internal/blockchain/v2/reactor.go
index 5ce08c7d0..50c9fa565 100644
--- a/internal/blockchain/v2/reactor.go
+++ b/internal/blockchain/v2/reactor.go
@@ -50,7 +50,7 @@ type BlockchainReactor struct {
 }
 
 type blockApplier interface {
-	ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
+	ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
 }
 
 // XXX: unify naming in this package around tmState
diff --git a/internal/blockchain/v2/reactor_test.go b/internal/blockchain/v2/reactor_test.go
index b034dab51..4dd661fc5 100644
--- a/internal/blockchain/v2/reactor_test.go
+++ b/internal/blockchain/v2/reactor_test.go
@@ -25,7 +25,8 @@ import (
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/store"
+	sf "github.com/tendermint/tendermint/state/test/factory"
+	tmstore "github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
 )
 
@@ -84,9 +85,9 @@ type mockBlockApplier struct {
 
 // XXX: Add whitelist/blacklist?
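+// NOTE: ApplyBlock now returns only (state, error); the retain-height value it
+// used to return is gone because block pruning moved into the block executor
+// (see \#6541 in the changelog above).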
 func (mba *mockBlockApplier) ApplyBlock(
	state sm.State, blockID types.BlockID, block *types.Block,
-) (sm.State, int64, error) {
+) (sm.State, error) {
	state.LastBlockHeight++
-	return state, 0, nil
+	return state, nil
 }
 
 type mockSwitchIo struct {
@@ -151,8 +152,8 @@ type testReactorParams struct {
	mockA bool
 }
 
-func newTestReactor(p testReactorParams) *BlockchainReactor {
-	store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
+func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
+	store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight)
	reporter := behavior.NewMockReporter()
 
	var appl blockApplier
@@ -164,15 +165,14 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
		cc := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(cc)
		err := proxyApp.Start()
-		if err != nil {
-			panic(fmt.Errorf("error start app: %w", err))
-		}
+		require.NoError(t, err)
		db := dbm.NewMemDB()
		stateStore := sm.NewStore(db)
-		appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
-		if err = stateStore.Save(state); err != nil {
-			panic(err)
-		}
+		blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
+		appl = sm.NewBlockExecutor(
+			stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
+		err = stateStore.Save(state)
+		require.NoError(t, err)
	}
 
	r := newReactor(state, store, reporter, appl, true)
@@ -400,7 +400,7 @@ func TestReactorHelperMode(t *testing.T) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
-			reactor := newTestReactor(params)
+			reactor := newTestReactor(t, params)
			mockSwitch := &mockSwitchIo{switchedToConsensus: false}
			reactor.io = mockSwitch
			err := reactor.Start()
@@ -457,7 +457,7 @@ func TestReactorSetSwitchNil(t *testing.T) {
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
 
-	reactor := newTestReactor(testReactorParams{
+	reactor := newTestReactor(t, testReactorParams{
		logger:   log.TestingLogger(),
		genDoc:   genDoc,
		privVals: privVals,
@@ -468,34 +468,18 @@ func TestReactorSetSwitchNil(t *testing.T) {
	assert.Nil(t, reactor.io)
 }
 
-//----------------------------------------------
-// utility funcs
-
-func makeTxs(height int64) (txs []types.Tx) {
-	for i := 0; i < 10; i++ {
-		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
-	}
-	return txs
-}
-
-func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
-	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
-	return block
-}
-
 type testApp struct {
	abci.BaseApplication
 }
 
-// Why are we importing the entire blockExecutor dependency graph here
-// when we have the facilities to
 func newReactorStore(
+	t *testing.T,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
-	maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
-	if len(privVals) != 1 {
-		panic("only support one validator")
-	}
+	maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) {
+	t.Helper()
+
+	require.Len(t, privVals, 1)
	app := &testApp{}
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc)
@@ -505,20 +489,15 @@ func newReactorStore(
	}
 
	stateDB := dbm.NewMemDB()
-	blockStore := store.NewBlockStore(dbm.NewMemDB())
+	blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
	stateStore := sm.NewStore(stateDB)
-	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
-	if err != nil {
-		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
-	}
+	state, err := sm.MakeGenesisState(genDoc)
+	require.NoError(t, err)
 
-	db := dbm.NewMemDB()
-	stateStore = sm.NewStore(db)
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
-		mock.Mempool{}, sm.EmptyEvidencePool{})
-	if err = stateStore.Save(state); err != nil {
-		panic(err)
-	}
+		mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
+	err = stateStore.Save(state)
+	require.NoError(t, err)
 
	// add blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@@ -533,22 +512,18 @@ func newReactorStore(
				lastBlockMeta.BlockID,
				time.Now(),
			)
-			if err != nil {
-				panic(err)
-			}
+			require.NoError(t, err)
			lastCommit = types.NewCommit(vote.Height, vote.Round,
				lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
		}
 
-		thisBlock := makeBlock(blockHeight, state, lastCommit)
+		thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
 
-		state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
-		if err != nil {
-			panic(fmt.Errorf("error apply block: %w", err))
-		}
+		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
+		require.NoError(t, err)
 
		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}
diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go
index 5f72527f3..7522dda7f 100644
--- a/internal/consensus/byzantine_test.go
+++ b/internal/consensus/byzantine_test.go
@@ -45,7 +45,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
		logger := consensusLogger().With("test", "byzantine", "validator", i)
		stateDB := dbm.NewMemDB() // each state needs its own db
		stateStore := sm.NewStore(stateDB)
-		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+		state, err := sm.MakeGenesisState(genDoc)
+		require.NoError(t, err)
+		require.NoError(t, stateStore.Save(state))
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
@@ -76,7 +78,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
		require.NoError(t, err)
 
		// Make State
-		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
+		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
		cs.SetLogger(cs.Logger)
		// set private validator
diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go
index b2400669d..af8c3ca27 100644
--- a/internal/consensus/common_test.go
+++ b/internal/consensus/common_test.go
@@ -384,8 +384,8 @@ func newStateWithConfig(
	pv types.PrivValidator,
	app abci.Application,
 ) *State {
-	blockDB := dbm.NewMemDB()
-	return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
+	blockStore := store.NewBlockStore(dbm.NewMemDB())
+	return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockStore)
 }
 
 func newStateWithConfigAndBlockStore(
@@ -393,11 +393,8 @@ func newStateWithConfigAndBlockStore(
	state sm.State,
	pv types.PrivValidator,
	app abci.Application,
-	blockDB dbm.DB,
+	blockStore *store.BlockStore,
 ) *State {
-	// Get BlockStore
-	blockStore := store.NewBlockStore(blockDB)
-
	// one for mempool, one for consensus
	mtx := new(tmsync.RWMutex)
	proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
@@ -419,7 +416,7 @@ func newStateWithConfigAndBlockStore(
		panic(err)
	}
 
-	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
+	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
	cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
	cs.SetLogger(log.TestingLogger().With("module", "consensus"))
	cs.SetPrivValidator(pv)
@@ -693,6 +690,7 @@ func consensusLogger() log.Logger {
 }
 
 func randConsensusState(
+	t *testing.T,
	config *cfg.Config,
	nValidators int,
	testName string,
@@ -710,9 +708,9 @@ func randConsensusState(
	configRootDirs := make([]string, 0, nValidators)
 
	for i := 0; i < nValidators; i++ {
-		stateDB := dbm.NewMemDB() // each state needs its own db
-		stateStore := sm.NewStore(stateDB)
-		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+		blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
+		state, err := sm.MakeGenesisState(genDoc)
+		require.NoError(t, err)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -731,7 +729,7 @@ func randConsensusState(
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})
 
-		css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
+		css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore)
		css[i].SetTimeoutTicker(tickerFunc())
		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
	}
@@ -762,9 +760,7 @@ func randConsensusNetWithPeers(
	var peer0Config *cfg.Config
	configRootDirs := make([]string, 0, nPeers)
	for i := 0; i < nPeers; i++ {
-		stateDB := dbm.NewMemDB() // each state needs its own db
-		stateStore := sm.NewStore(stateDB)
-		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+		state, _ := sm.MakeGenesisState(genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		configRootDirs = append(configRootDirs, thisConfig.RootDir)
		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go
index 8307ed5cf..fa5bc2ead 100644
--- a/internal/consensus/invalid_test.go
+++ b/internal/consensus/invalid_test.go
@@ -18,7 +18,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
	config := configSetup(t)
 
	n := 4
-	states, cleanup := randConsensusState(config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	t.Cleanup(cleanup)
 
	for i := 0; i < 4; i++ {
diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go
index 763e87e50..5edec248a 100644
--- a/internal/consensus/mempool_test.go
+++ b/internal/consensus/mempool_test.go
@@ -17,6 +17,7 @@ import (
	abci "github.com/tendermint/tendermint/abci/types"
	mempl "github.com/tendermint/tendermint/internal/mempool"
	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
 )
 
@@ -123,9 +124,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
	config := configSetup(t)
 
	state, privVals := randGenesisState(config, 1, false, 10)
-	blockDB := dbm.NewMemDB()
-	stateStore := sm.NewStore(blockDB)
-	cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
+	stateStore := sm.NewStore(dbm.NewMemDB())
+	blockStore := store.NewBlockStore(dbm.NewMemDB())
+	cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore)
	err := stateStore.Save(state)
	require.NoError(t, err)
	newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader)
@@ -150,9 +151,9 @@ func TestMempoolRmBadTx(t *testing.T) {
	state, privVals := randGenesisState(config, 1, false, 10)
	app := NewCounterApplication()
-	blockDB := dbm.NewMemDB()
-	stateStore := sm.NewStore(blockDB)
-	cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
+	stateStore := sm.NewStore(dbm.NewMemDB())
+	blockStore := store.NewBlockStore(dbm.NewMemDB())
+	cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockStore)
	err := stateStore.Save(state)
	require.NoError(t, err)
diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go
index bbd823a3f..a4f56f631 100644
--- a/internal/consensus/metrics.go
+++ b/internal/consensus/metrics.go
@@ -48,7 +48,7 @@ type Metrics struct {
	// Number of transactions.
	NumTxs metrics.Gauge
	// Size of the block.
-	BlockSizeBytes metrics.Gauge
+	BlockSizeBytes metrics.Histogram
	// Total number of transactions.
	TotalTxs metrics.Gauge
	// The latest block height.
@@ -150,7 +150,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
			Name:      "num_txs",
			Help:      "Number of transactions.",
		}, labels).With(labelsAndValues...),
-		BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+		BlockSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "block_size_bytes",
@@ -210,7 +210,7 @@ func NopMetrics() *Metrics {
		BlockIntervalSeconds: discard.NewHistogram(),
 
		NumTxs:          discard.NewGauge(),
-		BlockSizeBytes:  discard.NewGauge(),
+		BlockSizeBytes:  discard.NewHistogram(),
		TotalTxs:        discard.NewGauge(),
		CommittedHeight: discard.NewGauge(),
		FastSyncing:     discard.NewGauge(),
diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go
index 7617eb7b5..fcd2ae2a2 100644
--- a/internal/consensus/reactor_test.go
+++ b/internal/consensus/reactor_test.go
@@ -257,7 +257,7 @@ func TestReactorBasic(t *testing.T) {
	config := configSetup(t)
 
	n := 4
-	states, cleanup := randConsensusState(config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	t.Cleanup(cleanup)
 
	rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -296,7 +296,8 @@ func TestReactorWithEvidence(t *testing.T) {
	for i := 0; i < n; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		stateStore := sm.NewStore(stateDB)
-		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+		state, err := sm.MakeGenesisState(genDoc)
+		require.NoError(t, err)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
@@ -334,14 +335,14 @@ func TestReactorWithEvidence(t *testing.T) {
		evpool2 := sm.EmptyEvidencePool{}
 
-		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
+		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
 
		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
		cs.SetPrivValidator(pv)
 
		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-		err := eventBus.Start()
+		err = eventBus.Start()
		require.NoError(t, err)
		cs.SetEventBus(eventBus)
@@ -381,6 +382,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	n := 4
	states, cleanup := randConsensusState(
+		t,
		config,
		n,
		"consensus_reactor_test",
@@ -429,7 +431,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	config := configSetup(t)
 
	n := 4
-	states, cleanup := randConsensusState(config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	t.Cleanup(cleanup)
 
	rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -487,6 +489,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
	n := 4
	states, cleanup := randConsensusState(
+		t,
		config,
		n,
		"consensus_voting_power_changes_test",
diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go
index 03dc90e5a..9b22f4631 100644
--- a/internal/consensus/replay.go
+++ b/internal/consensus/replay.go
@@ -473,7 +473,7 @@ func (h *Handshaker) replayBlocks(
		// We emit events for the index services at the final block due to the sync issue when
		// the node shutdown during the block committing status.
		blockExec := sm.NewBlockExecutor(
-			h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{})
+			h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
		blockExec.SetEventBus(h.eventBus)
		appHash, err = sm.ExecCommitBlock(
			blockExec, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
@@ -511,11 +511,11 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
 
	// Use stubs for both mempool and evidence pool since no transactions nor
	// evidence are needed here - block already exists.
-	blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{})
+	blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
	blockExec.SetEventBus(h.eventBus)
 
	var err error
-	state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block)
+	state, err = blockExec.ApplyBlock(state, meta.BlockID, block)
	if err != nil {
		return sm.State{}, err
	}
diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go
index 2244d868e..ed4007048 100644
--- a/internal/consensus/replay_file.go
+++ b/internal/consensus/replay_file.go
@@ -328,7 +328,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
	}
 
	mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
-	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
+	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
 
	consensusState := NewState(csConfig, state.Copy(), blockExec, blockStore, mempool, evpool)
diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go
index e20fb5209..d7d3e8a47 100644
--- a/internal/consensus/replay_test.go
+++ b/internal/consensus/replay_test.go
@@ -33,6 +33,8 @@ import (
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
+	sf "github.com/tendermint/tendermint/state/test/factory"
+	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
 )
 
@@ -54,21 +56,23 @@
 func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
	lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
	logger := log.TestingLogger()
-	state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
+	state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
+	require.NoError(t, err)
	privValidator := loadPrivValidator(consensusReplayConfig)
+	blockStore := store.NewBlockStore(dbm.NewMemDB())
	cs := newStateWithConfigAndBlockStore(
		consensusReplayConfig,
		state,
		privValidator,
		kvstore.NewApplication(),
-		blockDB,
+		blockStore,
	)
	cs.SetLogger(logger)
 
	bytes, _ := ioutil.ReadFile(cs.config.WalFile())
	t.Logf("====== WAL: \n\r%X\n", bytes)
 
-	err := cs.Start()
+	err = cs.Start()
	require.NoError(t, err)
	defer func() {
		if err := cs.Stop(); err != nil {
@@ -147,6 +151,7 @@ LOOP:
		blockDB := dbm.NewMemDB()
		stateDB := dbm.NewMemDB()
		stateStore := sm.NewStore(stateDB)
+		blockStore := store.NewBlockStore(blockDB)
		state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
		require.NoError(t, err)
		privValidator := loadPrivValidator(consensusReplayConfig)
@@ -155,7 +160,7 @@ LOOP:
			state,
			privValidator,
			kvstore.NewApplication(),
-			blockDB,
+			blockStore,
		)
		cs.SetLogger(logger)
@@ -727,7 +732,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
	state := genesisState.Copy()
	// run the chain through state.ApplyBlock to build up the tendermint state
-	state = buildTMStateFromChain(config, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode)
+	state = buildTMStateFromChain(config, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store)
	latestAppHash := state.AppHash
 
	// make a new client creator
@@ -744,7 +749,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
		stateStore := sm.NewStore(stateDB1)
		err := stateStore.Save(genesisState)
		require.NoError(t, err)
-		buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode)
+		buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store)
	}
 
	// Prune block store if requested
@@ -809,12 +814,13 @@ func applyBlock(stateStore sm.Store,
	evpool sm.EvidencePool,
	st sm.State,
	blk *types.Block,
-	proxyApp proxy.AppConns) sm.State {
+	proxyApp proxy.AppConns,
+	blockStore *mockBlockStore) sm.State {
	testPartSize := types.BlockPartSizeBytes
-	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
+	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
 
	blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()}
-	newState, _, err := blockExec.ApplyBlock(st, blkID, blk)
+	newState, err := blockExec.ApplyBlock(st, blkID, blk)
	if err != nil {
		panic(err)
	}
@@ -828,7 +834,9 @@ func buildAppStateFromChain(
	evpool sm.EvidencePool,
	state sm.State,
	chain []*types.Block,
-	nBlocks int, mode uint) {
+	nBlocks int,
+	mode uint,
+	blockStore *mockBlockStore) {
	// start a new app without handshake, play nBlocks blocks
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
@@ -849,18 +857,18 @@
	case 0:
		for i := 0; i < nBlocks; i++ {
			block := chain[i]
-			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
+			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
		}
 
	case 1, 2, 3:
		for i := 0; i < nBlocks-1; i++ {
			block := chain[i]
-			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
+			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
		}
 
		if mode == 2 || mode == 3 {
			// update the kvstore height and apphash
			// as if we ran commit but not
-			state = applyBlock(stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp)
+			state = applyBlock(stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore)
		}
	default:
		panic(fmt.Sprintf("unknown mode %v", mode))
@@ -876,7 +884,8 @@ func buildTMStateFromChain(
	state sm.State,
	chain []*types.Block,
	nBlocks int,
-	mode uint) sm.State {
+	mode uint,
+	blockStore *mockBlockStore) sm.State {
	// run the whole chain against this client to build up the tendermint state
	kvstoreApp := kvstore.NewPersistentKVStoreApplication(
		filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
@@ -903,19 +912,19 @@
	case 0:
		// sync right up
		for _, block := range chain {
-			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
+			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
		}
 
	case 1, 2, 3:
		// sync up to the penultimate as if we stored the block.
		// whether we commit or not depends on the appHash
		for _, block := range chain[:len(chain)-1] {
-			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
+			state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
		}
 
		// apply the final block to a state copy so we can
		// get the right next appHash but keep the state back
-		applyBlock(stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp)
+		applyBlock(stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore)
	default:
		panic(fmt.Sprintf("unknown mode %v", mode))
	}
@@ -940,7 +949,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	state.LastValidators = state.Validators.Copy()
	// mode = 0 for committing all the blocks
-	blocks := makeBlocks(3, &state, privVal)
+	blocks := sf.MakeBlocks(3, &state, privVal)
	store.chain = blocks
 
	// 2. Tendermint must panic if app returns wrong hash for the first block
@@ -992,51 +1001,6 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
	}
 }
 
-func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
-	blocks := make([]*types.Block, 0)
-
-	var (
-		prevBlock     *types.Block
-		prevBlockMeta *types.BlockMeta
-	)
-
-	appHeight := byte(0x01)
-	for i := 0; i < n; i++ {
-		height := int64(i + 1)
-
-		block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height)
-		blocks = append(blocks, block)
-
-		prevBlock = block
-		prevBlockMeta = types.NewBlockMeta(block, parts)
-
-		// update state
-		state.AppHash = []byte{appHeight}
-		appHeight++
-		state.LastBlockHeight = height
-	}
-
-	return blocks
-}
-
-func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
-	privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {
-
-	lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
-	if height > 1 {
-		vote, _ := factory.MakeVote(
-			privVal,
-			lastBlock.Header.ChainID,
-			1, lastBlock.Header.Height, 0, 2,
-			lastBlockMeta.BlockID,
-			time.Now())
-		lastCommit = types.NewCommit(vote.Height, vote.Round,
-			lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
-	}
-
-	return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
-}
-
 type badApp struct {
	abci.BaseApplication
	numBlocks byte
diff --git a/internal/consensus/state.go b/internal/consensus/state.go
index 0f0d80899..e117d7ddb 100644
--- a/internal/consensus/state.go
+++ b/internal/consensus/state.go
@@ -1655,12 +1655,7 @@ func (cs *State) finalizeCommit(height int64) {
	// Execute and commit the block, update and save the state, and update the mempool.
	// NOTE The block.AppHash wont reflect these txs until the next block.
-	var (
-		err          error
-		retainHeight int64
-	)
-
-	stateCopy, retainHeight, err = cs.blockExec.ApplyBlock(
+	stateCopy, err := cs.blockExec.ApplyBlock(
		stateCopy,
		types.BlockID{
			Hash: block.Hash(),
@@ -1675,16 +1670,6 @@
	fail.Fail() // XXX
 
-	// Prune old heights, if requested by ABCI app.
-	if retainHeight > 0 {
-		pruned, err := cs.pruneBlocks(retainHeight)
-		if err != nil {
-			logger.Error("failed to prune blocks", "retain_height", retainHeight, "err", err)
-		} else {
-			logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight)
-		}
-	}
-
	// must be called before we update state
	cs.recordMetrics(height, block)
@@ -1708,23 +1693,6 @@
	// * cs.StartTime is set to when we will start round0.
 }
 
-func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) {
-	base := cs.blockStore.Base()
-	if retainHeight <= base {
-		return 0, nil
-	}
-	pruned, err := cs.blockStore.PruneBlocks(retainHeight)
-	if err != nil {
-		return 0, fmt.Errorf("failed to prune block store: %w", err)
-	}
-
-	err = cs.blockExec.Store().PruneStates(retainHeight)
-	if err != nil {
-		return 0, fmt.Errorf("failed to prune state store: %w", err)
-	}
-	return pruned, nil
-}
-
 func (cs *State) recordMetrics(height int64, block *types.Block) {
	cs.metrics.Validators.Set(float64(cs.Validators.Size()))
	cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))
@@ -1809,7 +1777,7 @@ func (cs *State) recordMetrics(height int64, block *types.Block) {
	cs.metrics.NumTxs.Set(float64(len(block.Data.Txs)))
	cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs)))
-	cs.metrics.BlockSizeBytes.Set(float64(block.Size()))
+	cs.metrics.BlockSizeBytes.Observe(float64(block.Size()))
	cs.metrics.CommittedHeight.Set(float64(block.Height))
 }
diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go
index b217e4e5b..b7ee90d4d 100644
--- a/internal/consensus/wal_generator.go
+++ b/internal/consensus/wal_generator.go
@@ -87,7 +87,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
	})
	mempool := emptyMempool{}
	evpool := sm.EmptyEvidencePool{}
-	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
+	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
	consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
	consensusState.SetLogger(logger)
	consensusState.SetEventBus(eventBus)
diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go
index e3d62e63f..ac5f27b8e 100644
--- a/internal/evidence/pool_test.go
+++ b/internal/evidence/pool_test.go
@@ -17,6 +17,7 @@ import (
	"github.com/tendermint/tendermint/libs/log"
	sm "github.com/tendermint/tendermint/state"
	smmocks "github.com/tendermint/tendermint/state/mocks"
+	sf "github.com/tendermint/tendermint/state/test/factory"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
@@ -438,8 +439,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) *store.Bloc
	for i := int64(1); i <= state.LastBlockHeight; i++ {
		lastCommit := makeCommit(i-1, valAddr)
-		block, _ := state.MakeBlock(i, []types.Tx{}, lastCommit, nil,
-			state.Validators.GetProposer().Address)
+		block := sf.MakeBlock(state, i, lastCommit)
		block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute)
		block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1}
		const parts = 1
diff --git a/internal/statesync/block_queue.go b/internal/statesync/block_queue.go
new file mode 100644
index 000000000..946875490
--- /dev/null
+++ b/internal/statesync/block_queue.go
@@ -0,0 +1,263 @@
+package statesync
+
+import (
+	"container/heap"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/tendermint/tendermint/internal/p2p"
+	"github.com/tendermint/tendermint/types"
+)
+
+type lightBlockResponse struct {
+	block *types.LightBlock
+	peer  p2p.NodeID
+}
+
+// a block queue is used for asynchronously fetching and verifying light blocks
+type blockQueue struct {
+	mtx sync.Mutex
+
+	// cursors to keep track of which heights need to be fetched and verified
+	fetchHeight  int64
+	verifyHeight int64
+
+	// termination conditions
+	stopHeight int64
+	stopTime   time.Time
+	terminal   *types.LightBlock
+
+	// track failed heights so we know which blocks to try to fetch again
+	failed *maxIntHeap
+	// also count retries to know when to give up
+	retries    int
+	maxRetries int
+
+	// store inbound blocks and serve them to a verifying thread via a channel
+	pending  map[int64]lightBlockResponse
+	verifyCh chan lightBlockResponse
+
+	// waiters hold idle workers until a new height is required
+	waiters []chan int64
+
+	// this channel is closed once the verification process is complete
+	doneCh chan struct{}
+}
+
+func newBlockQueue(
+	startHeight, stopHeight int64,
+	stopTime time.Time,
+	maxRetries int,
+) *blockQueue {
+	return &blockQueue{
+		stopHeight:   stopHeight,
+		stopTime:     stopTime,
+		fetchHeight:  startHeight,
+		verifyHeight: startHeight,
+		pending:      make(map[int64]lightBlockResponse),
+		failed:       &maxIntHeap{},
+		retries:      0,
+		maxRetries:   maxRetries,
+		waiters:      make([]chan int64, 0),
+		doneCh:       make(chan struct{}),
+	}
+}
+
+// add adds a block to the queue to be verified and stored
+// CONTRACT: light blocks should have passed basic validation
+func (q *blockQueue) add(l lightBlockResponse) {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+
+	// return early if the process has already finished
+	select {
+	case <-q.doneCh:
+		return
+	default:
+	}
+
+	// sometimes more blocks are fetched than necessary. If we already
+	// have what we need then ignore this one
+	if q.terminal != nil && l.block.Height < q.terminal.Height {
+		return
+	}
+
+	// if the block that was returned is at the verify height then the verifier
+	// is already waiting for this block so we send it directly to it
+	if l.block.Height == q.verifyHeight && q.verifyCh != nil {
+		q.verifyCh <- l
+		close(q.verifyCh)
+		q.verifyCh = nil
+	} else {
+		// otherwise we add it to the pending bucket
+		q.pending[l.block.Height] = l
+	}
+
+	// Lastly, if the incoming block is at or below the stop height and before
+	// the stop time, we mark it as the terminal block
+	if l.block.Height <= q.stopHeight && l.block.Time.Before(q.stopTime) {
+		q.terminal = l.block
+	}
+}
+
+// nextHeight returns the next height that needs to be retrieved.
+// We assume that for every allocated height the peer will eventually either
+// add the block or signal that it needs to be retried
+func (q *blockQueue) nextHeight() <-chan int64 {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+	ch := make(chan int64, 1)
+	// if a previous request failed then we pick up that height first
+	if q.failed.Len() > 0 {
+		failedHeight := heap.Pop(q.failed)
+		ch <- failedHeight.(int64)
+		close(ch)
+		return ch
+	}
+
+	if q.terminal == nil {
+		// return and decrement the fetch height
+		ch <- q.fetchHeight
+		q.fetchHeight--
+		close(ch)
+		return ch
+	}
+
+	// at this point there is no height that we know we need so we create a
+	// waiter to hold out for either an outgoing request to fail or a block to
+	// fail verification
+	q.waiters = append(q.waiters, ch)
+	return ch
+}
+
+// done returns a channel that is closed once the block queue has had all light
+// blocks retrieved, verified and stored; there is no more work left to be done
+func (q *blockQueue) done() <-chan struct{} {
+	return q.doneCh
+}
+
+// verifyNext pulls the next block off the pending queue and sends it on a
+// channel if it's already there, or registers the channel so the block is sent
+// once it comes in. NOTE: This is assumed to be called from a single thread,
+// as light blocks need to be sequentially verified.
+func (q *blockQueue) verifyNext() <-chan lightBlockResponse {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+	ch := make(chan lightBlockResponse, 1)
+
+	select {
+	case <-q.doneCh:
+		return ch
+	default:
+	}
+
+	if lb, ok := q.pending[q.verifyHeight]; ok {
+		ch <- lb
+		close(ch)
+		delete(q.pending, q.verifyHeight)
+	} else {
+		q.verifyCh = ch
+	}
+
+	return ch
+}
+
+// retry is called when a dispatcher fails to fetch a light block or the
+// fetched light block fails verification. It signals the queue to add the
+// height back to the request queue
+func (q *blockQueue) retry(height int64) {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+
+	select {
+	case <-q.doneCh:
+		return
+	default:
+	}
+
+	// we don't need to retry if this is below the terminal height
+	if q.terminal != nil && height < q.terminal.Height {
+		return
+	}
+
+	q.retries++
+	if q.retries >= q.maxRetries {
+		q._closeChannels()
+		return
+	}
+
+	if len(q.waiters) > 0 {
+		q.waiters[0] <- height
+		close(q.waiters[0])
+		q.waiters = q.waiters[1:]
+	} else {
+		heap.Push(q.failed, height)
+	}
+}
+
+// success is called when a light block has been successfully verified and
+// processed
+func (q *blockQueue) success(height int64) {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+	if q.terminal != nil && q.verifyHeight == q.terminal.Height {
+		q._closeChannels()
+	}
+	q.verifyHeight--
+}
+
+func (q *blockQueue) error() error {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+	if q.retries >= q.maxRetries {
+		return fmt.Errorf("max retries to fetch valid blocks exceeded (%d); "+
+			"target height: %d, height reached: %d", q.maxRetries, q.stopHeight, q.verifyHeight)
+	}
+	return nil
+}
+
+// close the queue and the respective channels
+func (q *blockQueue) close() {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+	q._closeChannels()
+}
+
+// CONTRACT: must have a write lock. Use close instead
+func (q *blockQueue) _closeChannels() {
+	// guard against closing the channels more than once
+	select {
+	case <-q.doneCh:
+		return
+	default:
+	}
+	close(q.doneCh)
+
+	for _, ch := range q.waiters {
+		close(ch)
+	}
+	if q.verifyCh != nil {
+		close(q.verifyCh)
+	}
+}
+
+// A max-heap of ints.
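+// Failed heights are retried highest-first: verification proceeds from the
+// start height downwards, so the largest outstanding height is always the one
+// the verifier is waiting on.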
+type maxIntHeap []int64 + +func (h maxIntHeap) Len() int { return len(h) } +func (h maxIntHeap) Less(i, j int) bool { return h[i] < h[j] } +func (h maxIntHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h *maxIntHeap) Push(x interface{}) { + *h = append(*h, x.(int64)) +} + +func (h *maxIntHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go new file mode 100644 index 000000000..581def941 --- /dev/null +++ b/internal/statesync/block_queue_test.go @@ -0,0 +1,241 @@ +package statesync + +import ( + "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/test/factory" +) + +var ( + startHeight int64 = 200 + stopHeight int64 = 100 + stopTime = time.Date(2019, 1, 1, 1, 0, 0, 0, time.UTC) + endTime = stopTime.Add(-1 * time.Second) + numWorkers = 1 +) + +func TestBlockQueueBasic(t *testing.T) { + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + queue := newBlockQueue(startHeight, stopHeight, stopTime, 1) + wg := &sync.WaitGroup{} + + // asynchronously fetch blocks and add it to the queue + for i := 0; i <= numWorkers; i++ { + wg.Add(1) + go func() { + for { + select { + case height := <-queue.nextHeight(): + queue.add(mockLBResp(t, peerID, height, endTime)) + case <-queue.done(): + wg.Done() + return + } + } + }() + } + + trackingHeight := startHeight + wg.Add(1) + +loop: + for { + select { + case <-queue.done(): + wg.Done() + break loop + + case resp := <-queue.verifyNext(): + // assert that the queue serializes the blocks + require.Equal(t, resp.block.Height, trackingHeight) + trackingHeight-- + queue.success(resp.block.Height) + } + + } + + wg.Wait() + assert.Less(t, trackingHeight, stopHeight) +} + +// Test with spurious failures and retries +func TestBlockQueueWithFailures(t *testing.T) { + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + queue := newBlockQueue(startHeight, stopHeight, stopTime, 200) + wg := &sync.WaitGroup{} + + failureRate := 4 + for i := 0; i <= numWorkers; i++ { + wg.Add(1) + go func() { + for { + select { + case height := <-queue.nextHeight(): + if rand.Intn(failureRate) == 0 { + queue.retry(height) + } else { + queue.add(mockLBResp(t, peerID, height, endTime)) + } + case <-queue.done(): + wg.Done() + return + } + } + }() + } + + trackingHeight := startHeight + for { + select { + case resp := <-queue.verifyNext(): + // assert that the queue serializes the blocks + assert.Equal(t, resp.block.Height, trackingHeight) + if rand.Intn(failureRate) == 0 { + queue.retry(resp.block.Height) + } else { + trackingHeight-- + queue.success(resp.block.Height) + } + + case <-queue.done(): + wg.Wait() + assert.Less(t, trackingHeight, stopHeight) + return + } + } +} + +// Test that when all the blocks are retrieved that the queue still holds on to +// it's workers and in the event of failure can still fetch the failed block +func TestBlockQueueBlocks(t *testing.T) { + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + queue := newBlockQueue(startHeight, stopHeight, stopTime, 2) + expectedHeight := startHeight + retryHeight := stopHeight + 2 + +loop: + for { + select { + case height := <-queue.nextHeight(): + 
+			require.Equal(t, height, expectedHeight)
+			require.GreaterOrEqual(t, height, stopHeight)
+			expectedHeight--
+			queue.add(mockLBResp(t, peerID, height, endTime))
+		case <-time.After(1 * time.Second):
+			if expectedHeight >= stopHeight {
+				t.Fatalf("expected next height %d", expectedHeight)
+			}
+			break loop
+		}
+	}
+
+	// close any waiter channels that the previous worker left hanging
+	for _, ch := range queue.waiters {
+		close(ch)
+	}
+	queue.waiters = make([]chan int64, 0)
+
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	// so far so good. The worker is waiting. Now we fail a previous
+	// block and check that the worker fetches it
+	go func(t *testing.T) {
+		defer wg.Done()
+		select {
+		case height := <-queue.nextHeight():
+			require.Equal(t, retryHeight, height)
+		case <-time.After(1 * time.Second):
+			require.Fail(t, "queue didn't ask worker to fetch failed height")
+		}
+	}(t)
+	queue.retry(retryHeight)
+	wg.Wait()
+
+}
+
+func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) {
+	peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
+	require.NoError(t, err)
+	queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
+	defer queue.close()
+
+loop:
+	for {
+		select {
+		case height := <-queue.nextHeight():
+			require.GreaterOrEqual(t, height, stopHeight)
+			queue.add(mockLBResp(t, peerID, height, endTime))
+		case <-time.After(1 * time.Second):
+			break loop
+		}
+	}
+
+	require.Len(t, queue.pending, int(startHeight-stopHeight)+1)
+
+	queue.add(mockLBResp(t, peerID, stopHeight-1, endTime))
+	require.Len(t, queue.pending, int(startHeight-stopHeight)+1)
+}
+
+// Test a scenario where more blocks are needed than just those down to the
+// stop height because we haven't yet found a block with a small enough time.
+func TestBlockQueueStopTime(t *testing.T) {
+	peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
+	require.NoError(t, err)
+
+	queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
+	wg := &sync.WaitGroup{}
+
+	baseTime := stopTime.Add(-50 * time.Second)
+
+	// asynchronously fetch blocks and add them to the queue
+	for i := 0; i <= numWorkers; i++ {
+		wg.Add(1)
+		go func() {
+			for {
+				select {
+				case height := <-queue.nextHeight():
+					blockTime := baseTime.Add(time.Duration(height) * time.Second)
+					queue.add(mockLBResp(t, peerID, height, blockTime))
+				case <-queue.done():
+					wg.Done()
+					return
+				}
+			}
+		}()
+	}
+
+	trackingHeight := startHeight
+	for {
+		select {
+		case resp := <-queue.verifyNext():
+			// assert that the queue serializes the blocks
+			assert.Equal(t, resp.block.Height, trackingHeight)
+			trackingHeight--
+			queue.success(resp.block.Height)
+
+		case <-queue.done():
+			wg.Wait()
+			assert.Less(t, trackingHeight, stopHeight-50)
+			return
+		}
+	}
+}
+
+func mockLBResp(t *testing.T, peer p2p.NodeID, height int64, time time.Time) lightBlockResponse {
+	return lightBlockResponse{
+		block: mockLB(t, height, time, factory.MakeBlockID()),
+		peer:  peer,
+	}
+}
diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go
new file mode 100644
index 000000000..e58f6d468
--- /dev/null
+++ b/internal/statesync/dispatcher.go
@@ -0,0 +1,322 @@
+package statesync
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/tendermint/tendermint/internal/p2p"
+	"github.com/tendermint/tendermint/light/provider"
+	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
+	proto "github.com/tendermint/tendermint/proto/tendermint/types"
+	"github.com/tendermint/tendermint/types"
+)
+
+var (
+	errNoConnectedPeers    = errors.New("no available peers to dispatch request to")
+	errUnsolicitedResponse = errors.New("unsolicited light block response")
+	errNoResponse          = errors.New("peer failed to respond within timeout")
+	errPeerAlreadyBusy     = errors.New("peer is already processing a request")
+	errDisconnected        = errors.New("dispatcher has been disconnected")
+)
+
+// dispatcher keeps a list of peers and allows concurrent requests for light
+// blocks. NOTE: It is not the responsibility of the dispatcher to verify the
+// light blocks.
+type dispatcher struct {
+	availablePeers *peerlist
+	requestCh      chan<- p2p.Envelope
+	timeout        time.Duration
+
+	mtx     sync.Mutex
+	calls   map[p2p.NodeID]chan *types.LightBlock
+	running bool
+}
+
+func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispatcher {
+	return &dispatcher{
+		availablePeers: newPeerList(),
+		timeout:        timeout,
+		requestCh:      requestCh,
+		calls:          make(map[p2p.NodeID]chan *types.LightBlock),
+		running:        true,
+	}
+}
+
+func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, p2p.NodeID, error) {
+	d.mtx.Lock()
+	outgoingCalls := len(d.calls)
+	d.mtx.Unlock()
+
+	// check to see that the dispatcher is connected to at least one peer
+	if d.availablePeers.Len() == 0 && outgoingCalls == 0 {
+		return nil, "", errNoConnectedPeers
+	}
+
+	// fetch the next peer id in the list and request a light block from that
+	// peer
+	peer := d.availablePeers.Pop()
+	lb, err := d.lightBlock(ctx, height, peer)
+	return lb, peer, err
+}
+
+func (d *dispatcher) Providers(chainID string, timeout time.Duration) []provider.Provider {
+	d.mtx.Lock()
+	defer d.mtx.Unlock()
+
+	providers := make([]provider.Provider, d.availablePeers.Len())
+	peers := d.availablePeers.Peers()
+	for index, peer := range peers {
+		providers[index] = &blockProvider{
+			peer:       peer,
+			dispatcher: d,
+			chainID:    chainID,
+			timeout:    timeout,
+		}
+	}
+	return providers
+}
+
+func (d *dispatcher) stop() {
+	d.mtx.Lock()
+	defer d.mtx.Unlock()
+	d.running = false
+	for peer, call := range d.calls {
+		close(call)
+		delete(d.calls, peer)
+	}
+}
+
+func (d *dispatcher) start() {
+	d.mtx.Lock()
+	defer d.mtx.Unlock()
+	d.running = true
+}
+
+func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer p2p.NodeID) (*types.LightBlock, error) {
+	// dispatch the request to the peer
+	callCh, err := d.dispatch(peer, height)
+	if err != nil {
+		return nil, err
+	}
+
+	// wait for a response, cancel or timeout
+	select {
+	case resp := <-callCh:
+		return resp, nil
+
+	case <-ctx.Done():
+		d.release(peer)
+		return nil, nil
+
+	case <-time.After(d.timeout):
+		d.release(peer)
+		return nil, errNoResponse
+	}
+}
+
+// respond allows the underlying process which receives requests on the
+// requestCh to respond with the respective light block
+func (d *dispatcher) respond(lb *proto.LightBlock, peer p2p.NodeID) error {
+	d.mtx.Lock()
+	defer d.mtx.Unlock()
+
+	// check that the response came from a request
+	answerCh, ok := d.calls[peer]
+	if !ok {
+		// this can also happen if the response came in after the timeout
+		return errUnsolicitedResponse
+	}
+	// release the peer after returning the response
+	defer d.availablePeers.Append(peer)
+	defer close(answerCh)
+	defer delete(d.calls, peer)
+
+	if lb == nil {
+		answerCh <- nil
+		return nil
+	}
+
+	block, err := types.LightBlockFromProto(lb)
+	if err != nil {
+		return fmt.Errorf("error converting light block from proto: %w", err)
+	}
+
+	answerCh <- block
+	return nil
+}
+
+func (d *dispatcher) addPeer(peer p2p.NodeID) {
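+	// the peer list handles its own locking, so the dispatcher's mutex is not
+	// needed here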
+	d.availablePeers.Append(peer)
+}
+
+func (d *dispatcher) removePeer(peer p2p.NodeID) {
+	d.mtx.Lock()
+	defer d.mtx.Unlock()
+	if _, ok := d.calls[peer]; ok {
+		delete(d.calls, peer)
+	} else {
+		d.availablePeers.Remove(peer)
+	}
+}
+
+// dispatch takes a peer and allocates it a channel so long as it's not already
+// busy and the receiving channel is still running. It then dispatches the
+// request to the peer.
+func (d *dispatcher) dispatch(peer p2p.NodeID, height int64) (chan *types.LightBlock, error) {
+	d.mtx.Lock()
+	defer d.mtx.Unlock()
+	ch := make(chan *types.LightBlock, 1)
+
+	// check if the dispatcher is running or not
+	if !d.running {
+		close(ch)
+		return ch, errDisconnected
+	}
+
+	// this should happen only if we add the same peer twice (somehow)
+	if _, ok := d.calls[peer]; ok {
+		close(ch)
+		return ch, errPeerAlreadyBusy
+	}
+	d.calls[peer] = ch
+
+	// send request
+	d.requestCh <- p2p.Envelope{
+		To: peer,
+		Message: &ssproto.LightBlockRequest{
+			Height: uint64(height),
+		},
+	}
+	return ch, nil
+}
+
+// release appends the peer back to the list and deletes the allocated call so
+// that a new call can be made to that peer
+func (d *dispatcher) release(peer p2p.NodeID) {
+	d.mtx.Lock()
+	defer d.mtx.Unlock()
+	if call, ok := d.calls[peer]; ok {
+		close(call)
+		delete(d.calls, peer)
+	}
+	d.availablePeers.Append(peer)
+}
+
+//----------------------------------------------------------------
+
+// blockProvider is a p2p based light provider which uses a dispatcher connected
+// to the state sync reactor to serve light blocks to the light client
+//
+// TODO: This should probably be moved over to the light package but as we're
+// not yet officially supporting p2p light clients we'll leave this here for now.
+type blockProvider struct {
+	peer       p2p.NodeID
+	chainID    string
+	timeout    time.Duration
+	dispatcher *dispatcher
+}
+
+func (p *blockProvider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
+	// FIXME: The provider doesn't know if the dispatcher is still connected to
+	// that peer. If the connection is dropped for whatever reason the
+	// dispatcher needs to be able to relay this back to the provider so it can
+	// return ErrConnectionClosed instead of ErrNoResponse
+	ctx, cancel := context.WithTimeout(ctx, p.timeout)
+	defer cancel()
+	lb, _ := p.dispatcher.lightBlock(ctx, height, p.peer)
+	if lb == nil {
+		return nil, provider.ErrNoResponse
+	}
+
+	if err := lb.ValidateBasic(p.chainID); err != nil {
+		return nil, provider.ErrBadLightBlock{Reason: err}
+	}
+
+	return lb, nil
+}
+
+// ReportEvidence should allow for the light client to report any light client
+// attacks. This is a no-op as there currently isn't a way to wire this up to
+// the evidence reactor (we should endeavor to do this in the future but for now
+// it's not critical for backwards verification)
+func (p *blockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) error {
+	return nil
+}
+
+// String implements the Stringer interface
+func (p *blockProvider) String() string { return string(p.peer) }
+
+//----------------------------------------------------------------
+
+// peerlist is a rolling list of peers. This is used to distribute the load of
+// retrieving blocks over all the peers the reactor is connected to
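+// Pop blocks when the list is empty and unblocks as soon as a peer is
+// appended, so callers can wait for capacity instead of polling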
+type peerlist struct {
+	mtx     sync.Mutex
+	peers   []p2p.NodeID
+	waiting []chan p2p.NodeID
+}
+
+func newPeerList() *peerlist {
+	return &peerlist{
+		peers:   make([]p2p.NodeID, 0),
+		waiting: make([]chan p2p.NodeID, 0),
+	}
+}
+
+func (l *peerlist) Len() int {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+	return len(l.peers)
+}
+
+func (l *peerlist) Pop() p2p.NodeID {
+	l.mtx.Lock()
+	if len(l.peers) == 0 {
+		// if we don't have any peers in the list we block until a peer is
+		// appended
+		wait := make(chan p2p.NodeID, 1)
+		l.waiting = append(l.waiting, wait)
+		// unlock whilst waiting so that the list can be appended to
+		l.mtx.Unlock()
+		peer := <-wait
+		return peer
+	}
+
+	peer := l.peers[0]
+	l.peers = l.peers[1:]
+	l.mtx.Unlock()
+	return peer
+}
+
+func (l *peerlist) Append(peer p2p.NodeID) {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+	if len(l.waiting) > 0 {
+		wait := l.waiting[0]
+		l.waiting = l.waiting[1:]
+		wait <- peer
+		close(wait)
+	} else {
+		l.peers = append(l.peers, peer)
+	}
+}
+
+func (l *peerlist) Remove(peer p2p.NodeID) {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+	for i, p := range l.peers {
+		if p == peer {
+			l.peers = append(l.peers[:i], l.peers[i+1:]...)
+			return
+		}
+	}
+}
+
+func (l *peerlist) Peers() []p2p.NodeID {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+	return l.peers
+}
diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go
new file mode 100644
index 000000000..5026abd19
--- /dev/null
+++ b/internal/statesync/dispatcher_test.go
@@ -0,0 +1,179 @@
+package statesync
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/internal/p2p"
+	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
+)
+
+func TestDispatcherBasic(t *testing.T) {
+
+	ch := make(chan p2p.Envelope, 100)
+	closeCh := make(chan struct{})
+	defer close(closeCh)
+
+	d := newDispatcher(ch, 1*time.Second)
+
+	go handleRequests(t, d, ch, closeCh)
+
+	peers := createPeerSet(5)
+	for _, peer := range peers {
+		d.addPeer(peer)
+	}
+
+	wg := sync.WaitGroup{}
+
+	// make a bunch of async requests and require that the correct responses are
+	// given
+	for i := 1; i < 10; i++ {
+		wg.Add(1)
+		go func(height int64) {
+			defer wg.Done()
+			lb, peer, err := d.LightBlock(context.Background(), height)
+			require.NoError(t, err)
+			require.NotNil(t, lb)
+			require.Equal(t, lb.Height, height)
+			require.Contains(t, peers, peer)
+		}(int64(i))
+	}
+	wg.Wait()
+}
+
+func TestDispatcherProviders(t *testing.T) {
+
+	ch := make(chan p2p.Envelope, 100)
+	chainID := "state-sync-test"
+	closeCh := make(chan struct{})
+	defer close(closeCh)
+
+	d := newDispatcher(ch, 1*time.Second)
+
+	go handleRequests(t, d, ch, closeCh)
+
+	peers := createPeerSet(5)
+	for _, peer := range peers {
+		d.addPeer(peer)
+	}
+
+	providers := d.Providers(chainID, 5*time.Second)
+	require.Len(t, providers, 5)
+	for i, p := range providers {
+		bp, ok := p.(*blockProvider)
+		require.True(t, ok)
+		assert.Equal(t, bp.String(), string(peers[i]))
+		lb, err := p.LightBlock(context.Background(), 10)
+		assert.Error(t, err)
+		assert.Nil(t, lb)
+	}
+}
+
+func TestPeerListBasic(t *testing.T) {
+	peerList := newPeerList()
+	assert.Zero(t, peerList.Len())
+	numPeers := 10
+	peerSet := createPeerSet(numPeers)
+
+	for _, peer := range peerSet {
+		peerList.Append(peer)
+	}
+
+	for idx, peer := range peerList.Peers() {
+		assert.Equal(t, peer, peerSet[idx])
+	}
+
+	assert.Equal(t, numPeers, peerList.Len())
+
+	half := numPeers / 2
+	for i := 0; i < half; i++ {
+		assert.Equal(t, peerSet[i], peerList.Pop())
+	}
+	assert.Equal(t, half, peerList.Len())
+
+	peerList.Remove(p2p.NodeID("lp"))
+	assert.Equal(t, half, peerList.Len())
+
+	peerList.Remove(peerSet[half])
+	half++
+	assert.Equal(t, peerSet[half], peerList.Pop())
+
+}
+
+func TestPeerListConcurrent(t *testing.T) {
+	peerList := newPeerList()
+	numPeers := 10
+
+	wg := sync.WaitGroup{}
+	// we run a set of goroutines requesting the next peer in the list. As the
+	// peer list hasn't been populated yet, each of these goroutines should block
+	for i := 0; i < numPeers/2; i++ {
+		go func() {
+			_ = peerList.Pop()
+			wg.Done()
+		}()
+	}
+
+	// now we add the peers to the list, this should allow the previously
+	// blocked goroutines to unblock
+	for _, peer := range createPeerSet(numPeers) {
+		wg.Add(1)
+		peerList.Append(peer)
+	}
+
+	// we request the second half of the peer set
+	for i := 0; i < numPeers/2; i++ {
+		go func() {
+			_ = peerList.Pop()
+			wg.Done()
+		}()
+	}
+
+	// we use a context with cancel and a separate goroutine to wait for all
+	// the other goroutines to close.
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() { wg.Wait(); cancel() }()
+
+	select {
+	case <-time.After(time.Second):
+		// not all of the blocked goroutines waiting on peers have returned
+		// after one second. This likely means the list deadlocked.
+		t.Fatal("timed out waiting for blocked Pop calls to return")
+	case <-ctx.Done():
+		// there should be no peers remaining
+		require.Equal(t, 0, peerList.Len())
+	}
+}
+
+// handleRequests is a helper function usually run in a separate goroutine to
+// imitate the expected responses of the reactor wired to the dispatcher
+func handleRequests(t *testing.T, d *dispatcher, ch chan p2p.Envelope, closeCh chan struct{}) {
+	t.Helper()
+	for {
+		select {
+		case request := <-ch:
+			height := request.Message.(*ssproto.LightBlockRequest).Height
+			peer := request.To
+			resp := mockLBResp(t, peer, int64(height), time.Now())
+			block, _ := resp.block.ToProto()
+			require.NoError(t, d.respond(block, resp.peer))
+		case <-closeCh:
+			return
+		}
+	}
+}
+
+func createPeerSet(num int) []p2p.NodeID {
+	peers := make([]p2p.NodeID, num)
+	for i := 0; i < num; i++ {
+		peers[i], _ = p2p.NewNodeID(strings.Repeat(fmt.Sprintf("%d", i), 2*p2p.NodeIDByteLength))
+	}
+	return peers
+}
diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go
index 951514060..4f367380d 100644
--- a/internal/statesync/mocks/state_provider.go
+++ b/internal/statesync/mocks/state_provider.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.5.1. DO NOT EDIT.
+// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index fa38ca293..b5436d1ab 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -1,9 +1,11 @@ package statesync import ( + "bytes" "context" "errors" "fmt" + "reflect" "sort" "time" @@ -15,6 +17,7 @@ import ( ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -49,6 +52,17 @@ var ( SendQueueCapacity: 4, RecvMessageCapacity: chunkMsgSize, + MaxSendBytes: 400, + }, + }, + LightBlockChannel: { + MsgType: new(ssproto.Message), + Descriptor: &p2p.ChannelDescriptor{ + ID: byte(LightBlockChannel), + Priority: 1, + SendQueueCapacity: 10, + RecvMessageCapacity: lightBlockMsgSize, + MaxSendBytes: 400, }, }, @@ -62,6 +76,9 @@ const ( // ChunkChannel exchanges chunk contents ChunkChannel = p2p.ChannelID(0x61) + // LightBlockChannel exchanges light blocks + LightBlockChannel = p2p.ChannelID(0x62) + // recentSnapshots is the number of recent snapshots to send and receive per peer. recentSnapshots = 10 @@ -70,6 +87,21 @@ const ( // chunkMsgSize is the maximum size of a chunkResponseMessage chunkMsgSize = int(16e6) + + // lightBlockMsgSize is the maximum size of a lightBlockResponseMessage + lightBlockMsgSize = int(1e7) + + // lightBlockResponseTimeout is how long the dispatcher waits for a peer to + // return a light block + lightBlockResponseTimeout = 10 * time.Second + + // maxLightBlockRequestRetries is the amount of retries acceptable before + // the backfill process aborts + maxLightBlockRequestRetries = 20 + + // the amount of processes fetching light blocks - this should be roughly calculated + // as the time to fetch a block / time to verify a block + lightBlockFetchers = 4 ) // Reactor handles state sync, both restoring snapshots for the local node and @@ -77,14 +109,20 @@ const ( type Reactor struct { service.BaseService + stateStore sm.Store + blockStore *store.BlockStore + conn proxy.AppConnSnapshot connQuery proxy.AppConnQuery tempDir string snapshotCh *p2p.Channel chunkCh *p2p.Channel + blockCh *p2p.Channel peerUpdates *p2p.PeerUpdates closeCh chan struct{} + dispatcher *dispatcher + // This will only be set when a state sync is in progress. It is used to feed // received snapshots and chunks into the sync. mtx tmsync.RWMutex @@ -99,8 +137,10 @@ func NewReactor( logger log.Logger, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, - snapshotCh, chunkCh *p2p.Channel, + snapshotCh, chunkCh, blockCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, + stateStore sm.Store, + blockStore *store.BlockStore, tempDir string, ) *Reactor { r := &Reactor{ @@ -108,9 +148,13 @@ func NewReactor( connQuery: connQuery, snapshotCh: snapshotCh, chunkCh: chunkCh, + blockCh: blockCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), tempDir: tempDir, + dispatcher: newDispatcher(blockCh.Out, lightBlockResponseTimeout), + stateStore: stateStore, + blockStore: blockStore, } r.BaseService = *service.NewBaseService(logger, "StateSync", r) @@ -134,14 +178,21 @@ func (r *Reactor) OnStart() error { // have to deal with bounding workers or pools. go r.processChunkCh() + go r.processBlockCh() + go r.processPeerUpdates() + r.dispatcher.start() + return nil } // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. 
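+// The dispatcher is stopped before the channels are closed so that no new
+// light block requests go out while the spawned goroutines wind down.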
func (r *Reactor) OnStop() { + // tell the dispatcher to stop sending any more requests + r.dispatcher.stop() + // Close closeCh to signal to all spawned goroutines to gracefully exit. All // p2p Channels should execute Close(). close(r.closeCh) @@ -151,9 +202,231 @@ func (r *Reactor) OnStop() { // panics will occur. <-r.snapshotCh.Done() <-r.chunkCh.Done() + <-r.blockCh.Done() <-r.peerUpdates.Done() } +// Sync runs a state sync, fetching snapshots and providing chunks to the +// application. It also saves tendermint state and runs a backfill process to +// retrieve the necessary amount of headers, commits and validators sets to be +// able to process evidence and participate in consensus. +func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration) (sm.State, error) { + r.mtx.Lock() + if r.syncer != nil { + r.mtx.Unlock() + return sm.State{}, errors.New("a state sync is already in progress") + } + + r.syncer = newSyncer(r.Logger, r.conn, r.connQuery, stateProvider, r.snapshotCh.Out, r.chunkCh.Out, r.tempDir) + r.mtx.Unlock() + + hook := func() { + // request snapshots from all currently connected peers + r.Logger.Debug("requesting snapshots from known peers") + r.snapshotCh.Out <- p2p.Envelope{ + Broadcast: true, + Message: &ssproto.SnapshotsRequest{}, + } + } + + hook() + + state, commit, err := r.syncer.SyncAny(discoveryTime, hook) + if err != nil { + return sm.State{}, err + } + + r.mtx.Lock() + r.syncer = nil + r.mtx.Unlock() + + err = r.stateStore.Bootstrap(state) + if err != nil { + return sm.State{}, fmt.Errorf("failed to bootstrap node with new state: %w", err) + } + + err = r.blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + return sm.State{}, fmt.Errorf("failed to store last seen commit: %w", err) + } + + return state, nil +} + +// Backfill sequentially fetches, verifies and stores light blocks in reverse +// order. It does not stop verifying blocks until reaching a block with a height +// and time that is less or equal to the stopHeight and stopTime. The +// trustedBlockID should be of the header at startHeight. +func (r *Reactor) Backfill(state sm.State) error { + params := state.ConsensusParams.Evidence + stopHeight := state.LastBlockHeight - params.MaxAgeNumBlocks + stopTime := state.LastBlockTime.Add(-params.MaxAgeDuration) + // ensure that stop height doesn't go below the initial height + if stopHeight < state.InitialHeight { + stopHeight = state.InitialHeight + // this essentially makes stop time a void criteria for termination + stopTime = state.LastBlockTime + } + return r.backfill( + context.Background(), + state.ChainID, + state.LastBlockHeight, stopHeight, + state.LastBlockID, + stopTime, + ) +} + +func (r *Reactor) backfill( + ctx context.Context, + chainID string, + startHeight, stopHeight int64, + trustedBlockID types.BlockID, + stopTime time.Time, +) error { + r.Logger.Info("starting backfill process...", "startHeight", startHeight, + "stopHeight", stopHeight, "trustedBlockID", trustedBlockID) + + const sleepTime = 1 * time.Second + var ( + lastValidatorSet *types.ValidatorSet + lastChangeHeight int64 = startHeight + ) + + queue := newBlockQueue(startHeight, stopHeight, stopTime, maxLightBlockRequestRetries) + + // fetch light blocks across four workers. The aim with deploying concurrent + // workers is to equate the network messaging time with the verification + // time. Ideally we want the verification process to never have to be + // waiting on blocks. 
If it takes 4s to retrieve a block and 1s to verify + // it, then steady state involves four workers. + for i := 0; i < lightBlockFetchers; i++ { + go func() { + for { + select { + case height := <-queue.nextHeight(): + r.Logger.Debug("fetching next block", "height", height) + lb, peer, err := r.dispatcher.LightBlock(ctx, height) + if err != nil { + queue.retry(height) + if errors.Is(err, errNoConnectedPeers) { + r.Logger.Info("backfill: no connected peers to fetch light blocks from; sleeping...", + "sleepTime", sleepTime) + time.Sleep(sleepTime) + } else { + // we don't punish the peer as it might just have not responded in time + r.Logger.Info("backfill: error with fetching light block", + "height", height, "err", err) + } + continue + } + if lb == nil { + r.Logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height) + queue.retry(height) + // as we are fetching blocks backwards, if this node doesn't have the block it likely doesn't + // have any prior ones, thus we remove it from the peer list + r.dispatcher.removePeer(peer) + continue + } + + // run a validate basic. This checks the validator set and commit + // hashes line up + err = lb.ValidateBasic(chainID) + if err != nil || lb.Height != height { + r.Logger.Info("backfill: fetched light block failed validate basic, removing peer...", + "err", err, "height", height) + queue.retry(height) + r.blockCh.Error <- p2p.PeerError{ + NodeID: peer, + Err: fmt.Errorf("received invalid light block: %w", err), + } + continue + } + + // add block to queue to be verified + queue.add(lightBlockResponse{ + block: lb, + peer: peer, + }) + r.Logger.Debug("backfill: added light block to processing queue", "height", height) + + case <-queue.done(): + return + } + } + }() + } + + // verify all light blocks + for { + select { + case <-r.closeCh: + queue.close() + return nil + case <-ctx.Done(): + queue.close() + return nil + case resp := <-queue.verifyNext(): + // validate the header hash. We take the last block id of the + // previous header (i.e. one height above) as the trusted hash which + // we equate to. ValidatorsHash and CommitHash have already been + // checked in the `ValidateBasic` + if w, g := trustedBlockID.Hash, resp.block.Hash(); !bytes.Equal(w, g) { + r.Logger.Info("received invalid light block. header hash doesn't match trusted LastBlockID", + "trustedHash", w, "receivedHash", g, "height", resp.block.Height) + r.blockCh.Error <- p2p.PeerError{ + NodeID: resp.peer, + Err: fmt.Errorf("received invalid light block. 
Expected hash %v, got: %v", w, g), + } + queue.retry(resp.block.Height) + continue + } + + // save the signed headers + err := r.blockStore.SaveSignedHeader(resp.block.SignedHeader, trustedBlockID) + if err != nil { + return err + } + + // check if there has been a change in the validator set + if lastValidatorSet != nil && !bytes.Equal(resp.block.Header.ValidatorsHash, resp.block.Header.NextValidatorsHash) { + // save all the heights that the last validator set was the same + err = r.stateStore.SaveValidatorSets(resp.block.Height+1, lastChangeHeight, lastValidatorSet) + if err != nil { + return err + } + + // update the lastChangeHeight + lastChangeHeight = resp.block.Height + } + + trustedBlockID = resp.block.LastBlockID + queue.success(resp.block.Height) + r.Logger.Info("backfill: verified and stored light block", "height", resp.block.Height) + + lastValidatorSet = resp.block.ValidatorSet + + case <-queue.done(): + if err := queue.error(); err != nil { + return err + } + + // save the final batch of validators + if err := r.stateStore.SaveValidatorSets(queue.terminal.Height, lastChangeHeight, lastValidatorSet); err != nil { + return err + } + + r.Logger.Info("successfully completed backfill process", "endHeight", queue.terminal.Height) + return nil + } + } +} + +// Dispatcher exposes the dispatcher so that a state provider can use it for +// light client verification +func (r *Reactor) Dispatcher() *dispatcher { //nolint:golint + return r.dispatcher +} + // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. @@ -311,6 +584,44 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { return nil } +func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { + switch msg := envelope.Message.(type) { + case *ssproto.LightBlockRequest: + r.Logger.Info("received light block request", "height", msg.Height) + lb, err := r.fetchLightBlock(msg.Height) + if err != nil { + r.Logger.Error("failed to retrieve light block", "err", err, "height", msg.Height) + return err + } + + lbproto, err := lb.ToProto() + if err != nil { + r.Logger.Error("marshaling light block to proto", "err", err) + return nil + } + + // NOTE: If we don't have the light block we will send a nil light block + // back to the requested node, indicating that we don't have it. + r.blockCh.Out <- p2p.Envelope{ + To: envelope.From, + Message: &ssproto.LightBlockResponse{ + LightBlock: lbproto, + }, + } + + case *ssproto.LightBlockResponse: + if err := r.dispatcher.respond(msg.LightBlock, envelope.From); err != nil { + r.Logger.Error("error processing light block response", "err", err) + return err + } + + default: + return fmt.Errorf("received unknown message: %T", msg) + } + + return nil +} + // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
@@ -321,7 +632,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) + r.Logger.Debug("received message", "message", reflect.TypeOf(envelope.Message), "peer", envelope.From) switch chID { case SnapshotChannel: @@ -330,6 +641,9 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err case ChunkChannel: err = r.handleChunkMessage(envelope) + case LightBlockChannel: + err = r.handleLightBlockMessage(envelope) + default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) } @@ -338,52 +652,44 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } // processSnapshotCh initiates a blocking process where we listen for and handle -// envelopes on the SnapshotChannel. Any error encountered during message -// execution will result in a PeerError being sent on the SnapshotChannel. When -// the reactor is stopped, we will catch the signal and close the p2p Channel -// gracefully. +// envelopes on the SnapshotChannel. func (r *Reactor) processSnapshotCh() { - defer r.snapshotCh.Close() - - for { - select { - case envelope := <-r.snapshotCh.In: - if err := r.handleMessage(r.snapshotCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.snapshotCh.ID, "envelope", envelope, "err", err) - r.snapshotCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } - } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on snapshot channel; closing...") - return - } - } + r.processCh(r.snapshotCh, "snapshot") } // processChunkCh initiates a blocking process where we listen for and handle -// envelopes on the ChunkChannel. Any error encountered during message -// execution will result in a PeerError being sent on the ChunkChannel. When -// the reactor is stopped, we will catch the signal and close the p2p Channel -// gracefully. +// envelopes on the ChunkChannel. func (r *Reactor) processChunkCh() { - defer r.chunkCh.Close() + r.processCh(r.chunkCh, "chunk") +} + +// processBlockCh initiates a blocking process where we listen for and handle +// envelopes on the LightBlockChannel. +func (r *Reactor) processBlockCh() { + r.processCh(r.blockCh, "light block") +} + +// processCh routes state sync messages to their respective handlers. Any error +// encountered during message execution will result in a PeerError being sent on +// the respective channel. When the reactor is stopped, we will catch the signal +// and close the p2p Channel gracefully. 
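+// The chName argument is purely descriptive: it is only used to label log
+// lines so that snapshot, chunk and light block traffic can be told apart.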
+func (r *Reactor) processCh(ch *p2p.Channel, chName string) { + defer ch.Close() for { select { - case envelope := <-r.chunkCh.In: - if err := r.handleMessage(r.chunkCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.chunkCh.ID, "envelope", envelope, "err", err) - r.chunkCh.Error <- p2p.PeerError{ + case envelope := <-ch.In: + if err := r.handleMessage(ch.ID, envelope); err != nil { + r.Logger.Error(fmt.Sprintf("failed to process %s message", chName), + "ch_id", ch.ID, "envelope", envelope, "err", err) + ch.Error <- p2p.PeerError{ NodeID: envelope.From, Err: err, } } case <-r.closeCh: - r.Logger.Debug("stopped listening on chunk channel; closing...") + r.Logger.Debug(fmt.Sprintf("stopped listening on %s channel; closing...", chName)) return } } @@ -397,14 +703,18 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { r.mtx.RLock() defer r.mtx.RUnlock() - if r.syncer != nil { - switch peerUpdate.Status { - case p2p.PeerStatusUp: + switch peerUpdate.Status { + case p2p.PeerStatusUp: + if r.syncer != nil { r.syncer.AddPeer(peerUpdate.NodeID) + } + r.dispatcher.addPeer(peerUpdate.NodeID) - case p2p.PeerStatusDown: + case p2p.PeerStatusDown: + if r.syncer != nil { r.syncer.RemovePeer(peerUpdate.NodeID) } + r.dispatcher.removePeer(peerUpdate.NodeID) } } @@ -465,34 +775,35 @@ func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { return snapshots, nil } -// Sync runs a state sync, returning the new state and last commit at the snapshot height. -// The caller must store the state and commit in the state database and block store. -func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration) (sm.State, *types.Commit, error) { - r.mtx.Lock() - if r.syncer != nil { - r.mtx.Unlock() - return sm.State{}, nil, errors.New("a state sync is already in progress") +// fetchLightBlock works out whether the node has a light block at a particular +// height and if so returns it so it can be gossiped to peers +func (r *Reactor) fetchLightBlock(height uint64) (*types.LightBlock, error) { + h := int64(height) + + blockMeta := r.blockStore.LoadBlockMeta(h) + if blockMeta == nil { + return nil, nil } - r.syncer = newSyncer(r.Logger, r.conn, r.connQuery, stateProvider, r.snapshotCh.Out, r.chunkCh.Out, r.tempDir) - r.mtx.Unlock() - - hook := func() { - // request snapshots from all currently connected peers - r.Logger.Debug("requesting snapshots from known peers") - r.snapshotCh.Out <- p2p.Envelope{ - Broadcast: true, - Message: &ssproto.SnapshotsRequest{}, - } + commit := r.blockStore.LoadBlockCommit(h) + if commit == nil { + return nil, nil } - hook() + vals, err := r.stateStore.LoadValidators(h) + if err != nil { + return nil, err + } + if vals == nil { + return nil, nil + } - state, commit, err := r.syncer.SyncAny(discoveryTime, hook) + return &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: &blockMeta.Header, + Commit: commit, + }, + ValidatorSet: vals, + }, nil - r.mtx.Lock() - r.syncer = nil - r.mtx.Unlock() - - return state, commit, err } diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index df9667b29..fab6e30c7 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -2,17 +2,29 @@ package statesync import ( "context" + "fmt" + "math/rand" + "sync" "testing" "time" + // "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abci 
"github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/statesync/mocks" + "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" proxymocks "github.com/tendermint/tendermint/proxy/mocks" + smmocks "github.com/tendermint/tendermint/state/mocks" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" ) type reactorTestSuite struct { @@ -33,7 +45,16 @@ type reactorTestSuite struct { chunkOutCh chan p2p.Envelope chunkPeerErrCh chan p2p.PeerError - peerUpdates *p2p.PeerUpdates + blockChannel *p2p.Channel + blockInCh chan p2p.Envelope + blockOutCh chan p2p.Envelope + blockPeerErrCh chan p2p.PeerError + + peerUpdateCh chan p2p.PeerUpdate + peerUpdates *p2p.PeerUpdates + + stateStore *smmocks.Store + blockStore *store.BlockStore } func setup( @@ -62,12 +83,17 @@ func setup( chunkInCh: make(chan p2p.Envelope, chBuf), chunkOutCh: make(chan p2p.Envelope, chBuf), chunkPeerErrCh: make(chan p2p.PeerError, chBuf), - peerUpdates: p2p.NewPeerUpdates(make(chan p2p.PeerUpdate), int(chBuf)), + blockInCh: make(chan p2p.Envelope, chBuf), + blockOutCh: make(chan p2p.Envelope, chBuf), + blockPeerErrCh: make(chan p2p.PeerError, chBuf), conn: conn, connQuery: connQuery, stateProvider: stateProvider, } + rts.peerUpdateCh = make(chan p2p.PeerUpdate, chBuf) + rts.peerUpdates = p2p.NewPeerUpdates(rts.peerUpdateCh, int(chBuf)) + rts.snapshotChannel = p2p.NewChannel( SnapshotChannel, new(ssproto.Message), @@ -84,16 +110,33 @@ func setup( rts.chunkPeerErrCh, ) + rts.blockChannel = p2p.NewChannel( + LightBlockChannel, + new(ssproto.Message), + rts.blockInCh, + rts.blockOutCh, + rts.blockPeerErrCh, + ) + + rts.stateStore = &smmocks.Store{} + rts.blockStore = store.NewBlockStore(dbm.NewMemDB()) + rts.reactor = NewReactor( - log.NewNopLogger(), + log.TestingLogger(), conn, connQuery, rts.snapshotChannel, rts.chunkChannel, + rts.blockChannel, rts.peerUpdates, + rts.stateStore, + rts.blockStore, "", ) + // override the dispatcher with one with a shorter timeout + rts.reactor.dispatcher = newDispatcher(rts.blockChannel.Out, 1*time.Second) + rts.syncer = newSyncer( log.NewNopLogger(), conn, @@ -270,6 +313,172 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } } +func TestReactor_LightBlockResponse(t *testing.T) { + rts := setup(t, nil, nil, nil, 2) + + var height int64 = 10 + h := factory.MakeRandomHeader() + h.Height = height + blockID := factory.MakeBlockIDWithHash(h.Hash()) + vals, pv := factory.RandValidatorSet(1, 10) + vote, err := factory.MakeVote(pv[0], h.ChainID, 0, h.Height, 0, 2, + blockID, factory.DefaultTestTime) + require.NoError(t, err) + + sh := &types.SignedHeader{ + Header: h, + Commit: &types.Commit{ + Height: h.Height, + BlockID: blockID, + Signatures: []types.CommitSig{ + vote.CommitSig(), + }, + }, + } + + lb := &types.LightBlock{ + SignedHeader: sh, + ValidatorSet: vals, + } + + require.NoError(t, rts.blockStore.SaveSignedHeader(sh, blockID)) + + rts.stateStore.On("LoadValidators", height).Return(vals, nil) + + rts.blockInCh <- p2p.Envelope{ + From: p2p.NodeID("aa"), + Message: &ssproto.LightBlockRequest{ + Height: 10, + }, + } + require.Empty(t, rts.blockPeerErrCh) + + select { + case response := <-rts.blockOutCh: + require.Equal(t, p2p.NodeID("aa"), 
response.To)
+		res, ok := response.Message.(*ssproto.LightBlockResponse)
+		require.True(t, ok)
+		receivedLB, err := types.LightBlockFromProto(res.LightBlock)
+		require.NoError(t, err)
+		require.Equal(t, lb, receivedLB)
+	case <-time.After(1 * time.Second):
+		t.Fatal("expected light block response")
+	}
+}
+
+func TestReactor_Dispatcher(t *testing.T) {
+	rts := setup(t, nil, nil, nil, 2)
+	rts.peerUpdateCh <- p2p.PeerUpdate{
+		NodeID: p2p.NodeID("aa"),
+		Status: p2p.PeerStatusUp,
+	}
+	rts.peerUpdateCh <- p2p.PeerUpdate{
+		NodeID: p2p.NodeID("bb"),
+		Status: p2p.PeerStatusUp,
+	}
+
+	closeCh := make(chan struct{})
+	defer close(closeCh)
+
+	chain := buildLightBlockChain(t, 1, 10, time.Now())
+	go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0)
+
+	dispatcher := rts.reactor.Dispatcher()
+	providers := dispatcher.Providers(factory.DefaultTestChainID, 5*time.Second)
+	require.Len(t, providers, 2)
+
+	wg := sync.WaitGroup{}
+
+	for _, p := range providers {
+		wg.Add(1)
+		go func(t *testing.T, p provider.Provider) {
+			defer wg.Done()
+			for height := 2; height < 10; height++ {
+				lb, err := p.LightBlock(context.Background(), int64(height))
+				require.NoError(t, err)
+				require.NotNil(t, lb)
+				require.Equal(t, height, int(lb.Height))
+			}
+		}(t, p)
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() { wg.Wait(); cancel() }()
+
+	select {
+	case <-time.After(time.Second):
+		// not all of the requests to the dispatcher were responded to
+		// within the timeout
+		t.Fail()
+	case <-ctx.Done():
+	}
+}
+
+func TestReactor_Backfill(t *testing.T) {
+	// test backfill algorithm with varying failure rates (out of 10 requests)
+	failureRates := []int{0, 3, 9}
+	for _, failureRate := range failureRates {
+		failureRate := failureRate
+		t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) {
+			// t.Cleanup(leaktest.Check(t))
+			rts := setup(t, nil, nil, nil, 21)
+
+			var (
+				startHeight int64 = 20
+				stopHeight  int64 = 10
+				stopTime          = time.Date(2020, 1, 1, 0, 100, 0, 0, time.UTC)
+			)
+
+			peers := []string{"a", "b", "c", "d"}
+			for _, peer := range peers {
+				rts.peerUpdateCh <- p2p.PeerUpdate{
+					NodeID: p2p.NodeID(peer),
+					Status: p2p.PeerStatusUp,
+				}
+			}
+
+			trackingHeight := startHeight
+			rts.stateStore.On("SaveValidatorSets", mock.AnythingOfType("int64"), mock.AnythingOfType("int64"),
+				mock.AnythingOfType("*types.ValidatorSet")).Return(func(lh, uh int64, vals *types.ValidatorSet) error {
+				require.Equal(t, trackingHeight, lh)
+				require.Equal(t, lh, uh)
+				require.GreaterOrEqual(t, lh, stopHeight)
+				trackingHeight--
+				return nil
+			})
+
+			chain := buildLightBlockChain(t, stopHeight-1, startHeight+1, stopTime)
+
+			closeCh := make(chan struct{})
+			defer close(closeCh)
+			go handleLightBlockRequests(t, chain, rts.blockOutCh,
+				rts.blockInCh, closeCh, failureRate)
+
+			err := rts.reactor.backfill(
+				context.Background(),
+				factory.DefaultTestChainID,
+				startHeight,
+				stopHeight,
+				factory.MakeBlockIDWithHash(chain[startHeight].Header.Hash()),
+				stopTime,
+			)
+			if failureRate > 5 {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+
+				// the backfilled range [stopHeight, startHeight] should now be
+				// present in the block store
+				for height := stopHeight; height <= startHeight; height++ {
+					blockMeta := rts.blockStore.LoadBlockMeta(height)
+					require.NotNil(t, blockMeta)
+				}
+
+				require.Nil(t, rts.blockStore.LoadBlockMeta(stopHeight-1))
+				require.Nil(t, rts.blockStore.LoadBlockMeta(startHeight+1))
+			}
+		})
+	}
+}
+
// retryUntil will continue to evaluate fn and will return successfully when true
// or fail when the timeout is reached.
func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) {
@@ -284,3 +493,87 @@ func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) {
 		require.NoError(t, ctx.Err())
 	}
 }
+
+func handleLightBlockRequests(t *testing.T,
+	chain map[int64]*types.LightBlock,
+	receiving chan p2p.Envelope,
+	sending chan p2p.Envelope,
+	closeCh chan struct{},
+	failureRate int) {
+	requests := 0
+	for {
+		select {
+		case envelope := <-receiving:
+			if msg, ok := envelope.Message.(*ssproto.LightBlockRequest); ok {
+				if requests%10 >= failureRate {
+					lb, err := chain[int64(msg.Height)].ToProto()
+					require.NoError(t, err)
+					sending <- p2p.Envelope{
+						From: envelope.To,
+						Message: &ssproto.LightBlockResponse{
+							LightBlock: lb,
+						},
+					}
+				} else {
+					switch rand.Intn(3) {
+					case 0: // send a different block
+						differentLB, err := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID()).ToProto()
+						require.NoError(t, err)
+						sending <- p2p.Envelope{
+							From: envelope.To,
+							Message: &ssproto.LightBlockResponse{
+								LightBlock: differentLB,
+							},
+						}
+					case 1: // send nil block i.e. pretend we don't have it
+						sending <- p2p.Envelope{
+							From: envelope.To,
+							Message: &ssproto.LightBlockResponse{
+								LightBlock: nil,
+							},
+						}
+					case 2: // don't do anything
+					}
+				}
+			}
+		case <-closeCh:
+			return
+		}
+		requests++
+	}
+}
+
+func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime time.Time) map[int64]*types.LightBlock {
+	chain := make(map[int64]*types.LightBlock, toHeight-fromHeight)
+	lastBlockID := factory.MakeBlockID()
+	blockTime := startTime.Add(-5 * time.Minute)
+	for height := fromHeight; height < toHeight; height++ {
+		chain[height] = mockLB(t, height, blockTime, lastBlockID)
+		lastBlockID = factory.MakeBlockIDWithHash(chain[height].Header.Hash())
+		blockTime = blockTime.Add(1 * time.Minute)
+	}
+	return chain
+}
+
+func mockLB(t *testing.T, height int64, time time.Time,
+	lastBlockID types.BlockID) *types.LightBlock {
+	header, err := factory.MakeHeader(&types.Header{
+		Height:      height,
+		LastBlockID: lastBlockID,
+		Time:        time,
+	})
+	require.NoError(t, err)
+	vals, pv := factory.RandValidatorSet(3, 10)
+	header.ValidatorsHash = vals.Hash()
+	lastBlockID = factory.MakeBlockIDWithHash(header.Hash())
+	voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, vals)
+	commit, err := factory.MakeCommit(lastBlockID, height, 0, voteSet, pv, time)
+	require.NoError(t, err)
+	return &types.LightBlock{
+		SignedHeader: &types.SignedHeader{
+			Header: header,
+			Commit: commit,
+		},
+		ValidatorSet: vals,
+	}
+}
diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go
index 99eef2bbe..c165f72dc 100644
--- a/internal/statesync/stateprovider.go
+++ b/internal/statesync/stateprovider.go
@@ -53,7 +53,7 @@ func NewLightClientStateProvider(
 	logger log.Logger,
 ) (StateProvider, error) {
 	if len(servers) < 2 {
-		return nil, fmt.Errorf("at least 2 RPC servers are required, got %v", len(servers))
+		return nil, fmt.Errorf("at least 2 RPC servers are required, got %d", len(servers))
 	}
 
 	providers := make([]lightprovider.Provider, 0, len(servers))
@@ -83,6 +83,41 @@ func NewLightClientStateProvider(
 	}, nil
 }
 
+// NewLightClientStateProviderFromDispatcher creates a light client state
+// provider but uses a p2p-connected dispatcher instead of RPC endpoints
+func NewLightClientStateProviderFromDispatcher(
+	ctx context.Context,
+	chainID string,
+	version sm.Version,
+	initialHeight int64,
+	dispatcher *dispatcher,
+	trustOptions light.TrustOptions,
+	logger log.Logger,
+) (StateProvider, error) {
+	providers := dispatcher.Providers(chainID, 10*time.Second)
+	if len(providers) < 2 {
+		return nil, fmt.Errorf("at least 2 peers are required, got %d", len(providers))
+	}
+
+	providersMap := make(map[lightprovider.Provider]string)
+	for _, p := range providers {
+		providersMap[p] = p.(*blockProvider).String()
+	}
+
+	lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:],
+		lightdb.New(dbm.NewMemDB()), light.Logger(logger))
+	if err != nil {
+		return nil, err
+	}
+
+	return &lightClientStateProvider{
+		lc:            lc,
+		version:       version,
+		initialHeight: initialHeight,
+		providers:     providersMap,
+	}, nil
+}
+
 // AppHash implements StateProvider.
 func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) {
 	s.Lock()
diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go
index 47f640aa3..d58c27d61 100644
--- a/internal/statesync/syncer.go
+++ b/internal/statesync/syncer.go
@@ -143,7 +143,7 @@ func (s *syncer) RemovePeer(peerID p2p.NodeID) {
 // which the caller must use to bootstrap the node.
 func (s *syncer) SyncAny(discoveryTime time.Duration, retryHook func()) (sm.State, *types.Commit, error) {
 	if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime {
-		discoveryTime = 5 * minimumDiscoveryTime
+		discoveryTime = minimumDiscoveryTime
 	}
 
 	if discoveryTime > 0 {
diff --git a/internal/test/factory/block.go b/internal/test/factory/block.go
index 87203221f..f8772f189 100644
--- a/internal/test/factory/block.go
+++ b/internal/test/factory/block.go
@@ -1,6 +1,8 @@
 package factory
 
 import (
+	"time"
+
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/tmhash"
 	"github.com/tendermint/tendermint/types"
@@ -11,6 +13,10 @@ const (
 	DefaultTestChainID = "test-chain"
 )
 
+var (
+	DefaultTestTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
+)
+
 func MakeVersion() version.Consensus {
 	return version.Consensus{
 		Block: version.BlockProtocol,
diff --git a/internal/test/factory/tx.go b/internal/test/factory/tx.go
new file mode 100644
index 000000000..c97aeefc9
--- /dev/null
+++ b/internal/test/factory/tx.go
@@ -0,0 +1,16 @@
+package factory
+
+import "github.com/tendermint/tendermint/types"
+
+// MakeTxs is a helper function that generates mock transactions for a given
+// block height and number of transactions.
+func MakeTxs(height int64, num int) (txs []types.Tx) {
+	for i := 0; i < num; i++ {
+		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
+	}
+	return txs
+}
+
+func MakeTenTxs(height int64) (txs []types.Tx) {
+	return MakeTxs(height, 10)
+}
diff --git a/node/node.go b/node/node.go
index caa3a47a2..ab0288fdb 100644
--- a/node/node.go
+++ b/node/node.go
@@ -22,7 +22,6 @@ import (
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/internal/p2p/pex"
 	"github.com/tendermint/tendermint/internal/statesync"
-	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/libs/log"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
@@ -39,7 +38,6 @@ import (
 	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
-	dbm "github.com/tendermint/tm-db"
 )
 
// nodeImpl is the highest level interface to a full Tendermint node.
@@ -133,10 +131,19 @@ func makeNode(config *cfg.Config, if err != nil { return nil, err } - stateStore := sm.NewStore(stateDB) - state, genDoc, err := loadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) + genDoc, err := genesisDocProvider() + if err != nil { + return nil, err + } + + err = genDoc.ValidateAndComplete() + if err != nil { + return nil, fmt.Errorf("error in genesis doc: %w", err) + } + + state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc) if err != nil { return nil, err } @@ -264,6 +271,7 @@ func makeNode(config *cfg.Config, proxyApp.Consensus(), mp, evPool, + blockStore, sm.BlockExecutorWithMetrics(smMetrics), ) @@ -327,7 +335,10 @@ func makeNode(config *cfg.Config, proxyApp.Query(), channels[statesync.SnapshotChannel], channels[statesync.ChunkChannel], + channels[statesync.LightBlockChannel], peerUpdates, + stateStore, + blockStore, config.StateSync.TempDir, ) @@ -422,7 +433,6 @@ func makeNode(config *cfg.Config, consensusReactor: csReactor, stateSyncReactor: stateSyncReactor, stateSync: stateSync, - stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state pexReactor: pexReactor, evidenceReactor: evReactor, indexerService: indexerService, @@ -664,8 +674,15 @@ func (n *nodeImpl) OnStart() error { if !ok { return fmt.Errorf("this blockchain reactor does not support switching from state sync") } - err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, - n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis) + + // we need to get the genesis state to get parameters such as + state, err := sm.MakeGenesisState(n.genesisDoc) + if err != nil { + return fmt.Errorf("unable to derive state: %w", err) + } + + err = startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, + n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, state) if err != nil { return fmt.Errorf("failed to start state sync: %w", err) } @@ -957,7 +974,7 @@ func (n *nodeImpl) NodeInfo() p2p.NodeInfo { func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { - ssR.Logger.Info("Starting state sync") + ssR.Logger.Info("starting state sync...") if stateProvider == nil { var err error @@ -977,29 +994,25 @@ func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reacto } go func() { - state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) + state, err := ssR.Sync(stateProvider, config.DiscoveryTime) if err != nil { - ssR.Logger.Error("State sync failed", "err", err) - return - } - err = stateStore.Bootstrap(state) - if err != nil { - ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) - return - } - err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) - if err != nil { - ssR.Logger.Error("Failed to store last seen commit", "err", err) + ssR.Logger.Error("state sync failed", "err", err) return } + err = ssR.Backfill(state) + if err != nil { + ssR.Logger.Error("backfill failed; node has insufficient history to verify all evidence;"+ + " proceeding optimistically...", "err", err) + } + + conR.Metrics.StateSyncing.Set(0) if fastSync { // FIXME Very ugly to have these metrics bleed through here. 
-			conR.Metrics.StateSyncing.Set(0)
 			conR.Metrics.FastSyncing.Set(1)
 			err = bcR.SwitchToFastSync(state)
 			if err != nil {
-				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
+				ssR.Logger.Error("failed to switch to fast sync", "err", err)
 				return
 			}
 		} else {
@@ -1041,71 +1054,29 @@ func defaultMetricsProvider(config *cfg.InstrumentationConfig) metricsProvider {
 
 //------------------------------------------------------------------------------
 
-var (
-	genesisDocKey = []byte("genesisDoc")
-)
-
 // loadStateFromDBOrGenesisDocProvider attempts to load the state from the
-// database, or creates one using the given genesisDocProvider. On success this also
-// returns the genesis doc loaded through the given provider.
+// database, or derives it from the given genesis doc when the store is empty.
 func loadStateFromDBOrGenesisDocProvider(
-	stateDB dbm.DB,
-	genesisDocProvider genesisDocProvider,
-) (sm.State, *types.GenesisDoc, error) {
-	// Get genesis doc
-	genDoc, err := loadGenesisDoc(stateDB)
+	stateStore sm.Store,
+	genDoc *types.GenesisDoc,
+) (sm.State, error) {
+
+	// 1. Attempt to load state from the database
+	state, err := stateStore.Load()
 	if err != nil {
-		genDoc, err = genesisDocProvider()
+		return sm.State{}, err
+	}
+
+	if state.IsEmpty() {
+		// 2. If it's not there, derive it from the genesis doc
+		state, err = sm.MakeGenesisState(genDoc)
 		if err != nil {
-			return sm.State{}, nil, err
-		}
-
-		err = genDoc.ValidateAndComplete()
-		if err != nil {
-			return sm.State{}, nil, fmt.Errorf("error in genesis doc: %w", err)
-		}
-		// save genesis doc to prevent a certain class of user errors (e.g. when it
-		// was changed, accidentally or not). Also good for audit trail.
-		if err := saveGenesisDoc(stateDB, genDoc); err != nil {
-			return sm.State{}, nil, err
+			return sm.State{}, err
 		}
 	}
 
-	stateStore := sm.NewStore(stateDB)
-	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
-	if err != nil {
-		return sm.State{}, nil, err
-	}
-	return state, genDoc, nil
-}
 
-// panics if failed to unmarshal bytes
-func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
-	b, err := db.Get(genesisDocKey)
-	if err != nil {
-		panic(err)
-	}
-	if len(b) == 0 {
-		return nil, errors.New("genesis doc not found")
-	}
-	var genDoc *types.GenesisDoc
-	err = tmjson.Unmarshal(b, &genDoc)
-	if err != nil {
-		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
-	}
-	return genDoc, nil
-}
-
-// panics if failed to marshal the given genesis document
-func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
-	b, err := tmjson.Marshal(genDoc)
-	if err != nil {
-		return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
-	}
-	if err := db.SetSync(genesisDocKey, b); err != nil {
-		return err
-	}
-
-	return nil
+	return state, nil
 }
 
 func createAndStartPrivValidatorSocketClient(
diff --git a/node/node_test.go b/node/node_test.go
index 1aed3c6c5..16e0733b6 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/tendermint/tendermint/internal/mempool"
 	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
 	"github.com/tendermint/tendermint/internal/p2p"
+	"github.com/tendermint/tendermint/internal/test/factory"
 	"github.com/tendermint/tendermint/libs/log"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 	"github.com/tendermint/tendermint/privval"
@@ -277,6 +278,7 @@ func TestCreateProposalBlock(t *testing.T) {
 		proxyApp.Consensus(),
 		mp,
 		evidencePool,
+		blockStore,
 	)
 
 	commit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
@@ -316,6 +318,7 @@ func TestMaxTxsProposalBlockSize(t
*testing.T) { const height int64 = 1 state, stateDB, _ := state(1, height) stateStore := sm.NewStore(stateDB) + blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 16384 const partSize uint32 = 256 state.ConsensusParams.Block.MaxBytes = maxBytes @@ -344,6 +347,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { proxyApp.Consensus(), mp, sm.EmptyEvidencePool{}, + blockStore, ) commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) @@ -375,6 +379,7 @@ func TestMaxProposalBlockSize(t *testing.T) { state, stateDB, _ := state(types.MaxVotesCount, int64(1)) stateStore := sm.NewStore(stateDB) + blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 1024 * 1024 * 2 state.ConsensusParams.Block.MaxBytes = maxBytes proposerAddr, _ := state.Validators.GetByIndex(0) @@ -409,6 +414,7 @@ func TestMaxProposalBlockSize(t *testing.T) { proxyApp.Consensus(), mp, sm.EmptyEvidencePool{}, + blockStore, ) blockID := types.BlockID{ @@ -631,3 +637,22 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { } return s, stateDB, privVals } + +func TestLoadStateFromGenesis(t *testing.T) { + stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) + config := cfg.ResetTestRoot("load_state_from_genesis") + + loadedState, err := stateStore.Load() + require.NoError(t, err) + require.True(t, loadedState.IsEmpty()) + + genDoc, _ := factory.RandGenesisDoc(config, 0, false, 10) + + state, err := loadStateFromDBOrGenesisDocProvider( + stateStore, + genDoc, + ) + require.NoError(t, err) + require.NotNil(t, state) +} diff --git a/node/setup.go b/node/setup.go index 5e42de8a0..a5e8e886d 100644 --- a/node/setup.go +++ b/node/setup.go @@ -295,7 +295,7 @@ func createEvidenceReactor( evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err) } var ( @@ -749,6 +749,7 @@ func makeNodeInfo( byte(evidence.EvidenceChannel), byte(statesync.SnapshotChannel), byte(statesync.ChunkChannel), + byte(statesync.LightBlockChannel), }, Moniker: config.Moniker, Other: p2p.NodeInfoOther{ diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 2bb08a714..bf088e8b1 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -213,7 +213,7 @@ message ResponseDeliverTx { message ResponseEndBlock { repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; tendermint.types.ConsensusParams consensus_param_updates = 2; - repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; } message ResponseCommit { diff --git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto index 4e630f85f..1f78c9864 100644 --- a/proto/tendermint/p2p/pex.proto +++ b/proto/tendermint/p2p/pex.proto @@ -27,7 +27,6 @@ message PexResponseV2 { repeated PexAddressV2 addresses = 1 [(gogoproto.nullable) = false]; } - message PexMessage { oneof sum { PexRequest pex_request = 1; diff --git a/proto/tendermint/statesync/message.go b/proto/tendermint/statesync/message.go index 71d0b4eb8..6f9b6ad59 100644 --- a/proto/tendermint/statesync/message.go +++ b/proto/tendermint/statesync/message.go @@ -22,6 +22,12 @@ func (m *Message) Wrap(pb proto.Message) error { case *SnapshotsResponse: m.Sum = &Message_SnapshotsResponse{SnapshotsResponse: 
msg} + case *LightBlockRequest: + m.Sum = &Message_LightBlockRequest{LightBlockRequest: msg} + + case *LightBlockResponse: + m.Sum = &Message_LightBlockResponse{LightBlockResponse: msg} + default: return fmt.Errorf("unknown message: %T", msg) } @@ -45,6 +51,12 @@ func (m *Message) Unwrap() (proto.Message, error) { case *Message_SnapshotsResponse: return m.GetSnapshotsResponse(), nil + case *Message_LightBlockRequest: + return m.GetLightBlockRequest(), nil + + case *Message_LightBlockResponse: + return m.GetLightBlockResponse(), nil + default: return nil, fmt.Errorf("unknown message: %T", msg) } @@ -86,6 +98,14 @@ func (m *Message) Validate() error { return errors.New("snapshot has no chunks") } + case *Message_LightBlockRequest: + if m.GetLightBlockRequest().Height == 0 { + return errors.New("height cannot be 0") + } + + // light block validation handled by the backfill process + case *Message_LightBlockResponse: + default: return fmt.Errorf("unknown message type: %T", msg) } diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 8391e0ead..f5eab7a33 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -6,6 +6,7 @@ package statesync import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" + types "github.com/tendermint/tendermint/proto/tendermint/types" io "io" math "math" math_bits "math/bits" @@ -28,6 +29,8 @@ type Message struct { // *Message_SnapshotsResponse // *Message_ChunkRequest // *Message_ChunkResponse + // *Message_LightBlockRequest + // *Message_LightBlockResponse Sum isMessage_Sum `protobuf_oneof:"sum"` } @@ -82,11 +85,19 @@ type Message_ChunkRequest struct { type Message_ChunkResponse struct { ChunkResponse *ChunkResponse `protobuf:"bytes,4,opt,name=chunk_response,json=chunkResponse,proto3,oneof" json:"chunk_response,omitempty"` } +type Message_LightBlockRequest struct { + LightBlockRequest *LightBlockRequest `protobuf:"bytes,5,opt,name=light_block_request,json=lightBlockRequest,proto3,oneof" json:"light_block_request,omitempty"` +} +type Message_LightBlockResponse struct { + LightBlockResponse *LightBlockResponse `protobuf:"bytes,6,opt,name=light_block_response,json=lightBlockResponse,proto3,oneof" json:"light_block_response,omitempty"` +} -func (*Message_SnapshotsRequest) isMessage_Sum() {} -func (*Message_SnapshotsResponse) isMessage_Sum() {} -func (*Message_ChunkRequest) isMessage_Sum() {} -func (*Message_ChunkResponse) isMessage_Sum() {} +func (*Message_SnapshotsRequest) isMessage_Sum() {} +func (*Message_SnapshotsResponse) isMessage_Sum() {} +func (*Message_ChunkRequest) isMessage_Sum() {} +func (*Message_ChunkResponse) isMessage_Sum() {} +func (*Message_LightBlockRequest) isMessage_Sum() {} +func (*Message_LightBlockResponse) isMessage_Sum() {} func (m *Message) GetSum() isMessage_Sum { if m != nil { @@ -123,6 +134,20 @@ func (m *Message) GetChunkResponse() *ChunkResponse { return nil } +func (m *Message) GetLightBlockRequest() *LightBlockRequest { + if x, ok := m.GetSum().(*Message_LightBlockRequest); ok { + return x.LightBlockRequest + } + return nil +} + +func (m *Message) GetLightBlockResponse() *LightBlockResponse { + if x, ok := m.GetSum().(*Message_LightBlockResponse); ok { + return x.LightBlockResponse + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Message) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -130,6 +155,8 @@ func (*Message) XXX_OneofWrappers() []interface{} { (*Message_SnapshotsResponse)(nil), (*Message_ChunkRequest)(nil), (*Message_ChunkResponse)(nil), + (*Message_LightBlockRequest)(nil), + (*Message_LightBlockResponse)(nil), } } @@ -381,43 +408,139 @@ func (m *ChunkResponse) GetMissing() bool { return false } +type LightBlockRequest struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *LightBlockRequest) Reset() { *m = LightBlockRequest{} } +func (m *LightBlockRequest) String() string { return proto.CompactTextString(m) } +func (*LightBlockRequest) ProtoMessage() {} +func (*LightBlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{5} +} +func (m *LightBlockRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LightBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LightBlockRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LightBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LightBlockRequest.Merge(m, src) +} +func (m *LightBlockRequest) XXX_Size() int { + return m.Size() +} +func (m *LightBlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LightBlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LightBlockRequest proto.InternalMessageInfo + +func (m *LightBlockRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +type LightBlockResponse struct { + LightBlock *types.LightBlock `protobuf:"bytes,1,opt,name=light_block,json=lightBlock,proto3" json:"light_block,omitempty"` +} + +func (m *LightBlockResponse) Reset() { *m = LightBlockResponse{} } +func (m *LightBlockResponse) String() string { return proto.CompactTextString(m) } +func (*LightBlockResponse) ProtoMessage() {} +func (*LightBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{6} +} +func (m *LightBlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LightBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LightBlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LightBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LightBlockResponse.Merge(m, src) +} +func (m *LightBlockResponse) XXX_Size() int { + return m.Size() +} +func (m *LightBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LightBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LightBlockResponse proto.InternalMessageInfo + +func (m *LightBlockResponse) GetLightBlock() *types.LightBlock { + if m != nil { + return m.LightBlock + } + return nil +} + func init() { proto.RegisterType((*Message)(nil), "tendermint.statesync.Message") proto.RegisterType((*SnapshotsRequest)(nil), "tendermint.statesync.SnapshotsRequest") proto.RegisterType((*SnapshotsResponse)(nil), "tendermint.statesync.SnapshotsResponse") proto.RegisterType((*ChunkRequest)(nil), "tendermint.statesync.ChunkRequest") proto.RegisterType((*ChunkResponse)(nil), "tendermint.statesync.ChunkResponse") + proto.RegisterType((*LightBlockRequest)(nil), 
"tendermint.statesync.LightBlockRequest") + proto.RegisterType((*LightBlockResponse)(nil), "tendermint.statesync.LightBlockResponse") } func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) } var fileDescriptor_a1c2869546ca7914 = []byte{ - // 393 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x6a, 0xdb, 0x40, - 0x18, 0x94, 0xfc, 0xcf, 0x57, 0xab, 0xd8, 0x8b, 0x29, 0xa2, 0x07, 0x61, 0x54, 0x68, 0x7b, 0x92, - 0xa0, 0x3d, 0xf6, 0xe6, 0x5e, 0x5c, 0x68, 0x2f, 0xdb, 0x18, 0x42, 0x2e, 0x61, 0x2d, 0x6f, 0x24, - 0x11, 0xb4, 0x52, 0xf4, 0xad, 0x20, 0x7e, 0x80, 0x9c, 0x72, 0xc9, 0x63, 0xe5, 0xe8, 0x63, 0xc8, - 0x29, 0xd8, 0x2f, 0x12, 0xb4, 0x92, 0x65, 0xc5, 0x31, 0x09, 0x81, 0xdc, 0x76, 0xc6, 0xe3, 0xd1, - 0xcc, 0xc0, 0x07, 0x63, 0xc9, 0xc5, 0x82, 0xa7, 0x51, 0x28, 0xa4, 0x8b, 0x92, 0x49, 0x8e, 0x4b, - 0xe1, 0xb9, 0x72, 0x99, 0x70, 0x74, 0x92, 0x34, 0x96, 0x31, 0x19, 0xed, 0x14, 0x4e, 0xa5, 0xb0, - 0xef, 0x1b, 0xd0, 0xfd, 0xc7, 0x11, 0x99, 0xcf, 0xc9, 0x0c, 0x86, 0x28, 0x58, 0x82, 0x41, 0x2c, - 0xf1, 0x34, 0xe5, 0x17, 0x19, 0x47, 0x69, 0xea, 0x63, 0xfd, 0xfb, 0x87, 0x1f, 0x5f, 0x9d, 0x43, - 0xff, 0x76, 0xfe, 0x6f, 0xe5, 0xb4, 0x50, 0x4f, 0x35, 0x3a, 0xc0, 0x3d, 0x8e, 0x1c, 0x03, 0xa9, - 0xdb, 0x62, 0x12, 0x0b, 0xe4, 0x66, 0x43, 0xf9, 0x7e, 0x7b, 0xd5, 0xb7, 0x90, 0x4f, 0x35, 0x3a, - 0xc4, 0x7d, 0x92, 0xfc, 0x01, 0xc3, 0x0b, 0x32, 0x71, 0x5e, 0x85, 0x6d, 0x2a, 0x53, 0xfb, 0xb0, - 0xe9, 0xef, 0x5c, 0xba, 0x0b, 0xda, 0xf7, 0x6a, 0x98, 0xfc, 0x85, 0x8f, 0x5b, 0xab, 0x32, 0x60, - 0x4b, 0x79, 0x7d, 0x79, 0xd1, 0xab, 0x0a, 0x67, 0x78, 0x75, 0x62, 0xd2, 0x86, 0x26, 0x66, 0x91, - 0x4d, 0x60, 0xb0, 0xbf, 0x90, 0x7d, 0xad, 0xc3, 0xf0, 0x59, 0x3d, 0xf2, 0x09, 0x3a, 0x01, 0x0f, - 0xfd, 0xa0, 0xd8, 0xbb, 0x45, 0x4b, 0x94, 0xf3, 0x67, 0x71, 0x1a, 0x31, 0xa9, 0xf6, 0x32, 0x68, - 0x89, 0x72, 0x5e, 0x7d, 0x11, 0x55, 0x65, 0x83, 0x96, 0x88, 0x10, 0x68, 0x05, 0x0c, 0x03, 0x15, - 0xbe, 0x4f, 0xd5, 0x9b, 0x7c, 0x86, 0x5e, 0xc4, 0x25, 0x5b, 0x30, 0xc9, 0xcc, 0xb6, 0xe2, 0x2b, - 0x6c, 0x1f, 0x41, 0xbf, 0x3e, 0xcb, 0x9b, 0x73, 0x8c, 0xa0, 0x1d, 0x8a, 0x05, 0xbf, 0x2c, 0x63, - 0x14, 0xc0, 0xbe, 0xd2, 0xc1, 0x78, 0xb2, 0xd0, 0xfb, 0xf8, 0xe6, 0xac, 0xea, 0x59, 0xd6, 0x2b, - 0x00, 0x31, 0xa1, 0x1b, 0x85, 0x88, 0xa1, 0xf0, 0x55, 0xbd, 0x1e, 0xdd, 0xc2, 0xc9, 0xec, 0x76, - 0x6d, 0xe9, 0xab, 0xb5, 0xa5, 0x3f, 0xac, 0x2d, 0xfd, 0x66, 0x63, 0x69, 0xab, 0x8d, 0xa5, 0xdd, - 0x6d, 0x2c, 0xed, 0xe4, 0x97, 0x1f, 0xca, 0x20, 0x9b, 0x3b, 0x5e, 0x1c, 0xb9, 0xb5, 0xcb, 0xa9, - 0x3d, 0xd5, 0xd1, 0xb8, 0x87, 0xae, 0x6a, 0xde, 0x51, 0xbf, 0xfd, 0x7c, 0x0c, 0x00, 0x00, 0xff, - 0xff, 0xcc, 0x16, 0xc2, 0x8b, 0x74, 0x03, 0x00, 0x00, + // 485 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x51, 0x6b, 0xd3, 0x50, + 0x14, 0x4e, 0x5c, 0xdb, 0x8d, 0xb3, 0x46, 0x96, 0x63, 0x91, 0x32, 0x46, 0x18, 0x11, 0x74, 0x20, + 0xa4, 0xa0, 0x8f, 0xe2, 0x4b, 0x7d, 0x99, 0x30, 0x5f, 0xee, 0x1c, 0xa8, 0x08, 0x23, 0x4d, 0xaf, + 0x4d, 0xb0, 0x49, 0x6a, 0xcf, 0x2d, 0xb8, 0x1f, 0xe0, 0x93, 0x2f, 0x82, 0x7f, 0xca, 0xc7, 0x3d, + 0xfa, 0x28, 0xed, 0x1f, 0x91, 0x9c, 0xdc, 0x26, 0x77, 0x6d, 0x5d, 0x11, 0xf6, 0x96, 0xef, 0xeb, + 0x77, 0x3e, 0xbe, 0x73, 0xcf, 0xe9, 0x81, 0x63, 0x25, 0xb3, 0xa1, 0x9c, 0xa6, 0x49, 0xa6, 0x7a, + 0xa4, 0x42, 0x25, 0xe9, 0x2a, 0x8b, 0x7a, 0xea, 0x6a, 0x22, 0x29, 0x98, 0x4c, 0x73, 0x95, 0x63, + 0xa7, 0x56, 0x04, 0x95, 0xe2, 0xf0, 0xc8, 0xa8, 0x63, 0xb5, 0x59, 0xe3, 
0xff, 0x6c, 0xc0, 0xee, + 0x1b, 0x49, 0x14, 0x8e, 0x24, 0x5e, 0x80, 0x4b, 0x59, 0x38, 0xa1, 0x38, 0x57, 0x74, 0x39, 0x95, + 0x5f, 0x66, 0x92, 0x54, 0xd7, 0x3e, 0xb6, 0x4f, 0xf6, 0x9f, 0x3d, 0x0e, 0x36, 0x79, 0x07, 0xe7, + 0x4b, 0xb9, 0x28, 0xd5, 0xa7, 0x96, 0x38, 0xa0, 0x15, 0x0e, 0xdf, 0x01, 0x9a, 0xb6, 0x34, 0xc9, + 0x33, 0x92, 0xdd, 0x7b, 0xec, 0xfb, 0x64, 0xab, 0x6f, 0x29, 0x3f, 0xb5, 0x84, 0x4b, 0xab, 0x24, + 0xbe, 0x06, 0x27, 0x8a, 0x67, 0xd9, 0xe7, 0x2a, 0xec, 0x0e, 0x9b, 0xfa, 0x9b, 0x4d, 0x5f, 0x15, + 0xd2, 0x3a, 0x68, 0x3b, 0x32, 0x30, 0x9e, 0xc1, 0xfd, 0xa5, 0x95, 0x0e, 0xd8, 0x60, 0xaf, 0x47, + 0xb7, 0x7a, 0x55, 0xe1, 0x9c, 0xc8, 0x24, 0xf0, 0x3d, 0x3c, 0x18, 0x27, 0xa3, 0x58, 0x5d, 0x0e, + 0xc6, 0x79, 0x54, 0xc7, 0x6b, 0xde, 0xd6, 0xf3, 0x59, 0x51, 0xd0, 0x2f, 0xf4, 0x75, 0x46, 0x77, + 0xbc, 0x4a, 0xe2, 0x47, 0xe8, 0xdc, 0xb4, 0xd6, 0x71, 0x5b, 0xec, 0x7d, 0xb2, 0xdd, 0xbb, 0xca, + 0x8c, 0xe3, 0x35, 0xb6, 0xdf, 0x84, 0x1d, 0x9a, 0xa5, 0x3e, 0xc2, 0xc1, 0xea, 0x68, 0xfd, 0xef, + 0x36, 0xb8, 0x6b, 0x73, 0xc1, 0x87, 0xd0, 0x8a, 0x65, 0xe1, 0xc3, 0x8b, 0xd2, 0x10, 0x1a, 0x15, + 0xfc, 0xa7, 0x7c, 0x9a, 0x86, 0x8a, 0x07, 0xed, 0x08, 0x8d, 0x0a, 0x9e, 0x9f, 0x8a, 0x78, 0x56, + 0x8e, 0xd0, 0x08, 0x11, 0x1a, 0x71, 0x48, 0x31, 0xbf, 0x7a, 0x5b, 0xf0, 0x37, 0x1e, 0xc2, 0x5e, + 0x2a, 0x55, 0x38, 0x0c, 0x55, 0xc8, 0x4f, 0xd7, 0x16, 0x15, 0xf6, 0xdf, 0x42, 0xdb, 0x9c, 0xe7, + 0x7f, 0xe7, 0xe8, 0x40, 0x33, 0xc9, 0x86, 0xf2, 0xab, 0x8e, 0x51, 0x02, 0xff, 0x9b, 0x0d, 0xce, + 0x8d, 0xd1, 0xde, 0x8d, 0x6f, 0xc1, 0x72, 0x9f, 0xba, 0xbd, 0x12, 0x60, 0x17, 0x76, 0xd3, 0x84, + 0x28, 0xc9, 0x46, 0xdc, 0xde, 0x9e, 0x58, 0x42, 0xff, 0x29, 0xb8, 0x6b, 0xeb, 0xf0, 0xaf, 0x28, + 0xfe, 0x39, 0xe0, 0xfa, 0x7c, 0xf1, 0x25, 0xec, 0x1b, 0x7b, 0xa2, 0xff, 0xc6, 0x47, 0xe6, 0x7a, + 0x94, 0x67, 0xc0, 0x28, 0x85, 0x7a, 0x21, 0xfa, 0x17, 0xbf, 0xe6, 0x9e, 0x7d, 0x3d, 0xf7, 0xec, + 0x3f, 0x73, 0xcf, 0xfe, 0xb1, 0xf0, 0xac, 0xeb, 0x85, 0x67, 0xfd, 0x5e, 0x78, 0xd6, 0x87, 0x17, + 0xa3, 0x44, 0xc5, 0xb3, 0x41, 0x10, 0xe5, 0x69, 0xcf, 0x3c, 0x2d, 0xf5, 0x27, 0x5f, 0x96, 0xde, + 0xa6, 0x73, 0x35, 0x68, 0xf1, 0x6f, 0xcf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x45, 0x35, + 0xee, 0xcd, 0x04, 0x00, 0x00, } func (m *Message) Marshal() (dAtA []byte, err error) { @@ -536,6 +659,48 @@ func (m *Message_ChunkResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *Message_LightBlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_LightBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LightBlockRequest != nil { + { + size, err := m.LightBlockRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Message_LightBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_LightBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LightBlockResponse != nil { + { + size, err := m.LightBlockResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} func (m *SnapshotsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -704,6 +869,69 @@ 
func (m *ChunkResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LightBlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightBlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LightBlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LightBlock != nil { + { + size, err := m.LightBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -775,6 +1003,30 @@ func (m *Message_ChunkResponse) Size() (n int) { } return n } +func (m *Message_LightBlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LightBlockRequest != nil { + l = m.LightBlockRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_LightBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LightBlockResponse != nil { + l = m.LightBlockResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func (m *SnapshotsRequest) Size() (n int) { if m == nil { return 0 @@ -853,6 +1105,31 @@ func (m *ChunkResponse) Size() (n int) { return n } +func (m *LightBlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *LightBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LightBlock != nil { + l = m.LightBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1028,6 +1305,76 @@ func (m *Message) Unmarshal(dAtA []byte) error { } m.Sum = &Message_ChunkResponse{v} iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LightBlockRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &LightBlockRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_LightBlockRequest{v} + iNdEx = 
postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LightBlockResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &LightBlockResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_LightBlockResponse{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -1542,6 +1889,161 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightBlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightBlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LightBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.LightBlock == nil { + m.LightBlock = &types.LightBlock{} + } + if err := m.LightBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto index 8d4a714c1..a4dd8e693 100644 --- a/proto/tendermint/statesync/types.proto +++ b/proto/tendermint/statesync/types.proto @@ -1,14 +1,18 @@ syntax = "proto3"; package tendermint.statesync; +import "tendermint/types/types.proto"; + option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; message Message { oneof sum { - SnapshotsRequest snapshots_request = 1; - SnapshotsResponse snapshots_response = 2; - ChunkRequest chunk_request = 3; - ChunkResponse chunk_response = 4; + SnapshotsRequest snapshots_request = 1; + SnapshotsResponse snapshots_response = 2; + ChunkRequest chunk_request = 3; + ChunkResponse chunk_response = 4; + LightBlockRequest light_block_request = 5; + LightBlockResponse light_block_response = 6; } } @@ -35,3 +39,11 @@ message ChunkResponse { bytes chunk = 4; bool missing = 5; } + +message LightBlockRequest { + uint64 height = 1; +} + +message LightBlockResponse { + tendermint.types.LightBlock light_block = 1; +} \ No newline at end of file diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index a90dd0c6b..3bf787dbc 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index 7d16dadf0..02b8bea8c 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index 85ac57ccc..6af88ad7c 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index 9ba75860a..6964a8425 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. package mocks diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 5b3d75769..f36a4e3de 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -98,8 +98,8 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. 
block := env.BlockStore.LoadBlock(height) blockMeta := env.BlockStore.LoadBlockMeta(height) - if blockMeta == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: block}, nil + if blockMeta == nil || block == nil { + return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: &types.Block{}}, nil } return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 67cc79e6e..71c00137b 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "strings" + "time" tmsync "github.com/tendermint/tendermint/internal/libs/sync" types "github.com/tendermint/tendermint/rpc/jsonrpc/types" @@ -370,6 +371,7 @@ func makeHTTPDialer(remoteAddr string) (func(string, string) (net.Conn, error), } protocol := u.Scheme + padding := u.Scheme // accept http(s) as an alias for tcp switch protocol { @@ -378,7 +380,13 @@ func makeHTTPDialer(remoteAddr string) (func(string, string) (net.Conn, error), } dialFn := func(proto, addr string) (net.Conn, error) { - return net.Dial(protocol, u.GetDialAddress()) + var timeout = 10 * time.Second + // pad a port-less host with the scheme (e.g. "host:http") so the resolver applies the scheme's default port + if !u.isUnixSocket && strings.LastIndex(u.Host, ":") == -1 { + u.Host = fmt.Sprintf("%s:%s", u.Host, padding) + } + + return net.DialTimeout(protocol, u.GetDialAddress(), timeout) } return dialFn, nil diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go index 4b82ff1eb..5a03af512 100644 --- a/rpc/jsonrpc/client/http_json_client_test.go +++ b/rpc/jsonrpc/client/http_json_client_test.go @@ -84,3 +84,31 @@ func Test_parsedURL(t *testing.T) { }) } } + +func TestMakeHTTPDialerURL(t *testing.T) { + remotes := []string{"https://foo-bar.com", "http://foo-bar.com"} + + for _, remote := range remotes { + u, err := newParsedURL(remote) + require.NoError(t, err) + dialFn, err := makeHTTPDialer(remote) + require.Nil(t, err) + + addr, err := dialFn(u.Scheme, u.GetHostWithPath()) + require.NoError(t, err) + require.NotNil(t, addr) + } + + errorURLs := []string{"tcp://foo-bar.com", "ftp://foo-bar.com"} + + for _, errorURL := range errorURLs { + u, err := newParsedURL(errorURL) + require.NoError(t, err) + dialFn, err := makeHTTPDialer(errorURL) + require.Nil(t, err) + + addr, err := dialFn(u.Scheme, u.GetHostWithPath()) + require.Error(t, err) + require.Nil(t, addr) + } +} diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index eb8a22eb4..bbb32b407 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -141,7 +141,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if len(responses) > 0 { if wErr := WriteRPCResponseHTTP(w, c, responses...); wErr != nil { - logger.Error("failed to write responses", "res", responses, "err", wErr) + logger.Error("failed to write responses", "err", wErr) } } } diff --git a/state/execution.go b/state/execution.go index 4e7d17c1d..05d5bdd52 100644 --- a/state/execution.go +++ b/state/execution.go @@ -26,6 +26,9 @@ type BlockExecutor struct { // save state, validators, consensus params, abci responses here store Store + // use blockstore for the pruning functions.
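+ // (only Base() and PruneBlocks() are used from it; see pruneBlocks at the bottom of this file)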
+ blockStore BlockStore + // execute the app against this proxyApp proxy.AppConnConsensus @@ -60,17 +63,19 @@ func NewBlockExecutor( proxyApp proxy.AppConnConsensus, mempool mempl.Mempool, evpool EvidencePool, + blockStore BlockStore, options ...BlockExecutorOption, ) *BlockExecutor { res := &BlockExecutor{ - store: stateStore, - proxyApp: proxyApp, - eventBus: types.NopEventBus{}, - mempool: mempool, - evpool: evpool, - logger: logger, - metrics: NopMetrics(), - cache: make(map[string]struct{}), + store: stateStore, + proxyApp: proxyApp, + eventBus: types.NopEventBus{}, + mempool: mempool, + evpool: evpool, + logger: logger, + metrics: NopMetrics(), + cache: make(map[string]struct{}), + blockStore: blockStore, } for _, option := range options { @@ -139,17 +144,17 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e // ApplyBlock validates the block against the state, executes it against the app, // fires the relevant events, commits the app, and saves the new state and responses. -// It returns the new state and the block height to retain (pruning older blocks). +// It returns the new state. // It's the only function that needs to be called // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. func (blockExec *BlockExecutor) ApplyBlock( state State, blockID types.BlockID, block *types.Block, -) (State, int64, error) { +) (State, error) { // validate the block if we haven't already if err := blockExec.ValidateBlock(state, block); err != nil { - return state, 0, ErrInvalidBlock(err) + return state, ErrInvalidBlock(err) } startTime := time.Now().UnixNano() @@ -159,14 +164,14 @@ func (blockExec *BlockExecutor) ApplyBlock( endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { - return state, 0, ErrProxyAppConn(err) + return state, ErrProxyAppConn(err) } fail.Fail() // XXX // Save the results before we commit. if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { - return state, 0, err + return state, err } fail.Fail() // XXX @@ -175,12 +180,12 @@ func (blockExec *BlockExecutor) ApplyBlock( abciValUpdates := abciResponses.EndBlock.ValidatorUpdates err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator) if err != nil { - return state, 0, fmt.Errorf("error in validator updates: %v", err) + return state, fmt.Errorf("error in validator updates: %v", err) } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) if err != nil { - return state, 0, err + return state, err } if len(validatorUpdates) > 0 { blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) @@ -189,13 +194,13 @@ func (blockExec *BlockExecutor) ApplyBlock( // Update the state with the block and responses. state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) if err != nil { - return state, 0, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("commit failed for application: %v", err) } // Lock mempool, commit app state, update mempool. appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) if err != nil { - return state, 0, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("commit failed for application: %v", err) } // Update evpool with the latest state.
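With pruning folded into ApplyBlock (next hunk), callers no longer receive a retain height to act on. A minimal sketch of an updated call site, using the blockExec/blockID names assumed throughout this patch:

    state, err = blockExec.ApplyBlock(state, blockID, block)
    if err != nil {
        return fmt.Errorf("failed to apply block: %w", err)
    }
    // pruning now runs inside ApplyBlock, driven by the retain height
    // the ABCI application returns from Commit

The test call sites in execution_test.go below change in exactly this way.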
@@ -206,11 +211,21 @@ func (blockExec *BlockExecutor) ApplyBlock( // Update the app hash and save the state. state.AppHash = appHash if err := blockExec.store.Save(state); err != nil { - return state, 0, err + return state, err } fail.Fail() // XXX + // Prune old heights, if requested by ABCI app. + if retainHeight > 0 { + pruned, err := blockExec.pruneBlocks(retainHeight) + if err != nil { + blockExec.logger.Error("failed to prune blocks", "retain_height", retainHeight, "err", err) + } else { + blockExec.logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight) + } + } + // reset the verification cache blockExec.cache = make(map[string]struct{}) @@ -218,7 +233,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // NOTE: if we crash between Commit and Save, events won't be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, abciResponses, validatorUpdates) - return state, retainHeight, nil + return state, nil } // Commit locks the mempool, runs the ABCI Commit message, and updates the @@ -595,3 +610,20 @@ func ExecCommitBlock( // ResponseCommit has no error or log, just data return res.Data, nil } + +func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64) (uint64, error) { + base := blockExec.blockStore.Base() + if retainHeight <= base { + return 0, nil + } + pruned, err := blockExec.blockStore.PruneBlocks(retainHeight) + if err != nil { + return 0, fmt.Errorf("failed to prune block store: %w", err) + } + + err = blockExec.Store().PruneStates(retainHeight) + if err != nil { + return 0, fmt.Errorf("failed to prune state store: %w", err) + } + return pruned, nil +} diff --git a/state/execution_test.go b/state/execution_test.go index e15b1686f..1b1cf6042 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -19,15 +19,17 @@ import ( "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/mocks" + sf "github.com/tendermint/tendermint/state/test/factory" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" + dbm "github.com/tendermint/tm-db" ) var ( chainID = "execution_chain" testPartSize uint32 = 65536 - nTxsPerBlock = 10 ) func TestApplyBlock(t *testing.T) { @@ -40,16 +42,15 @@ func TestApplyBlock(t *testing.T) { state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) - + blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mmock.Mempool{}, sm.EmptyEvidencePool{}) + mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - block := makeBlock(state, 1) + block := sf.MakeBlock(state, 1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} - state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) + state, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) - assert.EqualValues(t, retainHeight, 1) // TODO check state and mempool assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") @@ -98,7 +99,7 @@ func TestBeginBlockValidators(t *testing.T) { lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) // block for height 2 - block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) + block := sf.MakeBlock(state, 2, lastCommit) _, err =
sm.ExecCommitBlock(nil, proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1, state) require.Nil(t, err, tc.desc) @@ -196,17 +197,18 @@ func TestBeginBlockByzantineValidators(t *testing.T) { evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mmock.Mempool{}, evpool) + blockStore := store.NewBlockStore(dbm.NewMemDB()) - block := makeBlock(state, 1) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + mmock.Mempool{}, evpool, blockStore) + + block := sf.MakeBlock(state, 1, new(types.Commit)) block.Evidence = types.EvidenceData{Evidence: ev} block.Header.EvidenceHash = block.Evidence.Hash() blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} - state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) + _, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) - assert.EqualValues(t, retainHeight, 1) // TODO check state and mempool assert.Equal(t, abciEv, app.ByzantineValidators) @@ -353,6 +355,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) + blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, @@ -360,6 +363,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, + blockStore, ) eventBus := types.NewEventBus() @@ -376,7 +380,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { ) require.NoError(t, err) - block := makeBlock(state, 1) + block := sf.MakeBlock(state, 1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} pubkey := ed25519.GenPrivKey().PubKey() @@ -386,7 +390,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { {PubKey: pk, Power: 10}, } - state, _, err = blockExec.ApplyBlock(state, blockID, block) + state, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) // test new validator was added to NextValidators if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { @@ -424,15 +428,17 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) + blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, + blockStore, ) - block := makeBlock(state, 1) + block := sf.MakeBlock(state, 1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey) @@ -442,7 +448,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { {PubKey: vp, Power: 0}, } - assert.NotPanics(t, func() { state, _, err = blockExec.ApplyBlock(state, blockID, block) }) + assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) }) assert.NotNil(t, err) assert.NotEmpty(t, state.NextValidators.Validators) } diff --git a/state/helpers_test.go b/state/helpers_test.go index 8163345b3..7772d7b35 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -17,6 +17,7 @@ import ( tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -56,13 +57,13 @@ func makeAndCommitGoodBlock( func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, blockExec *sm.BlockExecutor, evidence []types.Evidence) (sm.State, types.BlockID, error) { - block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr) + block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) if err := blockExec.ValidateBlock(state, block); err != nil { return state, types.BlockID{}, err } blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} - state, _, err := blockExec.ApplyBlock(state, blockID, block) + state, err := blockExec.ApplyBlock(state, blockID, block) if err != nil { return state, types.BlockID{}, err } @@ -87,14 +88,6 @@ func makeValidCommit( return types.NewCommit(height, 0, blockID, sigs), nil } -// make some bogus txs -func makeTxs(height int64) (txs []types.Tx) { - for i := 0; i < nTxsPerBlock; i++ { - txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) - } - return txs -} - func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { vals := make([]types.GenesisValidator, nVals) privVals := make(map[string]types.PrivValidator, nVals) @@ -133,17 +126,6 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida return s, stateDB, privVals } -func makeBlock(state sm.State, height int64) *types.Block { - block, _ := state.MakeBlock( - height, - makeTxs(state.LastBlockHeight), - new(types.Commit), - nil, - state.Validators.GetProposer().Address, - ) - return block -} - func genValSet(size int) *types.ValidatorSet { vals := make([]*types.Validator, size) for i := 0; i < size; i++ { @@ -157,7 +139,7 @@ func makeHeaderPartsResponsesValPubKeyChange( pubkey crypto.PubKey, ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { - block := makeBlock(state, state.LastBlockHeight+1) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, @@ -189,7 +171,7 @@ func makeHeaderPartsResponsesValPowerChange( power int64, ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { - block := makeBlock(state, state.LastBlockHeight+1) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, @@ -217,7 +199,7 @@ func makeHeaderPartsResponsesParams( params *types.ConsensusParams, ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { - block := makeBlock(state, state.LastBlockHeight+1) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) pbParams := params.ToProto() abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, diff --git a/state/indexer/sink/psql/psql.go b/state/indexer/sink/psql/psql.go index 0e66d6117..94ae4cb93 100644 --- a/state/indexer/sink/psql/psql.go +++ b/state/indexer/sink/psql/psql.go @@ -50,7 +50,9 @@ func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) 
error { sqlStmt := sq. Insert(TableEventBlock). Columns("key", "value", "height", "type", "created_at", "chain_id"). - PlaceholderFormat(sq.Dollar) + PlaceholderFormat(sq.Dollar). + Suffix("ON CONFLICT (key,height)"). + Suffix("DO NOTHING") ts := time.Now() // index the reserved block height index @@ -83,12 +85,16 @@ func (es *EventSink) IndexTxEvents(txr []*abci.TxResult) error { Columns("tx_result", "created_at"). PlaceholderFormat(sq.Dollar). RunWith(es.store). + Suffix("ON CONFLICT (tx_result)"). + Suffix("DO NOTHING"). Suffix("RETURNING \"id\"") sqlStmtEvents := sq. Insert(TableEventTx). Columns("key", "value", "height", "hash", "tx_result_id", "created_at", "chain_id"). - PlaceholderFormat(sq.Dollar) + PlaceholderFormat(sq.Dollar). + Suffix("ON CONFLICT (key,hash)"). + Suffix("DO NOTHING") ts := time.Now() for _, tx := range txr { @@ -100,17 +106,27 @@ func (es *EventSink) IndexTxEvents(txr []*abci.TxResult) error { sqlStmtTxResult = sqlStmtTxResult.Values(txBz, ts) // execute sqlStmtTxResult db query and retrieve the txid - err = sqlStmtTxResult.QueryRow().Scan(&txid) + r, err := sqlStmtTxResult.Query() if err != nil { return err } + if !r.Next() { + r.Close() + return nil + } + + if err := r.Scan(&txid); err != nil { + r.Close() + return err + } + r.Close() + // index the reserved height and hash indices hash := fmt.Sprintf("%X", types.Tx(tx.Tx).Hash()) sqlStmtEvents = sqlStmtEvents.Values(types.TxHashKey, hash, tx.Height, hash, txid, ts, es.chainID) sqlStmtEvents = sqlStmtEvents.Values(types.TxHeightKey, fmt.Sprint(tx.Height), tx.Height, hash, txid, ts, es.chainID) - for _, event := range tx.Result.Events { // only index events with a non-empty type if len(event.Type) == 0 { diff --git a/state/indexer/sink/psql/psql_test.go b/state/indexer/sink/psql/psql_test.go index 591d23edd..0c3cf81d0 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/state/indexer/sink/psql/psql_test.go @@ -72,6 +72,11 @@ func TestBlockFuncs(t *testing.T) { assert.Equal(t, errors.New("block search is not supported via the postgres event sink"), err) require.NoError(t, verifyTimeStamp(TableEventBlock)) + + // try to insert the duplicate block events. + err = indexer.IndexBlockEvents(getTestBlockHeader()) + require.NoError(t, err) + require.NoError(t, teardown(t, pool)) } @@ -104,6 +109,10 @@ func TestTxFuncs(t *testing.T) { assert.Nil(t, r2) assert.Equal(t, errors.New("tx search is not supported via the postgres event sink"), err) + // try to insert the duplicate tx events.
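+ // the ON CONFLICT ... DO NOTHING suffixes added above make this re-insert a no-op instead of a unique-constraint error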
+ err = indexer.IndexTxEvents([]*abci.TxResult{txResult}) + require.NoError(t, err) + assert.Nil(t, teardown(t, pool)) } diff --git a/state/indexer/sink/psql/schema.sql b/state/indexer/sink/psql/schema.sql index 36e7327cc..0be9bdfa8 100644 --- a/state/indexer/sink/psql/schema.sql +++ b/state/indexer/sink/psql/schema.sql @@ -6,12 +6,14 @@ CREATE TABLE block_events ( height INTEGER NOT NULL, type block_event_type, created_at TIMESTAMPTZ NOT NULL, - chain_id VARCHAR NOT NULL + chain_id VARCHAR NOT NULL, + UNIQUE (key, height) ); CREATE TABLE tx_results ( id SERIAL PRIMARY KEY, tx_result BYTEA NOT NULL, - created_at TIMESTAMPTZ NOT NULL + created_at TIMESTAMPTZ NOT NULL, + UNIQUE (tx_result) ); CREATE TABLE tx_events ( id SERIAL PRIMARY KEY, @@ -22,6 +24,7 @@ CREATE TABLE tx_events ( tx_result_id SERIAL, created_at TIMESTAMPTZ NOT NULL, chain_id VARCHAR NOT NULL, + UNIQUE (hash, key), FOREIGN KEY (tx_result_id) REFERENCES tx_results(id) ON DELETE CASCADE diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index 0b6ebc97d..9cfc7b40b 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. package mocks diff --git a/state/mocks/store.go b/state/mocks/store.go index d1e3a3746..bf70adc86 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. package mocks @@ -201,3 +201,17 @@ func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses return r0 } + +// SaveValidatorSets provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet) error { + ret := _m.Called(_a0, _a1, _a2) + + var r0 error + if rf, ok := ret.Get(0).(func(int64, int64, *types.ValidatorSet) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/state/state.go b/state/state.go index d5362fc40..bf266085c 100644 --- a/state/state.go +++ b/state/state.go @@ -341,7 +341,7 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { } var validatorSet, nextValidatorSet *types.ValidatorSet - if genDoc.Validators == nil { + if len(genDoc.Validators) == 0 { validatorSet = types.NewValidatorSet(nil) nextValidatorSet = types.NewValidatorSet(nil) } else { diff --git a/state/state_test.go b/state/state_test.go index 7c48c58d5..99d45bb62 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -21,6 +21,7 @@ import ( cryptoenc "github.com/tendermint/tendermint/crypto/encoding" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" sm "github.com/tendermint/tendermint/state" + sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" ) @@ -29,10 +30,14 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { config := cfg.ResetTestRoot("state_") dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) - stateStore := sm.NewStore(stateDB) require.NoError(t, err) - state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) - assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") + stateStore := sm.NewStore(stateDB) + state, err := stateStore.Load() + require.NoError(t, err) + require.Empty(t, state) + state, err =
sm.MakeGenesisStateFromFile(config.GenesisFile()) + assert.NoError(t, err) + assert.NotNil(t, state) err = stateStore.Save(state) require.NoError(t, err) @@ -101,7 +106,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ // Build mock responses. - block := makeBlock(state, 2) + block := sf.MakeBlock(state, 2, new(types.Commit)) abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ResponseDeliverTx, 2) @@ -443,7 +448,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 assert.EqualValues(t, 0, val1.ProposerPriority) - block := makeBlock(state, state.LastBlockHeight+1) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, @@ -557,7 +562,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // we only have one validator: assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) - block := makeBlock(state, state.LastBlockHeight+1) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} // no updates: abciResponses := &tmstate.ABCIResponses{ @@ -744,7 +749,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := makeBlock(oldState, oldState.LastBlockHeight+1) + block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -773,7 +778,7 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, } - block := makeBlock(oldState, oldState.LastBlockHeight+1) + block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -788,7 +793,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := makeBlock(lastState, lastState.LastBlockHeight+1) + block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -821,7 +826,7 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, } - block := makeBlock(oldState, oldState.LastBlockHeight+1) + block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} state, err = sm.UpdateState(state, blockID, 
&block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -836,7 +841,7 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } - block = makeBlock(oldState, oldState.LastBlockHeight+1) + block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -857,7 +862,7 @@ func TestLargeGenesisValidator(t *testing.T) { } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block = makeBlock(curState, curState.LastBlockHeight+1) + block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -882,7 +887,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := makeBlock(updatedState, updatedState.LastBlockHeight+1) + block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -977,7 +982,7 @@ func TestStateMakeBlock(t *testing.T) { proposerAddress := state.Validators.GetProposer().Address stateVersion := state.Version.Consensus - block := makeBlock(state, 2) + block := sf.MakeBlock(state, 2, new(types.Commit)) // test we set some fields assert.Equal(t, stateVersion, block.Version) diff --git a/state/store.go b/state/store.go index a488d0722..84b19a685 100644 --- a/state/store.go +++ b/state/store.go @@ -75,12 +75,6 @@ func init() { // It is used to retrieve current state and save and load ABCI responses, // validators and consensus parameters type Store interface { - // LoadFromDBOrGenesisFile loads the most recent state. - // If the chain is new it will use the genesis file from the provided genesis file path as the current state. - LoadFromDBOrGenesisFile(string) (State, error) - // LoadFromDBOrGenesisDoc loads the most recent state. - // If the chain is new it will use the genesis doc as the current state. - LoadFromDBOrGenesisDoc(*types.GenesisDoc) (State, error) // Load loads the current state of the blockchain Load() (State, error) // LoadValidators loads the validator set at a given height @@ -93,6 +87,8 @@ type Store interface { Save(State) error // SaveABCIResponses saves ABCIResponses for a given height SaveABCIResponses(int64, *tmstate.ABCIResponses) error + // SaveValidatorSets saves the validator set for a given range of heights + SaveValidatorSets(int64, int64, *types.ValidatorSet) error // Bootstrap is used for bootstrapping state when not starting from an initial height.
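+ // (e.g. a node bootstrapped via state sync rather than replayed from genesis)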
Bootstrap(State) error // PruneStates takes the height from which to prune up to (exclusive) @@ -111,43 +107,6 @@ func NewStore(db dbm.DB) Store { return dbStore{db} } -// LoadStateFromDBOrGenesisFile loads the most recent state from the database, -// or creates a new one from the given genesisFilePath. -func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, error) { - state, err := store.Load() - if err != nil { - return State{}, err - } - if state.IsEmpty() { - var err error - state, err = MakeGenesisStateFromFile(genesisFilePath) - if err != nil { - return state, err - } - } - - return state, nil -} - -// LoadStateFromDBOrGenesisDoc loads the most recent state from the database, -// or creates a new one from the given genesisDoc. -func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State, error) { - state, err := store.Load() - if err != nil { - return State{}, err - } - - if state.IsEmpty() { - var err error - state, err = MakeGenesisState(genesisDoc) - if err != nil { - return state, err - } - } - - return state, nil -} - // LoadState loads the State from the database. func (store dbStore) Load() (State, error) { return store.loadState(stateKey) @@ -502,6 +461,24 @@ func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCI return store.db.SetSync(abciResponsesKey(height), bz) } +// SaveValidatorSets is used to save the validator set over multiple heights. +// It is exposed so that a backfill operation during state sync can populate +// the store with the necessary amount of validator sets to verify any evidence +// it may encounter. +func (store dbStore) SaveValidatorSets(lowerHeight, upperHeight int64, vals *types.ValidatorSet) error { + batch := store.db.NewBatch() + defer batch.Close() + + // batch together all the validator sets from lowerHeight to upperHeight + for height := lowerHeight; height <= upperHeight; height++ { + if err := store.saveValidatorsInfo(height, lowerHeight, vals, batch); err != nil { + return err + } + } + + return batch.WriteSync() +} + //----------------------------------------------------------------------------- // LoadValidators loads the ValidatorSet for a given height. @@ -606,12 +583,7 @@ func (store dbStore) saveValidatorsInfo( return err } - err = batch.Set(validatorsKey(height), bz) - if err != nil { - return err - } - - return nil + return batch.Set(validatorsKey(height), bz) } //----------------------------------------------------------------------------- diff --git a/state/store_test.go b/state/store_test.go index 467ed34c7..5d32040b5 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -108,7 +108,7 @@ func BenchmarkLoadValidators(b *testing.B) { stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) require.NoError(b, err) stateStore := sm.NewStore(stateDB) - state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) if err != nil { b.Fatal(err) } @@ -118,6 +118,8 @@ func BenchmarkLoadValidators(b *testing.B) { err = stateStore.Save(state) require.NoError(b, err) + b.ResetTimer() + for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... 
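+ // re-declare i so the closure below captures this iteration's value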
 		i := i
 		err = stateStore.Save(makeRandomStateFromValidatorSet(state.NextValidators,
diff --git a/state/test/factory/block.go b/state/test/factory/block.go
new file mode 100644
index 000000000..b4eb83fa7
--- /dev/null
+++ b/state/test/factory/block.go
@@ -0,0 +1,65 @@
+package factory
+
+import (
+	"time"
+
+	"github.com/tendermint/tendermint/internal/test/factory"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/types"
+)
+
+func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
+	blocks := make([]*types.Block, 0)
+
+	var (
+		prevBlock     *types.Block
+		prevBlockMeta *types.BlockMeta
+	)
+
+	appHash := byte(0x01)
+	for i := 0; i < n; i++ {
+		height := int64(i + 1)
+
+		block, parts := makeBlockAndPartSet(*state, prevBlock, prevBlockMeta, privVal, height)
+		blocks = append(blocks, block)
+
+		prevBlock = block
+		prevBlockMeta = types.NewBlockMeta(block, parts)
+
+		// update state
+		state.AppHash = []byte{appHash}
+		appHash++
+		state.LastBlockHeight = height
+	}
+
+	return blocks
+}
+
+func MakeBlock(state sm.State, height int64, c *types.Commit) *types.Block {
+	block, _ := state.MakeBlock(
+		height,
+		factory.MakeTenTxs(state.LastBlockHeight),
+		c,
+		nil,
+		state.Validators.GetProposer().Address,
+	)
+	return block
+}
+
+func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
+	privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {
+
+	lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
+	if height > 1 {
+		vote, _ := factory.MakeVote(
+			privVal,
+			lastBlock.Header.ChainID,
+			1, lastBlock.Header.Height, 0, 2,
+			lastBlockMeta.BlockID,
+			time.Now())
+		lastCommit = types.NewCommit(vote.Height, vote.Round,
+			lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
+	}
+
+	return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
+}
diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go
index 7936d94c7..d6236fcbf 100644
--- a/state/tx_filter_test.go
+++ b/state/tx_filter_test.go
@@ -1,14 +1,11 @@
 package state_test
 
 import (
-	"os"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	dbm "github.com/tendermint/tm-db"
-
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
@@ -31,10 +28,7 @@ func TestTxFilter(t *testing.T) {
 	}
 
 	for i, tc := range testCases {
-		stateDB, err := dbm.NewDB("state", "memdb", os.TempDir())
-		require.NoError(t, err)
-		stateStore := sm.NewStore(stateDB)
-		state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+		state, err := sm.MakeGenesisState(genDoc)
 		require.NoError(t, err)
 
 		f := sm.TxPreCheck(state)
diff --git a/state/validation_test.go b/state/validation_test.go
index d124c6bd1..8ed7cdb6f 100644
--- a/state/validation_test.go
+++ b/state/validation_test.go
@@ -18,8 +18,11 @@ import (
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/state/mocks"
+	sf "github.com/tendermint/tendermint/state/test/factory"
+	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
+	dbm "github.com/tendermint/tm-db"
 )
 
 const validationTestsStopHeight int64 = 10
@@ -31,12 +34,14 @@ func TestValidateBlockHeader(t *testing.T) {
 	state, stateDB, privVals := makeState(3, 1)
 	stateStore := sm.NewStore(stateDB)
+	blockStore := store.NewBlockStore(dbm.NewMemDB())
 	blockExec := sm.NewBlockExecutor(
 		stateStore,
 		log.TestingLogger(),
 		proxyApp.Consensus(),
 		memmock.Mempool{},
 		sm.EmptyEvidencePool{},
+		blockStore,
 	)
 	lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
@@ -81,12 +86,11 @@ func TestValidateBlockHeader(t *testing.T) {
 
 	// Build up state for multiple heights
 	for height := int64(1); height < validationTestsStopHeight; height++ {
-		proposerAddr := state.Validators.GetProposer().Address
 		/*
 			Invalid blocks don't pass
 		*/
 		for _, tc := range testCases {
-			block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr)
+			block := sf.MakeBlock(state, height, lastCommit)
 			tc.malleateBlock(block)
 			err := blockExec.ValidateBlock(state, block)
 			t.Logf("%s: %v", tc.name, err)
@@ -97,18 +101,13 @@ func TestValidateBlockHeader(t *testing.T) {
 			A good block passes
 		*/
 		var err error
-		state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, nil)
+		state, _, lastCommit, err = makeAndCommitGoodBlock(
+			state, height, lastCommit, state.Validators.GetProposer().Address, blockExec, privVals, nil)
 		require.NoError(t, err, "height %d", height)
 	}
 
 	nextHeight := validationTestsStopHeight
-	block, _ := state.MakeBlock(
-		nextHeight,
-		makeTxs(nextHeight),
-		lastCommit,
-		nil,
-		state.Validators.GetProposer().Address,
-	)
+	block := sf.MakeBlock(state, nextHeight, lastCommit)
 	state.InitialHeight = nextHeight + 1
 	err := blockExec.ValidateBlock(state, block)
 	require.Error(t, err, "expected an error when state is ahead of block")
@@ -122,12 +121,14 @@ func TestValidateBlockCommit(t *testing.T) {
 
 	state, stateDB, privVals := makeState(1, 1)
 	stateStore := sm.NewStore(stateDB)
+	blockStore := store.NewBlockStore(dbm.NewMemDB())
 	blockExec := sm.NewBlockExecutor(
 		stateStore,
 		log.TestingLogger(),
 		proxyApp.Consensus(),
 		memmock.Mempool{},
 		sm.EmptyEvidencePool{},
+		blockStore,
 	)
 	lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
 	wrongSigsCommit := types.NewCommit(1, 0, types.BlockID{}, nil)
@@ -157,7 +158,7 @@ func TestValidateBlockCommit(t *testing.T) {
 			state.LastBlockID,
 			[]types.CommitSig{wrongHeightVote.CommitSig()},
 		)
-		block, _ := state.MakeBlock(height, makeTxs(height), wrongHeightCommit, nil, proposerAddr)
+		block := sf.MakeBlock(state, height, wrongHeightCommit)
 		err = blockExec.ValidateBlock(state, block)
 		_, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight)
 		require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err)
@@ -165,7 +166,7 @@ func TestValidateBlockCommit(t *testing.T) {
 		/*
 			#2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size()
 		*/
-		block, _ = state.MakeBlock(height, makeTxs(height), wrongSigsCommit, nil, proposerAddr)
+		block = sf.MakeBlock(state, height, wrongSigsCommit)
 		err = blockExec.ValidateBlock(state, block)
 		_, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures)
 		require.True(t, isErrInvalidCommitSignatures,
@@ -241,6 +242,7 @@ func TestValidateBlockEvidence(t *testing.T) {
 
 	state, stateDB, privVals := makeState(4, 1)
 	stateStore := sm.NewStore(stateDB)
+	blockStore := store.NewBlockStore(dbm.NewMemDB())
 	defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
 
 	evpool := &mocks.EvidencePool{}
@@ -256,6 +258,7 @@ func TestValidateBlockEvidence(t *testing.T) {
 		proxyApp.Consensus(),
 		memmock.Mempool{},
 		evpool,
+		blockStore,
 	)
 	lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
 
@@ -275,7 +278,7 @@ func TestValidateBlockEvidence(t *testing.T) {
 			evidence = append(evidence, newEv)
 			currentBytes += int64(len(newEv.Bytes()))
 		}
-		block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr)
+		block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr)
 		err := blockExec.ValidateBlock(state, block)
 		if assert.Error(t, err) {
 			_, ok := err.(*types.ErrEvidenceOverflow)
diff --git a/store/store.go b/store/store.go
index 8f67fd0c9..1396ca777 100644
--- a/store/store.go
+++ b/store/store.go
@@ -519,6 +519,48 @@ func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) err
 	return bs.db.Set(seenCommitKey(height), seenCommitBytes)
 }
 
+func (bs *BlockStore) SaveSignedHeader(sh *types.SignedHeader, blockID types.BlockID) error {
+	// first check that the block store doesn't already have the block
+	bz, err := bs.db.Get(blockMetaKey(sh.Height))
+	if err != nil {
+		return err
+	}
+	if bz != nil {
+		return fmt.Errorf("block at height %d already saved", sh.Height)
+	}
+
+	// FIXME: saving signed headers, although necessary for proving evidence,
+	// doesn't have complete parity with block metas; block size and num txs
+	// are therefore filled with negative numbers. We should aim to find a
+	// solution to this.
+	blockMeta := &types.BlockMeta{
+		BlockID:   blockID,
+		BlockSize: -1,
+		Header:    *sh.Header,
+		NumTxs:    -1,
+	}
+
+	batch := bs.db.NewBatch()
+
+	pbm := blockMeta.ToProto()
+	metaBytes := mustEncode(pbm)
+	if err := batch.Set(blockMetaKey(sh.Height), metaBytes); err != nil {
+		return fmt.Errorf("unable to save block meta: %w", err)
+	}
+
+	pbc := sh.Commit.ToProto()
+	blockCommitBytes := mustEncode(pbc)
+	if err := batch.Set(blockCommitKey(sh.Height), blockCommitBytes); err != nil {
+		return fmt.Errorf("unable to save commit: %w", err)
+	}
+
+	if err := batch.WriteSync(); err != nil {
+		return err
+	}
+
+	return batch.Close()
+}
+
 //---------------------------------- KEY ENCODING -----------------------------------------
 
 // key prefixes
diff --git a/store/store_test.go b/store/store_test.go
index 1aea1e5a9..a631ead5a 100644
--- a/store/store_test.go
+++ b/store/store_test.go
@@ -17,6 +17,7 @@ import (
 	"github.com/tendermint/tendermint/libs/log"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/state/test/factory"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
 	"github.com/tendermint/tendermint/version"
@@ -44,26 +45,10 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
 		commitSigs)
 }
 
-func makeTxs(height int64) (txs []types.Tx) {
-	for i := 0; i < 10; i++ {
-		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
-	}
-	return txs
-}
-
-func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
-	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
-	return block
-}
-
 func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
 	config := cfg.ResetTestRoot("blockchain_reactor_test")
-	// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
-	// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
 	blockDB := dbm.NewMemDB()
-	stateDB := dbm.NewMemDB()
-	stateStore := sm.NewStore(stateDB)
-	state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
+	state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
 	if err != nil {
 		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
 	}
@@ -87,7 +72,7 @@ var (
 func TestMain(m *testing.M) {
 	var cleanup cleanupFunc
 	state, _, cleanup = makeStateAndBlockStore(log.NewNopLogger())
-	block = makeBlock(1, state, new(types.Commit))
+	block = factory.MakeBlock(state, 1, new(types.Commit))
 	partSet = block.MakePartSet(2)
 	part1 = partSet.GetPart(0)
 	part2 = partSet.GetPart(1)
@@ -113,7 +98,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 	}
 
 	// save a block
-	block := makeBlock(bs.Height()+1, state, new(types.Commit))
+	block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit))
 	validPartSet := block.MakePartSet(2)
 	seenCommit := makeTestCommit(10, tmtime.Now())
 	bs.SaveBlock(block, partSet, seenCommit)
@@ -309,13 +294,12 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 func TestLoadBaseMeta(t *testing.T) {
 	config := cfg.ResetTestRoot("blockchain_reactor_test")
 	defer os.RemoveAll(config.RootDir)
-	stateStore := sm.NewStore(dbm.NewMemDB())
-	state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
+	state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
 	require.NoError(t, err)
 	bs := NewBlockStore(dbm.NewMemDB())
 
 	for h := int64(1); h <= 10; h++ {
-		block := makeBlock(h, state, new(types.Commit))
+		block := factory.MakeBlock(state, h, new(types.Commit))
 		partSet := block.MakePartSet(2)
 		seenCommit := makeTestCommit(h, tmtime.Now())
 		bs.SaveBlock(block, partSet, seenCommit)
@@ -366,8 +350,7 @@ func TestLoadBlockPart(t *testing.T) {
 func TestPruneBlocks(t *testing.T) {
 	config := cfg.ResetTestRoot("blockchain_reactor_test")
 	defer os.RemoveAll(config.RootDir)
-	stateStore := sm.NewStore(dbm.NewMemDB())
-	state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
+	state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
 	require.NoError(t, err)
 	db := dbm.NewMemDB()
 	bs := NewBlockStore(db)
@@ -380,7 +363,7 @@ func TestPruneBlocks(t *testing.T) {
 
 	// make more than 1000 blocks, to test batch deletions
 	for h := int64(1); h <= 1500; h++ {
-		block := makeBlock(h, state, new(types.Commit))
+		block := factory.MakeBlock(state, h, new(types.Commit))
 		partSet := block.MakePartSet(2)
 		seenCommit := makeTestCommit(h, tmtime.Now())
 		bs.SaveBlock(block, partSet, seenCommit)
@@ -485,7 +468,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
 	state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger())
 	defer cleanup()
 	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
-	block := makeBlock(bs.Height()+1, state, new(types.Commit))
+	block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit))
 	partSet := block.MakePartSet(2)
 	seenCommit := makeTestCommit(10, tmtime.Now())
 
@@ -531,7 +514,7 @@ func TestSeenAndCanonicalCommit(t *testing.T) {
 		c2 := bs.LoadBlockCommit(h - 1)
 		require.Nil(t, c2)
 		blockCommit := makeTestCommit(h-1, tmtime.Now())
-		block := makeBlock(h, state, blockCommit)
+		block := factory.MakeBlock(state, h, blockCommit)
 		partSet := block.MakePartSet(2)
 		seenCommit := makeTestCommit(h, tmtime.Now())
 		bs.SaveBlock(block, partSet, seenCommit)
diff --git a/test/e2e/networks/simple.toml b/test/e2e/networks/simple.toml
index 05cda1819..f96d48011 100644
--- a/test/e2e/networks/simple.toml
+++ b/test/e2e/networks/simple.toml
@@ -1,5 +1,4 @@
 [node.validator01]
 [node.validator02]
 [node.validator03]
-[node.validator04]
-
+[node.validator04]
\ No newline at end of file
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go
index 3fb5543a8..2cb626bf6 100644
--- a/test/e2e/pkg/testnet.go
+++ b/test/e2e/pkg/testnet.go
@@ -49,7 +49,7 @@ const (
 	PerturbationRestart Perturbation = "restart"
 
 	EvidenceAgeHeight int64         = 5
-	EvidenceAgeTime   time.Duration = 10 * time.Second
+	EvidenceAgeTime   time.Duration = 500 * time.Millisecond
 )
 
 // Testnet represents a single testnet.
diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go
index a9373e355..d2fa0bc56 100644
--- a/test/e2e/runner/evidence.go
+++ b/test/e2e/runner/evidence.go
@@ -65,7 +65,7 @@ func InjectEvidence(testnet *e2e.Testnet, amount int) error {
 
 	// wait for the node to reach the height above the forged height so that
 	// it is able to validate the evidence
-	status, err := waitForNode(targetNode, waitHeight, 10*time.Second)
+	status, err := waitForNode(targetNode, waitHeight, 15*time.Second)
 	if err != nil {
 		return err
 	}
diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go
index adeb9c93b..573e46540 100644
--- a/test/e2e/runner/load.go
+++ b/test/e2e/runner/load.go
@@ -81,7 +81,7 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int) {
 
 		select {
 		case chTx <- tx:
-			time.Sleep(time.Duration(100/multiplier) * time.Millisecond)
+			time.Sleep(time.Second / time.Duration(multiplier))
 
 		case <-ctx.Done():
 			close(chTx)
diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go
index 50c9f67a7..81f3d77ac 100644
--- a/test/e2e/runner/perturb.go
+++ b/test/e2e/runner/perturb.go
@@ -72,7 +72,7 @@ func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.Resul
 		return nil, nil
 	}
 
-	status, err := waitForNode(node, 0, 10*time.Second)
+	status, err := waitForNode(node, 0, 15*time.Second)
 	if err != nil {
 		return nil, err
 	}
diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go
index 6df7ba87b..7b2549d48 100644
--- a/test/e2e/runner/rpc.go
+++ b/test/e2e/runner/rpc.go
@@ -43,7 +43,7 @@ func waitForHeight(testnet *e2e.Testnet, height int64) (*types.Block, *types.Blo
 			if err != nil {
 				continue
 			}
-			if result.Block != nil && (maxResult == nil || result.Block.Height >= maxResult.Block.Height) {
+			if result.Block != nil && (maxResult == nil || result.Block.Height > maxResult.Block.Height) {
 				maxResult = result
 				lastIncrease = time.Now()
 			}
diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go
index b3f4e9139..21aeeda99 100644
--- a/test/e2e/tests/block_test.go
+++ b/test/e2e/tests/block_test.go
@@ -32,6 +32,11 @@ func TestBlock_Header(t *testing.T) {
 			if block.Header.Height < first {
 				continue
 			}
+			// the first blocks after state sync come from the backfill process
+			// and are therefore not complete
+			if node.StateSync && block.Header.Height <= first+e2e.EvidenceAgeHeight+1 {
+				continue
+			}
 			if block.Header.Height > last {
 				break
 			}
@@ -63,10 +68,10 @@ func TestBlock_Range(t *testing.T) {
 			last := status.SyncInfo.LatestBlockHeight
 
 			switch {
+			// if the node state synced, we skip any assertions because it's hard
+			// to know how far back reverse sync fetched historical blocks
 			case node.StateSync:
-				assert.Greater(t, first, node.Testnet.InitialHeight,
-					"state synced nodes should not contain network's initial height")
-
+				break
 			case node.RetainBlocks > 0 && int64(node.RetainBlocks) < (last-node.Testnet.InitialHeight+1):
 				// Delta handles race conditions in reading first/last heights.
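 				// (a new block can be appended before the oldest one is pruned, so the
 				// retained count may briefly differ from RetainBlocks by one)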
 				assert.InDelta(t, node.RetainBlocks, last-first+1, 1,
@@ -78,12 +83,16 @@ func TestBlock_Range(t *testing.T) {
 			}
 
 			for h := first; h <= last; h++ {
+				if node.StateSync && h <= first+e2e.EvidenceAgeHeight+1 {
+					continue
+				}
 				resp, err := client.Block(ctx, &(h))
 				if err != nil && node.RetainBlocks > 0 && h == first {
 					// Ignore errors in first block if node is pruning blocks due to race conditions.
 					continue
 				}
 				require.NoError(t, err)
+				require.NotNil(t, resp.Block)
 				assert.Equal(t, h, resp.Block.Height)
 			}
diff --git a/test/fuzz/oss-fuzz-build.sh b/test/fuzz/oss-fuzz-build.sh
new file mode 100755
index 000000000..c6d1fabdf
--- /dev/null
+++ b/test/fuzz/oss-fuzz-build.sh
@@ -0,0 +1,3 @@
+#!/bin/bash -eu
+
+export FUZZ_ROOT="github.com/tendermint/tendermint"
diff --git a/types/block_meta_test.go b/types/block_meta_test.go
index 1e29a132a..a1a382ffa 100644
--- a/types/block_meta_test.go
+++ b/types/block_meta_test.go
@@ -10,7 +10,7 @@ import (
 )
 
 func TestBlockMeta_ToProto(t *testing.T) {
-	h := makeRandHeader()
+	h := MakeRandHeader()
 	bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}}
 
 	bm := &BlockMeta{
@@ -47,7 +47,7 @@ func TestBlockMeta_ToProto(t *testing.T) {
 }
 
 func TestBlockMeta_ValidateBasic(t *testing.T) {
-	h := makeRandHeader()
+	h := MakeRandHeader()
 	bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}}
 	bi2 := BlockID{Hash: tmrand.Bytes(tmhash.Size),
 		PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}}
diff --git a/types/block_test.go b/types/block_test.go
index 21b251901..8685de6c7 100644
--- a/types/block_test.go
+++ b/types/block_test.go
@@ -749,7 +749,8 @@ func TestEvidenceDataProtoBuf(t *testing.T) {
 	}
 }
 
-func makeRandHeader() Header {
+// MakeRandHeader is exported so that tests in other packages can use it.
+func MakeRandHeader() Header {
 	chainID := "test"
 	t := time.Now()
 	height := mrand.Int63()
@@ -778,7 +779,7 @@ func makeRandHeader() Header {
 }
 
 func TestHeaderProto(t *testing.T) {
-	h1 := makeRandHeader()
+	h1 := MakeRandHeader()
 	tc := []struct {
 		msg string
 		h1  *Header
@@ -830,7 +831,7 @@ func TestBlockIDProtoBuf(t *testing.T) {
 
 func TestSignedHeaderProtoBuf(t *testing.T) {
 	commit := randCommit(time.Now())
-	h := makeRandHeader()
+	h := MakeRandHeader()
 
 	sh := SignedHeader{Header: &h, Commit: commit}
diff --git a/types/light_test.go b/types/light_test.go
index abf4374d4..94b2c4b4f 100644
--- a/types/light_test.go
+++ b/types/light_test.go
@@ -12,7 +12,7 @@ import (
 )
 
 func TestLightBlockValidateBasic(t *testing.T) {
-	header := makeRandHeader()
+	header := MakeRandHeader()
 	commit := randCommit(time.Now())
 	vals, _ := randValidatorPrivValSet(5, 1)
 	header.Height = commit.Height
@@ -57,7 +57,7 @@ func TestLightBlockValidateBasic(t *testing.T) {
 }
 
 func TestLightBlockProtobuf(t *testing.T) {
-	header := makeRandHeader()
+	header := MakeRandHeader()
 	commit := randCommit(time.Now())
 	vals, _ := randValidatorPrivValSet(5, 1)
 	header.Height = commit.Height
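 	// (the header must carry the commit's height for the resulting light block to be internally consistent)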