Mirror of https://github.com/tendermint/tendermint.git
crypto/merkle: remove simple prefix (#4989)
## Description

This PR removes the simple prefix from all types in the crypto/merkle directory. The two proto types `Proof` & `ProofOp` have been moved to the `proto/crypto/merkle` directory. The proto message `Proof` was renamed to `ProofOps`, and the `SimpleProof` message to `Proof`.

Closes: #2755
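For reference, a minimal sketch (not taken from this changeset) of what call sites look like after the rename; the pre-PR names are noted in the comments, and the inputs are purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	items := [][]byte{[]byte("apple"), []byte("watermelon"), []byte("kiwi")}

	// Formerly merkle.SimpleHashFromByteSlices / merkle.SimpleProofsFromByteSlices.
	root := merkle.HashFromByteSlices(items)
	root2, proofs := merkle.ProofsFromByteSlices(items)

	// proofs[0] is a *merkle.Proof (formerly *merkle.SimpleProof) for items[0].
	if err := proofs[0].Verify(root, items[0]); err != nil {
		fmt.Println("proof verification failed:", err)
		return
	}
	fmt.Printf("root %X matches %X; proof for items[0] verified\n", root, root2)
}
```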
@@ -7,7 +7,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func TestSimpleArmor(t *testing.T) {
+func TestArmor(t *testing.T) {
 	blockType := "MINT TEST"
 	data := []byte("somedata")
 	armorStr := EncodeArmor(blockType, nil, data)
@@ -1,617 +0,0 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: crypto/merkle/merkle.proto
|
||||
|
||||
package merkle
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// ProofOp defines an operation used for calculating Merkle root
|
||||
// The data could be arbitrary format, providing nessecary data
|
||||
// for example neighbouring node hash
|
||||
type ProofOp struct {
|
||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ProofOp) Reset() { *m = ProofOp{} }
|
||||
func (m *ProofOp) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProofOp) ProtoMessage() {}
|
||||
func (*ProofOp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c1c2162d560d38e, []int{0}
|
||||
}
|
||||
func (m *ProofOp) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ProofOp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProofOp.Merge(m, src)
|
||||
}
|
||||
func (m *ProofOp) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ProofOp) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProofOp.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProofOp proto.InternalMessageInfo
|
||||
|
||||
func (m *ProofOp) GetType() string {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ProofOp) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ProofOp) GetData() []byte {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Proof is Merkle proof defined by the list of ProofOps
|
||||
type Proof struct {
|
||||
Ops []ProofOp `protobuf:"bytes,1,rep,name=ops,proto3" json:"ops"`
|
||||
}
|
||||
|
||||
func (m *Proof) Reset() { *m = Proof{} }
|
||||
func (m *Proof) String() string { return proto.CompactTextString(m) }
|
||||
func (*Proof) ProtoMessage() {}
|
||||
func (*Proof) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c1c2162d560d38e, []int{1}
|
||||
}
|
||||
func (m *Proof) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Proof.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Proof) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Proof.Merge(m, src)
|
||||
}
|
||||
func (m *Proof) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Proof) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Proof.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Proof proto.InternalMessageInfo
|
||||
|
||||
func (m *Proof) GetOps() []ProofOp {
|
||||
if m != nil {
|
||||
return m.Ops
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ProofOp)(nil), "tendermint.crypto.merkle.ProofOp")
|
||||
proto.RegisterType((*Proof)(nil), "tendermint.crypto.merkle.Proof")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_9c1c2162d560d38e) }
|
||||
|
||||
var fileDescriptor_9c1c2162d560d38e = []byte{
|
||||
// 234 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c,
|
||||
0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25,
|
||||
0xf9, 0x42, 0x12, 0x25, 0xa9, 0x79, 0x29, 0xa9, 0x45, 0xb9, 0x99, 0x79, 0x25, 0x7a, 0x10, 0x65,
|
||||
0x7a, 0x10, 0x79, 0x29, 0xb5, 0x92, 0x8c, 0xcc, 0xa2, 0x94, 0xf8, 0x82, 0xc4, 0xa2, 0x92, 0x4a,
|
||||
0x7d, 0xb0, 0x62, 0xfd, 0xf4, 0xfc, 0xf4, 0x7c, 0x04, 0x0b, 0x62, 0x82, 0x92, 0x33, 0x17, 0x7b,
|
||||
0x40, 0x51, 0x7e, 0x7e, 0x9a, 0x7f, 0x81, 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04,
|
||||
0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, 0x2d, 0x24, 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1,
|
||||
0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0x62, 0x82, 0x54, 0xa5, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x83,
|
||||
0x85, 0xc0, 0x6c, 0x25, 0x27, 0x2e, 0x56, 0xb0, 0x21, 0x42, 0x96, 0x5c, 0xcc, 0xf9, 0x05, 0xc5,
|
||||
0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x8a, 0x7a, 0xb8, 0x5c, 0xa7, 0x07, 0xb5, 0xd2, 0x89,
|
||||
0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0x90, 0x1e, 0x27, 0x8f, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c,
|
||||
0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e,
|
||||
0x3c, 0x96, 0x63, 0x88, 0xd2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5,
|
||||
0x47, 0x98, 0x88, 0xcc, 0x44, 0x09, 0xa1, 0x24, 0x36, 0xb0, 0xcf, 0x8c, 0x01, 0x01, 0x00, 0x00,
|
||||
0xff, 0xff, 0x39, 0xc0, 0xa3, 0x13, 0x39, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *ProofOp) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ProofOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Data) > 0 {
|
||||
i -= len(m.Data)
|
||||
copy(dAtA[i:], m.Data)
|
||||
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data)))
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
i -= len(m.Key)
|
||||
copy(dAtA[i:], m.Key)
|
||||
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if len(m.Type) > 0 {
|
||||
i -= len(m.Type)
|
||||
copy(dAtA[i:], m.Type)
|
||||
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Proof) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Proof) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Proof) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Ops) > 0 {
|
||||
for iNdEx := len(m.Ops) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Ops[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintMerkle(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovMerkle(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *ProofOp) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Type)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovMerkle(uint64(l))
|
||||
}
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovMerkle(uint64(l))
|
||||
}
|
||||
l = len(m.Data)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovMerkle(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Proof) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Ops) > 0 {
|
||||
for _, e := range m.Ops {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovMerkle(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovMerkle(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozMerkle(x uint64) (n int) {
|
||||
return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *ProofOp) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ProofOp: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Type = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Key == nil {
|
||||
m.Key = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Data == nil {
|
||||
m.Data = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipMerkle(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Proof) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Proof: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Ops = append(m.Ops, ProofOp{})
|
||||
if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipMerkle(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthMerkle
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipMerkle(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMerkle
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthMerkle
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupMerkle
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthMerkle
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupMerkle = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
||||
@@ -1,28 +0,0 @@
syntax = "proto3";
package tendermint.crypto.merkle;
option go_package = "github.com/tendermint/tendermint/crypto/merkle";

// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "third_party/proto/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.sizer_all) = true;

//----------------------------------------
// Message types

// ProofOp defines an operation used for calculating Merkle root
// The data could be arbitrary format, providing nessecary data
// for example neighbouring node hash
message ProofOp {
  string type = 1;
  bytes key = 2;
  bytes data = 3;
}

// Proof is Merkle proof defined by the list of ProofOps
message Proof {
  repeated ProofOp ops = 1 [(gogoproto.nullable) = false];
}
@@ -4,135 +4,232 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
tmmerkle "github.com/tendermint/tendermint/proto/crypto/merkle"
|
||||
)
|
||||
|
||||
//----------------------------------------
|
||||
// ProofOp gets converted to an instance of ProofOperator:
|
||||
const (
|
||||
// MaxAunts is the maximum number of aunts that can be included in a Proof.
|
||||
// This corresponds to a tree of size 2^100, which should be sufficient for all conceivable purposes.
|
||||
// This maximum helps prevent Denial-of-Service attacks by limitting the size of the proofs.
|
||||
MaxAunts = 100
|
||||
)
|
||||
|
||||
// ProofOperator is a layer for calculating intermediate Merkle roots
|
||||
// when a series of Merkle trees are chained together.
|
||||
// Run() takes leaf values from a tree and returns the Merkle
|
||||
// root for the corresponding tree. It takes and returns a list of bytes
|
||||
// to allow multiple leaves to be part of a single proof, for instance in a range proof.
|
||||
// ProofOp() encodes the ProofOperator in a generic way so it can later be
|
||||
// decoded with OpDecoder.
|
||||
type ProofOperator interface {
|
||||
Run([][]byte) ([][]byte, error)
|
||||
GetKey() []byte
|
||||
ProofOp() ProofOp
|
||||
// Proof represents a Merkle proof.
|
||||
// NOTE: The convention for proofs is to include leaf hashes but to
|
||||
// exclude the root hash.
|
||||
// This convention is implemented across IAVL range proofs as well.
|
||||
// Keep this consistent unless there's a very good reason to change
|
||||
// everything. This also affects the generalized proof system as
|
||||
// well.
|
||||
type Proof struct {
|
||||
Total int64 `json:"total"` // Total number of items.
|
||||
Index int64 `json:"index"` // Index of item to prove.
|
||||
LeafHash []byte `json:"leaf_hash"` // Hash of item value.
|
||||
Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Operations on a list of ProofOperators
|
||||
|
||||
// ProofOperators is a slice of ProofOperator(s).
|
||||
// Each operator will be applied to the input value sequentially
|
||||
// and the last Merkle root will be verified with already known data
|
||||
type ProofOperators []ProofOperator
|
||||
|
||||
func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) {
|
||||
return poz.Verify(root, keypath, [][]byte{value})
|
||||
// ProofsFromByteSlices computes inclusion proof for given items.
|
||||
// proofs[0] is the proof for items[0].
|
||||
func ProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Proof) {
|
||||
trails, rootSPN := trailsFromByteSlices(items)
|
||||
rootHash = rootSPN.Hash
|
||||
proofs = make([]*Proof, len(items))
|
||||
for i, trail := range trails {
|
||||
proofs[i] = &Proof{
|
||||
Total: int64(len(items)),
|
||||
Index: int64(i),
|
||||
LeafHash: trail.Hash,
|
||||
Aunts: trail.FlattenAunts(),
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) {
|
||||
keys, err := KeyPathToKeys(keypath)
|
||||
if err != nil {
|
||||
return
|
||||
// Verify that the Proof proves the root hash.
|
||||
// Check sp.Index/sp.Total manually if needed
|
||||
func (sp *Proof) Verify(rootHash []byte, leaf []byte) error {
|
||||
leafHash := leafHash(leaf)
|
||||
if sp.Total < 0 {
|
||||
return errors.New("proof total must be positive")
|
||||
}
|
||||
|
||||
for i, op := range poz {
|
||||
key := op.GetKey()
|
||||
if len(key) != 0 {
|
||||
if len(keys) == 0 {
|
||||
return fmt.Errorf("key path has insufficient # of parts: expected no more keys but got %+v", string(key))
|
||||
}
|
||||
lastKey := keys[len(keys)-1]
|
||||
if !bytes.Equal(lastKey, key) {
|
||||
return fmt.Errorf("key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key))
|
||||
}
|
||||
keys = keys[:len(keys)-1]
|
||||
}
|
||||
args, err = op.Run(args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if sp.Index < 0 {
|
||||
return errors.New("proof index cannot be negative")
|
||||
}
|
||||
if !bytes.Equal(root, args[0]) {
|
||||
return fmt.Errorf("calculated root hash is invalid: expected %+v but got %+v", root, args[0])
|
||||
if !bytes.Equal(sp.LeafHash, leafHash) {
|
||||
return fmt.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash)
|
||||
}
|
||||
if len(keys) != 0 {
|
||||
return errors.New("keypath not consumed all")
|
||||
computedHash := sp.ComputeRootHash()
|
||||
if !bytes.Equal(computedHash, rootHash) {
|
||||
return fmt.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// ProofRuntime - main entrypoint
|
||||
|
||||
type OpDecoder func(ProofOp) (ProofOperator, error)
|
||||
|
||||
type ProofRuntime struct {
|
||||
decoders map[string]OpDecoder
|
||||
// Compute the root hash given a leaf hash. Does not verify the result.
|
||||
func (sp *Proof) ComputeRootHash() []byte {
|
||||
return computeHashFromAunts(
|
||||
sp.Index,
|
||||
sp.Total,
|
||||
sp.LeafHash,
|
||||
sp.Aunts,
|
||||
)
|
||||
}
|
||||
|
||||
func NewProofRuntime() *ProofRuntime {
|
||||
return &ProofRuntime{
|
||||
decoders: make(map[string]OpDecoder),
|
||||
// String implements the stringer interface for Proof.
|
||||
// It is a wrapper around StringIndented.
|
||||
func (sp *Proof) String() string {
|
||||
return sp.StringIndented("")
|
||||
}
|
||||
|
||||
// StringIndented generates a canonical string representation of a Proof.
|
||||
func (sp *Proof) StringIndented(indent string) string {
|
||||
return fmt.Sprintf(`Proof{
|
||||
%s Aunts: %X
|
||||
%s}`,
|
||||
indent, sp.Aunts,
|
||||
indent)
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation.
|
||||
// NOTE: it expects the LeafHash and the elements of Aunts to be of size tmhash.Size,
|
||||
// and it expects at most MaxAunts elements in Aunts.
|
||||
func (sp *Proof) ValidateBasic() error {
|
||||
if sp.Total < 0 {
|
||||
return errors.New("negative Total")
|
||||
}
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) {
|
||||
_, ok := prt.decoders[typ]
|
||||
if ok {
|
||||
panic("already registered for type " + typ)
|
||||
if sp.Index < 0 {
|
||||
return errors.New("negative Index")
|
||||
}
|
||||
prt.decoders[typ] = dec
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
|
||||
decoder := prt.decoders[pop.Type]
|
||||
if decoder == nil {
|
||||
return nil, fmt.Errorf("unrecognized proof type %v", pop.Type)
|
||||
if len(sp.LeafHash) != tmhash.Size {
|
||||
return fmt.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash))
|
||||
}
|
||||
return decoder(pop)
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
|
||||
poz := make(ProofOperators, 0, len(proof.Ops))
|
||||
for _, pop := range proof.Ops {
|
||||
operator, err := prt.Decode(pop)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding a proof operator: %w", err)
|
||||
if len(sp.Aunts) > MaxAunts {
|
||||
return fmt.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts))
|
||||
}
|
||||
for i, auntHash := range sp.Aunts {
|
||||
if len(auntHash) != tmhash.Size {
|
||||
return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash))
|
||||
}
|
||||
poz = append(poz, operator)
|
||||
}
|
||||
return poz, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) {
|
||||
return prt.Verify(proof, root, keypath, [][]byte{value})
|
||||
}
|
||||
|
||||
// TODO In the long run we'll need a method of classifcation of ops,
|
||||
// whether existence or absence or perhaps a third?
|
||||
func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) {
|
||||
return prt.Verify(proof, root, keypath, nil)
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) {
|
||||
poz, err := prt.DecodeProof(proof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding proof: %w", err)
|
||||
func (sp *Proof) ToProto() *tmmerkle.Proof {
|
||||
if sp == nil {
|
||||
return nil
|
||||
}
|
||||
return poz.Verify(root, keypath, args)
|
||||
pb := new(tmmerkle.Proof)
|
||||
|
||||
pb.Total = sp.Total
|
||||
pb.Index = sp.Index
|
||||
pb.LeafHash = sp.LeafHash
|
||||
pb.Aunts = sp.Aunts
|
||||
|
||||
return pb
|
||||
}
|
||||
|
||||
// DefaultProofRuntime only knows about Simple value
|
||||
// proofs.
|
||||
// To use e.g. IAVL proofs, register op-decoders as
|
||||
// defined in the IAVL package.
|
||||
func DefaultProofRuntime() (prt *ProofRuntime) {
|
||||
prt = NewProofRuntime()
|
||||
prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder)
|
||||
return
|
||||
func ProofFromProto(pb *tmmerkle.Proof) (*Proof, error) {
|
||||
if pb == nil {
|
||||
return nil, errors.New("nil proof")
|
||||
}
|
||||
sp := new(Proof)
|
||||
|
||||
sp.Total = pb.Total
|
||||
sp.Index = pb.Index
|
||||
sp.LeafHash = pb.LeafHash
|
||||
sp.Aunts = pb.Aunts
|
||||
|
||||
return sp, sp.ValidateBasic()
|
||||
}
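A short sketch (illustrative, not part of the diff) of round-tripping the renamed Proof through these ToProto / ProofFromProto helpers; note that ProofFromProto also runs ValidateBasic on the result:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	_, proofs := merkle.ProofsFromByteSlices([][]byte{[]byte("a"), []byte("b")})

	pb := proofs[0].ToProto()            // *tmmerkle.Proof, the wire representation
	sp, err := merkle.ProofFromProto(pb) // converts back and calls ValidateBasic
	if err != nil {
		fmt.Println("invalid proof:", err)
		return
	}
	fmt.Printf("round-tripped proof: total=%d index=%d leaf=%X\n", sp.Total, sp.Index, sp.LeafHash)
}
```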
|
||||
|
||||
// Use the leafHash and innerHashes to get the root merkle hash.
|
||||
// If the length of the innerHashes slice isn't exactly correct, the result is nil.
|
||||
// Recursive impl.
|
||||
func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) []byte {
|
||||
if index >= total || index < 0 || total <= 0 {
|
||||
return nil
|
||||
}
|
||||
switch total {
|
||||
case 0:
|
||||
panic("Cannot call computeHashFromAunts() with 0 total")
|
||||
case 1:
|
||||
if len(innerHashes) != 0 {
|
||||
return nil
|
||||
}
|
||||
return leafHash
|
||||
default:
|
||||
if len(innerHashes) == 0 {
|
||||
return nil
|
||||
}
|
||||
numLeft := getSplitPoint(total)
|
||||
if index < numLeft {
|
||||
leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||
if leftHash == nil {
|
||||
return nil
|
||||
}
|
||||
return innerHash(leftHash, innerHashes[len(innerHashes)-1])
|
||||
}
|
||||
rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||
if rightHash == nil {
|
||||
return nil
|
||||
}
|
||||
return innerHash(innerHashes[len(innerHashes)-1], rightHash)
|
||||
}
|
||||
}
|
||||
|
||||
// ProofNode is a helper structure to construct merkle proof.
|
||||
// The node and the tree is thrown away afterwards.
|
||||
// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
|
||||
// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
|
||||
// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child.
|
||||
type ProofNode struct {
|
||||
Hash []byte
|
||||
Parent *ProofNode
|
||||
Left *ProofNode // Left sibling (only one of Left,Right is set)
|
||||
Right *ProofNode // Right sibling (only one of Left,Right is set)
|
||||
}
|
||||
|
||||
// FlattenAunts will return the inner hashes for the item corresponding to the leaf,
|
||||
// starting from a leaf ProofNode.
|
||||
func (spn *ProofNode) FlattenAunts() [][]byte {
|
||||
// Nonrecursive impl.
|
||||
innerHashes := [][]byte{}
|
||||
for spn != nil {
|
||||
switch {
|
||||
case spn.Left != nil:
|
||||
innerHashes = append(innerHashes, spn.Left.Hash)
|
||||
case spn.Right != nil:
|
||||
innerHashes = append(innerHashes, spn.Right.Hash)
|
||||
default:
|
||||
break
|
||||
}
|
||||
spn = spn.Parent
|
||||
}
|
||||
return innerHashes
|
||||
}
|
||||
|
||||
// trails[0].Hash is the leaf hash for items[0].
|
||||
// trails[i].Parent.Parent....Parent == root for all i.
|
||||
func trailsFromByteSlices(items [][]byte) (trails []*ProofNode, root *ProofNode) {
|
||||
// Recursive impl.
|
||||
switch len(items) {
|
||||
case 0:
|
||||
return nil, nil
|
||||
case 1:
|
||||
trail := &ProofNode{leafHash(items[0]), nil, nil, nil}
|
||||
return []*ProofNode{trail}, trail
|
||||
default:
|
||||
k := getSplitPoint(int64(len(items)))
|
||||
lefts, leftRoot := trailsFromByteSlices(items[:k])
|
||||
rights, rightRoot := trailsFromByteSlices(items[k:])
|
||||
rootHash := innerHash(leftRoot.Hash, rightRoot.Hash)
|
||||
root := &ProofNode{rootHash, nil, nil, nil}
|
||||
leftRoot.Parent = root
|
||||
leftRoot.Right = rightRoot
|
||||
rightRoot.Parent = root
|
||||
rightRoot.Left = leftRoot
|
||||
return append(lefts, rights...), root
|
||||
}
|
||||
}
|
||||
|
||||
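getSplitPoint, which trailsFromByteSlices and computeHashFromAunts rely on, is not shown in this diff; the convention it implements is the usual one for RFC 6962-style trees, namely the largest power of two strictly less than the item count. A hypothetical stand-in (name and placement are assumptions, not part of this change) could look like:

```go
package merkle

import "math/bits"

// getSplitPointSketch returns the largest power of two strictly less than
// length; a hypothetical stand-in for the unexported helper used above.
func getSplitPointSketch(length int64) int64 {
	if length < 1 {
		panic("cannot split a tree with size < 1")
	}
	k := int64(1) << uint(bits.Len64(uint64(length))-1)
	if k == length {
		k >>= 1
	}
	return k
}
```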
@@ -17,7 +17,7 @@ import (
 /32:)
 
 For example, for a Cosmos-SDK application where the first two proof layers
-are SimpleValueOps, and the third proof layer is an IAVLValueOp, the keys
+are ValueOps, and the third proof layer is an IAVLValueOp, the keys
 might look like:
 
 0: []byte("App")
crypto/merkle/proof_op.go (new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
tmmerkle "github.com/tendermint/tendermint/proto/crypto/merkle"
|
||||
)
|
||||
|
||||
//----------------------------------------
|
||||
// ProofOp gets converted to an instance of ProofOperator:
|
||||
|
||||
// ProofOperator is a layer for calculating intermediate Merkle roots
|
||||
// when a series of Merkle trees are chained together.
|
||||
// Run() takes leaf values from a tree and returns the Merkle
|
||||
// root for the corresponding tree. It takes and returns a list of bytes
|
||||
// to allow multiple leaves to be part of a single proof, for instance in a range proof.
|
||||
// ProofOp() encodes the ProofOperator in a generic way so it can later be
|
||||
// decoded with OpDecoder.
|
||||
type ProofOperator interface {
|
||||
Run([][]byte) ([][]byte, error)
|
||||
GetKey() []byte
|
||||
ProofOp() tmmerkle.ProofOp
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Operations on a list of ProofOperators
|
||||
|
||||
// ProofOperators is a slice of ProofOperator(s).
|
||||
// Each operator will be applied to the input value sequentially
|
||||
// and the last Merkle root will be verified with already known data
|
||||
type ProofOperators []ProofOperator
|
||||
|
||||
func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) {
|
||||
return poz.Verify(root, keypath, [][]byte{value})
|
||||
}
|
||||
|
||||
func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) {
|
||||
keys, err := KeyPathToKeys(keypath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, op := range poz {
|
||||
key := op.GetKey()
|
||||
if len(key) != 0 {
|
||||
if len(keys) == 0 {
|
||||
return fmt.Errorf("key path has insufficient # of parts: expected no more keys but got %+v", string(key))
|
||||
}
|
||||
lastKey := keys[len(keys)-1]
|
||||
if !bytes.Equal(lastKey, key) {
|
||||
return fmt.Errorf("key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key))
|
||||
}
|
||||
keys = keys[:len(keys)-1]
|
||||
}
|
||||
args, err = op.Run(args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(root, args[0]) {
|
||||
return fmt.Errorf("calculated root hash is invalid: expected %+v but got %+v", root, args[0])
|
||||
}
|
||||
if len(keys) != 0 {
|
||||
return errors.New("keypath not consumed all")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// ProofRuntime - main entrypoint
|
||||
|
||||
type OpDecoder func(tmmerkle.ProofOp) (ProofOperator, error)
|
||||
|
||||
type ProofRuntime struct {
|
||||
decoders map[string]OpDecoder
|
||||
}
|
||||
|
||||
func NewProofRuntime() *ProofRuntime {
|
||||
return &ProofRuntime{
|
||||
decoders: make(map[string]OpDecoder),
|
||||
}
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) {
|
||||
_, ok := prt.decoders[typ]
|
||||
if ok {
|
||||
panic("already registered for type " + typ)
|
||||
}
|
||||
prt.decoders[typ] = dec
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) Decode(pop tmmerkle.ProofOp) (ProofOperator, error) {
|
||||
decoder := prt.decoders[pop.Type]
|
||||
if decoder == nil {
|
||||
return nil, fmt.Errorf("unrecognized proof type %v", pop.Type)
|
||||
}
|
||||
return decoder(pop)
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) DecodeProof(proof *tmmerkle.ProofOps) (ProofOperators, error) {
|
||||
poz := make(ProofOperators, 0, len(proof.Ops))
|
||||
for _, pop := range proof.Ops {
|
||||
operator, err := prt.Decode(pop)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding a proof operator: %w", err)
|
||||
}
|
||||
poz = append(poz, operator)
|
||||
}
|
||||
return poz, nil
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) VerifyValue(proof *tmmerkle.ProofOps, root []byte, keypath string, value []byte) (err error) {
|
||||
return prt.Verify(proof, root, keypath, [][]byte{value})
|
||||
}
|
||||
|
||||
// TODO In the long run we'll need a method of classifcation of ops,
|
||||
// whether existence or absence or perhaps a third?
|
||||
func (prt *ProofRuntime) VerifyAbsence(proof *tmmerkle.ProofOps, root []byte, keypath string) (err error) {
|
||||
return prt.Verify(proof, root, keypath, nil)
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) Verify(proof *tmmerkle.ProofOps, root []byte, keypath string, args [][]byte) (err error) {
|
||||
poz, err := prt.DecodeProof(proof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding proof: %w", err)
|
||||
}
|
||||
return poz.Verify(root, keypath, args)
|
||||
}
|
||||
|
||||
// DefaultProofRuntime only knows about value proofs.
|
||||
// To use e.g. IAVL proofs, register op-decoders as
|
||||
// defined in the IAVL package.
|
||||
func DefaultProofRuntime() (prt *ProofRuntime) {
|
||||
prt = NewProofRuntime()
|
||||
prt.RegisterOpDecoder(ProofOpValue, ValueOpDecoder)
|
||||
return
|
||||
}
|
||||
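A compile-only sketch of how a caller (say, a light client checking an ABCI Query response) would drive this runtime after the rename; proofOps, root, keyPath and value are assumed to come from elsewhere:

```go
package lightclient // hypothetical package for illustration

import (
	"github.com/tendermint/tendermint/crypto/merkle"
	tmmerkle "github.com/tendermint/tendermint/proto/crypto/merkle"
)

// verifyKV chains the operators decoded from proofOps from the leaf value up
// to the expected root hash.
func verifyKV(proofOps *tmmerkle.ProofOps, root []byte, keyPath string, value []byte) error {
	// DefaultProofRuntime registers ValueOpDecoder under ProofOpValue ("simple:v").
	prt := merkle.DefaultProofRuntime()
	return prt.VerifyValue(proofOps, root, keyPath, value)
}
```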
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
tmmerkle "github.com/tendermint/tendermint/proto/crypto/merkle"
|
||||
)
|
||||
|
||||
const ProofOpDomino = "test:domino"
|
||||
@@ -28,21 +29,21 @@ func NewDominoOp(key, input, output string) DominoOp {
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
|
||||
func DominoOpDecoder(pop tmmerkle.ProofOp) (ProofOperator, error) {
|
||||
if pop.Type != ProofOpDomino {
|
||||
panic("unexpected proof op type")
|
||||
}
|
||||
var op DominoOp // a bit strange as we'll discard this, but it works.
|
||||
err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding ProofOp.Data into SimpleValueOp: %w", err)
|
||||
return nil, fmt.Errorf("decoding ProofOp.Data into ValueOp: %w", err)
|
||||
}
|
||||
return NewDominoOp(string(pop.Key), op.Input, op.Output), nil
|
||||
}
|
||||
|
||||
func (dop DominoOp) ProofOp() ProofOp {
|
||||
func (dop DominoOp) ProofOp() tmmerkle.ProofOp {
|
||||
bz := amino.MustMarshalBinaryLengthPrefixed(dop)
|
||||
return ProofOp{
|
||||
return tmmerkle.ProofOp{
|
||||
Type: ProofOpDomino,
|
||||
Key: []byte(dop.key),
|
||||
Data: bz,
|
||||
@@ -140,3 +141,37 @@ func TestProofOperators(t *testing.T) {
|
||||
func bz(s string) []byte {
|
||||
return []byte(s)
|
||||
}
|
||||
|
||||
func TestProofValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
malleateProof func(*Proof)
|
||||
errStr string
|
||||
}{
|
||||
{"Good", func(sp *Proof) {}, ""},
|
||||
{"Negative Total", func(sp *Proof) { sp.Total = -1 }, "negative Total"},
|
||||
{"Negative Index", func(sp *Proof) { sp.Index = -1 }, "negative Index"},
|
||||
{"Invalid LeafHash", func(sp *Proof) { sp.LeafHash = make([]byte, 10) },
|
||||
"expected LeafHash size to be 32, got 10"},
|
||||
{"Too many Aunts", func(sp *Proof) { sp.Aunts = make([][]byte, MaxAunts+1) },
|
||||
"expected no more than 100 aunts, got 101"},
|
||||
{"Invalid Aunt", func(sp *Proof) { sp.Aunts[0] = make([]byte, 10) },
|
||||
"expected Aunts#0 size to be 32, got 10"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
_, proofs := ProofsFromByteSlices([][]byte{
|
||||
[]byte("apple"),
|
||||
[]byte("watermelon"),
|
||||
[]byte("kiwi"),
|
||||
})
|
||||
tc.malleateProof(proofs[0])
|
||||
err := proofs[0].ValidateBasic()
|
||||
if tc.errStr != "" {
|
||||
assert.Contains(t, err.Error(), tc.errStr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,63 +5,64 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
tmmerkle "github.com/tendermint/tendermint/proto/crypto/merkle"
|
||||
)
|
||||
|
||||
const ProofOpSimpleValue = "simple:v"
|
||||
const ProofOpValue = "simple:v"
|
||||
|
||||
// SimpleValueOp takes a key and a single value as argument and
|
||||
// ValueOp takes a key and a single value as argument and
|
||||
// produces the root hash. The corresponding tree structure is
|
||||
// the SimpleMap tree. SimpleMap takes a Hasher, and currently
|
||||
// Tendermint uses aminoHasher. SimpleValueOp should support
|
||||
// Tendermint uses aminoHasher. ValueOp should support
|
||||
// the hash function as used in aminoHasher. TODO support
|
||||
// additional hash functions here as options/args to this
|
||||
// operator.
|
||||
//
|
||||
// If the produced root hash matches the expected hash, the
|
||||
// proof is good.
|
||||
type SimpleValueOp struct {
|
||||
type ValueOp struct {
|
||||
// Encoded in ProofOp.Key.
|
||||
key []byte
|
||||
|
||||
// To encode in ProofOp.Data
|
||||
Proof *SimpleProof `json:"simple_proof"`
|
||||
Proof *Proof `json:"proof"`
|
||||
}
|
||||
|
||||
var _ ProofOperator = SimpleValueOp{}
|
||||
var _ ProofOperator = ValueOp{}
|
||||
|
||||
func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp {
|
||||
return SimpleValueOp{
|
||||
func NewValueOp(key []byte, proof *Proof) ValueOp {
|
||||
return ValueOp{
|
||||
key: key,
|
||||
Proof: proof,
|
||||
}
|
||||
}
|
||||
|
||||
func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) {
|
||||
if pop.Type != ProofOpSimpleValue {
|
||||
return nil, fmt.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue)
|
||||
func ValueOpDecoder(pop tmmerkle.ProofOp) (ProofOperator, error) {
|
||||
if pop.Type != ProofOpValue {
|
||||
return nil, fmt.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpValue)
|
||||
}
|
||||
var op SimpleValueOp // a bit strange as we'll discard this, but it works.
|
||||
var op ValueOp // a bit strange as we'll discard this, but it works.
|
||||
err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding ProofOp.Data into SimpleValueOp: %w", err)
|
||||
return nil, fmt.Errorf("decoding ProofOp.Data into ValueOp: %w", err)
|
||||
}
|
||||
return NewSimpleValueOp(pop.Key, op.Proof), nil
|
||||
return NewValueOp(pop.Key, op.Proof), nil
|
||||
}
|
||||
|
||||
func (op SimpleValueOp) ProofOp() ProofOp {
|
||||
func (op ValueOp) ProofOp() tmmerkle.ProofOp {
|
||||
bz := cdc.MustMarshalBinaryLengthPrefixed(op)
|
||||
return ProofOp{
|
||||
Type: ProofOpSimpleValue,
|
||||
return tmmerkle.ProofOp{
|
||||
Type: ProofOpValue,
|
||||
Key: op.key,
|
||||
Data: bz,
|
||||
}
|
||||
}
|
||||
|
||||
func (op SimpleValueOp) String() string {
|
||||
return fmt.Sprintf("SimpleValueOp{%v}", op.GetKey())
|
||||
func (op ValueOp) String() string {
|
||||
return fmt.Sprintf("ValueOp{%v}", op.GetKey())
|
||||
}
|
||||
|
||||
func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) {
|
||||
func (op ValueOp) Run(args [][]byte) ([][]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("expected 1 arg, got %v", len(args))
|
||||
}
|
||||
@@ -85,6 +86,6 @@ func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (op SimpleValueOp) GetKey() []byte {
|
||||
func (op ValueOp) GetKey() []byte {
|
||||
return op.key
|
||||
}
|
||||
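Conversely, a sketch of producing a proof op with the renamed ValueOp (formerly SimpleValueOp), e.g. from an application that wants to attach a proof to a query response; the exact shape of tmmerkle.ProofOps.Ops is inferred from DecodeProof above and is an assumption:

```go
package abciapp // hypothetical package for illustration

import (
	"github.com/tendermint/tendermint/crypto/merkle"
	tmmerkle "github.com/tendermint/tendermint/proto/crypto/merkle"
)

// proofOpsForValue wraps a single ValueOp for the given key into the ProofOps
// container that ProofRuntime.Verify expects to decode.
func proofOpsForValue(key []byte, proof *merkle.Proof) *tmmerkle.ProofOps {
	op := merkle.NewValueOp(key, proof) // encoded under merkle.ProofOpValue ("simple:v")
	return &tmmerkle.ProofOps{Ops: []tmmerkle.ProofOp{op.ProofOp()}}
}
```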
@@ -1,52 +0,0 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/gogo/protobuf/jsonpb"
|
||||
)
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// override JSON marshalling so we emit defaults (ie. disable omitempty)
|
||||
|
||||
var (
|
||||
jsonpbMarshaller = jsonpb.Marshaler{
|
||||
EnumsAsInts: true,
|
||||
EmitDefaults: true,
|
||||
}
|
||||
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
|
||||
)
|
||||
|
||||
func (r *ProofOp) MarshalJSON() ([]byte, error) {
|
||||
s, err := jsonpbMarshaller.MarshalToString(r)
|
||||
return []byte(s), err
|
||||
}
|
||||
|
||||
func (r *ProofOp) UnmarshalJSON(b []byte) error {
|
||||
reader := bytes.NewBuffer(b)
|
||||
return jsonpbUnmarshaller.Unmarshal(reader, r)
|
||||
}
|
||||
|
||||
func (r *Proof) MarshalJSON() ([]byte, error) {
|
||||
s, err := jsonpbMarshaller.MarshalToString(r)
|
||||
return []byte(s), err
|
||||
}
|
||||
|
||||
func (r *Proof) UnmarshalJSON(b []byte) error {
|
||||
reader := bytes.NewBuffer(b)
|
||||
return jsonpbUnmarshaller.Unmarshal(reader, r)
|
||||
}
|
||||
|
||||
// Some compile time assertions to ensure we don't
|
||||
// have accidental runtime surprises later on.
|
||||
// jsonEncodingRoundTripper ensures that asserted
|
||||
// interfaces implement both MarshalJSON and UnmarshalJSON
|
||||
|
||||
type jsonRoundTripper interface {
|
||||
json.Marshaler
|
||||
json.Unmarshaler
|
||||
}
|
||||
|
||||
var _ jsonRoundTripper = (*ProofOp)(nil)
|
||||
var _ jsonRoundTripper = (*Proof)(nil)
|
||||
@@ -1,235 +0,0 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
tmmerkle "github.com/tendermint/tendermint/proto/crypto/merkle"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxAunts is the maximum number of aunts that can be included in a SimpleProof.
|
||||
// This corresponds to a tree of size 2^100, which should be sufficient for all conceivable purposes.
|
||||
// This maximum helps prevent Denial-of-Service attacks by limitting the size of the proofs.
|
||||
MaxAunts = 100
|
||||
)
|
||||
|
||||
// SimpleProof represents a simple Merkle proof.
|
||||
// NOTE: The convention for proofs is to include leaf hashes but to
|
||||
// exclude the root hash.
|
||||
// This convention is implemented across IAVL range proofs as well.
|
||||
// Keep this consistent unless there's a very good reason to change
|
||||
// everything. This also affects the generalized proof system as
|
||||
// well.
|
||||
type SimpleProof struct {
|
||||
Total int64 `json:"total"` // Total number of items.
|
||||
Index int64 `json:"index"` // Index of item to prove.
|
||||
LeafHash []byte `json:"leaf_hash"` // Hash of item value.
|
||||
Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
|
||||
}
|
||||
|
||||
// SimpleProofsFromByteSlices computes inclusion proof for given items.
|
||||
// proofs[0] is the proof for items[0].
|
||||
func SimpleProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*SimpleProof) {
|
||||
trails, rootSPN := trailsFromByteSlices(items)
|
||||
rootHash = rootSPN.Hash
|
||||
proofs = make([]*SimpleProof, len(items))
|
||||
for i, trail := range trails {
|
||||
proofs[i] = &SimpleProof{
|
||||
Total: int64(len(items)),
|
||||
Index: int64(i),
|
||||
LeafHash: trail.Hash,
|
||||
Aunts: trail.FlattenAunts(),
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Verify that the SimpleProof proves the root hash.
|
||||
// Check sp.Index/sp.Total manually if needed
|
||||
func (sp *SimpleProof) Verify(rootHash []byte, leaf []byte) error {
|
||||
leafHash := leafHash(leaf)
|
||||
if sp.Total < 0 {
|
||||
return errors.New("proof total must be positive")
|
||||
}
|
||||
if sp.Index < 0 {
|
||||
return errors.New("proof index cannot be negative")
|
||||
}
|
||||
if !bytes.Equal(sp.LeafHash, leafHash) {
|
||||
return fmt.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash)
|
||||
}
|
||||
computedHash := sp.ComputeRootHash()
|
||||
if !bytes.Equal(computedHash, rootHash) {
|
||||
return fmt.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the root hash given a leaf hash. Does not verify the result.
|
||||
func (sp *SimpleProof) ComputeRootHash() []byte {
|
||||
return computeHashFromAunts(
|
||||
sp.Index,
|
||||
sp.Total,
|
||||
sp.LeafHash,
|
||||
sp.Aunts,
|
||||
)
|
||||
}
|
||||
|
||||
// String implements the stringer interface for SimpleProof.
|
||||
// It is a wrapper around StringIndented.
|
||||
func (sp *SimpleProof) String() string {
|
||||
return sp.StringIndented("")
|
||||
}
|
||||
|
||||
// StringIndented generates a canonical string representation of a SimpleProof.
|
||||
func (sp *SimpleProof) StringIndented(indent string) string {
|
||||
return fmt.Sprintf(`SimpleProof{
|
||||
%s Aunts: %X
|
||||
%s}`,
|
||||
indent, sp.Aunts,
|
||||
indent)
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation.
|
||||
// NOTE: it expects the LeafHash and the elements of Aunts to be of size tmhash.Size,
|
||||
// and it expects at most MaxAunts elements in Aunts.
|
||||
func (sp *SimpleProof) ValidateBasic() error {
|
||||
if sp.Total < 0 {
|
||||
return errors.New("negative Total")
|
||||
}
|
||||
if sp.Index < 0 {
|
||||
return errors.New("negative Index")
|
||||
}
|
||||
if len(sp.LeafHash) != tmhash.Size {
|
||||
return fmt.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash))
|
||||
}
|
||||
if len(sp.Aunts) > MaxAunts {
|
||||
return fmt.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts))
|
||||
}
|
||||
for i, auntHash := range sp.Aunts {
|
||||
if len(auntHash) != tmhash.Size {
|
||||
return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sp *SimpleProof) ToProto() *tmmerkle.SimpleProof {
|
||||
if sp == nil {
|
||||
return nil
|
||||
}
|
||||
pb := new(tmmerkle.SimpleProof)
|
||||
|
||||
pb.Total = sp.Total
|
||||
pb.Index = sp.Index
|
||||
pb.LeafHash = sp.LeafHash
|
||||
pb.Aunts = sp.Aunts
|
||||
|
||||
return pb
|
||||
}
|
||||
|
||||
func SimpleProofFromProto(pb *tmmerkle.SimpleProof) (*SimpleProof, error) {
|
||||
if pb == nil {
|
||||
return nil, errors.New("nil proof")
|
||||
}
|
||||
sp := new(SimpleProof)
|
||||
|
||||
sp.Total = pb.Total
|
||||
sp.Index = pb.Index
|
||||
sp.LeafHash = pb.LeafHash
|
||||
sp.Aunts = pb.Aunts
|
||||
|
||||
return sp, sp.ValidateBasic()
|
||||
}
|
||||
|
||||
// Use the leafHash and innerHashes to get the root merkle hash.
|
||||
// If the length of the innerHashes slice isn't exactly correct, the result is nil.
|
||||
// Recursive impl.
|
||||
func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) []byte {
|
||||
if index >= total || index < 0 || total <= 0 {
|
||||
return nil
|
||||
}
|
||||
switch total {
|
||||
case 0:
|
||||
panic("Cannot call computeHashFromAunts() with 0 total")
|
||||
case 1:
|
||||
if len(innerHashes) != 0 {
|
||||
return nil
|
||||
}
|
||||
return leafHash
|
||||
default:
|
||||
if len(innerHashes) == 0 {
|
||||
return nil
|
||||
}
|
||||
numLeft := getSplitPoint(total)
|
||||
if index < numLeft {
|
||||
leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||
if leftHash == nil {
|
||||
return nil
|
||||
}
|
||||
return innerHash(leftHash, innerHashes[len(innerHashes)-1])
|
||||
}
|
||||
rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||
if rightHash == nil {
|
||||
return nil
|
||||
}
|
||||
return innerHash(innerHashes[len(innerHashes)-1], rightHash)
|
||||
}
|
||||
}
|
||||
|
||||
// SimpleProofNode is a helper structure to construct merkle proof.
|
||||
// The node and the tree is thrown away afterwards.
|
||||
// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
|
||||
// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
|
||||
// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child.
|
||||
type SimpleProofNode struct {
|
||||
Hash []byte
|
||||
Parent *SimpleProofNode
|
||||
Left *SimpleProofNode // Left sibling (only one of Left,Right is set)
|
||||
Right *SimpleProofNode // Right sibling (only one of Left,Right is set)
|
||||
}
|
||||
|
||||
// FlattenAunts will return the inner hashes for the item corresponding to the leaf,
|
||||
// starting from a leaf SimpleProofNode.
|
||||
func (spn *SimpleProofNode) FlattenAunts() [][]byte {
|
||||
// Nonrecursive impl.
|
||||
innerHashes := [][]byte{}
|
||||
for spn != nil {
|
||||
switch {
|
||||
case spn.Left != nil:
|
||||
innerHashes = append(innerHashes, spn.Left.Hash)
|
||||
case spn.Right != nil:
|
||||
innerHashes = append(innerHashes, spn.Right.Hash)
|
||||
default:
|
||||
break
|
||||
}
|
||||
spn = spn.Parent
|
||||
}
|
||||
return innerHashes
|
||||
}
|
||||
|
||||
// trails[0].Hash is the leaf hash for items[0].
|
||||
// trails[i].Parent.Parent....Parent == root for all i.
|
||||
func trailsFromByteSlices(items [][]byte) (trails []*SimpleProofNode, root *SimpleProofNode) {
|
||||
// Recursive impl.
|
||||
switch len(items) {
|
||||
case 0:
|
||||
return nil, nil
|
||||
case 1:
|
||||
trail := &SimpleProofNode{leafHash(items[0]), nil, nil, nil}
|
||||
return []*SimpleProofNode{trail}, trail
|
||||
default:
|
||||
k := getSplitPoint(int64(len(items)))
|
||||
lefts, leftRoot := trailsFromByteSlices(items[:k])
|
||||
rights, rightRoot := trailsFromByteSlices(items[k:])
|
||||
rootHash := innerHash(leftRoot.Hash, rightRoot.Hash)
|
||||
root := &SimpleProofNode{rootHash, nil, nil, nil}
|
||||
leftRoot.Parent = root
|
||||
leftRoot.Right = rightRoot
|
||||
rightRoot.Parent = root
|
||||
rightRoot.Left = leftRoot
|
||||
return append(lefts, rights...), root
|
||||
}
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
func TestSimpleProofValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
malleateProof func(*SimpleProof)
|
||||
errStr string
|
||||
}{
|
||||
{"Good", func(sp *SimpleProof) {}, ""},
|
||||
{"Negative Total", func(sp *SimpleProof) { sp.Total = -1 }, "negative Total"},
|
||||
{"Negative Index", func(sp *SimpleProof) { sp.Index = -1 }, "negative Index"},
|
||||
{"Invalid LeafHash", func(sp *SimpleProof) { sp.LeafHash = make([]byte, 10) },
|
||||
"expected LeafHash size to be 32, got 10"},
|
||||
{"Too many Aunts", func(sp *SimpleProof) { sp.Aunts = make([][]byte, MaxAunts+1) },
|
||||
"expected no more than 100 aunts, got 101"},
|
||||
{"Invalid Aunt", func(sp *SimpleProof) { sp.Aunts[0] = make([]byte, 10) },
|
||||
"expected Aunts#0 size to be 32, got 10"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
_, proofs := SimpleProofsFromByteSlices([][]byte{
|
||||
[]byte("apple"),
|
||||
[]byte("watermelon"),
|
||||
[]byte("kiwi"),
|
||||
})
|
||||
tc.malleateProof(proofs[0])
|
||||
err := proofs[0].ValidateBasic()
|
||||
if tc.errStr != "" {
|
||||
assert.Contains(t, err.Error(), tc.errStr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleProofProtoBuf(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
ps1 *SimpleProof
|
||||
expPass bool
|
||||
}{
|
||||
{"failure empty", &SimpleProof{}, false},
|
||||
{"failure nil", nil, false},
|
||||
{"success",
|
||||
&SimpleProof{
|
||||
Total: 1,
|
||||
Index: 1,
|
||||
LeafHash: tmrand.Bytes(32),
|
||||
Aunts: [][]byte{tmrand.Bytes(32), tmrand.Bytes(32), tmrand.Bytes(32)},
|
||||
}, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
proto := tc.ps1.ToProto()
|
||||
p, err := SimpleProofFromProto(proto)
|
||||
if tc.expPass {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.ps1, p, tc.testName)
|
||||
} else {
|
||||
require.Error(t, err, tc.testName)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice,
|
||||
// HashFromByteSlices computes a Merkle tree where the leaves are the byte slice,
|
||||
// in the provided order.
|
||||
func SimpleHashFromByteSlices(items [][]byte) []byte {
|
||||
func HashFromByteSlices(items [][]byte) []byte {
|
||||
switch len(items) {
|
||||
case 0:
|
||||
return nil
|
||||
@@ -14,27 +14,27 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
|
||||
return leafHash(items[0])
|
||||
default:
|
||||
k := getSplitPoint(int64(len(items)))
|
||||
left := SimpleHashFromByteSlices(items[:k])
|
||||
right := SimpleHashFromByteSlices(items[k:])
|
||||
left := HashFromByteSlices(items[:k])
|
||||
right := HashFromByteSlices(items[k:])
|
||||
return innerHash(left, right)
|
||||
}
|
||||
}
|
||||
|
||||
// SimpleHashFromByteSliceIterative is an iterative alternative to
|
||||
// SimpleHashFromByteSlice motivated by potential performance improvements.
|
||||
// HashFromByteSliceIterative is an iterative alternative to
|
||||
// HashFromByteSlice motivated by potential performance improvements.
|
||||
// (#2611) had suggested that an iterative version of
|
||||
// SimpleHashFromByteSlice would be faster, presumably because
|
||||
// HashFromByteSlice would be faster, presumably because
|
||||
// we can envision some overhead accumulating from stack
|
||||
// frames and function calls. Additionally, a recursive algorithm risks
|
||||
// hitting the stack limit and causing a stack overflow should the tree
|
||||
// be too large.
|
||||
//
|
||||
// Provided here is an iterative alternative, a simple test to assert
|
||||
// Provided here is an iterative alternative, a test to assert
|
||||
// correctness and a benchmark. On the performance side, there appears to
|
||||
// be no overall difference:
|
||||
//
|
||||
// BenchmarkSimpleHashAlternatives/recursive-4 20000 77677 ns/op
|
||||
// BenchmarkSimpleHashAlternatives/iterative-4 20000 76802 ns/op
|
||||
// BenchmarkHashAlternatives/recursive-4 20000 77677 ns/op
|
||||
// BenchmarkHashAlternatives/iterative-4 20000 76802 ns/op
|
||||
//
|
||||
// On the surface it might seem that the additional overhead is due to
|
||||
// the different allocation patterns of the implementations. The recursive
|
||||
@@ -47,9 +47,9 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
|
||||
//
|
||||
// These preliminary results suggest:
|
||||
//
|
||||
// 1. The performance of the SimpleHashFromByteSlice is pretty good
|
||||
// 1. The performance of the HashFromByteSlice is pretty good
|
||||
// 2. Go has low overhead for recursive functions
|
||||
// 3. The performance of the SimpleHashFromByteSlice routine is dominated
|
||||
// 3. The performance of the HashFromByteSlice routine is dominated
|
||||
// by the actual hashing of data
|
||||
//
|
||||
// Although this work is in no way exhaustive, point #3 suggests that
|
||||
@@ -59,7 +59,7 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
|
||||
// Finally, considering that the recursive implementation is easier to
|
||||
// read, it might not be worthwhile to switch to a less intuitive
|
||||
// implementation for so little benefit.
|
||||
func SimpleHashFromByteSlicesIterative(input [][]byte) []byte {
|
||||
func HashFromByteSlicesIterative(input [][]byte) []byte {
|
||||
items := make([][]byte, len(input))
|
||||
|
||||
for i, leaf := range input {
|
||||
@@ -17,7 +17,7 @@ func (tI testItem) Hash() []byte {
|
||||
return []byte(tI)
|
||||
}
|
||||
|
||||
func TestSimpleProof(t *testing.T) {
|
||||
func TestProof(t *testing.T) {
|
||||
|
||||
total := 100
|
||||
|
||||
@@ -26,9 +26,9 @@ func TestSimpleProof(t *testing.T) {
|
||||
items[i] = testItem(tmrand.Bytes(tmhash.Size))
|
||||
}
|
||||
|
||||
rootHash := SimpleHashFromByteSlices(items)
|
||||
rootHash := HashFromByteSlices(items)
|
||||
|
||||
rootHash2, proofs := SimpleProofsFromByteSlices(items)
|
||||
rootHash2, proofs := ProofsFromByteSlices(items)
|
||||
|
||||
require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2)
|
||||
|
||||
@@ -70,7 +70,7 @@ func TestSimpleProof(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleHashAlternatives(t *testing.T) {
|
||||
func TestHashAlternatives(t *testing.T) {
|
||||
|
||||
total := 100
|
||||
|
||||
@@ -79,12 +79,12 @@ func TestSimpleHashAlternatives(t *testing.T) {
|
||||
items[i] = testItem(tmrand.Bytes(tmhash.Size))
|
||||
}
|
||||
|
||||
rootHash1 := SimpleHashFromByteSlicesIterative(items)
|
||||
rootHash2 := SimpleHashFromByteSlices(items)
|
||||
rootHash1 := HashFromByteSlicesIterative(items)
|
||||
rootHash2 := HashFromByteSlices(items)
|
||||
require.Equal(t, rootHash1, rootHash2, "Unmatched root hashes: %X vs %X", rootHash1, rootHash2)
|
||||
}
|
||||
|
||||
func BenchmarkSimpleHashAlternatives(b *testing.B) {
|
||||
func BenchmarkHashAlternatives(b *testing.B) {
|
||||
total := 100
|
||||
|
||||
items := make([][]byte, total)
|
||||
@@ -95,13 +95,13 @@ func BenchmarkSimpleHashAlternatives(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.Run("recursive", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = SimpleHashFromByteSlices(items)
|
||||
_ = HashFromByteSlices(items)
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("iterative", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = SimpleHashFromByteSlicesIterative(items)
|
||||
_ = HashFromByteSlicesIterative(items)
|
||||
}
|
||||
})
|
||||
}
|
||||