Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit b3c54dd

Browse files
evan-forbesliamsiadlerjohn
committedSep 16, 2021
Spec compliant merge shares (#261)
* start spec compliant share merging * refactor and finish unit testing * whoops * linter gods * fix initial changes and use constants * use constant * more polish * docs fix Co-authored-by: Ismail Khoffi <[email protected]> * review feedback: docs and out of range panic protection * review feedback: add panic protection from empty input * use constant instead of recalculating `ShareSize` Co-authored-by: John Adler <[email protected]> * don't redeclare existing var Co-authored-by: John Adler <[email protected]> * be more explicit with returned nil Co-authored-by: John Adler <[email protected]> * use constant instead of recalculating `ShareSize` Co-authored-by: John Adler <[email protected]> * review feedback: use consistent capitalization * stop accepting reserved namespaces as normal messages * use a descriptive var name for message length * linter and comparison fix * reorg tests, add test for parse delimiter, DataFromBlock and fix evidence marshal bug * catch error for linter * update test MakeShares to include length delimiters for the SHARE_RESERVED_BYTE * minor iteration change * refactor share splitting to fix bug * fix all bugs with third and final refactor * fix conflict * revert unnecessary changes * review feedback: better docs Co-authored-by: Ismail Khoffi <[email protected]> * reivew feedback: add comment for safeLen * review feedback: remove unnecessay comments * review feedback: split up share merging and splitting into their own files * review feedback: more descriptive var names * fix accidental change * add some constant docs * spelling error Co-authored-by: Ismail Khoffi <[email protected]> Co-authored-by: John Adler <[email protected]>
1 parent a5339b1 commit b3c54dd

File tree

4 files changed

+807
-140
lines changed

4 files changed

+807
-140
lines changed
 

‎types/share_merging.go

+332
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,332 @@
1+
package types
2+
3+
import (
4+
"bytes"
5+
"encoding/binary"
6+
"errors"
7+
8+
"github.com/gogo/protobuf/proto"
9+
tmbytes "github.com/lazyledger/lazyledger-core/libs/bytes"
10+
tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types"
11+
"github.com/lazyledger/rsmt2d"
12+
)
13+
14+
// DataFromSquare extracts block data from an extended data square.
15+
func DataFromSquare(eds *rsmt2d.ExtendedDataSquare) (Data, error) {
16+
originalWidth := eds.Width() / 2
17+
18+
// sort block data shares by namespace
19+
var (
20+
sortedTxShares [][]byte
21+
sortedISRShares [][]byte
22+
sortedEvdShares [][]byte
23+
sortedMsgShares [][]byte
24+
)
25+
26+
// iterate over each row index
27+
for x := uint(0); x < originalWidth; x++ {
28+
// iterate over each col index
29+
for y := uint(0); y < originalWidth; y++ {
30+
// sort the data of that share types via namespace
31+
share := eds.Cell(x, y)
32+
nid := share[:NamespaceSize]
33+
switch {
34+
case bytes.Equal(TxNamespaceID, nid):
35+
sortedTxShares = append(sortedTxShares, share)
36+
37+
case bytes.Equal(IntermediateStateRootsNamespaceID, nid):
38+
sortedISRShares = append(sortedISRShares, share)
39+
40+
case bytes.Equal(EvidenceNamespaceID, nid):
41+
sortedEvdShares = append(sortedEvdShares, share)
42+
43+
case bytes.Equal(TailPaddingNamespaceID, nid):
44+
continue
45+
46+
// ignore unused but reserved namespaces
47+
case bytes.Compare(nid, MaxReservedNamespace) < 1:
48+
continue
49+
50+
// every other namespaceID should be a message
51+
default:
52+
sortedMsgShares = append(sortedMsgShares, share)
53+
}
54+
}
55+
}
56+
57+
// pass the raw share data to their respective parsers
58+
txs, err := parseTxs(sortedTxShares)
59+
if err != nil {
60+
return Data{}, err
61+
}
62+
63+
isrs, err := parseISRs(sortedISRShares)
64+
if err != nil {
65+
return Data{}, err
66+
}
67+
68+
evd, err := parseEvd(sortedEvdShares)
69+
if err != nil {
70+
return Data{}, err
71+
}
72+
73+
msgs, err := parseMsgs(sortedMsgShares)
74+
if err != nil {
75+
return Data{}, err
76+
}
77+
78+
return Data{
79+
Txs: txs,
80+
IntermediateStateRoots: isrs,
81+
Evidence: evd,
82+
Messages: msgs,
83+
}, nil
84+
}
85+
86+
// parseTxs collects all of the transactions from the shares provided
87+
func parseTxs(shares [][]byte) (Txs, error) {
88+
// parse the sharse
89+
rawTxs, err := processContiguousShares(shares)
90+
if err != nil {
91+
return nil, err
92+
}
93+
94+
// convert to the Tx type
95+
txs := make(Txs, len(rawTxs))
96+
for i := 0; i < len(txs); i++ {
97+
txs[i] = Tx(rawTxs[i])
98+
}
99+
100+
return txs, nil
101+
}
102+
103+
// parseISRs collects all the intermediate state roots from the shares provided
104+
func parseISRs(shares [][]byte) (IntermediateStateRoots, error) {
105+
rawISRs, err := processContiguousShares(shares)
106+
if err != nil {
107+
return IntermediateStateRoots{}, err
108+
}
109+
110+
ISRs := make([]tmbytes.HexBytes, len(rawISRs))
111+
for i := 0; i < len(ISRs); i++ {
112+
ISRs[i] = rawISRs[i]
113+
}
114+
115+
return IntermediateStateRoots{RawRootsList: ISRs}, nil
116+
}
117+
118+
// parseEvd collects all evidence from the shares provided.
119+
func parseEvd(shares [][]byte) (EvidenceData, error) {
120+
// the raw data returned does not have length delimiters or namespaces and
121+
// is ready to be unmarshaled
122+
rawEvd, err := processContiguousShares(shares)
123+
if err != nil {
124+
return EvidenceData{}, err
125+
}
126+
127+
evdList := make(EvidenceList, len(rawEvd))
128+
129+
// parse into protobuf bytes
130+
for i := 0; i < len(rawEvd); i++ {
131+
// unmarshal the evidence
132+
var protoEvd tmproto.Evidence
133+
err := proto.Unmarshal(rawEvd[i], &protoEvd)
134+
if err != nil {
135+
return EvidenceData{}, err
136+
}
137+
evd, err := EvidenceFromProto(&protoEvd)
138+
if err != nil {
139+
return EvidenceData{}, err
140+
}
141+
142+
evdList[i] = evd
143+
}
144+
145+
return EvidenceData{Evidence: evdList}, nil
146+
}
147+
148+
// parseMsgs collects all messages from the shares provided
149+
func parseMsgs(shares [][]byte) (Messages, error) {
150+
msgList, err := parseMsgShares(shares)
151+
if err != nil {
152+
return MessagesEmpty, err
153+
}
154+
155+
return Messages{
156+
MessagesList: msgList,
157+
}, nil
158+
}
159+
160+
// processContiguousShares takes raw shares and extracts out transactions,
// intermediate state roots, or evidence. The returned [][]byte do not have
// namespaces or length delimiters and are ready to be unmarshalled
func processContiguousShares(shares [][]byte) (txs [][]byte, err error) {
	// nothing to merge: return an explicit nil rather than an empty slice
	if len(shares) == 0 {
		return nil, nil
	}

	// shareStack carries the cursor/accumulator state across the recursive peel
	ss := newShareStack(shares)
	return ss.resolve()
}
171+
172+
// shareStack holds the mutable state used by peel while merging contiguous
// shares back into their original chunks of data.
type shareStack struct {
	// shares are the raw input shares being merged
	shares [][]byte
	// txLen is the declared length of the chunk currently being accumulated
	txLen uint64
	// txs collects the fully reassembled chunks
	txs [][]byte
	// cursor indexes the share currently being consumed
	cursor int
}
179+
180+
func newShareStack(shares [][]byte) *shareStack {
181+
return &shareStack{shares: shares}
182+
}
183+
184+
// resolve peels the stack of shares back into their original chunks of data
// and returns them. The namespace and reserved bytes of the first share are
// stripped before peeling begins.
func (ss *shareStack) resolve() ([][]byte, error) {
	if len(ss.shares) == 0 {
		return nil, nil
	}
	err := ss.peel(ss.shares[0][NamespaceSize+ShareReservedBytes:], true)
	return ss.txs, err
}
191+
192+
// peel recursively parses each chunk of data (either a transaction,
// intermediate state root, or evidence) and adds it to the underlying slice of data.
// When delimited is true, share is expected to begin with a varint length
// delimiter for the next chunk; when false, share continues a chunk whose
// length was already recorded in ss.txLen.
func (ss *shareStack) peel(share []byte, delimited bool) (err error) {
	if delimited {
		var txLen uint64
		share, txLen, err = parseDelimiter(share)
		if err != nil {
			return err
		}
		// a zero-length delimiter means no further data follows (padding)
		if txLen == 0 {
			return nil
		}
		ss.txLen = txLen
	}
	// safeLen describes the point in the share where it can be safely split. If
	// split beyond this point, it is possible to break apart a length
	// delimiter, which will result in incorrect share merging
	safeLen := len(share) - binary.MaxVarintLen64
	if safeLen < 0 {
		safeLen = 0
	}
	// the chunk ends before the unsafe tail: slice it off and keep peeling;
	// the remainder starts with a fresh delimiter
	if ss.txLen <= uint64(safeLen) {
		ss.txs = append(ss.txs, share[:ss.txLen])
		share = share[ss.txLen:]
		return ss.peel(share, true)
	}
	// add the next share to the current share to continue merging if possible
	if len(ss.shares) > ss.cursor+1 {
		ss.cursor++
		share := append(share, ss.shares[ss.cursor][NamespaceSize+ShareReservedBytes:]...)
		return ss.peel(share, false)
	}
	// collect any remaining data
	if ss.txLen <= uint64(len(share)) {
		ss.txs = append(ss.txs, share[:ss.txLen])
		share = share[ss.txLen:]
		return ss.peel(share, true)
	}
	return errors.New("failure to parse block data: transaction length exceeded data length")
}
232+
233+
// parseMsgShares iterates through raw shares and separates the contiguous chunks
// of data. It is only used for Messages, i.e. shares with a non-reserved namespace.
func parseMsgShares(shares [][]byte) ([]Message, error) {
	if len(shares) == 0 {
		return nil, nil
	}

	// set the first nid and current share
	nid := shares[0][:NamespaceSize]
	currentShare := shares[0][NamespaceSize:]

	// find and remove the msg len delimiter
	currentShare, msgLen, err := parseDelimiter(currentShare)
	if err != nil {
		return nil, err
	}

	var msgs []Message
	// nextMsg advances cursor through the shares, emitting one parsed message
	// per call along with the state needed for the following call
	for cursor := uint64(0); cursor < uint64(len(shares)); {
		var msg Message
		currentShare, nid, cursor, msgLen, msg, err = nextMsg(
			shares,
			currentShare,
			nid,
			cursor,
			msgLen,
		)
		if err != nil {
			return nil, err
		}
		// defensively skip placeholder results that carry no data
		if msg.Data != nil {
			msgs = append(msgs, msg)
		}
	}

	return msgs, nil
}
270+
271+
// nextMsg parses the next message from current, pulling in additional shares
// as needed, and returns the updated parsing state: the remaining bytes of
// the share being parsed, the namespace ID and declared length of the next
// message, the advanced share cursor, and the parsed message itself.
func nextMsg(
	shares [][]byte,
	current,
	nid []byte,
	cursor,
	msgLen uint64,
) ([]byte, []byte, uint64, uint64, Message, error) {
	switch {
	// the message uses all of the current share data and at least some of the
	// next share
	case msgLen > uint64(len(current)):
		// add the next share to the current one and try again
		// NOTE(review): shares[cursor] is accessed without a bounds check; a
		// malformed final share whose delimiter overstates msgLen would panic
		// here — confirm callers only pass well-formed share sets
		cursor++
		current = append(current, shares[cursor][NamespaceSize:]...)
		return nextMsg(shares, current, nid, cursor, msgLen)

	// the msg we're looking for is contained in the current share
	case msgLen <= uint64(len(current)):
		msg := Message{nid, current[:msgLen]}
		cursor++

		// call it a day if the work is done
		if cursor >= uint64(len(shares)) {
			return nil, nil, cursor, 0, msg, nil
		}

		// prime the state for the next message from the next share
		nextNid := shares[cursor][:NamespaceSize]
		next, msgLen, err := parseDelimiter(shares[cursor][NamespaceSize:])
		return next, nextNid, cursor, msgLen, msg, err
	}
	// this code is unreachable but the compiler doesn't know that
	return nil, nil, 0, 0, MessageEmpty, nil
}
304+
305+
// parseDelimiter finds and returns the length delimiter of the message provided
// while also removing the delimiter bytes from the input
func parseDelimiter(input []byte) ([]byte, uint64, error) {
	// empty input carries no delimiter: report length 0 with no error
	if len(input) == 0 {
		return input, 0, nil
	}

	// cap the read window at MaxVarintLen64 bytes (or the whole input if shorter)
	l := binary.MaxVarintLen64
	if len(input) < binary.MaxVarintLen64 {
		l = len(input)
	}

	// zero padding keeps ReadUvarint from hitting EOF on short inputs, since a
	// zero byte always terminates a varint
	delimiter := zeroPadIfNecessary(input[:l], binary.MaxVarintLen64)

	// read the length of the message
	r := bytes.NewBuffer(delimiter)
	msgLen, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, 0, err
	}

	// calculate the number of bytes used by the delimiter
	// NOTE(review): re-encoding msgLen to recover its byte count assumes the
	// delimiter was minimally encoded — confirm writers always use
	// binary.PutUvarint
	lenBuf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(lenBuf, msgLen)

	// return the input without the length delimiter
	return input[n:], msgLen, nil
}

‎types/share_splitting.go

+137
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,137 @@
1+
package types
2+
3+
import (
4+
"bytes"
5+
6+
"github.com/lazyledger/nmt/namespace"
7+
)
8+
9+
// appendToShares appends raw data as shares.
10+
// Used for messages.
11+
func appendToShares(shares []NamespacedShare, nid namespace.ID, rawData []byte) []NamespacedShare {
12+
if len(rawData) <= MsgShareSize {
13+
rawShare := append(append(
14+
make([]byte, 0, len(nid)+len(rawData)),
15+
nid...),
16+
rawData...,
17+
)
18+
paddedShare := zeroPadIfNecessary(rawShare, ShareSize)
19+
share := NamespacedShare{paddedShare, nid}
20+
shares = append(shares, share)
21+
} else { // len(rawData) > MsgShareSize
22+
shares = append(shares, splitMessage(rawData, nid)...)
23+
}
24+
return shares
25+
}
26+
27+
// splitMessage breaks the data in a message into the minimum number of
28+
// namespaced shares
29+
func splitMessage(rawData []byte, nid namespace.ID) []NamespacedShare {
30+
shares := make([]NamespacedShare, 0)
31+
firstRawShare := append(append(
32+
make([]byte, 0, ShareSize),
33+
nid...),
34+
rawData[:MsgShareSize]...,
35+
)
36+
shares = append(shares, NamespacedShare{firstRawShare, nid})
37+
rawData = rawData[MsgShareSize:]
38+
for len(rawData) > 0 {
39+
shareSizeOrLen := min(MsgShareSize, len(rawData))
40+
rawShare := append(append(
41+
make([]byte, 0, ShareSize),
42+
nid...),
43+
rawData[:shareSizeOrLen]...,
44+
)
45+
paddedShare := zeroPadIfNecessary(rawShare, ShareSize)
46+
share := NamespacedShare{paddedShare, nid}
47+
shares = append(shares, share)
48+
rawData = rawData[shareSizeOrLen:]
49+
}
50+
return shares
51+
}
52+
53+
// splitContiguous splits multiple raw data contiguously as shares.
54+
// Used for transactions, intermediate state roots, and evidence.
55+
func splitContiguous(nid namespace.ID, rawDatas [][]byte) []NamespacedShare {
56+
shares := make([]NamespacedShare, 0)
57+
// Index into the outer slice of rawDatas
58+
outerIndex := 0
59+
// Index into the inner slice of rawDatas
60+
innerIndex := 0
61+
for outerIndex < len(rawDatas) {
62+
var rawData []byte
63+
startIndex := 0
64+
rawData, outerIndex, innerIndex, startIndex = getNextChunk(rawDatas, outerIndex, innerIndex, TxShareSize)
65+
rawShare := append(append(append(
66+
make([]byte, 0, len(nid)+1+len(rawData)),
67+
nid...),
68+
byte(startIndex)),
69+
rawData...)
70+
paddedShare := zeroPadIfNecessary(rawShare, ShareSize)
71+
share := NamespacedShare{paddedShare, nid}
72+
shares = append(shares, share)
73+
}
74+
return shares
75+
}
76+
77+
// getNextChunk gets the next chunk for contiguous shares
// Precondition: none of the slices in rawDatas is zero-length
// This precondition should always hold at this point since zero-length txs are simply invalid.
// It returns the chunk (up to width bytes, possibly spanning several elements
// of rawDatas), the updated outer/inner cursors, and startIndex — the offset
// within the share at which a new data segment begins (0 when the chunk only
// continues a segment started in an earlier share).
func getNextChunk(rawDatas [][]byte, outerIndex int, innerIndex int, width int) ([]byte, int, int, int) {
	rawData := make([]byte, 0, width)
	startIndex := 0
	firstBytesToFetch := 0

	curIndex := 0
	for curIndex < width && outerIndex < len(rawDatas) {
		// take as much of the current element as still fits in the chunk
		bytesToFetch := min(len(rawDatas[outerIndex])-innerIndex, width-curIndex)
		if bytesToFetch == 0 {
			panic("zero-length contiguous share data is invalid")
		}
		if curIndex == 0 {
			firstBytesToFetch = bytesToFetch
		}
		// If we've already placed some data in this chunk, that means
		// a new data segment begins
		if curIndex != 0 {
			// Offset by the fixed reserved bytes at the beginning of the share
			startIndex = firstBytesToFetch + NamespaceSize + ShareReservedBytes
		}
		rawData = append(rawData, rawDatas[outerIndex][innerIndex:innerIndex+bytesToFetch]...)
		innerIndex += bytesToFetch
		// advance to the next element once the current one is exhausted
		if innerIndex >= len(rawDatas[outerIndex]) {
			innerIndex = 0
			outerIndex++
		}
		curIndex += bytesToFetch
	}

	return rawData, outerIndex, innerIndex, startIndex
}
111+
112+
func GenerateTailPaddingShares(n int, shareWidth int) NamespacedShares {
113+
shares := make([]NamespacedShare, n)
114+
for i := 0; i < n; i++ {
115+
shares[i] = NamespacedShare{bytes.Repeat([]byte{0}, shareWidth), TailPaddingNamespaceID}
116+
}
117+
return shares
118+
}
119+
120+
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
126+
127+
// zeroPadIfNecessary appends zero bytes to share until it is width bytes
// long. Inputs already at least width bytes long are returned unchanged.
func zeroPadIfNecessary(share []byte, width int) []byte {
	missing := width - len(share)
	if missing <= 0 {
		return share
	}
	return append(share, bytes.Repeat([]byte{0}, missing)...)
}

‎types/shares.go

-132
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package types
22

33
import (
4-
"bytes"
54
"encoding/binary"
65

76
"github.com/lazyledger/nmt/namespace"
@@ -42,7 +41,6 @@ func (tx Tx) MarshalDelimited() ([]byte, error) {
4241
lenBuf := make([]byte, binary.MaxVarintLen64)
4342
length := uint64(len(tx))
4443
n := binary.PutUvarint(lenBuf, length)
45-
4644
return append(lenBuf[:n], tx...), nil
4745
}
4846

@@ -54,133 +52,3 @@ func (m Message) MarshalDelimited() ([]byte, error) {
5452
n := binary.PutUvarint(lenBuf, length)
5553
return append(lenBuf[:n], m.Data...), nil
5654
}
57-
58-
// appendToShares appends raw data as shares.
59-
// Used for messages.
60-
func appendToShares(shares []NamespacedShare, nid namespace.ID, rawData []byte) []NamespacedShare {
61-
if len(rawData) <= MsgShareSize {
62-
rawShare := append(append(
63-
make([]byte, 0, len(nid)+len(rawData)),
64-
nid...),
65-
rawData...,
66-
)
67-
paddedShare := zeroPadIfNecessary(rawShare, ShareSize)
68-
share := NamespacedShare{paddedShare, nid}
69-
shares = append(shares, share)
70-
} else { // len(rawData) > MsgShareSize
71-
shares = append(shares, split(rawData, nid)...)
72-
}
73-
return shares
74-
}
75-
76-
// splitContiguous splits multiple raw data contiguously as shares.
77-
// Used for transactions, intermediate state roots, and evidence.
78-
func splitContiguous(nid namespace.ID, rawDatas [][]byte) []NamespacedShare {
79-
shares := make([]NamespacedShare, 0)
80-
// Index into the outer slice of rawDatas
81-
outerIndex := 0
82-
// Index into the inner slice of rawDatas
83-
innerIndex := 0
84-
for outerIndex < len(rawDatas) {
85-
var rawData []byte
86-
startIndex := 0
87-
rawData, outerIndex, innerIndex, startIndex = getNextChunk(rawDatas, outerIndex, innerIndex, TxShareSize)
88-
rawShare := append(append(append(
89-
make([]byte, 0, len(nid)+1+len(rawData)),
90-
nid...),
91-
byte(startIndex)),
92-
rawData...)
93-
paddedShare := zeroPadIfNecessary(rawShare, ShareSize)
94-
share := NamespacedShare{paddedShare, nid}
95-
shares = append(shares, share)
96-
}
97-
return shares
98-
}
99-
100-
// TODO(ismail): implement corresponding merge method for clients requesting
101-
// shares for a particular namespace
102-
func split(rawData []byte, nid namespace.ID) []NamespacedShare {
103-
shares := make([]NamespacedShare, 0)
104-
firstRawShare := append(append(
105-
make([]byte, 0, len(nid)+len(rawData[:MsgShareSize])),
106-
nid...),
107-
rawData[:MsgShareSize]...,
108-
)
109-
shares = append(shares, NamespacedShare{firstRawShare, nid})
110-
rawData = rawData[MsgShareSize:]
111-
for len(rawData) > 0 {
112-
shareSizeOrLen := min(MsgShareSize, len(rawData))
113-
rawShare := append(append(
114-
make([]byte, 0, len(nid)+1+len(rawData[:shareSizeOrLen])),
115-
nid...),
116-
rawData[:shareSizeOrLen]...,
117-
)
118-
paddedShare := zeroPadIfNecessary(rawShare, ShareSize)
119-
share := NamespacedShare{paddedShare, nid}
120-
shares = append(shares, share)
121-
rawData = rawData[shareSizeOrLen:]
122-
}
123-
return shares
124-
}
125-
126-
// getNextChunk gets the next chunk for contiguous shares
127-
// Precondition: none of the slices in rawDatas is zero-length
128-
// This precondition should always hold at this point since zero-length txs are simply invalid.
129-
func getNextChunk(rawDatas [][]byte, outerIndex int, innerIndex int, width int) ([]byte, int, int, int) {
130-
rawData := make([]byte, 0, width)
131-
startIndex := 0
132-
firstBytesToFetch := 0
133-
134-
curIndex := 0
135-
for curIndex < width && outerIndex < len(rawDatas) {
136-
bytesToFetch := min(len(rawDatas[outerIndex])-innerIndex, width-curIndex)
137-
if bytesToFetch == 0 {
138-
panic("zero-length contiguous share data is invalid")
139-
}
140-
if curIndex == 0 {
141-
firstBytesToFetch = bytesToFetch
142-
}
143-
// If we've already placed some data in this chunk, that means
144-
// a new data segment begins
145-
if curIndex != 0 {
146-
// Offset by the fixed reserved bytes at the beginning of the share
147-
startIndex = firstBytesToFetch + NamespaceSize + ShareReservedBytes
148-
}
149-
rawData = append(rawData, rawDatas[outerIndex][innerIndex:innerIndex+bytesToFetch]...)
150-
innerIndex += bytesToFetch
151-
if innerIndex >= len(rawDatas[outerIndex]) {
152-
innerIndex = 0
153-
outerIndex++
154-
}
155-
curIndex += bytesToFetch
156-
}
157-
158-
return rawData, outerIndex, innerIndex, startIndex
159-
}
160-
161-
func GenerateTailPaddingShares(n int, shareWidth int) NamespacedShares {
162-
shares := make([]NamespacedShare, n)
163-
for i := 0; i < n; i++ {
164-
shares[i] = NamespacedShare{bytes.Repeat([]byte{0}, shareWidth), TailPaddingNamespaceID}
165-
}
166-
return shares
167-
}
168-
169-
func min(a, b int) int {
170-
if a <= b {
171-
return a
172-
}
173-
return b
174-
}
175-
176-
func zeroPadIfNecessary(share []byte, width int) []byte {
177-
oldLen := len(share)
178-
if oldLen < width {
179-
missingBytes := width - oldLen
180-
padByte := []byte{0}
181-
padding := bytes.Repeat(padByte, missingBytes)
182-
share = append(share, padding...)
183-
return share
184-
}
185-
return share
186-
}

‎types/shares_test.go

+338-8
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,18 @@ package types
22

33
import (
44
"bytes"
5+
"context"
6+
"fmt"
7+
"math"
8+
"math/rand"
59
"reflect"
610
"testing"
11+
"time"
712

13+
tmbytes "github.com/lazyledger/lazyledger-core/libs/bytes"
814
"github.com/lazyledger/lazyledger-core/libs/protoio"
915
"github.com/lazyledger/nmt/namespace"
16+
"github.com/lazyledger/rsmt2d"
1017
"github.com/stretchr/testify/assert"
1118
)
1219

@@ -26,7 +33,11 @@ func TestMakeShares(t *testing.T) {
2633
VoteA: vote1,
2734
VoteB: vote2,
2835
}
29-
testEvidenceBytes, err := protoio.MarshalDelimited(testEvidence.ToProto())
36+
protoTestEvidence, err := EvidenceToProto(testEvidence)
37+
if err != nil {
38+
t.Error(err)
39+
}
40+
testEvidenceBytes, err := protoio.MarshalDelimited(protoTestEvidence)
3041
largeTx := Tx(bytes.Repeat([]byte("large Tx"), 50))
3142
largeTxLenDelimited, _ := largeTx.MarshalDelimited()
3243
smolTx := Tx("small Tx")
@@ -194,14 +205,333 @@ func Test_appendToSharesOverwrite(t *testing.T) {
194205
assert.Equal(t, extraCopy, []byte(newShare.Share[:MsgShareSize]))
195206
}
196207

197-
func generateRandomNamespacedShares(count, leafSize int) []NamespacedShare {
198-
shares := generateRandNamespacedRawData(count, NamespaceSize, leafSize)
199-
nsShares := make(NamespacedShares, count)
208+
// TestDataFromSquare round-trips randomly generated block data through share
// computation, extended-square erasure coding, and DataFromSquare, asserting
// the recovered data equals the input.
func TestDataFromSquare(t *testing.T) {
	type test struct {
		name     string
		txCount  int
		isrCount int
		evdCount int
		msgCount int
		maxSize  int // max size of each tx or msg
	}

	tests := []test{
		{"one of each random small size", 1, 1, 1, 1, 40},
		{"one of each random large size", 1, 1, 1, 1, 400},
		{"many of each random large size", 10, 10, 10, 10, 40},
		{"many of each random large size", 10, 10, 10, 10, 400},
		{"only transactions", 10, 0, 0, 0, 400},
		{"only intermediate state roots", 0, 10, 0, 0, 400},
		{"only evidence", 0, 0, 10, 0, 400},
		{"only messages", 0, 0, 0, 10, 400},
	}

	for _, tc := range tests {
		tc := tc

		t.Run(tc.name, func(t *testing.T) {
			// generate random data
			data := generateRandomBlockData(
				t,
				tc.txCount,
				tc.isrCount,
				tc.evdCount,
				tc.msgCount,
				tc.maxSize,
			)

			shares, _ := data.ComputeShares()
			rawShares := shares.RawShares()

			eds, err := rsmt2d.ComputeExtendedDataSquare(rawShares, rsmt2d.RSGF8, rsmt2d.NewDefaultTree)
			// NOTE(review): t.Error does not stop the test, yet eds is used
			// below — consider t.Fatal here
			if err != nil {
				t.Error(err)
			}

			res, err := DataFromSquare(eds)
			if err != nil {
				t.Fatal(err)
			}

			// we have to compare the evidence by string because the
			// timestamps differ not by actual time represented, but by
			// internals see https://github.com/stretchr/testify/issues/666
			for i := 0; i < len(data.Evidence.Evidence); i++ {
				inputEvidence := data.Evidence.Evidence[i].(*DuplicateVoteEvidence)
				resultEvidence := res.Evidence.Evidence[i].(*DuplicateVoteEvidence)
				assert.Equal(t, inputEvidence.String(), resultEvidence.String())
			}

			// compare the original to the result w/o the evidence
			data.Evidence = EvidenceData{}
			res.Evidence = EvidenceData{}

			assert.Equal(t, data, res)
		})
	}
}
273+
274+
// TestFuzz_DataFromSquare repeatedly re-runs TestDataFromSquare (which uses
// fresh random data each run) for one minute. Skipped by default.
func TestFuzz_DataFromSquare(t *testing.T) {
	t.Skip()
	// run randomly generated block data through TestDataFromSquare for a minute
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	for {
		select {
		case <-ctx.Done():
			return
		default:
			TestDataFromSquare(t)
		}
	}
}
288+
289+
// Test_processContiguousShares round-trips transactions through share
// splitting and merging, asserting the parsed bytes match the originals.
func Test_processContiguousShares(t *testing.T) {
	// exactTxShareSize is the length of tx that will fit exactly into a single
	// share, accounting for namespace id and the length delimiter prepended to
	// each tx
	const exactTxShareSize = TxShareSize - 1

	type test struct {
		name    string
		txSize  int
		txCount int
	}

	// each test is run twice, once using txSize as an exact size, and again
	// using it as a cap for randomly sized txs
	tests := []test{
		{"single small tx", 10, 1},
		{"many small txs", 10, 10},
		{"single big tx", 1000, 1},
		{"many big txs", 1000, 10},
		{"single exact size tx", exactTxShareSize, 1},
		{"many exact size txs", exactTxShareSize, 10},
	}

	for _, tc := range tests {
		tc := tc

		// run the tests with identically sized txs
		t.Run(fmt.Sprintf("%s idendically sized ", tc.name), func(t *testing.T) {
			txs := generateRandomContiguousShares(tc.txCount, tc.txSize)

			shares := txs.splitIntoShares()

			parsedTxs, err := processContiguousShares(shares.RawShares())
			if err != nil {
				t.Error(err)
			}

			// check that the data parsed is identical
			for i := 0; i < len(txs); i++ {
				assert.Equal(t, []byte(txs[i]), parsedTxs[i])
			}
		})

		// run the same tests using randomly sized txs with caps of tc.txSize
		t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
			txs := generateRandomlySizedContiguousShares(tc.txCount, tc.txSize)

			shares := txs.splitIntoShares()

			parsedTxs, err := processContiguousShares(shares.RawShares())
			if err != nil {
				t.Error(err)
			}

			// check that the data parsed is identical to the original
			for i := 0; i < len(txs); i++ {
				assert.Equal(t, []byte(txs[i]), parsedTxs[i])
			}
		})
	}
}
350+
351+
// TestFuzz_processContiguousShares repeatedly re-runs
// Test_processContiguousShares (fresh random data each run) for one minute.
// Skipped by default.
func TestFuzz_processContiguousShares(t *testing.T) {
	t.Skip()
	// run random shares through processContiguousShares for a minute
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	for {
		select {
		case <-ctx.Done():
			return
		default:
			Test_processContiguousShares(t)
		}
	}
}
365+
366+
// Test_parseMsgShares round-trips messages through share splitting and
// parseMsgShares, asserting namespaces and data survive intact.
func Test_parseMsgShares(t *testing.T) {
	// exactMsgShareSize is the length of message that will fit exactly into a single
	// share, accounting for namespace id and the length delimiter prepended to
	// each message
	const exactMsgShareSize = MsgShareSize - 2

	type test struct {
		name     string
		msgSize  int
		msgCount int
	}

	// each test is run twice, once using msgSize as an exact size, and again
	// using it as a cap for randomly sized leaves
	tests := []test{
		{"single small msg", 1, 1},
		{"many small msgs", 4, 10},
		{"single big msg", 1000, 1},
		{"many big msgs", 1000, 10},
		{"single exact size msg", exactMsgShareSize, 1},
		{"many exact size msgs", exactMsgShareSize, 10},
	}

	for _, tc := range tests {
		tc := tc

		// run the tests with identically sized messages
		t.Run(fmt.Sprintf("%s idendically sized ", tc.name), func(t *testing.T) {
			rawmsgs := make([]Message, tc.msgCount)
			for i := 0; i < tc.msgCount; i++ {
				rawmsgs[i] = generateRandomMessage(tc.msgSize)
			}
			msgs := Messages{MessagesList: rawmsgs}

			shares := msgs.splitIntoShares()

			parsedMsgs, err := parseMsgShares(shares.RawShares())
			if err != nil {
				t.Error(err)
			}

			// check that the namespaces and data are the same
			for i := 0; i < len(msgs.MessagesList); i++ {
				assert.Equal(t, msgs.MessagesList[i].NamespaceID, parsedMsgs[i].NamespaceID)
				assert.Equal(t, msgs.MessagesList[i].Data, parsedMsgs[i].Data)
			}
		})

		// run the same tests using randomly sized messages with caps of tc.msgSize
		t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
			msgs := generateRandomlySizedMessages(tc.msgCount, tc.msgSize)
			shares := msgs.splitIntoShares()

			parsedMsgs, err := parseMsgShares(shares.RawShares())
			if err != nil {
				t.Error(err)
			}

			// check that the namespaces and data are the same
			for i := 0; i < len(msgs.MessagesList); i++ {
				assert.Equal(t, msgs.MessagesList[i].NamespaceID, parsedMsgs[i].NamespaceID)
				assert.Equal(t, msgs.MessagesList[i].Data, parsedMsgs[i].Data)
			}
		})
	}
}
432+
433+
func Test_parseDelimiter(t *testing.T) {
434+
for i := uint64(0); i < 100; i++ {
435+
tx := generateRandomContiguousShares(1, int(i))[0]
436+
input, err := tx.MarshalDelimited()
437+
if err != nil {
438+
panic(err)
439+
}
440+
res, txLen, err := parseDelimiter(input)
441+
if err != nil {
442+
panic(err)
443+
}
444+
assert.Equal(t, i, txLen)
445+
assert.Equal(t, []byte(tx), res)
446+
}
447+
}
448+
449+
// ////////////////////////////
450+
// Test data generation
451+
// ////////////////////////////
452+
453+
func generateRandomBlockData(t *testing.T, txCount, isrCount, evdCount, msgCount, maxSize int) Data {
454+
var out Data
455+
out.Txs = generateRandomlySizedContiguousShares(txCount, maxSize)
456+
out.IntermediateStateRoots = generateRandomISR(isrCount)
457+
out.Evidence = generateIdenticalEvidence(t, evdCount)
458+
out.Messages = generateRandomlySizedMessages(msgCount, maxSize)
459+
return out
460+
}
461+
462+
func generateRandomlySizedContiguousShares(count, max int) Txs {
463+
txs := make(Txs, count)
464+
for i := 0; i < count; i++ {
465+
size := rand.Intn(max)
466+
if size == 0 {
467+
size = 1
468+
}
469+
txs[i] = generateRandomContiguousShares(1, size)[0]
470+
}
471+
return txs
472+
}
473+
474+
func generateRandomContiguousShares(count, size int) Txs {
475+
txs := make(Txs, count)
476+
for i := 0; i < count; i++ {
477+
tx := make([]byte, size)
478+
_, err := rand.Read(tx)
479+
if err != nil {
480+
panic(err)
481+
}
482+
txs[i] = Tx(tx)
483+
}
484+
return txs
485+
}
486+
487+
func generateRandomISR(count int) IntermediateStateRoots {
488+
roots := make([]tmbytes.HexBytes, count)
489+
for i := 0; i < count; i++ {
490+
roots[i] = tmbytes.HexBytes(generateRandomContiguousShares(1, 32)[0])
491+
}
492+
return IntermediateStateRoots{RawRootsList: roots}
493+
}
494+
495+
func generateIdenticalEvidence(t *testing.T, count int) EvidenceData {
496+
evidence := make([]Evidence, count)
497+
for i := 0; i < count; i++ {
498+
ev := NewMockDuplicateVoteEvidence(math.MaxInt64, time.Now(), "chainID")
499+
evidence[i] = ev
500+
}
501+
return EvidenceData{Evidence: EvidenceList(evidence)}
502+
}
503+
504+
func generateRandomlySizedMessages(count, maxMsgSize int) Messages {
505+
msgs := make([]Message, count)
506+
for i := 0; i < count; i++ {
507+
msgs[i] = generateRandomMessage(rand.Intn(maxMsgSize))
508+
}
509+
510+
// this is just to let us use assert.Equal
511+
if count == 0 {
512+
msgs = nil
513+
}
514+
515+
return Messages{MessagesList: msgs}
516+
}
517+
518+
func generateRandomMessage(size int) Message {
519+
share := generateRandomNamespacedShares(1, size)[0]
520+
msg := Message{
521+
NamespaceID: share.NamespaceID(),
522+
Data: share.Data(),
523+
}
524+
return msg
525+
}
526+
527+
// generateRandomNamespacedShares returns count shares of random message data
// of msgSize bytes each, produced by round-tripping random namespaced raw
// data through the message splitting logic.
func generateRandomNamespacedShares(count, msgSize int) NamespacedShares {
	shares := generateRandNamespacedRawData(count, NamespaceSize, msgSize)
	msgs := make([]Message, count)
	for i, s := range shares {
		msgs[i] = Message{
			Data:        s[NamespaceSize:],
			NamespaceID: s[:NamespaceSize],
		}
	}
	return Messages{MessagesList: msgs}.splitIntoShares()
}

0 commit comments

Comments
 (0)
Please sign in to comment.