-
Notifications
You must be signed in to change notification settings - Fork 311
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
prototype: compact blocks #1191
base: v0.34.x-celestia
Are you sure you want to change the base?
Changes from 1 commit
bf6edc0
211694b
567cf3f
22639ab
d4f62fd
27a6fe3
f0e972e
1cb6666
4874e77
187d26c
0f529f6
64d3f6d
c530d1e
3e6c075
507f3d2
9ab18d4
8f63838
71b752e
c2e6331
1cbd361
1fe8522
1846523
1c4f76e
9ad05ed
93774a7
6f8f70a
82a8958
d1b3c4d
8a5d797
e937057
74cac47
dfa4382
a56bbb7
905ba36
2273f83
513949c
567a281
dcea04c
c5fd230
b4ebb57
33004a3
1bb12bf
b1086d2
aab522a
d46ad2d
5aefb63
178f81a
aaf89cf
c42c673
9ff3319
7578e10
34774cd
955714f
5ab5034
ec3335c
bc72dc4
5b25a7c
cf354c5
dddd61e
92e20de
0a5adc0
99b4727
7218a79
bc85fc2
79947c6
a801fa3
9f48af6
594e303
7b76868
caeb9ef
91f1703
a55e1c9
77db525
f170aad
a31a8fe
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,263 @@ | ||
package cat | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"sync" | ||
"time" | ||
|
||
"github.com/tendermint/tendermint/types" | ||
) | ||
|
||
// FetchTxsFromKeys is called upon by consensus upon receiving a complete compact block. | ||
// The method iterates through the keys in the compact block. For the transactions it | ||
// already has it adds them to a list. For the transactions that are missing it uses a | ||
// block request to track and retrieve them. Once all transactions are retrieved, it returns | ||
// the complete set to the consensus engine. This can be called multiple times sequentially | ||
// with the same blockID and is thread safe | ||
func (memR *Reactor) FetchTxsFromKeys(ctx context.Context, blockID []byte, compactData [][]byte) ([][]byte, error) { | ||
if request, ok := memR.blockFetcher.GetRequest(blockID); ok { | ||
memR.Logger.Debug("tracking existing request for block transactions") | ||
// we already have a request for this block | ||
return request.WaitForBlock(ctx) | ||
} | ||
|
||
txs := make([][]byte, len(compactData)) | ||
missingKeys := make(map[int]types.TxKey, len(compactData)) | ||
|
||
// iterate through the keys to know what transactions we have and what are missing | ||
for i, key := range compactData { | ||
txKey, err := types.TxKeyFromBytes(key) | ||
if err != nil { | ||
return nil, fmt.Errorf("incorrect compact blocks format: %w", err) | ||
} | ||
wtx := memR.mempool.store.get(txKey) | ||
if wtx != nil { | ||
txs[i] = wtx.tx | ||
} else { | ||
missingKeys[i] = txKey | ||
} | ||
} | ||
memR.Logger.Info("fetching transactions from peers", "blockID", blockID, "numTxs", len(txs), "numMissing", len(missingKeys)) | ||
|
||
memR.mempool.jsonMetrics.Lock() | ||
memR.mempool.jsonMetrics.TransactionsMissing = append(memR.mempool.jsonMetrics.TransactionsMissing, uint64(len(missingKeys))) | ||
memR.mempool.jsonMetrics.Transactions = append(memR.mempool.jsonMetrics.Transactions, uint64(len(compactData))) | ||
// Check if we got lucky and already had all the transactions. | ||
if len(missingKeys) == 0 { | ||
memR.mempool.jsonMetrics.TimeTakenFetchingTxs = append(memR.mempool.jsonMetrics.TimeTakenFetchingTxs, 0) | ||
memR.mempool.jsonMetrics.Unlock() | ||
return txs, nil | ||
} | ||
memR.mempool.jsonMetrics.Unlock() | ||
|
||
// setup a request for this block and begin to track and retrieve all missing transactions | ||
request := memR.blockFetcher.NewRequest( | ||
blockID, | ||
memR.mempool.Height(), | ||
missingKeys, | ||
txs, | ||
) | ||
defer func() { | ||
timeTaken := request.TimeTaken() | ||
if timeTaken == 0 { | ||
return | ||
} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. what does this do? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It's for gathering metrics |
||
memR.mempool.jsonMetrics.Lock() | ||
memR.mempool.jsonMetrics.TimeTakenFetchingTxs = append(memR.mempool.jsonMetrics.TimeTakenFetchingTxs, timeTaken) | ||
memR.mempool.jsonMetrics.Unlock() | ||
}() | ||
|
||
// Wait for the reactor to retrieve and post all transactions. | ||
return request.WaitForBlock(ctx) | ||
} | ||
|
||
// FetchKeysFromTxs is in many ways the opposite method. It takes a full block generated by the application | ||
// and reduces it to the set of keys that need to be gossiped from one mempool to another nodes mempool | ||
// in order to recreate the full block. | ||
func (memR *Reactor) FetchKeysFromTxs(ctx context.Context, txs [][]byte) ([][]byte, error) { | ||
keys := make([][]byte, len(txs)) | ||
for idx, tx := range txs { | ||
// check if the context has been cancelled | ||
if ctx.Err() != nil { | ||
return nil, ctx.Err() | ||
} | ||
key := types.Tx(tx).Key() | ||
keys[idx] = key[:] | ||
has := memR.mempool.store.has(key) | ||
if !has { | ||
// If the mempool provided the initial transactions yet received from | ||
// consensus a transaction it doesn't recognize, this implies that | ||
// either a tx was mutated or was added by the application. In either | ||
// case, it is likely no other mempool has this transaction so we | ||
// preemptively broadcast it to all other peers | ||
// | ||
// We don't set the priority, gasWanted or sender fields because we | ||
// don't know them. | ||
wtx := newWrappedTx(tx, key, memR.mempool.Height(), 0, 0, "") | ||
memR.broadcastNewTx(wtx) | ||
// For safety we also store this transaction in the mempool (ignoring | ||
// all size limits) so that we can retrieve it later if needed. Note | ||
// as we're broadcasting it to all peers, we should not receive a `WantTx` | ||
// unless it gets rejected by the application in CheckTx. | ||
// | ||
// Consensus will have an in memory copy of the entire block which includes | ||
// this transaction so it should not need it. | ||
memR.mempool.store.set(wtx) | ||
} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. do we need to call checktx to avoid accidently including invalid txs? |
||
} | ||
|
||
// return the keys back to the consensus engine | ||
return keys, nil | ||
} | ||
|
||
// blockFetcher coordinates in-flight compact block requests, keyed by
// blockID, so that concurrent callers for the same block share one request.
type blockFetcher struct {
	// mutex to manage concurrent calls to different parts
	mtx sync.Mutex
	// requests are a map of all processing block requests
	// by blockID.
	requests map[string]*blockRequest
}
|
||
// NewBlockFetcher returns a new blockFetcher for managing block requests | ||
func NewBlockFetcher() *blockFetcher { | ||
return &blockFetcher{ | ||
requests: make(map[string]*blockRequest), | ||
} | ||
} | ||
|
||
func (bf *blockFetcher) GetRequest(blockID []byte) (*blockRequest, bool) { | ||
bf.mtx.Lock() | ||
defer bf.mtx.Unlock() | ||
request, ok := bf.requests[string(blockID)] | ||
return request, ok | ||
} | ||
|
||
// NewRequest creates a new block request and returns it. | ||
// If a request already exists it returns that instead | ||
func (bf *blockFetcher) NewRequest( | ||
blockID []byte, | ||
height int64, | ||
missingKeys map[int]types.TxKey, | ||
txs [][]byte, | ||
) *blockRequest { | ||
bf.mtx.Lock() | ||
defer bf.mtx.Unlock() | ||
if request, ok := bf.requests[string(blockID)]; ok { | ||
return request | ||
} | ||
request := NewBlockRequest(height, missingKeys, txs) | ||
bf.requests[string(blockID)] = request | ||
bf.pruneOldRequests(height) | ||
return request | ||
} | ||
|
||
// TryAddMissingTx loops through all current requests and tries to add | ||
// the given transaction (if it is missing). | ||
func (bf *blockFetcher) TryAddMissingTx(key types.TxKey, tx []byte) { | ||
bf.mtx.Lock() | ||
defer bf.mtx.Unlock() | ||
for _, request := range bf.requests { | ||
request.TryAddMissingTx(key, tx) | ||
} | ||
} | ||
|
||
// PruneOldRequests removes any requests that are older than the given height.
// Callers must hold bf.mtx — NewRequest invokes this under its own lock.
func (bf *blockFetcher) pruneOldRequests(height int64) {
	for blockID, request := range bf.requests {
		if request.height < height {
			delete(bf.requests, blockID)
		}
	}
}
|
||
// blockRequests handle the lifecycle of individual block requests.
type blockRequest struct {
	// immutable fields
	height int64
	// doneCh is closed exactly once, when the last missing tx arrives
	// (see TryAddMissingTx).
	doneCh chan struct{}

	// mtx guards all fields below, including endTime.
	mtx sync.Mutex
	// track the remaining keys that are missing
	missingKeysByIndex map[int]types.TxKey
	missingKeys        map[string]int
	// the txs in the block
	txs [][]byte

	// used for metrics
	startTime time.Time
	endTime   time.Time
}
|
||
func NewBlockRequest( | ||
height int64, | ||
missingKeys map[int]types.TxKey, | ||
txs [][]byte, | ||
) *blockRequest { | ||
mk := make(map[string]int, len(missingKeys)) | ||
for i, key := range missingKeys { | ||
mk[key.String()] = i | ||
} | ||
return &blockRequest{ | ||
height: height, | ||
missingKeysByIndex: missingKeys, | ||
missingKeys: mk, | ||
txs: txs, | ||
doneCh: make(chan struct{}), | ||
startTime: time.Now().UTC(), | ||
} | ||
} | ||
|
||
// WaitForBlock is a blocking call that waits for the block to be fetched and completed. | ||
// It can be called concurrently. If the block was already fetched it returns immediately. | ||
func (br *blockRequest) WaitForBlock(ctx context.Context) ([][]byte, error) { | ||
if br.IsDone() { | ||
return br.txs, nil | ||
} | ||
|
||
for { | ||
select { | ||
case <-ctx.Done(): | ||
return nil, ctx.Err() | ||
case <-br.doneCh: | ||
br.mtx.Lock() | ||
defer br.mtx.Unlock() | ||
br.endTime = time.Now().UTC() | ||
return br.txs, nil | ||
} | ||
} | ||
} | ||
|
||
// TryAddMissingTx checks if a given transactions was missing and if so | ||
// adds it to the block request. | ||
func (br *blockRequest) TryAddMissingTx(key types.TxKey, tx []byte) bool { | ||
br.mtx.Lock() | ||
defer br.mtx.Unlock() | ||
if index, ok := br.missingKeys[key.String()]; ok { | ||
delete(br.missingKeys, key.String()) | ||
delete(br.missingKeysByIndex, index) | ||
br.txs[index] = tx | ||
// check if there is any more transactions remaining | ||
if len(br.missingKeys) == 0 { | ||
// Yaay! We're done! | ||
close(br.doneCh) | ||
} | ||
return true | ||
} | ||
return false | ||
} | ||
|
||
// IsDone returns whether all transactions in the block have been received.
// This is done by measuring the amount of missing keys.
// Safe for concurrent use.
func (br *blockRequest) IsDone() bool {
	br.mtx.Lock()
	defer br.mtx.Unlock()
	return len(br.missingKeys) == 0
}
|
||
func (br *blockRequest) TimeTaken() uint64 { | ||
if br.endTime.IsZero() { | ||
return 0 | ||
} | ||
return uint64(br.endTime.Sub(br.startTime).Milliseconds()) | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,243 @@ | ||
package cat | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"math/rand" | ||
"sync" | ||
"testing" | ||
"time" | ||
|
||
"github.com/tendermint/tendermint/crypto/tmhash" | ||
"github.com/tendermint/tendermint/mempool" | ||
"github.com/tendermint/tendermint/p2p" | ||
memproto "github.com/tendermint/tendermint/proto/tendermint/mempool" | ||
"github.com/tendermint/tendermint/types" | ||
"github.com/stretchr/testify/require" | ||
) | ||
|
||
func TestBlockRequest(t *testing.T) { | ||
ctx, cancel := context.WithTimeout(context.Background(), time.Second) | ||
defer cancel() | ||
tx1, tx2 := types.Tx("hello"), types.Tx("world") | ||
key1, key2 := tx1.Key(), tx2.Key() | ||
txs := make([][]byte, 2) | ||
missingKeys := map[int]types.TxKey{ | ||
0: key1, | ||
1: key2, | ||
} | ||
|
||
request := NewBlockRequest(1, missingKeys, txs) | ||
|
||
require.True(t, request.TryAddMissingTx(key1, tx1)) | ||
// cannot add the same missing tx twice | ||
require.False(t, request.TryAddMissingTx(key1, tx1)) | ||
require.False(t, request.IsDone()) | ||
|
||
// test that we adhere to the context deadline | ||
shortCtx, cancel := context.WithTimeout(context.Background(), time.Millisecond) | ||
defer cancel() | ||
_, err := request.WaitForBlock(shortCtx) | ||
require.Error(t, err) | ||
|
||
// test that all txs mean the block `IsDone` | ||
require.True(t, request.TryAddMissingTx(key2, tx2)) | ||
require.True(t, request.IsDone()) | ||
|
||
// waiting for the block should instantly return | ||
txs, err = request.WaitForBlock(ctx) | ||
require.NoError(t, err) | ||
require.Equal(t, txs, [][]byte{tx1, tx2}) | ||
} | ||
|
||
func TestBlockRequestConcurrently(t *testing.T) { | ||
ctx, cancel := context.WithTimeout(context.Background(), time.Second) | ||
defer cancel() | ||
const numTxs = 10 | ||
allTxs := make([][]byte, numTxs) | ||
txKeys := make([]types.TxKey, numTxs) | ||
missingKeys := make(map[int]types.TxKey) | ||
txs := make([][]byte, numTxs) | ||
for i := 0; i < numTxs; i++ { | ||
tx := types.Tx(fmt.Sprintf("tx%d", i)) | ||
allTxs[i] = tx | ||
txKeys[i] = tx.Key() | ||
if i%3 == 0 { | ||
txs[i] = tx | ||
} else { | ||
missingKeys[i] = txKeys[i] | ||
} | ||
} | ||
|
||
request := NewBlockRequest(1, missingKeys, txs) | ||
|
||
wg := sync.WaitGroup{} | ||
for i := 0; i < numTxs; i++ { | ||
wg.Add(1) | ||
go func(i int) { | ||
defer wg.Done() | ||
request.TryAddMissingTx(txKeys[i], txs[i]) | ||
}(i) | ||
} | ||
|
||
// wait for the block | ||
result, err := request.WaitForBlock(ctx) | ||
require.NoError(t, err) | ||
require.Len(t, result, numTxs) | ||
for i := 0; i < numTxs; i++ { | ||
require.Equal(t, string(result[i]), string(txs[i])) | ||
} | ||
wg.Wait() | ||
} | ||
|
||
func TestBlockFetcherSimple(t *testing.T) { | ||
bf := NewBlockFetcher() | ||
tx := types.Tx("hello world") | ||
key := tx.Key() | ||
missingKeys := map[int]types.TxKey{ | ||
0: key, | ||
} | ||
blockID := []byte("blockID") | ||
req := bf.NewRequest(blockID, 1, missingKeys, make([][]byte, 1)) | ||
req2, ok := bf.GetRequest(blockID) | ||
require.True(t, ok) | ||
require.Equal(t, req, req2) | ||
// a different request for the same blockID should | ||
// return the same original request object. | ||
req3 := bf.NewRequest(blockID, 2, missingKeys, make([][]byte, 2)) | ||
require.Equal(t, req, req3) | ||
|
||
req4 := bf.NewRequest([]byte("differentBlockID"), 1, missingKeys, make([][]byte, 1)) | ||
|
||
bf.TryAddMissingTx(key, tx) | ||
require.False(t, req4.TryAddMissingTx(key, tx)) | ||
require.True(t, req.IsDone()) | ||
require.Len(t, bf.requests, 2) | ||
} | ||
|
||
func TestBlockFetcherConcurrentRequests(t *testing.T) { | ||
var ( | ||
bf = NewBlockFetcher() | ||
numBlocks = 5 | ||
numRequestsPerBlock = 5 | ||
numTxs = 5 | ||
requestWG = sync.WaitGroup{} | ||
goRoutinesWG = sync.WaitGroup{} | ||
allTxs = make([][]byte, numTxs) | ||
txs = make([][]byte, numTxs) | ||
missingKeys = make(map[int]types.TxKey) | ||
) | ||
|
||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) | ||
defer cancel() | ||
|
||
for i := 0; i < numTxs; i++ { | ||
tx := types.Tx(fmt.Sprintf("tx%d", i)) | ||
allTxs[i] = tx | ||
if i%3 == 0 { | ||
txs[i] = tx | ||
} else { | ||
missingKeys[i] = tx.Key() | ||
} | ||
} | ||
|
||
for i := 0; i < numBlocks; i++ { | ||
requestWG.Add(1) | ||
for j := 0; j < numRequestsPerBlock; j++ { | ||
goRoutinesWG.Add(1) | ||
go func(blockID []byte, routine int) { | ||
defer goRoutinesWG.Done() | ||
// create a copy of the missingKeys and txs | ||
mk := make(map[int]types.TxKey) | ||
for i, k := range missingKeys { | ||
mk[i] = k | ||
} | ||
txsCopy := make([][]byte, len(txs)) | ||
copy(txsCopy, txs) | ||
request := bf.NewRequest(blockID, 1, mk, txs) | ||
if routine == 0 { | ||
requestWG.Done() | ||
} | ||
_, _ = request.WaitForBlock(ctx) | ||
}([]byte(fmt.Sprintf("blockID%d", i)), j) | ||
} | ||
goRoutinesWG.Add(1) | ||
go func() { | ||
defer goRoutinesWG.Done() | ||
// Wait until all the request have started | ||
requestWG.Wait() | ||
for _, tx := range allTxs { | ||
bf.TryAddMissingTx(types.Tx(tx).Key(), tx) | ||
} | ||
}() | ||
} | ||
goRoutinesWG.Wait() | ||
|
||
for i := 0; i < numBlocks; i++ { | ||
blockID := []byte(fmt.Sprintf("blockID%d", i)) | ||
request, ok := bf.GetRequest(blockID) | ||
require.True(t, ok) | ||
require.True(t, request.IsDone()) | ||
result, err := request.WaitForBlock(ctx) | ||
require.NoError(t, err) | ||
require.Equal(t, result, txs) | ||
} | ||
} | ||
|
||
// TestFetchTxsFromKeys exercises the happy path of Reactor.FetchTxsFromKeys:
// a third of the block's txs are already in the mempool, the rest arrive
// from a peer while the fetch is in flight.
func TestFetchTxsFromKeys(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	reactor, pool := setupReactor(t)

	numTxs := 10
	txs := make([][]byte, numTxs)
	keys := make([][]byte, numTxs)
	peer := genPeer()
	// NOTE(review): blockID appears to serve only as an index for the request
	// tracker; it is never validated against the keys — confirm.
	blockID := tmhash.Sum([]byte("blockID"))

	wg := sync.WaitGroup{}
	for i := 0; i < numTxs; i++ {
		tx := newDefaultTx(fmt.Sprintf("tx%d", i))
		txs[i] = tx
		key := tx.Key()
		keys[i] = key[:]
		// every 1 in 3 transactions proposed in the block, the node
		// already has in their mempool and doesn't need to fetch
		if i%3 == 0 {
			t.Log("adding tx to mempool", i)
			err := pool.CheckTx(tx, nil, mempool.TxInfo{})
			require.NoError(t, err)
		} else {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// deliver the missing tx from the peer after a random delay,
				// while FetchTxsFromKeys below is blocked waiting for it
				time.Sleep(time.Duration(rand.Int63n(100)) * time.Millisecond)
				reactor.ReceiveEnvelope(p2p.Envelope{
					Src:       peer,
					Message:   &memproto.Txs{Txs: [][]byte{tx}},
					ChannelID: mempool.MempoolChannel,
				})
			}()
		}
	}

	reactor.InitPeer(peer)

	// NOTE(review): this envelope re-sends every tx at once; per the review
	// thread the test also passes without it — confirm whether it is needed.
	go func() {
		reactor.ReceiveEnvelope(p2p.Envelope{
			Src:       peer,
			Message:   &memproto.Txs{Txs: txs},
			ChannelID: mempool.MempoolChannel,
		})
	}()

	resultTxs, err := reactor.FetchTxsFromKeys(ctx, blockID, keys)
	require.NoError(t, err)
	require.Equal(t, len(txs), len(resultTxs))
	for idx, tx := range resultTxs {
		require.Equal(t, txs[idx], tx)
	}
	// a second call for the same blockID should join the existing request
	// and return the identical result
	repeatResult, err := reactor.FetchTxsFromKeys(ctx, blockID, keys)
	require.NoError(t, err)
	require.Equal(t, resultTxs, repeatResult)
	wg.Wait()
}
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -33,9 +33,7 @@ import ( | |
"github.com/tendermint/tendermint/libs/service" | ||
"github.com/tendermint/tendermint/light" | ||
mempl "github.com/tendermint/tendermint/mempool" | ||
mempoolv2 "github.com/tendermint/tendermint/mempool/cat" | ||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0" | ||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1" | ||
"github.com/tendermint/tendermint/mempool/cat" | ||
"github.com/tendermint/tendermint/p2p" | ||
"github.com/tendermint/tendermint/p2p/pex" | ||
"github.com/tendermint/tendermint/privval" | ||
|
@@ -379,22 +377,22 @@ func createMempoolAndMempoolReactor( | |
memplMetrics *mempl.Metrics, | ||
logger log.Logger, | ||
traceClient *trace.Client, | ||
) (mempl.Mempool, p2p.Reactor) { | ||
) (*cat.TxPool, *cat.Reactor) { | ||
switch config.Mempool.Version { | ||
case cfg.MempoolV2: | ||
mp := mempoolv2.NewTxPool( | ||
mp := cat.NewTxPool( | ||
logger, | ||
config.Mempool, | ||
proxyApp.Mempool(), | ||
state.LastBlockHeight, | ||
mempoolv2.WithMetrics(memplMetrics), | ||
mempoolv2.WithPreCheck(sm.TxPreCheck(state)), | ||
mempoolv2.WithPostCheck(sm.TxPostCheck(state)), | ||
cat.WithMetrics(memplMetrics), | ||
cat.WithPreCheck(sm.TxPreCheck(state)), | ||
cat.WithPostCheck(sm.TxPostCheck(state)), | ||
) | ||
|
||
reactor, err := mempoolv2.NewReactor( | ||
reactor, err := cat.NewReactor( | ||
mp, | ||
&mempoolv2.ReactorOptions{ | ||
&cat.ReactorOptions{ | ||
ListenOnly: !config.Mempool.Broadcast, | ||
MaxTxSize: config.Mempool.MaxTxBytes, | ||
TraceClient: traceClient, | ||
|
@@ -410,52 +408,6 @@ func createMempoolAndMempoolReactor( | |
} | ||
reactor.SetLogger(logger) | ||
|
||
return mp, reactor | ||
case cfg.MempoolV1: | ||
mp := mempoolv1.NewTxMempool( | ||
logger, | ||
config.Mempool, | ||
proxyApp.Mempool(), | ||
state.LastBlockHeight, | ||
mempoolv1.WithMetrics(memplMetrics), | ||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)), | ||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)), | ||
mempoolv1.WithTraceClient(traceClient), | ||
) | ||
|
||
reactor := mempoolv1.NewReactor( | ||
config.Mempool, | ||
mp, | ||
traceClient, | ||
) | ||
if config.Consensus.WaitForTxs() { | ||
mp.EnableTxsAvailable() | ||
} | ||
reactor.SetLogger(logger) | ||
|
||
return mp, reactor | ||
|
||
case cfg.MempoolV0: | ||
mp := mempoolv0.NewCListMempool( | ||
config.Mempool, | ||
proxyApp.Mempool(), | ||
state.LastBlockHeight, | ||
mempoolv0.WithMetrics(memplMetrics), | ||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)), | ||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)), | ||
) | ||
|
||
mp.SetLogger(logger) | ||
|
||
reactor := mempoolv0.NewReactor( | ||
config.Mempool, | ||
mp, | ||
) | ||
if config.Consensus.WaitForTxs() { | ||
mp.EnableTxsAvailable() | ||
} | ||
reactor.SetLogger(logger) | ||
|
||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. it won't let me highlight the part I want, the default will return nil (if not selecting v2 in the config), which could be confusing. The consensus reactor will pick a nil txFetcher, which uses will just return the keys instead of actually getting the txs. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It assumes that compact blocks is not being used in which case the keys are the full transactions |
||
return mp, reactor | ||
|
||
default: | ||
|
@@ -508,7 +460,8 @@ func createConsensusReactor(config *cfg.Config, | |
state sm.State, | ||
blockExec *sm.BlockExecutor, | ||
blockStore sm.BlockStore, | ||
mempool mempl.Mempool, | ||
catpool *cat.TxPool, | ||
catReactor *cat.Reactor, | ||
evidencePool *evidence.Pool, | ||
privValidator types.PrivValidator, | ||
csMetrics *cs.Metrics, | ||
|
@@ -522,7 +475,8 @@ func createConsensusReactor(config *cfg.Config, | |
state.Copy(), | ||
blockExec, | ||
blockStore, | ||
mempool, | ||
catpool, | ||
catReactor, | ||
evidencePool, | ||
cs.StateMetrics(csMetrics), | ||
cs.SetTraceClient(traceClient), | ||
|
@@ -902,7 +856,7 @@ func NewNode(config *cfg.Config, | |
csMetrics.FastSyncing.Set(1) | ||
} | ||
consensusReactor, consensusState := createConsensusReactor( | ||
config, state, blockExec, blockStore, mempool, evidencePool, | ||
config, state, blockExec, blockStore, mempool, mempoolReactor, evidencePool, | ||
privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, influxdbClient, | ||
) | ||
|
||
|
@@ -1483,7 +1437,7 @@ func makeNodeInfo( | |
} | ||
|
||
if config.Mempool.Version == cfg.MempoolV2 { | ||
nodeInfo.Channels = append(nodeInfo.Channels, mempoolv2.MempoolStateChannel) | ||
nodeInfo.Channels = append(nodeInfo.Channels, cat.MempoolStateChannel) | ||
} | ||
|
||
lAddr := config.P2P.ExternalAddress | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -97,7 +97,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( | |
height int64, | ||
state State, commit *types.Commit, | ||
proposerAddr []byte, | ||
) (*types.Block, *types.PartSet) { | ||
) *types.Block { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. why get rid of the partset here? |
||
|
||
maxBytes := state.ConsensusParams.Block.MaxBytes | ||
maxGas := state.ConsensusParams.Block.MaxGas | ||
|
@@ -158,13 +158,14 @@ func (blockExec *BlockExecutor) CreateProposalBlock( | |
panic(err) | ||
} | ||
|
||
return state.MakeBlock( | ||
block, _ := state.MakeBlock( | ||
height, | ||
newData, | ||
commit, | ||
evidence, | ||
proposerAddr, | ||
) | ||
return block | ||
} | ||
|
||
func (blockExec *BlockExecutor) ProcessProposal( | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
note for after prototype: we need var names plus more dovs here