
Commit 1846523

Committed Jul 30, 2024
add more metrics for compact blocks
1 parent 1fe8522 commit 1846523

File tree: 6 files changed, +36 -31 lines
 

‎consensus/metrics.go

+26 lines changed

@@ -102,6 +102,10 @@ type Metrics struct {
 
 	// The amount of proposals that failed to be received in time
 	TimedOutProposals metrics.Counter
+
+	CompactBlocksReceived metrics.Counter
+	CompactBlocksSent metrics.Counter
+	CompactBlocksFailed metrics.Counter
 }
 
 // PrometheusMetrics returns Metrics build using Prometheus client library.
@@ -279,6 +283,24 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
 			Name: "timed_out_proposals",
 			Help: "Number of proposals that failed to be received in time",
 		}, labels).With(labelsAndValues...),
+		CompactBlocksReceived: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name: "compact_blocks_received",
+			Help: "Number of compact blocks received by the node",
+		}, labels).With(labelsAndValues...),
+		CompactBlocksSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name: "compact_blocks_sent",
+			Help: "Number of compact blocks sent by the node",
+		}, labels).With(labelsAndValues...),
+		CompactBlocksFailed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name: "compact_blocks_failed",
+			Help: "Number of compact blocks failed to be received by the node",
+		}, labels).With(labelsAndValues...),
 	}
 }
 
@@ -317,6 +339,10 @@ func NopMetrics() *Metrics {
 		FullPrevoteMessageDelay: discard.NewGauge(),
 		ApplicationRejectedProposals: discard.NewCounter(),
 		TimedOutProposals: discard.NewCounter(),
+
+		CompactBlocksReceived: discard.NewCounter(),
+		CompactBlocksSent: discard.NewCounter(),
+		CompactBlocksFailed: discard.NewCounter(),
 	}
 }
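
Aside: the counters above use the go-kit Prometheus wrappers that these metrics packages are built on (prometheus.NewCounterFrom and discard.NewCounter in the diff are go-kit calls). A minimal, standalone sketch of that pattern for orientation only; the package main scaffolding, the "cometbft" namespace value, and the trimmed-down Metrics struct are illustrative assumptions, not project code:

package main

import (
	"github.com/go-kit/kit/metrics"
	prometheus "github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

// Trimmed-down stand-in for the consensus Metrics struct, for illustration only.
type Metrics struct {
	CompactBlocksReceived metrics.Counter
}

func main() {
	m := &Metrics{
		CompactBlocksReceived: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: "cometbft",  // assumed namespace; the real value is passed in by the caller
			Subsystem: "consensus", // assumed to match MetricsSubsystem in consensus/metrics.go
			Name:      "compact_blocks_received",
			Help:      "Number of compact blocks received by the node",
		}, []string{}).With(),
	}

	// Counters are monotonically increasing; each received compact block adds one.
	m.CompactBlocksReceived.Add(1)
}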

‎consensus/reactor.go

+1, -2 lines changed

@@ -694,7 +694,7 @@ OUTER_LOOP:
 				},
 			}, logger) {
 				ps.SetHasBlock(prs.Height, prs.Round)
-				conR.conS.jsonMetrics.SentCompactBlocks++
+				conR.conS.metrics.CompactBlocksSent.Add(1)
 			}
 			continue OUTER_LOOP
 		}
@@ -841,7 +841,6 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
 			string(peer.ID()),
 			schema.Upload,
 		)
-		conR.conS.jsonMetrics.SentBlockParts++
 	} else {
 		logger.Debug("Sending block part for catchup failed")
 		// sleep to avoid retrying too fast

‎consensus/state.go

+2, -11 lines changed

@@ -167,7 +167,6 @@ type State struct {
 
 	// for reporting metrics
 	metrics *Metrics
-	jsonMetrics *JSONMetrics
 	traceClient trace.Tracer
 }
 
@@ -206,7 +205,6 @@ func NewState(
 		evpool: evpool,
 		evsw: cmtevents.NewEventSwitch(),
 		metrics: NopMetrics(),
-		jsonMetrics: NewJSONMetrics(path),
 		traceClient: trace.NoOpTracer(),
 	}
 
@@ -1801,12 +1799,6 @@ func (cs *State) finalizeCommit(height int64) {
 	// NewHeightStep!
 	cs.updateToState(stateCopy)
 
-	cs.jsonMetrics.Blocks++
-	// Save every 20 blocks
-	if cs.Height%20 == 0 {
-		cs.jsonMetrics.Save()
-	}
-
 	fail.Fail() // XXX
 
 	// Private validator might have changed it's key pair => refetch pubkey.
@@ -1979,7 +1971,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
 func (cs *State) addCompactBlock(msg *CompactBlockMessage, peerID p2p.ID) error {
 	compactBlock := msg.Block
 	height := compactBlock.Height
-	cs.jsonMetrics.ReceivedCompactBlocks++
+	cs.metrics.CompactBlocksReceived.Add(1)
 
 	if cs.ProposalBlock != nil {
 		// We already have the proposal block.
@@ -2020,7 +2012,7 @@ func (cs *State) addCompactBlock(msg *CompactBlockMessage, peerID p2p.ID) error
 
 	cs.mtx.Lock()
 	if err != nil {
-		cs.jsonMetrics.CompactBlockFailures++
+		cs.metrics.CompactBlocksFailed.Add(1)
 		if ctx.Err() != nil {
 			cs.Logger.Info("failed to fetch transactions within the timeout", "timeout", timeout)
 			return nil
@@ -2101,7 +2093,6 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
 	}
 
 	cs.metrics.BlockGossipPartsReceived.With("matches_current", "true").Add(1)
-	cs.jsonMetrics.ReceivedBlockParts++
 
 	if cs.ProposalBlockParts.ByteSize() > cs.state.ConsensusParams.Block.MaxBytes {
 		return added, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)",
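
The removed block in finalizeCommit persisted the JSON metrics to disk every 20 blocks; Prometheus counters are instead exposed over HTTP and scraped, so no periodic saving is needed. A minimal, standalone sketch of that exposure path, for illustration only; the node's real metrics endpoint is configured through its own instrumentation settings, and the counter name and port below are made up:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Hypothetical counter for this sketch; the real ones are built in
	// consensus/metrics.go through the go-kit wrappers shown above.
	received := promauto.NewCounter(prometheus.CounterOpts{
		Name: "demo_compact_blocks_received",
		Help: "Demo counter standing in for the consensus compact-blocks-received metric.",
	})
	received.Add(1)

	// Serve everything registered with the default registry at /metrics,
	// so a Prometheus server can scrape it; no periodic file writes needed.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}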

‎mempool/cat/block_builder.go

+3, -12 lines changed

@@ -39,17 +39,13 @@ func (memR *Reactor) FetchTxsFromKeys(ctx context.Context, blockID []byte, compa
 		}
 	}
 	memR.Logger.Info("fetching transactions from peers", "blockID", blockID, "numTxs", len(txs), "numMissing", len(missingKeys))
+	memR.mempool.metrics.MissingTxs.Add(float64(len(missingKeys)))
 
-	memR.mempool.jsonMetrics.Lock()
-	memR.mempool.jsonMetrics.TransactionsMissing = append(memR.mempool.jsonMetrics.TransactionsMissing, uint64(len(missingKeys)))
-	memR.mempool.jsonMetrics.Transactions = append(memR.mempool.jsonMetrics.Transactions, uint64(len(compactData)))
 	// Check if we got lucky and already had all the transactions.
 	if len(missingKeys) == 0 {
-		memR.mempool.jsonMetrics.TimeTakenFetchingTxs = append(memR.mempool.jsonMetrics.TimeTakenFetchingTxs, 0)
-		memR.mempool.jsonMetrics.Unlock()
+		memR.Logger.Info("fetched all txs, none missing", "blockID", blockID)
 		return txs, nil
 	}
-	memR.mempool.jsonMetrics.Unlock()
 
 	// setup a request for this block and begin to track and retrieve all missing transactions
 	request := memR.blockFetcher.newRequest(
@@ -60,12 +56,7 @@ func (memR *Reactor) FetchTxsFromKeys(ctx context.Context, blockID []byte, compa
 	)
 	defer func() {
 		timeTaken := request.TimeTaken()
-		if timeTaken == 0 {
-			return
-		}
-		memR.mempool.jsonMetrics.Lock()
-		memR.mempool.jsonMetrics.TimeTakenFetchingTxs = append(memR.mempool.jsonMetrics.TimeTakenFetchingTxs, timeTaken)
-		memR.mempool.jsonMetrics.Unlock()
+		memR.Logger.Info("fetched txs", "timeTaken", timeTaken, "numMissing", len(missingKeys))
 	}()
 
 	// request the missing transactions if we haven't already
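
Note that the old JSON metrics needed explicit Lock/Unlock calls around every update, while Prometheus counters are safe for concurrent use, so call sites such as this reactor can simply call Add. A small, standalone sketch (not project code) illustrating concurrent increments without any extra locking:

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// client_golang counters (which back the go-kit wrappers used in this commit)
	// are safe for concurrent use, so no mutex is needed around Add.
	missing := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_missing_txs", // hypothetical name for this sketch
		Help: "Demo counter standing in for the mempool MissingTxs metric.",
	})

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			missing.Add(3) // e.g. three missing transactions observed by this goroutine
		}()
	}
	wg.Wait()

	fmt.Println("incremented the counter from 10 goroutines without locking")
}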

‎mempool/cat/pool.go

-6 lines changed

@@ -54,7 +54,6 @@ type TxPool struct {
 	config *config.MempoolConfig
 	proxyAppConn proxy.AppConnMempool
 	metrics *mempool.Metrics
-	jsonMetrics *mempool.JSONMetrics
 
 	// these values are modified once per height
 	updateMtx sync.Mutex
@@ -111,7 +110,6 @@ func NewTxPool(
 		store: newStore(),
 		broadcastCh: make(chan *wrappedTx),
 		txsToBeBroadcast: make([]types.TxKey, 0),
-		jsonMetrics: mempool.NewJSONMetrics(path),
 	}
 
 	for _, opt := range options {
@@ -559,10 +557,6 @@ func (txmp *TxPool) Update(
 			txmp.notifyTxsAvailable()
 		}
 	}
-	// save every 20 blocks
-	if blockHeight%20 == 0 {
-		txmp.jsonMetrics.Save()
-	}
 	return nil
 }
 
‎mempool/metrics.go

+4 lines changed

@@ -64,6 +64,10 @@ type Metrics struct {
 	// RerequestedTxs defines the number of times that a requested tx
 	// never received a response in time and a new request was made.
 	RerequestedTxs metrics.Counter
+
+	// MissingTxs defines the number of transactions that were not found in the mempool
+	// from the current proposal
+	MissingTxs metrics.Counter
 }
 
 // PrometheusMetrics returns Metrics build using Prometheus client library.
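
This hunk only adds the struct field; the constructor wiring for the new counter is not visible in this view. Following the pattern of the existing mempool counters (and of consensus/metrics.go above), the registration would presumably look something like the sketch below. The metric name "missing_txs" and the help text are assumptions, not taken from the diff:

	// In PrometheusMetrics (sketch, assumed to mirror the other counters):
	MissingTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: MetricsSubsystem,
		Name:      "missing_txs", // assumed metric name
		Help:      "Number of transactions not found in the mempool from the current proposal",
	}, labels).With(labelsAndValues...),

	// In NopMetrics (sketch):
	MissingTxs: discard.NewCounter(),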

0 commit comments