
Commit fdf4123

ziweixiao authored and kuba-moo committed Apr 19, 2024
gve: Remove qpl_cfg struct since qpl_ids map with queues respectively
The qpl_cfg struct was used to make sure that no two queues use a QPL
with the same qpl_id. We can remove it because the qpl_ids now map to
the queues directly:

For tx queues: qpl_id = tx_qid
For rx queues: qpl_id = max_tx_queues + rx_qid

When XDP is used, the user must reduce the number of tx queues to at
most half of max_tx_queues. XDP then uses the same number of tx queues,
starting from the end of the existing tx queues. The XDP queues
therefore never exceed the max_tx_queues range and never overlap with
the rx queues, so their qpl_ids cannot overlap either.

Given that, we remove the qpl_cfg struct and derive the qpl_id directly
from the queue id. Unless we erroneously allocate a rx/tx queue that
has already been allocated, we can never allocate a QPL with the same
qpl_id twice, and that error case would fail much earlier than the QPL
assignment.

Suggested-by: Praveen Kaligineedi <[email protected]>
Signed-off-by: Ziwei Xiao <[email protected]>
Reviewed-by: Harshitha Ramamurthy <[email protected]>
Reviewed-by: Shailend Chand <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 33d21bd commit fdf4123
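
For reference, the qpl_id helpers this change relies on (gve_tx_qpl_id,
gve_rx_start_qpl_id, gve_get_rx_qpl_id) already live in gve.h and are not
part of this diff. A minimal sketch of the mapping they implement, inferred
from the commit message and assuming gve_queue_config carries the limit in
a max_queues field:

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
        return tx_qid;  /* tx QPLs are indexed by tx queue id */
}

static inline u32 gve_rx_start_qpl_id(struct gve_queue_config *tx_cfg)
{
        return tx_cfg->max_queues;  /* rx qpl_ids begin past all tx ids */
}

static inline u32 gve_get_rx_qpl_id(struct gve_queue_config *tx_cfg, int rx_qid)
{
        return gve_rx_start_qpl_id(tx_cfg) + rx_qid;
}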

File tree: 7 files changed (+20, -113 lines)
 

drivers/net/ethernet/google/gve/gve.h (+2, -37)

@@ -639,7 +639,6 @@ struct gve_ptype_lut {
 
 /* Parameters for allocating queue page lists */
 struct gve_qpls_alloc_cfg {
-        struct gve_qpl_config *qpl_cfg;
         struct gve_queue_config *tx_cfg;
         struct gve_queue_config *rx_cfg;
 
@@ -655,9 +654,8 @@ struct gve_qpls_alloc_cfg {
 struct gve_tx_alloc_rings_cfg {
         struct gve_queue_config *qcfg;
 
-        /* qpls and qpl_cfg must already be allocated */
+        /* qpls must already be allocated */
         struct gve_queue_page_list *qpls;
-        struct gve_qpl_config *qpl_cfg;
 
         u16 ring_size;
         u16 start_idx;
@@ -674,9 +672,8 @@ struct gve_rx_alloc_rings_cfg {
         struct gve_queue_config *qcfg;
         struct gve_queue_config *qcfg_tx;
 
-        /* qpls and qpl_cfg must already be allocated */
+        /* qpls must already be allocated */
         struct gve_queue_page_list *qpls;
-        struct gve_qpl_config *qpl_cfg;
 
         u16 ring_size;
         u16 packet_buffer_size;
@@ -732,7 +729,6 @@ struct gve_priv {
         u16 num_xdp_queues;
         struct gve_queue_config tx_cfg;
         struct gve_queue_config rx_cfg;
-        struct gve_qpl_config qpl_cfg; /* map used QPL ids */
         u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
 
         struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -1053,37 +1049,6 @@ static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
         return 2 * rx_desc_cnt;
 }
 
-/* Returns a pointer to the next available tx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
-                                              int tx_qid)
-{
-        /* QPL already in use */
-        if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
-                return NULL;
-        set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
-        return &cfg->qpls[tx_qid];
-}
-
-/* Returns a pointer to the next available rx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
-                                              int rx_qid)
-{
-        int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
-        /* QPL already in use */
-        if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
-                return NULL;
-        set_bit(id, cfg->qpl_cfg->qpl_id_map);
-        return &cfg->qpls[id];
-}
-
-/* Unassigns the qpl with the given id */
-static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
-{
-        clear_bit(id, qpl_cfg->qpl_id_map);
-}
-
 /* Returns the correct dma direction for tx and rx qpls */
 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
                                                       int id)
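
To make the non-overlap argument concrete, here is a small standalone
sanity check (hypothetical queue counts, not driver code) that walks the
regular tx, XDP tx, and rx qpl_id ranges under the halving constraint the
commit message describes:

#include <assert.h>

int main(void)
{
        /* Hypothetical values for illustration only */
        int max_tx_queues = 16;
        int num_tx = 8;         /* tx queues reduced to max_tx_queues / 2 */
        int num_rx = 16;
        char used[64] = {0};    /* covers max_tx_queues + num_rx ids */

        /* Regular tx queues: qpl_id = tx_qid */
        for (int q = 0; q < num_tx; q++) {
                assert(!used[q]);
                used[q] = 1;
        }
        /* XDP tx queues start at the end of the regular tx queues, so
         * their qids (and hence qpl_ids) stay below max_tx_queues.
         */
        for (int q = num_tx; q < 2 * num_tx; q++) {
                assert(q < max_tx_queues);
                assert(!used[q]);
                used[q] = 1;
        }
        /* Rx queues: qpl_id = max_tx_queues + rx_qid, disjoint from tx */
        for (int q = 0; q < num_rx; q++) {
                assert(!used[max_tx_queues + q]);
                used[max_tx_queues + q] = 1;
        }
        return 0;
}

Without the halving constraint, the XDP id range would spill past
max_tx_queues into the rx qpl_ids; such a configuration must fail well
before QPL assignment, as the commit message notes.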

drivers/net/ethernet/google/gve/gve_ethtool.c (-9)

@@ -510,7 +510,6 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
         struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
         struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
         struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
-        struct gve_qpl_config new_qpl_cfg;
         int err;
 
         /* get current queue configuration */
@@ -521,14 +520,6 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
         tx_alloc_cfg.ring_size = new_tx_desc_cnt;
         rx_alloc_cfg.ring_size = new_rx_desc_cnt;
 
-        /* qpl_cfg is not read-only, it contains a map that gets updated as
-         * rings are allocated, which is why we cannot use the yet unreleased
-         * one in priv.
-         */
-        qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-
         if (netif_running(priv->dev)) {
                 err = gve_adjust_config(priv, &qpls_alloc_cfg,
                                         &tx_alloc_cfg, &rx_alloc_cfg);

drivers/net/ethernet/google/gve/gve_main.c (+1, -37)

@@ -829,7 +829,6 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->qcfg = &priv->tx_cfg;
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->qpls = priv->qpls;
-        cfg->qpl_cfg = &priv->qpl_cfg;
         cfg->ring_size = priv->tx_desc_cnt;
         cfg->start_idx = 0;
         cfg->num_rings = gve_num_tx_queues(priv);
@@ -1119,22 +1118,13 @@ static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
         if (!qpls)
                 return -ENOMEM;
 
-        cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
-                sizeof(unsigned long) * BITS_PER_BYTE;
-        cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
-                                            sizeof(unsigned long), GFP_KERNEL);
-        if (!cfg->qpl_cfg->qpl_id_map) {
-                err = -ENOMEM;
-                goto free_qpl_array;
-        }
-
         /* Allocate TX QPLs */
         page_count = priv->tx_pages_per_qpl;
         tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
                                       gve_is_qpl(priv));
         err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
         if (err)
-                goto free_qpl_map;
+                goto free_qpl_array;
 
         /* Allocate RX QPLs */
         rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
@@ -1157,9 +1147,6 @@ static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
 
 free_tx_qpls:
         gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
-free_qpl_map:
-        kvfree(cfg->qpl_cfg->qpl_id_map);
-        cfg->qpl_cfg->qpl_id_map = NULL;
 free_qpl_array:
         kvfree(qpls);
         return err;
@@ -1175,9 +1162,6 @@ static void gve_free_qpls(struct gve_priv *priv,
         if (!qpls)
                 return;
 
-        kvfree(cfg->qpl_cfg->qpl_id_map);
-        cfg->qpl_cfg->qpl_id_map = NULL;
-
         for (i = 0; i < max_queues; i++)
                 gve_free_queue_page_list(priv, &qpls[i], i);
 
@@ -1292,7 +1276,6 @@ static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->is_gqi = gve_is_gqi(priv);
         cfg->num_xdp_queues = priv->num_xdp_queues;
-        cfg->qpl_cfg = &priv->qpl_cfg;
         cfg->tx_cfg = &priv->tx_cfg;
         cfg->rx_cfg = &priv->rx_cfg;
         cfg->qpls = priv->qpls;
@@ -1306,7 +1289,6 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->enable_header_split = priv->header_split_enabled;
         cfg->qpls = priv->qpls;
-        cfg->qpl_cfg = &priv->qpl_cfg;
         cfg->ring_size = priv->rx_desc_cnt;
         cfg->packet_buffer_size = gve_is_gqi(priv) ?
                                   GVE_DEFAULT_RX_BUFFER_SIZE :
@@ -1419,7 +1401,6 @@ static int gve_queues_start(struct gve_priv *priv,
         priv->rx = rx_alloc_cfg->rx;
 
         /* Record new configs into priv */
-        priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
         priv->tx_cfg = *tx_alloc_cfg->qcfg;
         priv->rx_cfg = *rx_alloc_cfg->qcfg;
         priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
@@ -1916,20 +1897,11 @@ int gve_adjust_queues(struct gve_priv *priv,
         struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
         struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
         struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
-        struct gve_qpl_config new_qpl_cfg;
         int err;
 
         gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
                                 &tx_alloc_cfg, &rx_alloc_cfg);
 
-        /* qpl_cfg is not read-only, it contains a map that gets updated as
-         * rings are allocated, which is why we cannot use the yet unreleased
-         * one in priv.
-         */
-        qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-
         /* Relay the new config from ethtool */
         qpls_alloc_cfg.tx_cfg = &new_tx_config;
         tx_alloc_cfg.qcfg = &new_tx_config;
@@ -2121,18 +2093,10 @@ static int gve_set_features(struct net_device *netdev,
         struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
         struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
         struct gve_priv *priv = netdev_priv(netdev);
-        struct gve_qpl_config new_qpl_cfg;
         int err;
 
         gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
                                 &tx_alloc_cfg, &rx_alloc_cfg);
-        /* qpl_cfg is not read-only, it contains a map that gets updated as
-         * rings are allocated, which is why we cannot use the yet unreleased
-         * one in priv.
-         */
-        qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
 
         if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
                 netdev->features ^= NETIF_F_LRO;

drivers/net/ethernet/google/gve/gve_rx.c (+4, -8)

@@ -38,7 +38,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
         for (i = 0; i < slots; i++)
                 page_ref_sub(rx->data.page_info[i].page,
                              rx->data.page_info[i].pagecnt_bias - 1);
-        gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
         rx->data.qpl = NULL;
 
         for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
@@ -145,13 +144,11 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
                 return -ENOMEM;
 
         if (!rx->data.raw_addressing) {
-                rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
-                if (!rx->data.qpl) {
-                        kvfree(rx->data.page_info);
-                        rx->data.page_info = NULL;
-                        return -ENOMEM;
-                }
+                u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+
+                rx->data.qpl = &cfg->qpls[qpl_id];
         }
+
         for (i = 0; i < slots; i++) {
                 if (!rx->data.raw_addressing) {
                         struct page *page = rx->data.qpl->pages[i];
@@ -204,7 +201,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
                 page_ref_sub(rx->data.page_info[i].page,
                              rx->data.page_info[i].pagecnt_bias - 1);
 
-        gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
         rx->data.qpl = NULL;
 
         return err;

drivers/net/ethernet/google/gve/gve_rx_dqo.c (+5, -7)

@@ -247,10 +247,8 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
                 if (bs->page_info.page)
                         gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
         }
-        if (rx->dqo.qpl) {
-                gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
-                rx->dqo.qpl = NULL;
-        }
+
+        rx->dqo.qpl = NULL;
 
         if (rx->dqo.bufq.desc_ring) {
                 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
@@ -359,9 +357,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
                 goto err;
 
         if (!cfg->raw_addressing) {
-                rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
-                if (!rx->dqo.qpl)
-                        goto err;
+                u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+
+                rx->dqo.qpl = &cfg->qpls[qpl_id];
                 rx->dqo.next_qpl_page_idx = 0;
         }

drivers/net/ethernet/google/gve/gve_tx.c (+4, -8)

@@ -225,7 +225,6 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
 
         if (!tx->raw_addressing) {
                 gve_tx_fifo_release(priv, &tx->tx_fifo);
-                gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
                 tx->tx_fifo.qpl = NULL;
         }
 
@@ -280,12 +279,12 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
         tx->raw_addressing = cfg->raw_addressing;
         tx->dev = hdev;
         if (!tx->raw_addressing) {
-                tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
-                if (!tx->tx_fifo.qpl)
-                        goto abort_with_desc;
+                u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+
+                tx->tx_fifo.qpl = &cfg->qpls[qpl_id];
                 /* map Tx FIFO */
                 if (gve_tx_fifo_init(priv, &tx->tx_fifo))
-                        goto abort_with_qpl;
+                        goto abort_with_desc;
         }
 
         tx->q_resources =
@@ -301,9 +300,6 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
 abort_with_fifo:
         if (!tx->raw_addressing)
                 gve_tx_fifo_release(priv, &tx->tx_fifo);
-abort_with_qpl:
-        if (!tx->raw_addressing)
-                gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
 abort_with_desc:
         dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
         tx->desc = NULL;

drivers/net/ethernet/google/gve/gve_tx_dqo.c (+4, -7)

@@ -236,10 +236,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
         kvfree(tx->dqo.tx_qpl_buf_next);
         tx->dqo.tx_qpl_buf_next = NULL;
 
-        if (tx->dqo.qpl) {
-                gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
-                tx->dqo.qpl = NULL;
-        }
+        tx->dqo.qpl = NULL;
 
         netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
 }
@@ -352,9 +349,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
                 goto err;
 
         if (!cfg->raw_addressing) {
-                tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
-                if (!tx->dqo.qpl)
-                        goto err;
+                u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+
+                tx->dqo.qpl = &cfg->qpls[qpl_id];
 
                 if (gve_tx_qpl_buf_init(tx))
                         goto err;