From c47a703f9899df15511312517daaf0a8413593dd Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Tue, 10 Oct 2023 12:45:48 +0200 Subject: [PATCH 1/9] idpf: fix page offset computing in construct skb The page pool feature allows for setting the page offset as one of the creation parameters. Such an offset can be used for XDP-specific configuration of the page pool when we need some extra space reserved for the packet headroom. Unfortunately, the page offset value (from the page pool) was never used during the SKB build, which can have a negative impact when the XDP_PASS action is returned and the received packet should be passed to the kernel network stack. Address the problem by adding the page offset from the page pool when the SKB offset is being computed. Fixes: 3a8845af66edb ("idpf: add RX splitq napi poll support") Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 1646ff3877baa8..66ca9226c7c309 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -2981,7 +2981,8 @@ void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->page_offset, size, rx_buf->truesize); + rx_buf->page_offset + rx_buf->page->pp->p.offset, + size, rx_buf->truesize); rx_buf->page = NULL; } @@ -3004,7 +3005,8 @@ struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, struct sk_buff *skb; void *va; - va = page_address(rx_buf->page) + rx_buf->page_offset; + va = page_address(rx_buf->page) + rx_buf->page_offset + + rx_buf->page->pp->p.offset; /* prefetch first cache line of first page */ net_prefetch(va); From 545992cba46b01ed6fb7e9b554ebc4b7ceb02973 Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Tue, 3 Oct 2023 20:47:46 +0200 Subject: [PATCH 2/9] idpf: add function for hsplit configuring The IDPF driver supports the header split feature, and that feature is always enabled by default. However, for flexibility reasons and to simplify the implementation of some features, it would be useful to be able to switch header split off. Address that need by adding a user config parameter and a function for disabling (or enabling) the header split feature.
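For illustration only, a hypothetical caller (e.g. an ethtool private-flag handler built on top of this) could use the new helper roughly as sketched below. The reset reason IDPF_SR_Q_CHANGE is an assumption here, not part of this patch; idpf_vport_set_hsplit() itself decides whether enabling is actually possible:

  /* Hypothetical usage sketch -- not part of this patch. */
  static int example_set_hsplit(struct idpf_vport *vport, bool hsplit_req)
  {
  	/* Disabling always succeeds; enabling is a no-op when the HW lacks
  	 * the header split capability or the vport does not use the split
  	 * queue model.
  	 */
  	idpf_vport_set_hsplit(vport, hsplit_req);

  	/* Assumed: a soft reset is needed for the new setting to take
  	 * effect on already-configured queues.
  	 */
  	return idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
  }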
Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf.h | 2 ++ drivers/net/ethernet/intel/idpf/idpf_lib.c | 21 +++++++++++++++++++ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 14 +++++++------ .../net/ethernet/intel/idpf/idpf_virtchnl.c | 2 ++ 4 files changed, 33 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index bee73353b56a8b..64af36f2cdba64 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -423,6 +423,7 @@ struct idpf_vport { * @__IDPF_USER_FLAGS_NBITS: Must be last */ enum idpf_user_flags { + __IDPF_PRIV_FLAGS_HDR_SPLIT = 0, __IDPF_PROMISC_UC = 32, __IDPF_PROMISC_MC, @@ -939,6 +940,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, u16 msg_size, u8 *msg); void idpf_set_ethtool_ops(struct net_device *netdev); +void idpf_vport_set_hsplit(struct idpf_vport *vport, bool ena); int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, struct idpf_vport_max_q *max_q); void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 19809b0ddcd909..33e98f34942761 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1057,6 +1057,27 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) adapter->next_vport = idpf_get_free_slot(adapter); } +/** + * idpf_vport_set_hsplit - enable or disable header split on a given vport + * @vport: virtual port + * @ena: flag controlling header split, On (true) or Off (false) + */ +void idpf_vport_set_hsplit(struct idpf_vport *vport, bool ena) +{ + struct idpf_vport_user_config_data *config_data; + + config_data = &vport->adapter->vport_config[vport->idx]->user_config; + if (!ena) { + clear_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, config_data->user_flags); + return; + } + + if (idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT) && + idpf_is_queue_model_split(vport->rxq_model)) + set_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, config_data->user_flags); +} + /** * idpf_vport_alloc - Allocates the next available struct vport in the adapter * @adapter: board private structure diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 66ca9226c7c309..72bd5ad0a81605 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1251,8 +1251,11 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + struct idpf_vport_user_config_data *config_data; + u16 idx = vport->idx; int j; + config_data = &adapter->vport_config[idx]->user_config; rx_qgrp->vport = vport; if (!idpf_is_queue_model_split(vport->rxq_model)) { rx_qgrp->singleq.num_rxq = num_rxq; @@ -1301,9 +1304,9 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) q->rx_buf_size = vport->bufq_size[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_buf_stride = IDPF_RX_BUF_STRIDE; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + + if (test_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, + config_data->user_flags)) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } @@ -1347,9 +1350,8 @@ static int idpf_rxq_group_alloc(struct 
idpf_vport *vport, u16 num_rxq) rx_qgrp->splitq.rxq_sets[j]->refillq1 = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + if (test_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, + config_data->user_flags)) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index e276b5360c2ed8..04e56a4002dae7 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -3284,6 +3284,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); + idpf_vport_set_hsplit(vport, true); + idpf_vport_init_num_qs(vport, vport_msg); idpf_vport_calc_num_q_desc(vport); idpf_vport_calc_num_q_groups(vport); From c5929512724f74dfff4a3987f461e75a4513b779 Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Tue, 3 Oct 2023 11:29:25 +0200 Subject: [PATCH 3/9] idpf: prepare structures to support xdp Extend basic structures of the driver (e.g. 'idpf_vport', 'idpf_queue', 'idpf_vport_user_config_data') by adding members necessary to support XDP. Add extra XDP Tx queues needed to support XDP_TX and XDP_REDIRECT actions without interfering a regular Tx traffic. Also add functions dedicated to support XDP initialization for Rx and Tx queues and call those functions from the existing algorithms of queues configuration. Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf.h | 24 +++ .../net/ethernet/intel/idpf/idpf_ethtool.c | 2 + drivers/net/ethernet/intel/idpf/idpf_lib.c | 26 ++- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 183 +++++++++++++++++- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 7 + .../net/ethernet/intel/idpf/idpf_virtchnl.c | 16 +- 6 files changed, 249 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 64af36f2cdba64..a66c2e65303516 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -16,6 +16,9 @@ struct idpf_vport_max_q; #include #include #include +#include +#include +#include #include #include @@ -376,6 +379,13 @@ struct idpf_vport { struct idpf_queue **txqs; bool crc_enable; + int num_xdp_txq; + int num_xdp_rxq; + int num_xdp_complq; + int xdp_txq_offset; + int xdp_rxq_offset; + int xdp_complq_offset; + u16 num_rxq; u16 num_bufq; u32 rxq_desc_count; @@ -467,6 +477,8 @@ struct idpf_vport_user_config_data { u16 num_req_rx_qs; u32 num_req_txq_desc; u32 num_req_rxq_desc; + /* Duplicated in queue structure for performance reasons */ + struct bpf_prog *xdp_prog; DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS); struct list_head mac_filter_list; }; @@ -685,6 +697,18 @@ static inline int idpf_is_queue_model_split(u16 q_model) return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; } +/** + * idpf_xdp_is_prog_ena - check if there is an XDP program on adapter + * @vport: vport to check + */ +static inline bool idpf_xdp_is_prog_ena(struct idpf_vport *vport) +{ + if (!vport->adapter) + return false; + + return !!vport->adapter->vport_config[vport->idx]->user_config.xdp_prog; +} + #define idpf_is_cap_ena(adapter, field, flag) \ idpf_is_capability_ena(adapter, false, field, flag) #define idpf_is_cap_ena_all(adapter, field, flag) \ diff --git 
a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 52ea38669f85b3..01f58ab9932703 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -900,6 +900,8 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, if (!txq) idpf_add_empty_queue_stats(&data, qtype); + else if (test_bit(__IDPF_Q_XDP, txq->flags)) + continue; else idpf_add_queue_stats(&data, txq); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 33e98f34942761..401f5d12cabe0b 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1255,13 +1255,18 @@ static void idpf_restore_features(struct idpf_vport *vport) */ static int idpf_set_real_num_queues(struct idpf_vport *vport) { - int err; + int num_txq, err; err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); if (err) return err; - return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq); + if (idpf_xdp_is_prog_ena(vport)) + num_txq = vport->num_txq - vport->num_xdp_txq; + else + num_txq = vport->num_txq; + + return netif_set_real_num_tx_queues(vport->netdev, num_txq); } /** @@ -1374,6 +1379,15 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) idpf_rx_init_buf_tail(vport); + if (idpf_xdp_is_prog_ena(vport)) { + err = idpf_xdp_rxq_info_init_all(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize XDP info for vport %u, %d\n", + vport->vport_id, err); + goto intr_deinit; + } + } + err = idpf_send_config_queues_msg(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", @@ -2213,10 +2227,18 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + if (idpf_xdp_is_prog_ena(vport) && new_mtu > IDPF_XDP_MAX_MTU) { + netdev_err(netdev, "New MTU value is not valid. 
The maximum MTU value is %d.\n", + IDPF_XDP_MAX_MTU); + err = -EINVAL; + goto unlock_exit; + } + netdev->mtu = new_mtu; err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); +unlock_exit: idpf_vport_ctrl_unlock(netdev); return err; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 72bd5ad0a81605..98eb7eeaec03d8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -705,6 +705,73 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) return 0; } +/** + * idpf_xdp_rxq_info_init - Setup XDP for a given Rx queue + * @rxq: Rx queue for which the resources are setup + * @splitq: flag indicating if the HW works in split queue mode + * + * Returns 0 on success, negative on failure + */ +static int idpf_xdp_rxq_info_init(struct idpf_queue *rxq, bool splitq) +{ + struct page_pool *pp; + int err; + + if (!xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + xdp_rxq_info_reg(&rxq->xdp_rxq, rxq->vport->netdev, + rxq->idx, rxq->q_vector->napi.napi_id); + + if (splitq) { + int num_bufq = rxq->vport->num_bufqs_per_qgrp; + + if (num_bufq != IDPF_SINGLE_BUFQ_PER_RXQ_GRP) + return -EINVAL; + pp = rxq->rxq_grp->splitq.bufq_sets[0].bufq.pp; + } else { + pp = rxq->pp; + } + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, + MEM_TYPE_PAGE_POOL, pp); + + return err; +} + +/** + * idpf_xdp_rxq_info_init_all - Initiate XDP for all Rx queues in vport + * @vport: vport to setup the XDP + * + * Returns 0 on success, negative on failure + */ +int idpf_xdp_rxq_info_init_all(struct idpf_vport *vport) +{ + bool splitq = idpf_is_queue_model_split(vport->rxq_model); + struct idpf_rxq_group *rx_qgrp; + struct idpf_queue *q; + int i, j, err; + u16 num_rxq; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + if (splitq) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (splitq) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + err = idpf_xdp_rxq_info_init(q, splitq); + if (err) + return err; + } + } + + return 0; +} + /** * idpf_rx_desc_alloc - Allocate queue Rx resources * @rxq: Rx queue for which the resources are setup @@ -966,6 +1033,23 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, if (idpf_is_queue_model_split(vport->rxq_model)) vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); + vport->num_xdp_rxq = 0; + vport->xdp_rxq_offset = 0; + if (!idpf_xdp_is_prog_ena(vport)) { + vport->num_xdp_txq = 0; + vport->xdp_txq_offset = 0; + goto adjust_bufqs; + } + /* Do not create dummy Rx queues by default */ + vport->num_xdp_txq = le16_to_cpu(vport_msg->num_rx_q); + vport->xdp_txq_offset = le16_to_cpu(vport_msg->num_tx_q) - + le16_to_cpu(vport_msg->num_rx_q); + + if (idpf_is_queue_model_split(vport->txq_model)) { + vport->num_xdp_complq = vport->num_xdp_txq; + vport->xdp_complq_offset = vport->xdp_txq_offset; + } +adjust_bufqs: /* Adjust number of buffer queues per Rx queue group. 
*/ if (!idpf_is_queue_model_split(vport->rxq_model)) { vport->num_bufqs_per_qgrp = 0; @@ -974,12 +1058,20 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, return; } - vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; - /* Bufq[0] default buffer size is 4K - * Bufq[1] default buffer size is 2K - */ - vport->bufq_size[0] = IDPF_RX_BUF_4096; - vport->bufq_size[1] = IDPF_RX_BUF_2048; + if (idpf_xdp_is_prog_ena(vport)) { + /* After loading the XDP program we will have only one buffer + * queue per group with buffer size 4kB. + */ + vport->num_bufqs_per_qgrp = IDPF_SINGLE_BUFQ_PER_RXQ_GRP; + vport->bufq_size[0] = IDPF_RX_BUF_4096; + } else { + vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; + /* Bufq[0] default buffer size is 4K + * Bufq[1] default buffer size is 2K + */ + vport->bufq_size[0] = IDPF_RX_BUF_4096; + vport->bufq_size[1] = IDPF_RX_BUF_2048; + } } /** @@ -1092,6 +1184,22 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx, vport_msg->num_rx_q = cpu_to_le16(num_qs); vport_msg->num_rx_bufq = 0; } + if (!vport_config || !vport_config->user_config.xdp_prog) + return 0; + + /* As we now know new number of Rx and Tx queues, we can request + * additional Tx queues for XDP. For each Rx queue request additional + * Tx queue for XDP use. + */ + vport_msg->num_tx_q = + cpu_to_le16(le16_to_cpu(vport_msg->num_tx_q) + + le16_to_cpu(vport_msg->num_rx_q)); + if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) + vport_msg->num_tx_complq = vport_msg->num_tx_q; + + /* For XDP request only one bufq per Rx queue group */ + if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) + vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps); return 0; } @@ -1437,6 +1545,13 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport) if (err) goto err_out; + if (idpf_xdp_is_prog_ena(vport)) { + int j; + + for (j = vport->xdp_txq_offset; j < vport->num_txq; j++) + __set_bit(__IDPF_Q_XDP, vport->txqs[j]->flags); + } + return 0; err_out: @@ -3964,13 +4079,24 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) */ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) { + bool is_xdp_prog_ena = idpf_xdp_is_prog_ena(vport); u16 num_txq_grp = vport->num_txq_grp; int i, j, qv_idx, bufq_vidx = 0; struct idpf_rxq_group *rx_qgrp; struct idpf_txq_group *tx_qgrp; struct idpf_queue *q, *bufq; + int num_active_rxq; u16 q_index; + if (is_xdp_prog_ena) + /* XDP Tx queues are handled within Rx loop, + * correct num_txq_grp so that it stores number of + * regular Tx queue groups. This way when we later assign Tx to + * qvector, we go only through regular Tx queues. + */ + if (idpf_is_queue_model_split(vport->txq_model)) + num_txq_grp = vport->xdp_txq_offset; + for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { u16 num_rxq; @@ -3980,6 +4106,8 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) else num_rxq = rx_qgrp->singleq.num_rxq; + num_active_rxq = num_rxq - vport->num_xdp_rxq; + for (j = 0; j < num_rxq; j++) { if (qv_idx >= vport->num_q_vectors) qv_idx = 0; @@ -3992,6 +4120,30 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) q_index = q->q_vector->num_rxq; q->q_vector->rx[q_index] = q; q->q_vector->num_rxq++; + + /* Do not setup XDP Tx queues for dummy Rx queues. 
*/ + if (j >= num_active_rxq) + goto skip_xdp_txq_config; + + if (is_xdp_prog_ena) { + if (idpf_is_queue_model_split(vport->txq_model)) { + tx_qgrp = &vport->txq_grps[i + vport->xdp_txq_offset]; + q = tx_qgrp->complq; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + } else { + tx_qgrp = &vport->txq_grps[i]; + q = tx_qgrp->txqs[j + vport->xdp_txq_offset]; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + } + } + +skip_xdp_txq_config: qv_idx++; } @@ -4025,6 +4177,9 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) q->q_vector->num_txq++; qv_idx++; } else { + num_txq = is_xdp_prog_ena ? tx_qgrp->num_txq - vport->xdp_txq_offset + : tx_qgrp->num_txq; + for (j = 0; j < num_txq; j++) { if (qv_idx >= vport->num_q_vectors) qv_idx = 0; @@ -4126,6 +4281,13 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) DIV_ROUND_UP(vport->num_rxq_grp, vport->num_q_vectors); + /* For XDP we assign both Tx and XDP Tx queues + * to the same q_vector. + * Reserve doubled number of Tx queues per vector. + */ + if (idpf_xdp_is_prog_ena(vport)) + txqs_per_vector *= 2; + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { q_vector = &vport->q_vectors[v_idx]; q_vector->vport = vport; @@ -4246,6 +4408,15 @@ static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport) rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + /* When we use this code for legacy devices (e.g. in AVF driver), some + * Rx queues may not be used because we would not be able to create XDP + * Tx queues for them. In such a case do not add their queue IDs to the + * RSS LUT by setting the number of active Rx queues to XDP Tx queues + * count. 
+ */ + if (idpf_xdp_is_prog_ena(vport)) + num_active_rxq -= vport->num_xdp_rxq; + for (i = 0; i < rss_data->rss_lut_size; i++) { rss_data->rss_lut[i] = i % num_active_rxq; rss_data->cached_lut[i] = rss_data->rss_lut[i]; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index df76493faa7569..5013053c90648f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -164,6 +164,8 @@ do { \ #define IDPF_TX_FLAGS_IPV6 BIT(2) #define IDPF_TX_FLAGS_TUNNEL BIT(3) +#define IDPF_XDP_MAX_MTU 3046 + union idpf_tx_flex_desc { struct idpf_flex_tx_desc q; /* queue based scheduling */ struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */ @@ -463,6 +465,7 @@ enum idpf_queue_flags_t { __IDPF_Q_FLOW_SCH_EN, __IDPF_Q_SW_MARKER, __IDPF_Q_POLL_MODE, + __IDPF_Q_XDP, __IDPF_Q_FLAGS_NBITS, }; @@ -733,6 +736,9 @@ struct idpf_queue { dma_addr_t dma; void *desc_ring; + struct bpf_prog *xdp_prog; + struct xdp_rxq_info xdp_rxq; + u16 tx_max_bufs; u8 tx_min_pkt_len; @@ -993,6 +999,7 @@ int idpf_config_rss(struct idpf_vport *vport); int idpf_init_rss(struct idpf_vport *vport); void idpf_deinit_rss(struct idpf_vport *vport); int idpf_rx_bufs_init_all(struct idpf_vport *vport); +int idpf_xdp_rxq_info_init_all(struct idpf_vport *vport); void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size); struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 04e56a4002dae7..c9a1076aa58a3f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -3228,6 +3228,17 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) vec_info.default_vport = vport->default_vport; vec_info.index = vport->idx; + /* Additional XDP Tx queues share the q_vector with regular Tx and Rx + * queues to which they are assigned. Also, XDP shall request additional + * Tx queues via VIRTCHNL. Therefore, to avoid exceeding over + * "vport->q_vector_idxs array", do not request empty q_vectors + * for XDP Tx queues. + */ + if (idpf_xdp_is_prog_ena(vport)) + vec_info.num_req_vecs = max_t(u16, + vport->num_txq - vport->num_xdp_txq, + vport->num_rxq); + num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, vport->q_vector_idxs, &vec_info); @@ -3284,7 +3295,10 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); - idpf_vport_set_hsplit(vport, true); + if (idpf_xdp_is_prog_ena(vport)) + idpf_vport_set_hsplit(vport, false); + else + idpf_vport_set_hsplit(vport, true); idpf_vport_init_num_qs(vport, vport_msg); idpf_vport_calc_num_q_desc(vport); From 853e4dc29776b0cf00881b5c81390d425519337e Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Wed, 4 Oct 2023 17:49:07 +0200 Subject: [PATCH 4/9] idpf: implement XDP_SETUP_PROG in ndo_bpf for splitq Implement loading the XDP program using ndo_bpf callback for splitq and XDP_SETUP_PROG parameter. Add functions for stopping, reconfiguring and restarting all queues when needed. Also, implement the XDP hot swap mechanism when the existing XDP program is replaced by another one (without a necessity of reconfiguring anything). 
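For completeness, this is how the new callback is typically exercised from user space. The sketch below is illustrative only (it assumes libbpf v0.8+ and a prebuilt xdp_prog.o, neither of which is part of this patch); the attach request reaches the driver via ndo_bpf with XDP_SETUP_PROG and lands in idpf_xdp_setup_prog():

  #include <bpf/libbpf.h>
  #include <net/if.h>

  /* Attach the first program found in obj_path to ifname. */
  static int attach_xdp(const char *ifname, const char *obj_path)
  {
  	struct bpf_object *obj = bpf_object__open_file(obj_path, NULL);
  	struct bpf_program *prog;
  	int ifindex = if_nametoindex(ifname);

  	if (!obj || !ifindex || bpf_object__load(obj))
  		return -1;

  	prog = bpf_object__next_program(obj, NULL);
  	if (!prog)
  		return -1;

  	/* Triggers ndo_bpf(XDP_SETUP_PROG) in the driver. */
  	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
  }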
Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf_lib.c | 189 ++++++++++++++++++++ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 21 +++ 2 files changed, 210 insertions(+) diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 401f5d12cabe0b..1943423acab2d8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -2305,6 +2305,194 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +/** + * idpf_copy_xdp_prog_to_qs - set pointers to xdp program for each Rx queue + * @vport: vport to setup XDP for + * @xdp_prog: XDP program that should be copied to all Rx queues + */ +static void +idpf_copy_xdp_prog_to_qs(struct idpf_vport *vport, struct bpf_prog *xdp_prog) +{ + int i; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + struct idpf_queue *q; + u16 j, num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + WRITE_ONCE(q->xdp_prog, xdp_prog); + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + continue; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + WRITE_ONCE(q->xdp_prog, xdp_prog); + } + } +} + +/** + * idpf_xdp_reconfig_queues - reconfigure queues after the XDP setup + * @vport: vport to load or unload XDP for + */ +static int idpf_xdp_reconfig_queues(struct idpf_vport *vport) +{ + int err; + + err = idpf_vport_adjust_qs(vport); + if (err) { + netdev_err(vport->netdev, + "Could not adjust queue number for XDP\n"); + return err; + } + idpf_vport_calc_num_q_desc(vport); + + err = idpf_vport_queues_alloc(vport); + if (err) { + netdev_err(vport->netdev, + "Could not allocate queues for XDP\n"); + return err; + } + + err = idpf_send_add_queues_msg(vport, vport->num_txq, + vport->num_complq, + vport->num_rxq, vport->num_bufq); + if (err) { + netdev_err(vport->netdev, + "Could not add queues for XDP, VC message sent failed\n"); + return err; + } + + idpf_vport_alloc_vec_indexes(vport); + + return 0; +} + +/** + * idpf_assign_bpf_prog - Assign a given BPF program to vport + * @current_prog: pointer to XDP program in user config data + * @prog: BPF program to be assigned to vport + */ +static void idpf_assign_bpf_prog(struct bpf_prog **current_prog, + struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + + old_prog = xchg(current_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); +} + +/** + * idpf_xdp_setup_prog - Add or remove XDP eBPF program + * @vport: vport to setup XDP for + * @prog: XDP program + * @extack: netlink extended ack + */ +static int +idpf_xdp_setup_prog(struct idpf_vport *vport, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + int frame_size = vport->netdev->mtu; + bool needs_reconfig, vport_is_up; + struct bpf_prog **current_prog; + u16 idx = vport->idx; + int err; + + if (frame_size > IDPF_XDP_MAX_MTU || + frame_size > vport->bufq_size[0]) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); + return -EOPNOTSUPP; + } + + vport_is_up = np->state == __IDPF_VPORT_UP; + + current_prog = 
&vport->adapter->vport_config[idx]->user_config.xdp_prog; + needs_reconfig = !!(*current_prog) != !!prog; + + if (!needs_reconfig) { + idpf_copy_xdp_prog_to_qs(vport, prog); + idpf_assign_bpf_prog(current_prog, prog); + + return 0; + } + + if (!vport_is_up) { + idpf_send_delete_queues_msg(vport); + } else { + set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags); + idpf_vport_stop(vport); + } + + idpf_deinit_rss(vport); + + if (!*current_prog && prog) { + netdev_warn(vport->netdev, + "Setting up XDP disables header split\n"); + idpf_vport_set_hsplit(vport, false); + } else { + idpf_vport_set_hsplit(vport, true); + } + + idpf_assign_bpf_prog(current_prog, prog); + + err = idpf_xdp_reconfig_queues(vport); + if (err) { + netdev_err(vport->netdev, + "Could not reconfigure the queues after XDP setup\n"); + return err; + } + + if (vport_is_up) { + err = idpf_vport_open(vport, false); + if (err) { + netdev_err(vport->netdev, + "Could not re-open the vport after XDP setup\n"); + return err; + } + } + + return 0; +} + +/** + * idpf_xdp - implements XDP handler + * @netdev: netdevice + * @xdp: XDP command + */ +static int idpf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + err = idpf_xdp_setup_prog(vport, xdp->prog, xdp->extack); + break; + default: + err = -EINVAL; + } + + idpf_vport_ctrl_unlock(netdev); + return err; +} + /** * idpf_set_mac - NDO callback to set port mac address * @netdev: network interface device structure @@ -2405,6 +2593,7 @@ static const struct net_device_ops idpf_netdev_ops_splitq = { .ndo_get_stats64 = idpf_get_stats64, .ndo_set_features = idpf_set_features, .ndo_tx_timeout = idpf_tx_timeout, + .ndo_bpf = idpf_xdp, }; static const struct net_device_ops idpf_netdev_ops_singleq = { diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 98eb7eeaec03d8..d517921d8f8428 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -705,6 +705,24 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) return 0; } +/** + * idpf_xdp_rxbufq_init - Prepare and configure XDP structures on Rx queue + * @q: rx queue where XDP should be initialized + * + * Returns 0 on success or error code in case of any failure + */ +static void idpf_xdp_rxbufq_init(struct idpf_queue *q) +{ + struct idpf_vport_user_config_data *config_data; + struct idpf_adapter *adapter; + int idx = q->vport->idx; + + adapter = q->vport->adapter; + config_data = &adapter->vport_config[idx]->user_config; + + WRITE_ONCE(q->xdp_prog, config_data->xdp_prog); +} + /** * idpf_xdp_rxq_info_init - Setup XDP for a given Rx queue * @rxq: Rx queue for which the resources are setup @@ -806,6 +824,9 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->next_to_use = 0; set_bit(__IDPF_Q_GEN_CHK, rxq->flags); + if (idpf_xdp_is_prog_ena(rxq->vport)) + idpf_xdp_rxbufq_init(rxq); + return 0; } From e2b318bf25111702b13bbae6cf9f73f32a1ecb3c Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Mon, 9 Oct 2023 15:35:54 +0200 Subject: [PATCH 5/9] idpf: add support for XDP_PASS and XDP_DROP Implement basic setup of the XDP program. Extend the function for creating the page pool by adding a support for XDP headroom configuration. Add handling of XDP_PASS and XDP_DROP action. 
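As a usage illustration (not part of the patch), a trivial XDP program such as the one below exercises both handled actions: frames that return XDP_PASS continue to skb construction with the page pool headroom accounted for, while XDP_DROP frames are returned to the page pool without building an skb.

  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  /* Pass ARP frames to the stack, drop everything else. */
  SEC("xdp")
  int xdp_pass_arp(struct xdp_md *ctx)
  {
  	void *data = (void *)(long)ctx->data;
  	void *data_end = (void *)(long)ctx->data_end;
  	struct ethhdr *eth = data;

  	if (data + sizeof(*eth) > data_end)
  		return XDP_DROP;

  	return eth->h_proto == bpf_htons(ETH_P_ARP) ? XDP_PASS : XDP_DROP;
  }

  char _license[] SEC("license") = "GPL";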
Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 102 +++++++++++++++++--- 1 file changed, 89 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index d517921d8f8428..043db565f4a361 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -21,6 +21,17 @@ static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, return 0; } +/** + * idpf_is_xdp_enabled - Check if XDP is enabled on the rx or buffer queue + * @rxbufq: rx or buffer queue + * + * Returns true if the queue has been configured for XDP. + */ +static bool idpf_is_xdp_enabled(const struct idpf_queue *rxbufq) +{ + return !!rcu_access_pointer(rxbufq->xdp_prog); +} + /** * idpf_buf_lifo_pop - pop a buffer pointer from stack * @stack: pointer to stack struct @@ -579,11 +590,14 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) /** * idpf_rx_create_page_pool - Create a page pool * @rxbufq: RX queue to create page pool for + * @xdp: flag indicating if XDP program is loaded * * Returns &page_pool on success, casted -errno on failure */ -static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) +static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq, + bool xdp) { + u32 hr = xdp ? XDP_PACKET_HEADROOM : 0; struct page_pool_params pp = { .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .order = 0, @@ -591,8 +605,8 @@ static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) .nid = NUMA_NO_NODE, .dev = rxbufq->vport->netdev->dev.parent, .max_len = PAGE_SIZE, - .dma_dir = DMA_FROM_DEVICE, - .offset = 0, + .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = hr, }; if (rxbufq->rx_buf_size == IDPF_RX_BUF_2048) @@ -655,7 +669,8 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) { struct page_pool *pool; - pool = idpf_rx_create_page_pool(rxbufq); + pool = idpf_rx_create_page_pool(rxbufq, + idpf_is_xdp_enabled(rxbufq)); if (IS_ERR(pool)) return PTR_ERR(pool); @@ -3248,6 +3263,47 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de IDPF_RXD_EOF_SPLITQ)); } +/** + * idpf_run_xdp - Run XDP program and perform the resulting action + * @rx_bufq: Rx buffer queue + * @rx_buf: Rx buffer containing the packet + * @xdp: an initiated XDP buffer + * @xdp_prog: an XDP program assigned to the vport + * @size: size of the packet + * + * Returns the resulting XDP action.
+ */ +static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq, + struct idpf_rx_buf *rx_buf, + struct xdp_buff *xdp, + struct bpf_prog *xdp_prog, + unsigned int size) +{ + u32 hr = rx_bufq->pp->p.offset; + unsigned int xdp_act; + + xdp_prepare_buff(xdp, page_address(rx_buf->page), hr, size, true); + + xdp_act = bpf_prog_run_xdp(xdp_prog, xdp); + rx_buf->truesize = max_t(u32, xdp->data_end - xdp->data_hard_start - hr, + rx_buf->truesize); + switch (xdp_act) { + case XDP_PASS: + case XDP_DROP: + break; + default: + bpf_warn_invalid_xdp_action(rx_bufq->vport->netdev, xdp_prog, + xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_bufq->vport->netdev, xdp_prog, xdp_act); + + return XDP_DROP; + } + + return xdp_act; +} + /** * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue * @rxq: Rx descriptor queue to retrieve receive buffer queue @@ -3266,6 +3322,11 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) struct idpf_queue *rx_bufq = NULL; struct sk_buff *skb = rxq->skb; u16 ntc = rxq->next_to_clean; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + + xdp_prog = rcu_dereference(rxq->xdp_prog); + xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); /* Process Rx packets bounded by budget */ while (likely(total_rx_pkts < budget)) { @@ -3273,11 +3334,12 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) struct idpf_sw_queue *refillq = NULL; struct idpf_rxq_set *rxq_set = NULL; struct idpf_rx_buf *rx_buf = NULL; + unsigned int xdp_act = XDP_PASS; union virtchnl2_rx_desc *desc; unsigned int pkt_len = 0; unsigned int hdr_len = 0; u16 gen_id, buf_id = 0; - /* Header buffer overflow only valid for header split */ + /* Header buffer overflow only valid for header split */ bool hbo = false; int bufq_id; u8 rxdid; @@ -3359,17 +3421,31 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) u64_stats_update_end(&rxq->stats_sync); } - if (pkt_len) { - idpf_rx_sync_for_cpu(rx_buf, pkt_len); - if (skb) - idpf_rx_add_frag(rx_buf, skb, pkt_len); - else - skb = idpf_rx_construct_skb(rxq, rx_buf, - pkt_len); - } else { + if (!pkt_len) { idpf_rx_put_page(rx_buf); + goto pkt_len_zero; } + idpf_rx_sync_for_cpu(rx_buf, pkt_len); + if (xdp_prog) + xdp_act = idpf_run_xdp(rx_bufq, rx_buf, &xdp, xdp_prog, + pkt_len); + if (xdp_act != XDP_PASS) { + idpf_rx_put_page(rx_buf); + + total_rx_bytes += pkt_len; + total_rx_pkts++; + idpf_rx_post_buf_refill(refillq, buf_id); + IDPF_RX_BUMP_NTC(rxq, ntc); + continue; + } + + if (skb) + idpf_rx_add_frag(rx_buf, skb, pkt_len); + else + skb = idpf_rx_construct_skb(rxq, rx_buf, + pkt_len); +pkt_len_zero: /* exit if we failed to retrieve a buffer */ if (!skb) break; From f59079c561e47e3836c627336083530517f1eb2d Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Fri, 20 Oct 2023 11:56:51 +0200 Subject: [PATCH 6/9] idpf: make complq cleaning dependent on scheduling mode Implement two separate completion queue cleaning functions which should be used depending on the scheduling mode: - queue-based scheduling (idpf_tx_clean_qb_complq) - flow-based scheduling (idpf_tx_clean_fb_complq). Add 4-byte descriptor for queue-based scheduling mode and perform some refactoring to extract the common code for both scheduling modes. 
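In short, the cleaning routine is now selected per completion queue based on its scheduling mode; a simplified view of the dispatch added at the end of this patch (in idpf_tx_splitq_clean_all(), names shortened here) looks like this:

  /* Queue-based scheduling walks the new 4-byte
   * idpf_splitq_4b_tx_compl_desc entries; flow-based scheduling keeps
   * using the full idpf_splitq_tx_compl_desc (common part plus ts[]).
   */
  if (test_bit(__IDPF_Q_FLOW_SCH_EN, complq->flags))
  	clean &= idpf_tx_clean_fb_complq(complq, budget_per_q, cleaned);
  else
  	clean &= idpf_tx_clean_qb_complq(complq, budget_per_q, cleaned);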
Signed-off-by: Michal Kubiak --- .../net/ethernet/intel/idpf/idpf_lan_txrx.h | 6 +- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 344 +++++++++++++----- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 2 + 3 files changed, 255 insertions(+), 97 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h index a5752dcab8887c..7f8fc9b61e902c 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -184,13 +184,17 @@ struct idpf_base_tx_desc { __le64 qw1; /* type_cmd_offset_bsz_l2tag1 */ }; /* read used with buffer queues */ -struct idpf_splitq_tx_compl_desc { +struct idpf_splitq_4b_tx_compl_desc { /* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */ __le16 qid_comptype_gen; union { __le16 q_head; /* Queue head */ __le16 compl_tag; /* Completion tag */ } q_head_compl_tag; +}; /* writeback used with completion queues */ + +struct idpf_splitq_tx_compl_desc { + struct idpf_splitq_4b_tx_compl_desc common; u8 ts[3]; u8 rsvd; /* Reserved */ }; /* writeback used with completion queues */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 043db565f4a361..9aeab320ae6540 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1917,8 +1917,8 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, } /** - * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers - * whether on the buffer ring or in the hash table + * idpf_tx_handle_rs_cmpl_qb - clean a single packet and all of its buffers + * whether the Tx queue is working in queue-based scheduling * @txq: Tx ring to clean * @desc: pointer to completion queue descriptor to extract completion * information from @@ -1927,20 +1927,33 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, * * Returns bytes/packets cleaned */ -static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, - struct idpf_splitq_tx_compl_desc *desc, - struct idpf_cleaned_stats *cleaned, - int budget) +static void idpf_tx_handle_rs_cmpl_qb(struct idpf_queue *txq, + struct idpf_splitq_4b_tx_compl_desc *desc, + struct idpf_cleaned_stats *cleaned, + int budget) { - u16 compl_tag; + u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); - if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) { - u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); - - return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); - } + return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); +} - compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); +/** + * idpf_tx_handle_rs_cmpl_fb - clean a single packet and all of its buffers + * whether on the buffer ring or in the hash table (flow-based scheduling only) + * @txq: Tx ring to clean + * @desc: pointer to completion queue descriptor to extract completion + * information from + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @budget: Used to determine if we are in netpoll + * + * Returns bytes/packets cleaned + */ +static void idpf_tx_handle_rs_cmpl_fb(struct idpf_queue *txq, + struct idpf_splitq_tx_compl_desc *desc, + struct idpf_cleaned_stats *cleaned, + int budget) +{ + u16 compl_tag = le16_to_cpu(desc->common.q_head_compl_tag.compl_tag); /* If we didn't clean anything on the ring, this packet must be * in the hash table. Go clean it there. 
@@ -1950,71 +1963,248 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, } /** - * idpf_tx_clean_complq - Reclaim resources on completion queue + * idpf_tx_finalize_complq - Finalize completion queue cleaning + * @complq: completion queue to finalize + * @ntc: next to complete index + * @gen_flag: current state of generation flag + * @cleaned: returns number of packets cleaned + */ +static void idpf_tx_finalize_complq(struct idpf_queue *complq, int ntc, + bool gen_flag, int *cleaned) +{ + struct idpf_netdev_priv *np; + bool complq_ok = true; + int i; + + /* Store the state of the complq to be used later in deciding if a + * TXQ can be started again + */ + if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > + IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) + complq_ok = false; + + np = netdev_priv(complq->vport->netdev); + for (i = 0; i < complq->txq_grp->num_txq; ++i) { + struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; + struct netdev_queue *nq; + bool dont_wake; + + /* We didn't clean anything on this queue, move along */ + if (!tx_q->cleaned_bytes) + continue; + + *cleaned += tx_q->cleaned_pkts; + + /* Update BQL */ + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + + dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || + np->state != __IDPF_VPORT_UP || + !netif_carrier_ok(tx_q->vport->netdev); + /* Check if the TXQ needs to and can be restarted */ + __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, + IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, + dont_wake); + + /* Reset cleaned stats for the next time this queue is + * cleaned + */ + tx_q->cleaned_bytes = 0; + tx_q->cleaned_pkts = 0; + } + + ntc += complq->desc_count; + complq->next_to_clean = ntc; + if (gen_flag) + set_bit(__IDPF_Q_GEN_CHK, complq->flags); + else + clear_bit(__IDPF_Q_GEN_CHK, complq->flags); +} + +/** + * idpf_parse_compl_desc - Parse the completion descriptor + * @desc: completion descriptor to be parsed + * @complq: completion queue containing the descriptor + * @txq: returns corresponding Tx queue for a given descriptor + * @gen_flag: current generation flag in the completion queue + * + * Returns completion type from descriptor or negative value in case of error: + * -ENODATA if there is no completion descriptor to be cleaned + * -EINVAL if no Tx queue has been found for the completion queue + */ +static int idpf_parse_compl_desc(struct idpf_splitq_4b_tx_compl_desc *desc, + struct idpf_queue *complq, + struct idpf_queue **txq, + bool gen_flag) +{ + int rel_tx_qid; + u8 ctype; /* completion type */ + u16 gen; + + /* if the descriptor isn't done, no work yet to do */ + gen = (le16_to_cpu(desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S; + if (gen_flag != gen) + return -ENODATA; + + /* Find necessary info of TX queue to clean buffers */ + rel_tx_qid = (le16_to_cpu(desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S; + if (rel_tx_qid >= complq->txq_grp->num_txq || + !complq->txq_grp->txqs[rel_tx_qid]) { + dev_err(&complq->vport->adapter->pdev->dev, + "TxQ not found\n"); + return -EINVAL; + } + *txq = complq->txq_grp->txqs[rel_tx_qid]; + + /* Determine completion type */ + ctype = (le16_to_cpu(desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> + IDPF_TXD_COMPLQ_COMPL_TYPE_S; + + return ctype; +} + +/** + * idpf_tx_update_compl_stats - Update Tx and completion queue counters + * @complq: completion queue to update + * @txq: Tx queue to update + * @stats: stats structure containig source completion data + */ 
+static void idpf_tx_update_compl_stats(struct idpf_queue *complq, + struct idpf_queue *txq, + struct idpf_cleaned_stats *stats) +{ + u64_stats_update_begin(&txq->stats_sync); + u64_stats_add(&txq->q_stats.tx.packets, stats->packets); + u64_stats_add(&txq->q_stats.tx.bytes, stats->bytes); + txq->cleaned_pkts += stats->packets; + txq->cleaned_bytes += stats->bytes; + complq->num_completions++; + u64_stats_update_end(&txq->stats_sync); +} + + +/** + * idpf_tx_clean_qb_complq - Reclaim resources on completion queue working + * in queue-based scheduling mode. * @complq: Tx ring to clean * @budget: Used to determine if we are in netpoll * @cleaned: returns number of packets cleaned * * Returns true if there's any budget left (e.g. the clean is finished) */ -static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, - int *cleaned) +static bool idpf_tx_clean_qb_complq(struct idpf_queue *complq, int budget, + int *cleaned) { - struct idpf_splitq_tx_compl_desc *tx_desc; + struct idpf_splitq_4b_tx_compl_desc *tx_desc; struct idpf_vport *vport = complq->vport; s16 ntc = complq->next_to_clean; - struct idpf_netdev_priv *np; unsigned int complq_budget; - bool complq_ok = true; - int i; + bool gen_flag; + gen_flag = test_bit(__IDPF_Q_GEN_CHK, complq->flags); complq_budget = vport->compln_clean_budget; - tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); + tx_desc = IDPF_SPLITQ_4B_TX_COMPLQ_DESC(complq, ntc); ntc -= complq->desc_count; do { struct idpf_cleaned_stats cleaned_stats = { }; struct idpf_queue *tx_q; - int rel_tx_qid; - u16 hw_head; - u8 ctype; /* completion type */ - u16 gen; + int ctype; - /* if the descriptor isn't done, no work yet to do */ - gen = (le16_to_cpu(tx_desc->qid_comptype_gen) & - IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S; - if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) + ctype = idpf_parse_compl_desc(tx_desc, complq, &tx_q, gen_flag); + switch (ctype) { + case IDPF_TXD_COMPLT_RS: + idpf_tx_handle_rs_cmpl_qb(tx_q, tx_desc, + &cleaned_stats, + budget); break; - - /* Find necessary info of TX queue to clean buffers */ - rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) & - IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S; - if (rel_tx_qid >= complq->txq_grp->num_txq || - !complq->txq_grp->txqs[rel_tx_qid]) { - dev_err(&complq->vport->adapter->pdev->dev, - "TxQ not found\n"); + case IDPF_TXD_COMPLT_SW_MARKER: + idpf_tx_handle_sw_marker(tx_q); + break; + case -ENODATA: + goto exit_clean_complq; + case -EINVAL: + goto fetch_next_desc; + default: + dev_err(&tx_q->vport->adapter->pdev->dev, + "Unknown TX completion type: %d\n", + ctype); goto fetch_next_desc; } - tx_q = complq->txq_grp->txqs[rel_tx_qid]; - /* Determine completion type */ - ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) & - IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> - IDPF_TXD_COMPLQ_COMPL_TYPE_S; + idpf_tx_update_compl_stats(complq, tx_q, &cleaned_stats); +fetch_next_desc: + tx_desc++; + ntc++; + if (unlikely(!ntc)) { + ntc -= complq->desc_count; + tx_desc = IDPF_SPLITQ_4B_TX_COMPLQ_DESC(complq, 0); + gen_flag = !gen_flag; + } + + prefetch(tx_desc); + + /* update budget accounting */ + complq_budget--; + } while (likely(complq_budget)); + +exit_clean_complq: + idpf_tx_finalize_complq(complq, ntc, gen_flag, cleaned); + return !!complq_budget; +} + +/** + * idpf_tx_clean_fb_complq - Reclaim resources on completion queue working + * in flow-based scheduling mode. 
+ * @complq: Tx ring to clean + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns true if there's any budget left (e.g. the clean is finished) + */ +static bool idpf_tx_clean_fb_complq(struct idpf_queue *complq, int budget, + int *cleaned) +{ + struct idpf_splitq_tx_compl_desc *tx_desc; + struct idpf_vport *vport = complq->vport; + s16 ntc = complq->next_to_clean; + unsigned int complq_budget; + bool gen_flag; + + gen_flag = test_bit(__IDPF_Q_GEN_CHK, complq->flags); + complq_budget = vport->compln_clean_budget; + tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); + ntc -= complq->desc_count; + + do { + struct idpf_cleaned_stats cleaned_stats = { }; + struct idpf_queue *tx_q; + u16 hw_head; + int ctype; + + ctype = idpf_parse_compl_desc(&tx_desc->common, complq, + &tx_q, gen_flag); switch (ctype) { case IDPF_TXD_COMPLT_RE: - hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head); + hw_head = le16_to_cpu(tx_desc->common.q_head_compl_tag.q_head); idpf_tx_splitq_clean(tx_q, hw_head, budget, &cleaned_stats, true); break; case IDPF_TXD_COMPLT_RS: - idpf_tx_handle_rs_completion(tx_q, tx_desc, - &cleaned_stats, budget); + idpf_tx_handle_rs_cmpl_fb(tx_q, tx_desc, + &cleaned_stats, budget); break; case IDPF_TXD_COMPLT_SW_MARKER: idpf_tx_handle_sw_marker(tx_q); break; + case -ENODATA: + goto exit_clean_complq; + case -EINVAL: + goto fetch_next_desc; default: dev_err(&tx_q->vport->adapter->pdev->dev, "Unknown TX completion type: %d\n", @@ -2022,21 +2212,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, goto fetch_next_desc; } - u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); - u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); - tx_q->cleaned_pkts += cleaned_stats.packets; - tx_q->cleaned_bytes += cleaned_stats.bytes; - complq->num_completions++; - u64_stats_update_end(&tx_q->stats_sync); - + idpf_tx_update_compl_stats(complq, tx_q, &cleaned_stats); fetch_next_desc: tx_desc++; ntc++; if (unlikely(!ntc)) { ntc -= complq->desc_count; tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); - change_bit(__IDPF_Q_GEN_CHK, complq->flags); + gen_flag = !gen_flag; } prefetch(tx_desc); @@ -2045,46 +2228,8 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, complq_budget--; } while (likely(complq_budget)); - /* Store the state of the complq to be used later in deciding if a - * TXQ can be started again - */ - if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > - IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) - complq_ok = false; - - np = netdev_priv(complq->vport->netdev); - for (i = 0; i < complq->txq_grp->num_txq; ++i) { - struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; - struct netdev_queue *nq; - bool dont_wake; - - /* We didn't clean anything on this queue, move along */ - if (!tx_q->cleaned_bytes) - continue; - - *cleaned += tx_q->cleaned_pkts; - - /* Update BQL */ - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); - - dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || - np->state != __IDPF_VPORT_UP || - !netif_carrier_ok(tx_q->vport->netdev); - /* Check if the TXQ needs to and can be restarted */ - __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, - IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, - dont_wake); - - /* Reset cleaned stats for the next time this queue is - * cleaned - */ - tx_q->cleaned_bytes = 0; - tx_q->cleaned_pkts = 0; - } - - ntc += complq->desc_count; - 
complq->next_to_clean = ntc; - +exit_clean_complq: + idpf_tx_finalize_complq(complq, ntc, gen_flag, cleaned); return !!complq_budget; } @@ -4078,9 +4223,16 @@ static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec, return true; budget_per_q = DIV_ROUND_UP(budget, num_txq); + for (i = 0; i < num_txq; i++) - clean_complete &= idpf_tx_clean_complq(q_vec->tx[i], - budget_per_q, cleaned); + if (test_bit(__IDPF_Q_FLOW_SCH_EN, q_vec->tx[i]->flags)) + clean_complete &= idpf_tx_clean_fb_complq(q_vec->tx[i], + budget_per_q, + cleaned); + else + clean_complete &= idpf_tx_clean_qb_complq(q_vec->tx[i], + budget_per_q, + cleaned); return clean_complete; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 5013053c90648f..8e5388ca696987 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -127,6 +127,8 @@ do { \ (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i])) #define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \ (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i])) +#define IDPF_SPLITQ_4B_TX_COMPLQ_DESC(txcq, i) \ + (&(((struct idpf_splitq_4b_tx_compl_desc *)((txcq)->desc_ring))[i])) #define IDPF_FLEX_TX_DESC(txq, i) \ (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i])) From 65b0b93365cb77b7c313330c5e8572856f8b6d1e Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Fri, 27 Oct 2023 21:23:33 +0200 Subject: [PATCH 7/9] idpf: add support for XDP_TX Implement sending the packet from an XDP ring. XDP path functions are separate from the general Tx routines, because this allows us to simplify and therefore speed up the process. It also makes the code more friendly to future XDP-specific optimizations. Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf_lib.c | 1 + drivers/net/ethernet/intel/idpf/idpf_txrx.c | 237 ++++++++++++++++++-- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 61 ++++- 3 files changed, 281 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 1943423acab2d8..96d0ff57d88b1a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -839,6 +839,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) netdev->features |= dflt_features; netdev->hw_features |= dflt_features | offloads; netdev->hw_enc_features |= dflt_features | offloads; + netdev->xdp_features = NETDEV_XDP_ACT_BASIC; idpf_set_ethtool_ops(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 9aeab320ae6540..1c61df4dcaa6d4 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -65,6 +65,23 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) } } +/** + * idpf_xdp_buf_rel - Release an XDP Tx buffer + * @xdp_buf: the buffer to free + */ +static void idpf_xdp_buf_rel(struct idpf_tx_buf *xdp_buf) +{ + struct page *page; + u32 put_size; + + page = xdp_buf->page; + put_size = dma_unmap_len(xdp_buf, len); + if (page) { + page_pool_put_page(page->pp, page, put_size, true); + xdp_buf->page = NULL; + } +} + /** * idpf_tx_buf_rel - Release a Tx buffer * @tx_q: the queue that owns the buffer * @@ -105,8 +122,12 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq) return; /* Free all the Tx buffer sk_buffs */ - for (i = 0; i < txq->desc_count; i++) - idpf_tx_buf_rel(txq,
				    &txq->tx_buf[i]);
+	for (i = 0; i < txq->desc_count; i++) {
+		if (test_bit(__IDPF_Q_XDP, txq->flags))
+			idpf_xdp_buf_rel(&txq->tx_buf[i]);
+		else
+			idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+	}
 
 	kfree(txq->tx_buf);
 	txq->tx_buf = NULL;
@@ -767,6 +788,8 @@ static int idpf_xdp_rxq_info_init(struct idpf_queue *rxq, bool splitq)
 	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
 					 pp);
 
+	rxq->xdpq = rxq->vport->txqs[rxq->idx + rxq->vport->xdp_txq_offset];
+
 	return err;
 }
 
@@ -1345,6 +1368,11 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
 
 		if (flow_sch_en)
 			set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+
+		if (idpf_xdp_is_prog_ena(vport) &&
+		    (q->idx >= vport->xdp_txq_offset)) {
+			clear_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+			set_bit(__IDPF_Q_XDP, q->flags);
+		}
 	}
 
 	if (!idpf_is_queue_model_split(vport->txq_model))
@@ -1584,8 +1612,13 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
 	if (idpf_xdp_is_prog_ena(vport)) {
 		int j;
 
-		for (j = vport->xdp_txq_offset; j < vport->num_txq; j++)
+		for (j = vport->xdp_txq_offset; j < vport->num_txq; j++) {
+			__clear_bit(__IDPF_Q_FLOW_SCH_EN,
+				    vport->txqs[j]->flags);
+			__clear_bit(__IDPF_Q_FLOW_SCH_EN,
+				    vport->txqs[j]->txq_grp->complq->flags);
 			__set_bit(__IDPF_Q_XDP, vport->txqs[j]->flags);
+		}
 	}
 
 	return 0;
@@ -2041,6 +2074,8 @@ static int idpf_parse_compl_desc(struct idpf_splitq_4b_tx_compl_desc *desc,
 	u8 ctype;	/* completion type */
 	u16 gen;
 
+	*txq = NULL;
+
 	/* if the descriptor isn't done, no work yet to do */
 	gen = (le16_to_cpu(desc->qid_comptype_gen) &
 	       IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
@@ -2101,8 +2136,8 @@ static bool idpf_tx_clean_qb_complq(struct idpf_queue *complq, int budget,
 	struct idpf_splitq_4b_tx_compl_desc *tx_desc;
 	struct idpf_vport *vport = complq->vport;
 	s16 ntc = complq->next_to_clean;
+	bool finalize = true, gen_flag;
 	unsigned int complq_budget;
-	bool gen_flag;
 
 	gen_flag = test_bit(__IDPF_Q_GEN_CHK, complq->flags);
 	complq_budget = vport->compln_clean_budget;
@@ -2117,6 +2152,10 @@ static bool idpf_tx_clean_qb_complq(struct idpf_queue *complq, int budget,
 		ctype = idpf_parse_compl_desc(tx_desc, complq, &tx_q,
 					      gen_flag);
 		switch (ctype) {
 		case IDPF_TXD_COMPLT_RS:
+			if (test_bit(__IDPF_Q_XDP, tx_q->flags)) {
+				finalize = false;
+				goto fetch_next_desc;
+			}
 			idpf_tx_handle_rs_cmpl_qb(tx_q, tx_desc,
 						  &cleaned_stats, budget);
@@ -2152,7 +2191,8 @@ static bool idpf_tx_clean_qb_complq(struct idpf_queue *complq, int budget,
 	} while (likely(complq_budget));
 
 exit_clean_complq:
-	idpf_tx_finalize_complq(complq, ntc, gen_flag, cleaned);
+	if (finalize)
+		idpf_tx_finalize_complq(complq, ntc, gen_flag, cleaned);
 
 	return !!complq_budget;
 }
@@ -3408,12 +3448,159 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
 						  IDPF_RXD_EOF_SPLITQ));
 }
 
+/**
+ * idpf_clean_xdp_irq - Reclaim a batch of TX resources from completed XDP_TX
+ * @xdpq: XDP Tx queue
+ *
+ * Returns number of cleaned descriptors.
+ */
+static u32 idpf_clean_xdp_irq(struct idpf_queue *xdpq)
+{
+	struct idpf_queue *complq = xdpq->txq_grp->complq, *txq;
+	struct idpf_splitq_4b_tx_compl_desc *last_rs_desc;
+	int complq_budget = complq->desc_count;
+	u32 tx_ntc = xdpq->next_to_clean;
+	u32 ntc = complq->next_to_clean;
+	u32 cnt = xdpq->desc_count;
+	u32 done_frames = 0, i = 0;
+	int head = tx_ntc;
+	bool gen_flag;
+
+	last_rs_desc = IDPF_SPLITQ_4B_TX_COMPLQ_DESC(complq, ntc);
+	gen_flag = test_bit(__IDPF_Q_GEN_CHK, complq->flags);
+
+	do {
+		int ctype = idpf_parse_compl_desc(last_rs_desc, complq,
+						  &txq, gen_flag);
+		if (txq && txq != xdpq) {
+			dev_err(&xdpq->vport->adapter->pdev->dev,
+				"Found TxQ is not XDP queue\n");
+			goto fetch_next_desc;
+		}
+
+		switch (ctype) {
+		case IDPF_TXD_COMPLT_RS:
+			break;
+		case -ENODATA:
+			goto exit_xdp_irq;
+		case -EINVAL:
+			goto fetch_next_desc;
+		default:
+			dev_err(&xdpq->vport->adapter->pdev->dev,
+				"Unsupported completion type for XDP\n");
+			goto fetch_next_desc;
+		}
+
+		head = le16_to_cpu(last_rs_desc->q_head_compl_tag.q_head);
+fetch_next_desc:
+		last_rs_desc++;
+		ntc++;
+		if (unlikely(ntc == complq->desc_count)) {
+			ntc = 0;
+			last_rs_desc = IDPF_SPLITQ_4B_TX_COMPLQ_DESC(complq, 0);
+			gen_flag = !gen_flag;
+			change_bit(__IDPF_Q_GEN_CHK, complq->flags);
+		}
+		prefetch(last_rs_desc);
+		complq_budget--;
+	} while (likely(complq_budget));
+
+exit_xdp_irq:
+	complq->next_to_clean = ntc;
+	done_frames = head >= tx_ntc ? head - tx_ntc :
+				       head + cnt - tx_ntc;
+
+	for (i = 0; i < done_frames; i++) {
+		struct idpf_tx_buf *tx_buf = &xdpq->tx_buf[tx_ntc];
+
+		idpf_xdp_buf_rel(tx_buf);
+
+		tx_ntc++;
+		if (tx_ntc >= xdpq->desc_count)
+			tx_ntc = 0;
+	}
+
+	xdpq->next_to_clean = tx_ntc;
+
+	return i;
+}
+
+/**
+ * idpf_xmit_xdp_buff - submit single buffer to XDP queue for transmission
+ * @xdp: XDP buffer pointer
+ * @xdpq: XDP queue for transmission
+ * @map: whether to map the buffer
+ *
+ * Returns negative on failure, 0 on success.
+ */
+static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
+			      struct idpf_queue *xdpq,
+			      bool map)
+{
+	struct idpf_tx_splitq_params tx_params = { };
+	u32 batch_sz = IDPF_QUEUE_QUARTER(xdpq);
+	u32 size = xdp->data_end - xdp->data;
+	union idpf_tx_flex_desc *tx_desc;
+	u32 ntu = xdpq->next_to_use;
+	struct idpf_tx_buf *tx_buf;
+	void *data = xdp->data;
+	dma_addr_t dma;
+	u32 free;
+
+	free = IDPF_DESC_UNUSED(xdpq);
+	if (unlikely(free < batch_sz))
+		free += idpf_clean_xdp_irq(xdpq);
+	if (unlikely(!free))
+		return -EBUSY;
+
+	if (map) {
+		dma = dma_map_single(xdpq->dev, data, size, DMA_TO_DEVICE);
+		if (dma_mapping_error(xdpq->dev, dma))
+			return -ENOMEM;
+	} else {
+		struct page *page = virt_to_page(data);
+		u32 hr = data - xdp->data_hard_start;
+
+		dma = page_pool_get_dma_addr(page) + hr;
+		dma_sync_single_for_device(xdpq->dev, dma, size,
+					   DMA_BIDIRECTIONAL);
+	}
+
+	tx_buf = &xdpq->tx_buf[ntu];
+	tx_buf->bytecount = size;
+	tx_buf->gso_segs = 1;
+	tx_buf->page = virt_to_page(data);
+
+	/* record length, and DMA address */
+	dma_unmap_len_set(tx_buf, len, size);
+	dma_unmap_addr_set(tx_buf, dma, dma);
+
+	tx_desc = IDPF_FLEX_TX_DESC(xdpq, ntu);
+	tx_desc->q.buf_addr = cpu_to_le64(dma);
+
+	tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
+	tx_params.eop_cmd = IDPF_TX_DESC_CMD_EOP;
+
+	idpf_tx_splitq_build_desc(tx_desc, &tx_params,
+				  tx_params.eop_cmd | tx_params.offload.td_cmd,
+				  size);
+	ntu++;
+	if (ntu == xdpq->desc_count)
+		ntu = 0;
+
+	xdpq->next_to_use = ntu;
+
+	return 0;
+}
+
 /**
  * idpf_run_xdp - Run XDP program and perform the resulting action
  * @rx_bufq: Rx buffer queue
  * @rx_buf: Rx buffer containing the packet
  * @xdp: an initiated XDP buffer
  * @xdp_prog: an XDP program assigned to the vport
+ * @xdpq: XDP Tx queue associated with the Rx queue
+ * @rxq_xdp_act: logical OR of flags of XDP actions that require finalization
  * @size: size of the packet
  *
  * Returns the resulting XDP action.
@@ -3422,6 +3609,8 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,
 				 struct idpf_rx_buf *rx_buf,
 				 struct xdp_buff *xdp,
 				 struct bpf_prog *xdp_prog,
+				 struct idpf_queue *xdpq,
+				 u32 *rxq_xdp_act,
 				 unsigned int size)
 {
 	u32 hr = rx_bufq->pp->p.offset;
@@ -3436,11 +3625,18 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,
 	case XDP_PASS:
 	case XDP_DROP:
 		break;
+	case XDP_TX:
+		if (unlikely(idpf_xmit_xdp_buff(xdp, xdpq, false)))
+			goto xdp_err;
+
+		*rxq_xdp_act |= IDPF_XDP_ACT_FINALIZE_TX;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_bufq->vport->netdev, xdp_prog,
 					    xdp_act);
 		fallthrough;
 	case XDP_ABORTED:
+xdp_err:
 		trace_xdp_exception(rx_bufq->vport->netdev, xdp_prog, xdp_act);
 
 		return XDP_DROP;
@@ -3464,11 +3660,13 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,
 static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
 {
 	int total_rx_bytes = 0, total_rx_pkts = 0;
+	struct idpf_queue *xdpq = rxq->xdpq;
 	struct idpf_queue *rx_bufq = NULL;
 	struct sk_buff *skb = rxq->skb;
 	u16 ntc = rxq->next_to_clean;
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp;
+	u32 rxq_xdp_act = 0;
 
 	xdp_prog = rcu_dereference(rxq->xdp_prog);
 	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
@@ -3572,19 +3770,22 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
 		}
 
 		idpf_rx_sync_for_cpu(rx_buf, pkt_len);
-		if (xdp_prog)
-			xdp_act = idpf_run_xdp(rx_bufq, rx_buf, &xdp, xdp_prog,
-					       pkt_len);
-		if (xdp_act != XDP_PASS) {
+		if (!xdp_prog)
+			goto construct_skb;
+
+		xdp_act = idpf_run_xdp(rx_bufq, rx_buf, &xdp, xdp_prog,
+				       xdpq, &rxq_xdp_act, pkt_len);
+		if (xdp_act == XDP_PASS)
+			goto construct_skb;
+		if (xdp_act == XDP_DROP)
 			idpf_rx_put_page(rx_buf);
-			total_rx_bytes += pkt_len;
-			total_rx_pkts++;
-			idpf_rx_post_buf_refill(refillq, buf_id);
-			IDPF_RX_BUMP_NTC(rxq, ntc);
-			continue;
-		}
-
+		total_rx_bytes += pkt_len;
+		total_rx_pkts++;
+		idpf_rx_post_buf_refill(refillq, buf_id);
+		IDPF_RX_BUMP_NTC(rxq, ntc);
+		continue;
+construct_skb:
 		if (skb)
 			idpf_rx_add_frag(rx_buf, skb, pkt_len);
 		else
@@ -3627,8 +3828,10 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
 	}
 
 	rxq->next_to_clean = ntc;
-
 	rxq->skb = skb;
+
+	idpf_finalize_xdp_rx(xdpq, rxq_xdp_act);
+
 	u64_stats_update_begin(&rxq->stats_sync);
 	u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
 	u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 8e5388ca696987..932381987cdf6d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -159,6 +159,8 @@ do {								\
 	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ?	\
 	 0 : (txq)->compl_tag_cur_gen)
 
+#define IDPF_QUEUE_QUARTER(Q)	((Q)->desc_count >> 2)
+
 #define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
 
 #define IDPF_TX_FLAGS_TSO	BIT(0)
@@ -166,6 +168,8 @@ do {								\
 #define IDPF_TX_FLAGS_IPV6	BIT(2)
 #define IDPF_TX_FLAGS_TUNNEL	BIT(3)
 
+#define IDPF_XDP_ACT_FINALIZE_TX BIT(0)
+
 #define IDPF_XDP_MAX_MTU 3046
 
 union idpf_tx_flex_desc {
@@ -197,7 +201,10 @@ union idpf_tx_flex_desc {
  */
 struct idpf_tx_buf {
 	void *next_to_watch;
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		struct page *page;
+	};
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	unsigned int bytecount;
@@ -740,6 +747,7 @@ struct idpf_queue {
 
 	struct bpf_prog *xdp_prog;
 	struct xdp_rxq_info xdp_rxq;
+	struct idpf_queue *xdpq;
 
 	u16 tx_max_bufs;
 	u8 tx_min_pkt_len;
@@ -1029,4 +1037,55 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
 
+/**
+ * idpf_xdpq_update_tail - Updates the XDP Tx queue tail register
+ * @xdpq: XDP Tx queue
+ *
+ * This function updates the XDP Tx queue tail register.
+ */
+static inline void idpf_xdpq_update_tail(struct idpf_queue *xdpq)
+{
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.
+	 */
+	wmb();
+	writel_relaxed(xdpq->next_to_use, xdpq->tail);
+}
+
+/**
+ * idpf_set_rs_bit - set RS bit on last produced descriptor.
+ * @xdpq: XDP queue to produce the HW Tx descriptors on
+ *
+ * The RS bit is set on the last produced descriptor (one behind the current NTU).
+ */
+static inline void idpf_set_rs_bit(struct idpf_queue *xdpq)
+{
+	int rs_idx = xdpq->next_to_use ? xdpq->next_to_use - 1 :
+					 xdpq->desc_count - 1;
+	u32 last_desc = IDPF_TXD_LAST_DESC_CMD;
+	union idpf_tx_flex_desc *tx_desc;
+
+	tx_desc = IDPF_FLEX_TX_DESC(xdpq, rs_idx);
+	tx_desc->q.qw1.cmd_dtype |=
+		cpu_to_le16((last_desc << IDPF_FLEX_TXD_QW1_CMD_S) &
+			    IDPF_FLEX_TXD_QW1_CMD_M);
+}
+
+/**
+ * idpf_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @xdpq: XDP Tx queue
+ * @xdp_act: Logical OR of flags of XDP actions that require finalization
+ *
+ * This function bumps the XDP Tx tail and/or flushes the redirect map, and
+ * should be called when a batch of packets has been processed in the
+ * NAPI loop.
+ */
+static inline void idpf_finalize_xdp_rx(struct idpf_queue *xdpq, u32 xdp_act)
+{
+	if (xdp_act & IDPF_XDP_ACT_FINALIZE_TX) {
+		idpf_set_rs_bit(xdpq);
+		idpf_xdpq_update_tail(xdpq);
+	}
+}
+
 #endif /* !_IDPF_TXRX_H_ */

From 8b4e315a23ebd536be9f874d4bfb714b0eec0726 Mon Sep 17 00:00:00 2001
From: Michal Kubiak
Date: Mon, 30 Oct 2023 16:10:26 +0100
Subject: [PATCH 8/9] idpf: add support for XDP_REDIRECT

Implement the XDP_REDIRECT action and the ndo_xdp_xmit() callback.

For now, packets redirected from a CPU whose index is greater than the
number of XDP queues are simply dropped with an error. This is a rather
common situation and it will be addressed in later patches.

The patch also refactors Rx XDP handling to use a switch statement due
to the increased number of actions.

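A quick way to exercise the new XDP_REDIRECT path end to end (illustrative
only, not part of this series) is a minimal BPF program that bounces every
frame to the interface stored in a one-entry devmap. The map name, the
section layout and the user-space step of populating key 0 with an egress
ifindex (e.g. via bpftool) are assumptions made for this example, not
driver API:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative XDP_REDIRECT test program, not part of this patch series. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* One-entry devmap; user space stores the egress ifindex at key 0. */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_test(struct xdp_md *ctx)
{
	/* Redirect to the ifindex at key 0; returns XDP_ABORTED on lookup failure. */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";

Attaching such a program to an idpf netdev should drive both
xdp_do_redirect() in the hot path and the deferred flush in
idpf_finalize_xdp_rx().
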
Signed-off-by: Michal Kubiak
---
 drivers/net/ethernet/intel/idpf/idpf_lib.c  |   7 +-
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 104 +++++++++++++++++---
 drivers/net/ethernet/intel/idpf/idpf_txrx.h |  23 ++++-
 3 files changed, 120 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 96d0ff57d88b1a..b3038ed19e5250 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -839,7 +839,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
 	netdev->features |= dflt_features;
 	netdev->hw_features |= dflt_features | offloads;
 	netdev->hw_enc_features |= dflt_features | offloads;
-	netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
 	idpf_set_ethtool_ops(netdev);
 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
 
@@ -2456,6 +2456,10 @@ idpf_xdp_setup_prog(struct idpf_vport *vport, struct bpf_prog *prog,
 				    "Could not reconfigure the queues after XDP setup\n");
 		return err;
 	}
+	if (prog)
+		xdp_features_set_redirect_target(vport->netdev, true);
+	else
+		xdp_features_clear_redirect_target(vport->netdev);
 
 	if (vport_is_up) {
 		err = idpf_vport_open(vport, false);
@@ -2595,6 +2599,7 @@ static const struct net_device_ops idpf_netdev_ops_splitq = {
 	.ndo_set_features = idpf_set_features,
 	.ndo_tx_timeout = idpf_tx_timeout,
 	.ndo_bpf = idpf_xdp,
+	.ndo_xdp_xmit = idpf_xdp_xmit,
 };
 
 static const struct net_device_ops idpf_netdev_ops_singleq = {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 1c61df4dcaa6d4..3cf2920fee5daf 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -67,19 +67,31 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 
 /**
  * idpf_xdp_buf_rel - Release an XDP Tx buffer
+ * @xdpq: XDP Tx queue
  * @xdp_buf: the buffer to free
  */
-static void idpf_xdp_buf_rel(struct idpf_tx_buf *xdp_buf)
+static void idpf_xdp_buf_rel(struct idpf_queue *xdpq,
+			     struct idpf_tx_buf *xdp_buf)
 {
 	struct page *page;
 	u32 put_size;
 
-	page = xdp_buf->page;
-	put_size = dma_unmap_len(xdp_buf, len);
-	if (page) {
+	switch (xdp_buf->xdp_type) {
+	case IDPF_XDP_BUFFER_TX:
+		page = xdp_buf->page;
+		put_size = dma_unmap_len(xdp_buf, len);
 		page_pool_put_page(page->pp, page, put_size, true);
-		xdp_buf->page = NULL;
+		break;
+	case IDPF_XDP_BUFFER_FRAME:
+		dma_unmap_page(xdpq->dev,
+			       dma_unmap_addr(xdp_buf, dma),
+			       dma_unmap_len(xdp_buf, len),
+			       DMA_TO_DEVICE);
+		xdp_return_frame(xdp_buf->xdpf);
+		break;
 	}
+
+	xdp_buf->xdp_type = IDPF_XDP_BUFFER_NONE;
 }
 
 /**
@@ -124,7 +136,7 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
 	/* Free all the Tx buffer sk_buffs */
 	for (i = 0; i < txq->desc_count; i++) {
 		if (test_bit(__IDPF_Q_XDP, txq->flags))
-			idpf_xdp_buf_rel(&txq->tx_buf[i]);
+			idpf_xdp_buf_rel(txq, &txq->tx_buf[i]);
 		else
 			idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
 	}
@@ -3513,7 +3525,7 @@ static u32 idpf_clean_xdp_irq(struct idpf_queue *xdpq)
 	for (i = 0; i < done_frames; i++) {
 		struct idpf_tx_buf *tx_buf = &xdpq->tx_buf[tx_ntc];
 
-		idpf_xdp_buf_rel(tx_buf);
+		idpf_xdp_buf_rel(xdpq, tx_buf);
 
 		tx_ntc++;
 		if (tx_ntc >= xdpq->desc_count)
@@ -3529,13 +3541,13 @@ static u32 idpf_clean_xdp_irq(struct idpf_queue *xdpq)
  * idpf_xmit_xdp_buff - submit single buffer to XDP queue for transmission
  * @xdp: XDP buffer pointer
  * @xdpq: XDP queue for transmission
- * @map: whether to map the buffer
+ * @frame: whether the function is called from .ndo_xdp_xmit()
  *
  * Returns negative on failure, 0 on success.
  */
 static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
 			      struct idpf_queue *xdpq,
-			      bool map)
+			      bool frame)
 {
 	struct idpf_tx_splitq_params tx_params = { };
 	u32 batch_sz = IDPF_QUEUE_QUARTER(xdpq);
@@ -3553,7 +3565,7 @@ static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
 	if (unlikely(!free))
 		return -EBUSY;
 
-	if (map) {
+	if (frame) {
 		dma = dma_map_single(xdpq->dev, data, size, DMA_TO_DEVICE);
 		if (dma_mapping_error(xdpq->dev, dma))
 			return -ENOMEM;
@@ -3569,7 +3581,14 @@ static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
 	tx_buf = &xdpq->tx_buf[ntu];
 	tx_buf->bytecount = size;
 	tx_buf->gso_segs = 1;
-	tx_buf->page = virt_to_page(data);
+
+	if (frame) {
+		tx_buf->xdp_type = IDPF_XDP_BUFFER_FRAME;
+		tx_buf->xdpf = xdp->data_hard_start;
+	} else {
+		tx_buf->xdp_type = IDPF_XDP_BUFFER_TX;
+		tx_buf->page = virt_to_page(data);
+	}
 
 	/* record length, and DMA address */
 	dma_unmap_len_set(tx_buf, len, size);
 	dma_unmap_addr_set(tx_buf, dma, dma);
@@ -3593,6 +3612,62 @@ static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
 	return 0;
 }
 
+/**
+ * idpf_xdp_xmit - submit packets to xdp ring for transmission
+ * @dev: netdev
+ * @n: number of xdp frames to be transmitted
+ * @frames: xdp frames to be transmitted
+ * @flags: transmit flags
+ *
+ * Returns the number of frames successfully sent. Frames that fail are
+ * freed via the XDP return API.
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ */
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		  u32 flags)
+{
+	struct idpf_netdev_priv *np = netdev_priv(dev);
+	struct idpf_vport *vport = np->vport;
+	u32 queue_index, nxmit = 0;
+	struct idpf_queue *xdpq;
+	int i, err = 0;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
+		return -ENETDOWN;
+	if (unlikely(!idpf_xdp_is_prog_ena(vport)))
+		return -ENXIO;
+
+	queue_index = smp_processor_id();
+	if (queue_index >= vport->num_xdp_txq)
+		return -ENXIO;
+
+	xdpq = vport->txqs[queue_index + vport->xdp_txq_offset];
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+		struct xdp_buff xdp;
+
+		xdp_convert_frame_to_buff(xdpf, &xdp);
+		err = idpf_xmit_xdp_buff(&xdp, xdpq, true);
+		if (unlikely(err)) {
+			netdev_err(dev, "XDP frame TX failed, error: %d\n",
+				   err);
+			break;
+		}
+		nxmit++;
+	}
+
+	if (likely(nxmit))
+		idpf_set_rs_bit(xdpq);
+	if (flags & XDP_XMIT_FLUSH)
+		idpf_xdpq_update_tail(xdpq);
+
+	return nxmit;
+}
+
 /**
  * idpf_run_xdp - Run XDP program and perform the resulting action
  * @rx_bufq: Rx buffer queue
@@ -3613,6 +3688,7 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,
 				 u32 *rxq_xdp_act,
 				 unsigned int size)
 {
+	struct net_device *netdev = rx_bufq->vport->netdev;
 	u32 hr = rx_bufq->pp->p.offset;
 	unsigned int xdp_act;
 
@@ -3631,6 +3707,12 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,
 		*rxq_xdp_act |= IDPF_XDP_ACT_FINALIZE_TX;
 		break;
+	case XDP_REDIRECT:
+		if (unlikely(xdp_do_redirect(netdev, xdp, xdp_prog)))
+			goto xdp_err;
+
+		*rxq_xdp_act |= IDPF_XDP_ACT_FINALIZE_REDIR;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_bufq->vport->netdev, xdp_prog,
 					    xdp_act);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 932381987cdf6d..f69bec01f26d38 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -169,6 +169,7 @@ do {								\
 #define IDPF_TX_FLAGS_TUNNEL	BIT(3)
 
 #define IDPF_XDP_ACT_FINALIZE_TX BIT(0)
+#define IDPF_XDP_ACT_FINALIZE_REDIR BIT(1)
 
 #define IDPF_XDP_MAX_MTU 3046
 
@@ -177,6 +178,18 @@ union idpf_tx_flex_desc {
 	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
 };
 
+/**
+ * enum idpf_xdp_buffer_type - type of &idpf_tx_buf on XDP queue
+ * @IDPF_XDP_BUFFER_NONE: unused, no action required
+ * @IDPF_XDP_BUFFER_TX: free according to our memory model
+ * @IDPF_XDP_BUFFER_FRAME: use xdp_return_frame()
+ */
+enum idpf_xdp_buffer_type {
+	IDPF_XDP_BUFFER_NONE = 0U,
+	IDPF_XDP_BUFFER_TX,
+	IDPF_XDP_BUFFER_FRAME,
+};
+
 /**
  * struct idpf_tx_buf
  * @next_to_watch: Next descriptor to clean
@@ -202,13 +215,15 @@ union idpf_tx_flex_desc {
 struct idpf_tx_buf {
 	void *next_to_watch;
 	union {
-		struct sk_buff *skb;
-		struct page *page;
+		struct sk_buff *skb;	/* used for .ndo_start_xmit() */
+		struct page *page;	/* used for XDP_TX */
+		struct xdp_frame *xdpf;	/* used for .ndo_xdp_xmit() */
 	};
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	unsigned int bytecount;
 	unsigned short gso_segs;
+	unsigned short xdp_type;
 
 	union {
 		int compl_tag;
@@ -1036,6 +1051,8 @@ netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		  u32 flags);
 
 /**
  * idpf_xdpq_update_tail - Updates the XDP Tx queue tail register
@@ -1082,6 +1099,8 @@ static inline void idpf_set_rs_bit(struct idpf_queue *xdpq)
  */
 static inline void idpf_finalize_xdp_rx(struct idpf_queue *xdpq, u32 xdp_act)
 {
+	if (xdp_act & IDPF_XDP_ACT_FINALIZE_REDIR)
+		xdp_do_flush_map();
 	if (xdp_act & IDPF_XDP_ACT_FINALIZE_TX) {
 		idpf_set_rs_bit(xdpq);
 		idpf_xdpq_update_tail(xdpq);

From 1721bb40c9333fa9b78fe9aa937f0a79f03a3a3c Mon Sep 17 00:00:00 2001
From: Michal Kubiak
Date: Tue, 7 Nov 2023 13:58:59 +0100
Subject: [PATCH 9/9] idpf: allow xdp txq sharing

Port of commit 22bf877 ("ice: introduce XDP_TX fallback path").

The patch handles the case when the number of XDP Tx queues is not
sufficient for the current number of CPUs. To avoid dropping some of
the packets redirected from other interfaces, XDP TxQs are allowed to
be shared between CPUs, which imposes a locking requirement. The
static key approach has little to no performance penalty when sharing
is not needed.

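The conditional-locking pattern used here can be illustrated in isolation
with the sketch below. The names (example_xdp_locking_key,
struct example_queue) are made up for the example and are not part of the
driver; while the static key stays disabled, the branches are patched out
and the xmit path remains lock-free.

#include <linux/jump_label.h>
#include <linux/spinlock.h>

/* Hypothetical queue type carrying only what the sketch needs. */
struct example_queue {
	spinlock_t tx_lock;
};

/* Enabled only when XDP Tx queues must be shared between CPUs. */
DEFINE_STATIC_KEY_FALSE(example_xdp_locking_key);

static void example_xdp_xmit(struct example_queue *xdpq)
{
	/* No lock is taken on the fast path while the key is disabled. */
	if (static_branch_unlikely(&example_xdp_locking_key))
		spin_lock(&xdpq->tx_lock);

	/* ... produce Tx descriptors on xdpq ... */

	if (static_branch_unlikely(&example_xdp_locking_key))
		spin_unlock(&xdpq->tx_lock);
}
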
Suggested-by: Larysa Zaremba
Signed-off-by: Michal Kubiak
---
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 54 +++++++++++++++++++--
 drivers/net/ethernet/intel/idpf/idpf_txrx.h | 12 ++++-
 2 files changed, 62 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 3cf2920fee5daf..df55c1be8e059d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3,6 +3,8 @@
 
 #include "idpf.h"
 
+DEFINE_STATIC_KEY_FALSE(idpf_xdp_locking_key);
+
 /**
  * idpf_buf_lifo_push - push a buffer pointer onto stack
  * @stack: pointer to stack struct
@@ -189,6 +191,9 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
 	if (!vport->txq_grps)
 		return;
 
+	if (static_key_enabled(&idpf_xdp_locking_key))
+		static_branch_dec(&idpf_xdp_locking_key);
+
 	for (i = 0; i < vport->num_txq_grp; i++) {
 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 
@@ -1594,6 +1599,26 @@ static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
 	return err;
 }
 
+/**
+ * idpf_xdp_cfg_tx_sharing - Enable XDP TxQ sharing, if needed
+ * @vport: vport where the XDP is being configured
+ *
+ * If there are more CPUs than rings, sharing the XDP TxQs allows us
+ * to handle XDP_REDIRECT traffic from other interfaces.
+ */
+static void idpf_xdp_cfg_tx_sharing(struct idpf_vport *vport)
+{
+	u32 num_xdpq = vport->num_xdp_txq;
+	u32 num_cpus = num_online_cpus();
+
+	if (num_xdpq >= num_cpus)
+		return;
+
+	netdev_warn(vport->netdev, "System has %u CPUs, but only %u XDP queues can be configured, entering XDP TxQ sharing mode, performance is decreased\n",
+		    num_cpus, num_xdpq);
+	static_branch_inc(&idpf_xdp_locking_key);
+}
+
 /**
  * idpf_vport_queues_alloc - Allocate memory for all queues
  * @vport: virtual port
@@ -1630,7 +1655,9 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
 			__clear_bit(__IDPF_Q_FLOW_SCH_EN,
 				    vport->txqs[j]->txq_grp->complq->flags);
 			__set_bit(__IDPF_Q_XDP, vport->txqs[j]->flags);
+			spin_lock_init(&vport->txqs[j]->tx_lock);
 		}
+		idpf_xdp_cfg_tx_sharing(vport);
 	}
 
 	return 0;
@@ -3612,6 +3639,22 @@ static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
 	return 0;
 }
 
+static bool idpf_xdp_xmit_back(const struct xdp_buff *buff,
+			       struct idpf_queue *xdpq)
+{
+	bool ret;
+
+	if (static_branch_unlikely(&idpf_xdp_locking_key))
+		spin_lock(&xdpq->tx_lock);
+
+	ret = !idpf_xmit_xdp_buff(buff, xdpq, false);
+
+	if (static_branch_unlikely(&idpf_xdp_locking_key))
+		spin_unlock(&xdpq->tx_lock);
+
+	return ret;
+}
+
 /**
  * idpf_xdp_xmit - submit packets to xdp ring for transmission
  * @dev: netdev
@@ -3641,11 +3684,14 @@ int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		return -ENXIO;
 
 	queue_index = smp_processor_id();
-	if (queue_index >= vport->num_xdp_txq)
-		return -ENXIO;
+	if (static_branch_unlikely(&idpf_xdp_locking_key))
+		queue_index %= vport->num_xdp_txq;
 
 	xdpq = vport->txqs[queue_index + vport->xdp_txq_offset];
 
+	if (static_branch_unlikely(&idpf_xdp_locking_key))
+		spin_lock(&xdpq->tx_lock);
+
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		struct xdp_buff xdp;
@@ -3664,6 +3710,8 @@ int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		idpf_set_rs_bit(xdpq);
 	if (flags & XDP_XMIT_FLUSH)
 		idpf_xdpq_update_tail(xdpq);
+	if (static_branch_unlikely(&idpf_xdp_locking_key))
+		spin_unlock(&xdpq->tx_lock);
 
 	return nxmit;
 }
@@ -3702,7 +3750,7 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,
 	case XDP_DROP:
 		break;
 	case XDP_TX:
-		if (unlikely(idpf_xmit_xdp_buff(xdp, xdpq, false)))
+		if (unlikely(!idpf_xdp_xmit_back(xdp, xdpq)))
 			goto xdp_err;
 
 		*rxq_xdp_act |= IDPF_XDP_ACT_FINALIZE_TX;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index f69bec01f26d38..2eeba07ffe2708 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -732,7 +732,10 @@ struct idpf_queue {
 		} rx_buf;
 	};
 	struct page_pool *pp;
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		spinlock_t tx_lock;
+	};
 	u16 q_type;
 	u32 q_id;
 	u16 desc_count;
@@ -1051,6 +1054,9 @@ netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+
+DECLARE_STATIC_KEY_FALSE(idpf_xdp_locking_key);
+
 int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		  u32 flags);
 
@@ -1102,8 +1108,12 @@ static inline void idpf_finalize_xdp_rx(struct idpf_queue *xdpq, u32 xdp_act)
 	if (xdp_act & IDPF_XDP_ACT_FINALIZE_REDIR)
 		xdp_do_flush_map();
 	if (xdp_act & IDPF_XDP_ACT_FINALIZE_TX) {
+		if (static_branch_unlikely(&idpf_xdp_locking_key))
+			spin_lock(&xdpq->tx_lock);
 		idpf_set_rs_bit(xdpq);
 		idpf_xdpq_update_tail(xdpq);
+		if (static_branch_unlikely(&idpf_xdp_locking_key))
+			spin_unlock(&xdpq->tx_lock);
 	}
 }
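
As a closing usage note (illustrative only, not part of the series), the
XDP_TX path, including the shared-queue locking above when there are fewer
XDP Tx queues than CPUs, can be exercised with a minimal MAC-swap program
such as the sketch below; loading it with a standard tool, for example
"ip link set dev <iface> xdp obj prog.o sec xdp", is assumed.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative XDP_TX test program, not part of this patch series. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_test(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	unsigned char tmp[ETH_ALEN];

	/* The verifier requires an explicit bounds check before access. */
	if (data + sizeof(*eth) > data_end)
		return XDP_PASS;

	/* Swap source and destination MACs and bounce the frame back. */
	__builtin_memcpy(tmp, eth->h_source, ETH_ALEN);
	__builtin_memcpy(eth->h_source, eth->h_dest, ETH_ALEN);
	__builtin_memcpy(eth->h_dest, tmp, ETH_ALEN);

	return XDP_TX;
}

char _license[] SEC("license") = "GPL";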