XDP and AF_XDP based on net-next (Dec 18) #5

Open · wants to merge 21 commits into net-next-main · showing changes from 1 commit
idpf: add support for XDP_REDIRECT
Implement XDP_REDIRECT action and ndo_xdp_xmit() callback.

For now, packets redirected from a CPU whose index is greater than the
number of XDP queues are simply dropped with an error.
This is a fairly common situation and it will be addressed in later patches.

The patch also refactors Rx XDP handling to use a switch statement due to
the increased number of actions.

Signed-off-by: Michal Kubiak <[email protected]>
michalQb committed Dec 19, 2023
commit 80583f9e4c1442fa34731efacadf792403c04d5b
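
For context (not part of the patch; the map and program names below are illustrative), a minimal XDP program that exercises the new redirect path might look like the following sketch. It redirects every packet to the netdev stored at key 0 of a devmap; bpf_redirect_map() returns XDP_REDIRECT on success, which the driver now handles via xdp_do_redirect() and later flushes with xdp_do_flush().

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only: redirect all packets to the netdev stored at key 0
 * of a devmap. Attaching this to an idpf interface exercises the
 * XDP_REDIRECT handling added by this commit.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_prog(struct xdp_md *ctx)
{
	/* The low bits of the flags argument are the fallback action
	 * returned when the map lookup fails.
	 */
	return bpf_redirect_map(&tx_port, 0, XDP_ABORTED);
}

char _license[] SEC("license") = "GPL";

Conversely, frames redirected to an idpf device from another interface arrive through the new .ndo_xdp_xmit callback, idpf_xdp_xmit(), shown in the idpf_txrx.c hunk below, where the transmit queue is chosen by CPU index (smp_processor_id() plus vport->xdp_txq_offset).
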
7 changes: 6 additions & 1 deletion drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -839,7 +839,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
netdev->features |= dflt_features;
netdev->hw_features |= dflt_features | offloads;
netdev->hw_enc_features |= dflt_features | offloads;
netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
idpf_set_ethtool_ops(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

@@ -2500,6 +2500,10 @@ idpf_xdp_setup_prog(struct idpf_vport *vport, struct bpf_prog *prog,
"Could not reconfigure the queues after XDP setup\n");
return err;
}
if (prog)
xdp_features_set_redirect_target(vport->netdev, true);
else
xdp_features_clear_redirect_target(vport->netdev);

if (vport_is_up) {
err = idpf_vport_open(vport, false);
@@ -2639,6 +2643,7 @@ static const struct net_device_ops idpf_netdev_ops_splitq = {
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
.ndo_bpf = idpf_xdp,
.ndo_xdp_xmit = idpf_xdp_xmit,
};

static const struct net_device_ops idpf_netdev_ops_singleq = {
104 changes: 93 additions & 11 deletions drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -67,19 +67,31 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)

/**
* idpf_xdp_buf_rel - Release an XDP Tx buffer
* @xdpq: XDP Tx queue
* @xdp_buf: the buffer to free
*/
static void idpf_xdp_buf_rel(struct idpf_tx_buf *xdp_buf)
static void idpf_xdp_buf_rel(struct idpf_queue *xdpq,
struct idpf_tx_buf *xdp_buf)
{
struct page *page;
u32 put_size;

page = xdp_buf->page;
put_size = dma_unmap_len(xdp_buf, len);
if (page) {
switch (xdp_buf->xdp_type) {
case IDPF_XDP_BUFFER_TX:
page = xdp_buf->page;
put_size = dma_unmap_len(xdp_buf, len);
page_pool_put_page(page->pp, page, put_size, true);
xdp_buf->page = NULL;
break;
case IDPF_XDP_BUFFER_FRAME:
dma_unmap_page(xdpq->dev,
dma_unmap_addr(xdp_buf, dma),
dma_unmap_len(xdp_buf, len),
DMA_TO_DEVICE);
xdp_return_frame(xdp_buf->xdpf);
break;
}

xdp_buf->xdp_type = IDPF_XDP_BUFFER_NONE;
}

/**
@@ -124,7 +136,7 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
/* Free all the Tx buffer sk_buffs */
for (i = 0; i < txq->desc_count; i++) {
if (test_bit(__IDPF_Q_XDP, txq->flags))
idpf_xdp_buf_rel(&txq->tx_buf[i]);
idpf_xdp_buf_rel(txq, &txq->tx_buf[i]);
else
idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
}
@@ -3507,7 +3519,7 @@ static u32 idpf_clean_xdp_irq(struct idpf_queue *xdpq)
for (i = 0; i < done_frames; i++) {
struct idpf_tx_buf *tx_buf = &xdpq->tx_buf[tx_ntc];

idpf_xdp_buf_rel(tx_buf);
idpf_xdp_buf_rel(xdpq, tx_buf);

tx_ntc++;
if (tx_ntc >= xdpq->desc_count)
@@ -3523,13 +3535,13 @@ static u32 idpf_clean_xdp_irq(struct idpf_queue *xdpq)
* idpf_xmit_xdp_buff - submit single buffer to XDP queue for transmission
* @xdp: XDP buffer pointer
* @xdpq: XDP queue for transmission
* @map: whether to map the buffer
* @frame: whether the function is called from .ndo_xdp_xmit()
*
* Returns negative on failure, 0 on success.
*/
static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
struct idpf_queue *xdpq,
bool map)
bool frame)
{
struct idpf_tx_splitq_params tx_params = { };
u32 batch_sz = IDPF_QUEUE_QUARTER(xdpq);
@@ -3547,7 +3559,7 @@ static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
if (unlikely(!free))
return -EBUSY;

if (map) {
if (frame) {
dma = dma_map_single(xdpq->dev, data, size, DMA_TO_DEVICE);
if (dma_mapping_error(xdpq->dev, dma))
return -ENOMEM;
@@ -3563,7 +3575,14 @@ static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
tx_buf = &xdpq->tx_buf[ntu];
tx_buf->bytecount = size;
tx_buf->gso_segs = 1;
tx_buf->page = virt_to_page(data);

if (frame) {
tx_buf->xdp_type = IDPF_XDP_BUFFER_FRAME;
tx_buf->xdpf = xdp->data_hard_start;
} else {
tx_buf->xdp_type = IDPF_XDP_BUFFER_TX;
tx_buf->page = virt_to_page(data);
}

/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -3587,6 +3606,62 @@ static int idpf_xmit_xdp_buff(const struct xdp_buff *xdp,
return 0;
}

/**
* idpf_xdp_xmit - submit packets to xdp ring for transmission
* @dev: netdev
* @n: number of xdp frames to be transmitted
* @frames: xdp frames to be transmitted
* @flags: transmit flags
*
* Returns the number of frames successfully sent. Frames that fail are
* freed via the XDP return API.
* For error cases, a negative errno code is returned and no frames
* are transmitted (the caller must handle freeing frames).
*/
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags)
{
struct idpf_netdev_priv *np = netdev_priv(dev);
struct idpf_vport *vport = np->vport;
u32 queue_index, nxmit = 0;
struct idpf_queue *xdpq;
int i, err = 0;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
return -ENETDOWN;
if (unlikely(!idpf_xdp_is_prog_ena(vport)))
return -ENXIO;

queue_index = smp_processor_id();
if (queue_index >= vport->num_xdp_txq)
return -ENXIO;

xdpq = vport->txqs[queue_index + vport->xdp_txq_offset];

for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
struct xdp_buff xdp;

xdp_convert_frame_to_buff(xdpf, &xdp);
err = idpf_xmit_xdp_buff(&xdp, xdpq, true);
if (unlikely(err)) {
netdev_err(dev, "XDP frame TX failed, error: %d\n",
err);
break;
}
nxmit++;
}

if (likely(nxmit))
idpf_set_rs_bit(xdpq);
if (flags & XDP_XMIT_FLUSH)
idpf_xdpq_update_tail(xdpq);

return nxmit;
}

/**
* idpf_run_xdp - Run XDP program and perform the resulting action
* @rx_bufq: Rx buffer queue
@@ -3607,6 +3682,7 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,
u32 *rxq_xdp_act,
unsigned int size)
{
struct net_device *netdev = rx_bufq->vport->netdev;
u32 hr = rx_bufq->pp->p.offset;
unsigned int xdp_act;

@@ -3625,6 +3701,12 @@ static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq,

*rxq_xdp_act |= IDPF_XDP_ACT_FINALIZE_TX;
break;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(netdev, xdp, xdp_prog)))
goto xdp_err;

*rxq_xdp_act |= IDPF_XDP_ACT_FINALIZE_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(rx_bufq->vport->netdev, xdp_prog,
xdp_act);
23 changes: 21 additions & 2 deletions drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -169,6 +169,7 @@ do { \
#define IDPF_TX_FLAGS_TUNNEL BIT(3)

#define IDPF_XDP_ACT_FINALIZE_TX BIT(0)
#define IDPF_XDP_ACT_FINALIZE_REDIR BIT(1)

#define IDPF_XDP_MAX_MTU 3046

@@ -177,6 +178,18 @@ union idpf_tx_flex_desc {
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

/**
* enum idpf_xdp_buffer_type - type of &idpf_tx_buf on XDP queue
* @IDPF_XDP_BUFFER_NONE: unused, no action required
* @IDPF_XDP_BUFFER_TX: free according to our memory model
* @IDPF_XDP_BUFFER_FRAME: use xdp_return_frame()
*/
enum idpf_xdp_buffer_type {
IDPF_XDP_BUFFER_NONE = 0U,
IDPF_XDP_BUFFER_TX,
IDPF_XDP_BUFFER_FRAME,
};

/**
* struct idpf_tx_buf
* @next_to_watch: Next descriptor to clean
@@ -202,13 +215,15 @@ union idpf_tx_flex_desc {
struct idpf_tx_buf {
void *next_to_watch;
union {
struct sk_buff *skb;
struct page *page;
struct sk_buff *skb; /* used for .ndo_start_xmit() */
struct page *page; /* used for XDP_TX */
struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
};
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
unsigned int bytecount;
unsigned short gso_segs;
unsigned short xdp_type;

union {
int compl_tag;
@@ -1062,6 +1077,8 @@ netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);

/**
* idpf_xdpq_update_tail - Updates the XDP Tx queue tail register
@@ -1108,6 +1125,8 @@ static inline void idpf_set_rs_bit(struct idpf_queue *xdpq)
*/
static inline void idpf_finalize_xdp_rx(struct idpf_queue *xdpq, u32 xdp_act)
{
if (xdp_act & IDPF_XDP_ACT_FINALIZE_REDIR)
xdp_do_flush();
if (xdp_act & IDPF_XDP_ACT_FINALIZE_TX) {
idpf_set_rs_bit(xdpq);
idpf_xdpq_update_tail(xdpq);