Commit 348bfec

Merge branch 'qed-XDP-support'
Yuval Mintz says:

====================
qed*: Add XDP support

This patch series is intended to add XDP to the qede driver, although it
contains quite a bit of cleanups, refactorings and infrastructure changes
as well.

The content of this series can be roughly divided into:

 - Datapath improvements - mostly focused on having the datapath utilize
   parameters which can be more tightly contained in cachelines.
   Patches #1, #2, #8, #9 belong to this group.

 - Refactoring - done mostly in favour of XDP. Patches #3, #4, #5, #9.

 - Infrastructure changes - done in favour of XDP. Patches #6 and #7
   belong to this category [#7 being by far the biggest patch in the
   series].

 - Actual XDP support - last two patches [#10, #11].
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents: f54b8cd + cb6aeb0
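For context on what the series enables: an XDP program is an eBPF program attached at the driver's earliest receive point, returning a verdict such as XDP_PASS, XDP_DROP or XDP_TX per frame. The following is a minimal illustrative sketch, not code from this series; the program name, object file name and interface are placeholders.

/* xdp_drop_tcp.c - minimal XDP sketch (illustrative only, not part of this series).
 * Drops IPv4 TCP frames and passes everything else up to the stack.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_drop_tcp(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph;

	/* The verifier requires every packet access to be bounds-checked. */
	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end)
		return XDP_PASS;

	return iph->protocol == IPPROTO_TCP ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Assuming an iproute2 build with XDP support, such an object can typically be attached to a qede interface with something like: ip link set dev <iface> xdp obj xdp_drop_tcp.o sec xdp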

14 files changed, +1879 −1291 lines


drivers/net/ethernet/qlogic/qed/qed.h (−12 lines)
@@ -241,15 +241,6 @@ struct qed_hw_info {
 	enum qed_wol_support b_wol_support;
 };
 
-struct qed_hw_cid_data {
-	u32 cid;
-	bool b_cid_allocated;
-
-	/* Additional identifiers */
-	u16 opaque_fid;
-	u8 vport_id;
-};
-
 /* maximun size of read/write commands (HW limit) */
 #define DMAE_MAX_RW_SIZE	0x2000
 
@@ -416,9 +407,6 @@ struct qed_hwfn {
 
 	struct qed_dcbx_info		*p_dcbx_info;
 
-	struct qed_hw_cid_data		*p_tx_cids;
-	struct qed_hw_cid_data		*p_rx_cids;
-
 	struct qed_dmae_info		dmae_info;
 
 	/* QM init */

drivers/net/ethernet/qlogic/qed/qed_dev.c (+4 −29 lines)
@@ -134,15 +134,6 @@ void qed_resc_free(struct qed_dev *cdev)
 
 	kfree(cdev->reset_stats);
 
-	for_each_hwfn(cdev, i) {
-		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-
-		kfree(p_hwfn->p_tx_cids);
-		p_hwfn->p_tx_cids = NULL;
-		kfree(p_hwfn->p_rx_cids);
-		p_hwfn->p_rx_cids = NULL;
-	}
-
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -425,23 +416,6 @@ int qed_resc_alloc(struct qed_dev *cdev)
 	if (!cdev->fw_data)
 		return -ENOMEM;
 
-	/* Allocate Memory for the Queue->CID mapping */
-	for_each_hwfn(cdev, i) {
-		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-		int tx_size = sizeof(struct qed_hw_cid_data) *
-				  RESC_NUM(p_hwfn, QED_L2_QUEUE);
-		int rx_size = sizeof(struct qed_hw_cid_data) *
-				  RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
-		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
-		if (!p_hwfn->p_tx_cids)
-			goto alloc_no_mem;
-
-		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
-		if (!p_hwfn->p_rx_cids)
-			goto alloc_no_mem;
-	}
-
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 		u32 n_eqes, num_cons;
@@ -2283,12 +2257,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 {
 	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
 	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-	u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
 
 	if (!pp_virt_addr_tbl)
 		return;
 
-	if (!p_chain->pbl.p_virt_table)
+	if (!p_pbl_virt)
 		goto out;
 
 	for (i = 0; i < page_cnt; i++) {
@@ -2306,7 +2280,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
 	dma_free_coherent(&cdev->pdev->dev,
 			  pbl_size,
-			  p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+			  p_chain->pbl_sp.p_virt_table,
+			  p_chain->pbl_sp.p_phys_table);
 out:
 	vfree(p_chain->pbl.pp_virt_addr_tbl);
 }
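The pbl → pbl_sp change in qed_chain_free_pbl() reflects the cacheline goal stated in the cover letter: the PBL table addresses, which are only touched at allocation and free time, move into a slow-path sub-structure, while the address table walked per packet stays with the hot fields. A rough sketch of that kind of split is shown below; only the field names visible in the diff come from qed, the wrapper struct and comments are assumptions for illustration.

/* Illustrative layout sketch only -- not the actual struct qed_chain definition. */
#include <linux/types.h>

struct chain_pbl_fast {
	/* Walked on every page transition in the datapath. */
	void **pp_virt_addr_tbl;
};

struct chain_pbl_slow {
	/* Only needed when the chain is allocated or freed. */
	void *p_virt_table;
	dma_addr_t p_phys_table;
};

struct chain_sketch {
	/* Hot, per-packet state kept together so it shares cachelines. */
	struct chain_pbl_fast pbl;
	u32 page_cnt;

	/* Cold, setup/teardown-only state pushed out of the hot area. */
	struct chain_pbl_slow pbl_sp;
};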
