@@ -715,7 +715,7 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      rx_bi->dma,
 					      rx_bi->page_offset,
-					      rx_ring->rx_buf_len,
+					      IAVF_RXBUFFER_3072,
 					      DMA_FROM_DEVICE);
 
 		/* free resources associated with mapping */
@@ -724,7 +724,7 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
 				     DMA_FROM_DEVICE,
 				     IAVF_RX_DMA_ATTR);
 
-		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
+		__free_page(rx_bi->page);
 
 		rx_bi->page = NULL;
 		rx_bi->page_offset = 0;
@@ -736,7 +736,6 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
-	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 }
@@ -792,7 +791,6 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
 		goto err;
 	}
 
-	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
@@ -812,9 +810,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
 {
 	rx_ring->next_to_use = val;
 
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
@@ -838,12 +833,6 @@ static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
 	struct page *page = bi->page;
 	dma_addr_t dma;
 
-	/* since we are recycling buffers we should seldom need to alloc */
-	if (likely(page)) {
-		rx_ring->rx_stats.page_reuse_count++;
-		return true;
-	}
-
 	/* alloc new page for storage */
 	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
 	if (unlikely(!page)) {
@@ -870,9 +859,6 @@ static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
 	bi->page = page;
 	bi->page_offset = IAVF_SKB_PAD;
 
-	/* initialize pagecnt_bias to 1 representing we fully own page */
-	bi->pagecnt_bias = 1;
-
 	return true;
 }
 
@@ -924,7 +910,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 						 bi->page_offset,
-						 rx_ring->rx_buf_len,
+						 IAVF_RXBUFFER_3072,
 						 DMA_FROM_DEVICE);
 
 		/* Refresh the desc even if buffer_addrs didn't change
@@ -1102,91 +1088,6 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
 	return false;
 }
 
-/**
- * iavf_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
-			       struct iavf_rx_buffer *old_buff)
-{
-	struct iavf_rx_buffer *new_buff;
-	u16 nta = rx_ring->next_to_alloc;
-
-	new_buff = &rx_ring->rx_bi[nta];
-
-	/* update, and store next to alloc */
-	nta++;
-	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-	/* transfer page from old buffer to new buffer */
-	new_buff->dma		= old_buff->dma;
-	new_buff->page		= old_buff->page;
-	new_buff->page_offset	= old_buff->page_offset;
-	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
-}
-
-/**
- * iavf_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
- * @rx_buffer: buffer containing the page
- *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
-static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
-{
-	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
-	struct page *page = rx_buffer->page;
-
-	/* Is any reuse possible? */
-	if (!dev_page_is_reusable(page))
-		return false;
-
-#if (PAGE_SIZE < 8192)
-	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_count(page) - pagecnt_bias) > 1))
-		return false;
-#else
-#define IAVF_LAST_OFFSET \
-	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
-	if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
-		return false;
-#endif
-
-	/* If we have drained the page fragment pool we need to update
-	 * the pagecnt_bias and page count so that we fully restock the
-	 * number of references the driver holds.
-	 */
-	if (unlikely(!pagecnt_bias)) {
-		page_ref_add(page, USHRT_MAX);
-		rx_buffer->pagecnt_bias = USHRT_MAX;
-	}
-
-	return true;
-}
-
 /**
  * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1204,24 +1105,13 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
 			     struct sk_buff *skb,
 			     unsigned int size)
 {
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
 	unsigned int truesize = SKB_DATA_ALIGN(size + IAVF_SKB_PAD);
-#endif
 
 	if (!size)
 		return;
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
 			rx_buffer->page_offset, size, truesize);
-
-	/* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
-	rx_buffer->page_offset ^= truesize;
-#else
-	rx_buffer->page_offset += truesize;
-#endif
 }
 
 /**
@@ -1249,9 +1139,6 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 				      size,
 				      DMA_FROM_DEVICE);
 
-	/* We have pulled a buffer for use, so decrement pagecnt_bias */
-	rx_buffer->pagecnt_bias--;
-
 	return rx_buffer;
 }
 
@@ -1269,12 +1156,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 			     unsigned int size)
 {
 	void *va;
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 				SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
-#endif
 	struct sk_buff *skb;
 
 	if (!rx_buffer || !size)
@@ -1292,42 +1175,25 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 	skb_reserve(skb, IAVF_SKB_PAD);
 	__skb_put(skb, size);
 
-	/* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
-	rx_buffer->page_offset ^= truesize;
-#else
-	rx_buffer->page_offset += truesize;
-#endif
-
 	return skb;
 }
 
 /**
- * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
+ * iavf_put_rx_buffer - Unmap used buffer
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: rx buffer to pull data from
  *
- * This function will clean up the contents of the rx_buffer. It will
- * either recycle the buffer or unmap it and free the associated resources.
+ * This function will unmap the buffer after it's written by HW.
  */
 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
 			       struct iavf_rx_buffer *rx_buffer)
 {
 	if (!rx_buffer)
 		return;
 
-	if (iavf_can_reuse_rx_page(rx_buffer)) {
-		/* hand second half of page back to the ring */
-		iavf_reuse_rx_page(rx_ring, rx_buffer);
-		rx_ring->rx_stats.page_reuse_count++;
-	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     iavf_rx_pg_size(rx_ring),
-				     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
-		__page_frag_cache_drain(rx_buffer->page,
-					rx_buffer->pagecnt_bias);
-	}
+	/* we are not reusing the buffer so unmap it */
+	dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+			     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
 
 	/* clear contents of buffer_info */
 	rx_buffer->page = NULL;
@@ -1432,8 +1298,6 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
 			rx_ring->rx_stats.alloc_buff_failed++;
-			if (rx_buffer && size)
-				rx_buffer->pagecnt_bias++;
 			break;
 		}
 