forked from alobakin/linux
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathidpf_lib.c
2617 lines (2170 loc) · 68.9 KB
/
idpf_lib.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
const char * const idpf_vport_vc_state_str[] = {
IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
};
/**
 * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
 * @adapter: private data struct
 *
 * Return 0 on success, error on failure
 */
static int idpf_init_vector_stack(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack = &adapter->vector_stack;
	u16 free_pool_start;
	u32 idx;

	mutex_lock(&adapter->vector_lock);

	free_pool_start = adapter->num_msix_entries - adapter->num_avail_msix;
	stack->size = adapter->num_msix_entries;

	/* Point both base and top at the first entry of the 'free pool' so
	 * the unused vectors can be distributed on demand.
	 */
	stack->base = free_pool_start;
	stack->top = free_pool_start;

	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
	if (!stack->vec_idx) {
		mutex_unlock(&adapter->vector_lock);
		return -ENOMEM;
	}

	/* Identity-fill the stack: slot i holds vector index i */
	for (idx = 0; idx < stack->size; idx++)
		stack->vec_idx[idx] = idx;

	mutex_unlock(&adapter->vector_lock);

	return 0;
}
/**
 * idpf_deinit_vector_stack - zero out the MSIX vector stack
 * @adapter: private data struct
 *
 * Frees the vector index array under the vector lock.
 */
static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
{
	mutex_lock(&adapter->vector_lock);
	kfree(adapter->vector_stack.vec_idx);
	adapter->vector_stack.vec_idx = NULL;
	mutex_unlock(&adapter->vector_lock);
}
/**
 * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
 * @adapter: adapter structure
 *
 * This will also disable interrupt mode and queue up mailbox task. Mailbox
 * task will reschedule itself if not in interrupt mode.
 */
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
	/* Drop interrupt mode before freeing the IRQ so that the mailbox
	 * task queued below sees polling mode and reschedules itself.
	 */
	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
	/* Vector 0 is the mailbox vector */
	free_irq(adapter->msix_entries[0].vector, adapter);
	/* Kick the mailbox task once immediately */
	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}
/**
 * idpf_intr_rel - Release interrupt capabilities and free memory
 * @adapter: adapter to disable interrupts on
 *
 * Tears down the mailbox IRQ, frees the OS vectors, tells the CP to
 * deallocate its vectors, and releases the stack and entry bookkeeping.
 */
void idpf_intr_rel(struct idpf_adapter *adapter)
{
	int ret;

	if (!adapter->msix_entries)
		return;

	/* Mailbox IRQ goes first, then the OS-level vectors */
	idpf_mb_intr_rel_irq(adapter);
	pci_free_irq_vectors(adapter->pdev);

	/* Best effort: log but continue teardown if the CP message fails */
	ret = idpf_send_dealloc_vectors_msg(adapter);
	if (ret)
		dev_err(&adapter->pdev->dev,
			"Failed to deallocate vectors: %d\n", ret);

	idpf_deinit_vector_stack(adapter);

	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * idpf_mb_intr_clean - Interrupt handler for the mailbox
 * @irq: interrupt number
 * @data: pointer to the adapter structure
 *
 * Schedules the mailbox task to service the interrupt.
 */
static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
{
	/* No cast needed: void * converts implicitly in C */
	struct idpf_adapter *adapter = data;

	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);

	return IRQ_HANDLED;
}
/**
 * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
 * @adapter: adapter to get the hardware address for register write
 *
 * Writes the dynamic-control enable bits and unmasks the control queue
 * cause in the ICR enable register.
 */
static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
{
	struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;

	writel(intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m, intr->dyn_ctl);
	writel(intr->icr_ena_ctlq_m, intr->icr_ena);
}
/**
* idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
* @adapter: adapter structure to pass to the mailbox irq handler
*/
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
struct idpf_q_vector *mb_vector = &adapter->mb_vector;
int irq_num, mb_vidx = 0, err;
irq_num = adapter->msix_entries[mb_vidx].vector;
mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
dev_driver_string(&adapter->pdev->dev),
"Mailbox", mb_vidx);
err = request_irq(irq_num, adapter->irq_mb_handler, 0,
mb_vector->name, adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err);
return err;
}
set_bit(IDPF_MB_INTR_MODE, adapter->flags);
return 0;
}
/**
 * idpf_set_mb_vec_id - Set vector index for mailbox
 * @adapter: adapter structure to access the vector chunks
 *
 * The first vector id in the requested vector chunks from the CP is for
 * the mailbox
 */
static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
{
	adapter->mb_vector.v_idx = adapter->req_vec_chunks ?
		le16_to_cpu(adapter->caps.mailbox_vector_id) : 0;
}
/**
 * idpf_mb_intr_init - Initialize the mailbox interrupt
 * @adapter: adapter structure to store the mailbox vector
 *
 * Return: 0 on success, negative errno propagated from the IRQ request.
 */
static int idpf_mb_intr_init(struct idpf_adapter *adapter)
{
	/* Device-specific reg ops fill in the mailbox interrupt registers */
	adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
	adapter->irq_mb_handler = idpf_mb_intr_clean;
	return idpf_mb_intr_req_irq(adapter);
}
/**
 * idpf_vector_lifo_push - push MSIX vector index onto stack
 * @adapter: private data struct
 * @vec_idx: vector index to store
 *
 * Caller must hold the vector lock. Returns 0 on success, -EINVAL when the
 * stack is already full.
 */
static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
{
	struct idpf_vector_lifo *stack = &adapter->vector_stack;

	lockdep_assert_held(&adapter->vector_lock);

	/* Stack grows downward; top == base means there is no room left */
	if (stack->top == stack->base) {
		dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
			stack->top);
		return -EINVAL;
	}

	stack->top--;
	stack->vec_idx[stack->top] = vec_idx;

	return 0;
}
/**
 * idpf_vector_lifo_pop - pop MSIX vector index from stack
 * @adapter: private data struct
 *
 * Caller must hold the vector lock. Returns the popped vector index, or
 * -EINVAL when the stack is empty.
 */
static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack = &adapter->vector_stack;
	int vecid;

	lockdep_assert_held(&adapter->vector_lock);

	/* top == size means every entry has already been handed out */
	if (stack->top == stack->size) {
		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
		return -EINVAL;
	}

	vecid = stack->vec_idx[stack->top];
	stack->top++;

	return vecid;
}
/**
 * idpf_vector_stash - Store the vector indexes onto the stack
 * @adapter: private data struct
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This function is a no-op if there are no vectors indexes to be stashed.
 * Caller must hold the vector lock.
 */
static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
			      struct idpf_vector_info *vec_info)
{
	/* Default vports keep their IDPF_MIN_Q_VEC vectors from the default
	 * pool; those are not returned to the stack.
	 */
	int low = vec_info->default_vport ? IDPF_MIN_Q_VEC : 0;
	int i;

	lockdep_assert_held(&adapter->vector_lock);

	if (!vec_info->num_curr_vecs)
		return;

	for (i = vec_info->num_curr_vecs - 1; i >= low; i--) {
		idpf_vector_lifo_push(adapter, q_vector_idxs[i]);
		adapter->num_avail_msix++;
	}
}
/**
 * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
 * @adapter: driver specific private structure
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This is the core function to distribute the MSIX vectors acquired from the
 * OS. It expects the caller to pass the number of vectors required and
 * also previously allocated. First, it stashes previously allocated vector
 * indexes on to the stack and then figures out if it can allocate requested
 * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as
 * requested vectors, then this function just stashes the already allocated
 * vectors and returns 0.
 *
 * Returns actual number of vectors allocated on success, error value on failure
 * If 0 is returned, implies the stack has no vectors to allocate which is also
 * a failure case for the caller
 */
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
				u16 *q_vector_idxs,
				struct idpf_vector_info *vec_info)
{
	u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
	struct idpf_vector_lifo *stack;
	int i, j, vecid;
	mutex_lock(&adapter->vector_lock);
	stack = &adapter->vector_stack;
	num_req_vecs = vec_info->num_req_vecs;
	/* Stash interrupt vector indexes onto the stack if required */
	idpf_vector_stash(adapter, q_vector_idxs, vec_info);
	if (!num_req_vecs)
		goto rel_lock;
	if (vec_info->default_vport) {
		/* As IDPF_MIN_Q_VEC per default vport is put aside in the
		 * default pool of the stack, use them for default vports
		 */
		j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
		for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
			q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
			num_req_vecs--;
		}
	}
	/* Find if stack has enough vector to allocate */
	max_vecs = min(adapter->num_avail_msix, num_req_vecs);
	/* Pop the remaining vectors from the free pool; a short pop (or
	 * max_vecs == 0) means the caller gets fewer than requested.
	 */
	for (j = 0; j < max_vecs; j++) {
		vecid = idpf_vector_lifo_pop(adapter);
		q_vector_idxs[num_alloc_vecs++] = vecid;
	}
	adapter->num_avail_msix -= max_vecs;
rel_lock:
	mutex_unlock(&adapter->vector_lock);
	return num_alloc_vecs;
}
/**
 * idpf_intr_req - Request interrupt capabilities
 * @adapter: adapter to enable interrupts on
 *
 * Returns 0 on success, negative on failure
 */
int idpf_intr_req(struct idpf_adapter *adapter)
{
	u16 default_vports = idpf_get_default_vports(adapter);
	int num_q_vecs, total_vecs, num_vec_ids;
	int min_vectors, v_actual, err;
	unsigned int vector;
	u16 *vecids;
	total_vecs = idpf_get_reserved_vecs(adapter);
	/* One vector is reserved for the mailbox; the rest go to queues */
	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to allocate %d vectors: %d\n", num_q_vecs, err);
		return -EAGAIN;
	}
	/* Minimum: mailbox plus IDPF_MIN_Q_VEC per default vport */
	min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
	v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
					 total_vecs, PCI_IRQ_MSIX);
	if (v_actual < min_vectors) {
		dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
			v_actual);
		err = -EAGAIN;
		goto send_dealloc_vecs;
	}
	adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto free_irq;
	}
	idpf_set_mb_vec_id(adapter);
	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
	if (!vecids) {
		err = -ENOMEM;
		goto free_msix;
	}
	if (adapter->req_vec_chunks) {
		struct virtchnl2_vector_chunks *vchunks;
		struct virtchnl2_alloc_vectors *ac;
		ac = adapter->req_vec_chunks;
		vchunks = &ac->vchunks;
		num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
					       vchunks);
		if (num_vec_ids < v_actual) {
			err = -EINVAL;
			goto free_vecids;
		}
	} else {
		/* No chunks from the CP: vector ids are simply 0..v_actual-1 */
		int i;
		for (i = 0; i < v_actual; i++)
			vecids[i] = i;
	}
	for (vector = 0; vector < v_actual; vector++) {
		adapter->msix_entries[vector].entry = vecids[vector];
		adapter->msix_entries[vector].vector =
			pci_irq_vector(adapter->pdev, vector);
	}
	adapter->num_req_msix = total_vecs;
	adapter->num_msix_entries = v_actual;
	/* 'num_avail_msix' is used to distribute excess vectors to the vports
	 * after considering the minimum vectors required per each default
	 * vport
	 */
	adapter->num_avail_msix = v_actual - min_vectors;
	/* Fill MSIX vector lifo stack with vector indexes */
	err = idpf_init_vector_stack(adapter);
	if (err)
		goto free_vecids;
	err = idpf_mb_intr_init(adapter);
	if (err)
		goto deinit_vec_stack;
	idpf_mb_irq_enable(adapter);
	kfree(vecids);
	return 0;
	/* Unwind in reverse acquisition order */
deinit_vec_stack:
	idpf_deinit_vector_stack(adapter);
free_vecids:
	kfree(vecids);
free_msix:
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
free_irq:
	/* NOTE(review): label named free_irq but it releases the PCI IRQ
	 * vectors allocated above, not a requested IRQ line.
	 */
	pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
	idpf_send_dealloc_vectors_msg(adapter);
	return err;
}
/**
* idpf_find_mac_filter - Search filter list for specific mac filter
* @vconfig: Vport config structure
* @macaddr: The MAC address
*
* Returns ptr to the filter object or NULL. Must be called while holding the
* mac_filter_list_lock.
**/
static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
const u8 *macaddr)
{
struct idpf_mac_filter *f;
if (!macaddr)
return NULL;
list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
if (ether_addr_equal(macaddr, f->macaddr))
return f;
}
return NULL;
}
/**
 * __idpf_del_mac_filter - Delete a MAC filter from the filter list
 * @vport_config: Vport config structure
 * @macaddr: The MAC address
 *
 * Takes the mac_filter_list_lock, removes the matching filter from the
 * list (if any) and frees it. Always returns 0.
 **/
static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
				 const u8 *macaddr)
{
	struct idpf_mac_filter *entry;

	spin_lock_bh(&vport_config->mac_filter_list_lock);

	entry = idpf_find_mac_filter(vport_config, macaddr);
	if (entry) {
		list_del(&entry->list);
		kfree(entry);
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	return 0;
}
/**
 * idpf_del_mac_filter - Delete a MAC filter from the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Removes filter from list and if interface is up, tells hardware about the
 * removed filter. Returns -EINVAL if no matching filter exists.
 **/
static int idpf_del_mac_filter(struct idpf_vport *vport,
			       struct idpf_netdev_priv *np,
			       const u8 *macaddr, bool async)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = np->adapter->vport_config[np->vport_idx];

	spin_lock_bh(&vport_config->mac_filter_list_lock);
	f = idpf_find_mac_filter(vport_config, macaddr);
	if (!f) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);
		return -EINVAL;
	}
	/* Mark for removal so the virtchnl message below picks it up */
	f->remove = true;
	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	if (np->state == __IDPF_VPORT_UP) {
		int err = idpf_add_del_mac_filters(vport, np, false, async);

		if (err)
			return err;
	}

	return __idpf_del_mac_filter(vport_config, macaddr);
}
/**
 * __idpf_add_mac_filter - Add mac filter helper function
 * @vport_config: Vport config structure
 * @macaddr: Address to add
 *
 * Takes mac_filter_list_lock spinlock to add new filter to list. If the
 * address already has a filter, it is simply un-marked for removal.
 */
static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
				 const u8 *macaddr)
{
	struct idpf_mac_filter *f;
	int err = 0;

	spin_lock_bh(&vport_config->mac_filter_list_lock);

	f = idpf_find_mac_filter(vport_config, macaddr);
	if (f) {
		/* Already present: just cancel any pending removal */
		f->remove = false;
		goto unlock;
	}

	/* GFP_ATOMIC: allocating under a BH spinlock */
	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f) {
		err = -ENOMEM;
		goto unlock;
	}

	ether_addr_copy(f->macaddr, macaddr);
	list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
	f->add = true;

unlock:
	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	return err;
}
/**
 * idpf_add_mac_filter - Add a mac filter to the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Returns 0 on success or error on failure. If interface is up, we'll also
 * send the virtchnl message to tell hardware about the filter.
 **/
static int idpf_add_mac_filter(struct idpf_vport *vport,
			       struct idpf_netdev_priv *np,
			       const u8 *macaddr, bool async)
{
	struct idpf_vport_config *cfg;
	int ret;

	cfg = np->adapter->vport_config[np->vport_idx];

	ret = __idpf_add_mac_filter(cfg, macaddr);
	if (ret)
		return ret;

	/* Only program hardware when the interface is actually up */
	if (np->state == __IDPF_VPORT_UP)
		ret = idpf_add_del_mac_filters(vport, np, true, async);

	return ret;
}
/**
 * idpf_del_all_mac_filters - Delete all MAC filters in list
 * @vport: main vport struct
 *
 * Takes mac_filter_list_lock spinlock. Deletes all filters
 */
static void idpf_del_all_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *cfg;
	struct idpf_mac_filter *f, *tmp;

	cfg = vport->adapter->vport_config[vport->idx];

	spin_lock_bh(&cfg->mac_filter_list_lock);

	/* _safe variant: entries are freed while walking the list */
	list_for_each_entry_safe(f, tmp, &cfg->user_config.mac_filter_list,
				 list) {
		list_del(&f->list);
		kfree(f);
	}

	spin_unlock_bh(&cfg->mac_filter_list_lock);
}
/**
 * idpf_restore_mac_filters - Re-add all MAC filters in list
 * @vport: main vport struct
 *
 * Takes mac_filter_list_lock spinlock. Sets add field to true for filters to
 * resync filters back to HW.
 */
static void idpf_restore_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *cfg;
	struct idpf_mac_filter *f;

	cfg = vport->adapter->vport_config[vport->idx];

	spin_lock_bh(&cfg->mac_filter_list_lock);
	list_for_each_entry(f, &cfg->user_config.mac_filter_list, list)
		f->add = true;
	spin_unlock_bh(&cfg->mac_filter_list_lock);

	/* Push the whole list back to HW; synchronous (async == false) */
	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
				 true, false);
}
/**
 * idpf_remove_mac_filters - Remove all MAC filters in list
 * @vport: main vport struct
 *
 * Takes mac_filter_list_lock spinlock. Sets remove field to true for filters
 * to remove filters in HW.
 */
static void idpf_remove_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *cfg;
	struct idpf_mac_filter *f;

	cfg = vport->adapter->vport_config[vport->idx];

	spin_lock_bh(&cfg->mac_filter_list_lock);
	list_for_each_entry(f, &cfg->user_config.mac_filter_list, list)
		f->remove = true;
	spin_unlock_bh(&cfg->mac_filter_list_lock);

	/* Tell HW to drop everything marked above; synchronous call */
	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
				 false, false);
}
/**
 * idpf_deinit_mac_addr - deinitialize mac address for vport
 * @vport: main vport structure
 *
 * Drops the default MAC address filter from the vport's filter list.
 */
static void idpf_deinit_mac_addr(struct idpf_vport *vport)
{
	struct idpf_vport_config *cfg;
	struct idpf_mac_filter *f;

	cfg = vport->adapter->vport_config[vport->idx];

	spin_lock_bh(&cfg->mac_filter_list_lock);

	f = idpf_find_mac_filter(cfg, vport->default_mac_addr);
	if (f) {
		list_del(&f->list);
		kfree(f);
	}

	spin_unlock_bh(&cfg->mac_filter_list_lock);
}
/**
 * idpf_init_mac_addr - initialize mac address for vport
 * @vport: main vport structure
 * @netdev: pointer to netdev struct associated with this vport
 *
 * Uses the device-provided default MAC when valid; otherwise falls back to
 * a random address (which requires the MACFILTER capability). Returns 0 on
 * success, negative errno on failure.
 */
static int idpf_init_mac_addr(struct idpf_vport *vport,
			      struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_adapter *adapter = vport->adapter;
	int err;
	/* Preferred path: the CP supplied a valid default MAC */
	if (is_valid_ether_addr(vport->default_mac_addr)) {
		eth_hw_addr_set(netdev, vport->default_mac_addr);
		ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
		return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
					   false);
	}
	/* Without MACFILTER capability a random address cannot be programmed */
	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
			     VIRTCHNL2_CAP_MACFILTER)) {
		dev_err(&adapter->pdev->dev,
			"MAC address is not provided and capability is not set\n");
		return -EINVAL;
	}
	eth_hw_addr_random(netdev);
	err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
	if (err)
		return err;
	dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
		 vport->default_mac_addr, netdev->dev_addr);
	/* Remember the random address as the new default */
	ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
	return 0;
}
/**
 * idpf_cfg_netdev - Allocate, configure and register a netdev
 * @vport: main vport structure
 *
 * Returns 0 on success, negative value on failure.
 */
static int idpf_cfg_netdev(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport_config *vport_config;
	netdev_features_t dflt_features;
	netdev_features_t offloads = 0;
	struct idpf_netdev_priv *np;
	struct net_device *netdev;
	u16 idx = vport->idx;
	int err;
	vport_config = adapter->vport_config[idx];
	/* It's possible we already have a netdev allocated and registered for
	 * this vport
	 */
	if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
		/* Reuse the existing netdev; just rebind it to this vport */
		netdev = adapter->netdevs[idx];
		np = netdev_priv(netdev);
		np->vport = vport;
		np->vport_idx = vport->idx;
		np->vport_id = vport->vport_id;
		vport->netdev = netdev;
		return idpf_init_mac_addr(vport, netdev);
	}
	netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
				    vport_config->max_q.max_txq,
				    vport_config->max_q.max_rxq);
	if (!netdev)
		return -ENOMEM;
	vport->netdev = netdev;
	np = netdev_priv(netdev);
	np->vport = vport;
	np->adapter = adapter;
	np->vport_idx = vport->idx;
	np->vport_id = vport->vport_id;
	spin_lock_init(&np->stats_lock);
	err = idpf_init_mac_addr(vport, netdev);
	if (err) {
		free_netdev(vport->netdev);
		vport->netdev = NULL;
		return err;
	}
	/* assign netdev_ops */
	if (idpf_is_queue_model_split(vport->txq_model))
		netdev->netdev_ops = &idpf_netdev_ops_splitq;
	else
		netdev->netdev_ops = &idpf_netdev_ops_singleq;
	/* setup watchdog timeout value to be 5 second */
	netdev->watchdog_timeo = 5 * HZ;
	/* configure default MTU size */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = vport->max_mtu;
	/* Build the default feature set from the negotiated capabilities */
	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA;
	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		dflt_features |= NETIF_F_RXHASH;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
		dflt_features |= NETIF_F_IP_CSUM;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
		dflt_features |= NETIF_F_IPV6_CSUM;
	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
		dflt_features |= NETIF_F_RXCSUM;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
		dflt_features |= NETIF_F_SCTP_CRC;
	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
		dflt_features |= NETIF_F_TSO;
	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
		dflt_features |= NETIF_F_TSO6;
	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
				VIRTCHNL2_CAP_SEG_IPV4_UDP |
				VIRTCHNL2_CAP_SEG_IPV6_UDP))
		dflt_features |= NETIF_F_GSO_UDP_L4;
	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
		offloads |= NETIF_F_GRO_HW;
	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
	if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
		offloads |= NETIF_F_GSO_UDP_TUNNEL	|
			    NETIF_F_GSO_GRE		|
			    NETIF_F_GSO_GRE_CSUM	|
			    NETIF_F_GSO_PARTIAL		|
			    NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			    NETIF_F_GSO_IPXIP4		|
			    NETIF_F_GSO_IPXIP6		|
			    0;
		if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
					 IDPF_CAP_TUNNEL_TX_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		offloads |= NETIF_F_TSO_MANGLEID;
	}
	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
		offloads |= NETIF_F_LOOPBACK;
	netdev->features |= dflt_features;
	netdev->hw_features |= dflt_features | offloads;
	netdev->hw_enc_features |= dflt_features | offloads;
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
	idpf_set_ethtool_ops(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
	/* carrier off on init to avoid Tx hangs */
	netif_carrier_off(netdev);
	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(netdev);
	/* The vport can be arbitrarily released so we need to also track
	 * netdevs in the adapter struct
	 */
	adapter->netdevs[idx] = netdev;
	return 0;
}
/**
 * idpf_get_free_slot - get the next non-NULL location index in array
 * @adapter: adapter in which to look for a free vport slot
 *
 * Returns the first unused vport slot index, or IDPF_NO_FREE_SLOT.
 */
static int idpf_get_free_slot(struct idpf_adapter *adapter)
{
	unsigned int slot;

	for (slot = 0; slot < adapter->max_vports; slot++)
		if (!adapter->vports[slot])
			return slot;

	return IDPF_NO_FREE_SLOT;
}
/**
 * idpf_remove_features - Turn off feature configs
 * @vport: virtual port structure
 *
 * Currently only tears down MAC filters, and only when the MACFILTER
 * capability was negotiated.
 */
static void idpf_remove_features(struct idpf_vport *vport)
{
	if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
			    VIRTCHNL2_CAP_MACFILTER))
		idpf_remove_mac_filters(vport);
}
/**
 * idpf_vport_stop - Disable a vport
 * @vport: vport to disable
 *
 * Quiesces the stack side first, then the device side, and finally tears
 * down interrupts and queue resources. The order of the teardown steps is
 * significant; do not reorder.
 */
static void idpf_vport_stop(struct idpf_vport *vport)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	/* Nothing to do if the vport is already down (or lower) */
	if (np->state <= __IDPF_VPORT_DOWN)
		return;
	netif_carrier_off(vport->netdev);
	netif_tx_disable(vport->netdev);
	idpf_send_disable_vport_msg(vport);
	idpf_send_disable_queues_msg(vport);
	/* Unmap queues from their vectors (false == unmap) */
	idpf_send_map_unmap_queue_vector_msg(vport, false);
	/* Normally we ask for queues in create_vport, but if the number of
	 * initially requested queues have changed, for example via ethtool
	 * set channels, we do delete queues and then add the queues back
	 * instead of deleting and reallocating the vport.
	 */
	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
		idpf_send_delete_queues_msg(vport);
	idpf_remove_features(vport);
	vport->link_up = false;
	idpf_vport_intr_deinit(vport);
	idpf_vport_intr_rel(vport);
	idpf_vport_queues_rel(vport);
	np->state = __IDPF_VPORT_DOWN;
}
/**
 * idpf_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
static int idpf_stop(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;

	/* Removal path handles its own teardown; nothing to do here */
	if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
		return 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idpf_vport_stop(vport);

	idpf_vport_ctrl_unlock(netdev);

	return 0;
}
/**
 * idpf_decfg_netdev - Unregister the netdev
 * @vport: vport for which netdev to be unregistered
 *
 * Unregisters and frees the vport's netdev and clears both the vport and
 * adapter references to it.
 */
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;

	unregister_netdev(vport->netdev);
	free_netdev(vport->netdev);

	vport->netdev = NULL;
	adapter->netdevs[vport->idx] = NULL;
}
/**
* idpf_vport_rel - Delete a vport and free its resources
* @vport: the vport being removed
*/
static void idpf_vport_rel(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_vector_info vec_info;
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
int i;
vport_config = adapter->vport_config[vport->idx];
idpf_deinit_rss(vport);
rss_data = &vport_config->user_config.rss_data;
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
idpf_send_destroy_vport_msg(vport);
/* Set all bits as we dont know on which vc_state the vport vhnl_wq
* is waiting on and wakeup the virtchnl workqueue even if it is
* waiting for the response as we are going down
*/
for (i = 0; i < IDPF_VC_NBITS; i++)
set_bit(i, vport->vc_state);
wake_up(&vport->vchnl_wq);
mutex_destroy(&vport->vc_buf_lock);
/* Clear all the bits */
for (i = 0; i < IDPF_VC_NBITS; i++)
clear_bit(i, vport->vc_state);