@@ -79,8 +79,23 @@ struct io_tlb_slot {
 static bool swiotlb_force_bounce;
 static bool swiotlb_force_disable;
 
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+static void swiotlb_dyn_alloc(struct work_struct *work);
+
+static struct io_tlb_mem io_tlb_default_mem = {
+        .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
+        .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
+        .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
+                                        swiotlb_dyn_alloc),
+};
+
+#else  /* !CONFIG_SWIOTLB_DYNAMIC */
+
 static struct io_tlb_mem io_tlb_default_mem;
 
+#endif  /* CONFIG_SWIOTLB_DYNAMIC */
+
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
 static unsigned long default_nareas;
 
@@ -278,6 +293,23 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
         return;
 }
 
+/**
+ * add_mem_pool() - add a memory pool to the allocator
+ * @mem:   Software IO TLB allocator.
+ * @pool:  Memory pool to be added.
+ */
+static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
+{
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+        spin_lock(&mem->lock);
+        list_add_rcu(&pool->node, &mem->pools);
+        mem->nslabs += pool->nslabs;
+        spin_unlock(&mem->lock);
+#else
+        mem->nslabs = pool->nslabs;
+#endif
+}
+
 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
                 unsigned int flags,
                 int (*remap)(void *tlb, unsigned long nslabs))
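
Taken together, the statically initialized io_tlb_default_mem above and this add_mem_pool() helper turn the allocator into a lock-protected, RCU-readable list of pools plus a running slab total. A minimal user-space sketch of that writer-side shape (struct allocator, struct pool, add_pool() and the pthread lock are illustrative stand-ins, not kernel code):

#include <pthread.h>
#include <stdio.h>

struct pool {
        unsigned long nslabs;      /* slots contributed by this pool */
        struct pool *next;         /* stands in for the RCU list linkage */
};

struct allocator {
        pthread_mutex_t lock;      /* stands in for mem->lock */
        struct pool *pools;        /* stands in for mem->pools */
        unsigned long nslabs;      /* running total across all pools */
};

/* Compile-time initialization: usable from the very first call. */
static struct allocator def_alloc = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .pools = NULL,
};

static void add_pool(struct allocator *mem, struct pool *pool)
{
        pthread_mutex_lock(&mem->lock);
        pool->next = mem->pools;   /* link the new pool onto the list */
        mem->pools = pool;
        mem->nslabs += pool->nslabs;
        pthread_mutex_unlock(&mem->lock);
}

int main(void)
{
        static struct pool boot_pool = { .nslabs = 2048 };

        add_pool(&def_alloc, &boot_pool);
        printf("total slabs: %lu\n", def_alloc.nslabs);
        return 0;
}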
@@ -375,7 +407,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 
         swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
                                  default_nareas);
-        io_tlb_default_mem.nslabs = nslabs;
+        add_mem_pool(&io_tlb_default_mem, mem);
 
         if (flags & SWIOTLB_VERBOSE)
                 swiotlb_print_info();
@@ -474,7 +506,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                               (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
         swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
                                  nareas);
-        io_tlb_default_mem.nslabs = nslabs;
+        add_mem_pool(&io_tlb_default_mem, mem);
 
         swiotlb_print_info();
         return 0;
@@ -625,53 +657,94 @@ static void swiotlb_free_tlb(void *vaddr, size_t bytes)
 /**
  * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
  * @dev:        Device for which a memory pool is allocated.
- * @nslabs:     Desired number of slabs.
+ * @minslabs:   Minimum number of slabs.
+ * @nslabs:     Desired (maximum) number of slabs.
+ * @nareas:     Number of areas.
  * @phys_limit: Maximum DMA buffer physical address.
  * @gfp:        GFP flags for the allocations.
  *
- * Allocate and initialize a new IO TLB memory pool.
+ * Allocate and initialize a new IO TLB memory pool. The actual number of
+ * slabs may be reduced if allocation of @nslabs fails. If even
+ * @minslabs cannot be allocated, this function fails.
  *
  * Return: New memory pool, or %NULL on allocation failure.
  */
 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
-                unsigned int nslabs, u64 phys_limit, gfp_t gfp)
+                unsigned long minslabs, unsigned long nslabs,
+                unsigned int nareas, u64 phys_limit, gfp_t gfp)
 {
         struct io_tlb_pool *pool;
+        unsigned int slot_order;
         struct page *tlb;
         size_t pool_size;
         size_t tlb_size;
 
-        pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), 1) +
-                    array_size(sizeof(*pool->slots), nslabs);
+        pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
         pool = kzalloc(pool_size, gfp);
         if (!pool)
                 goto error;
         pool->areas = (void *)pool + sizeof(*pool);
-        pool->slots = (void *)pool->areas + sizeof(*pool->areas);
 
         tlb_size = nslabs << IO_TLB_SHIFT;
-        tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp);
-        if (!tlb)
-                goto error_tlb;
+        while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
+                if (nslabs <= minslabs)
+                        goto error_tlb;
+                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+                nareas = limit_nareas(nareas, nslabs);
+                tlb_size = nslabs << IO_TLB_SHIFT;
+        }
 
-        swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, 1);
+        slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
+        pool->slots = (struct io_tlb_slot *)
+                __get_free_pages(gfp, slot_order);
+        if (!pool->slots)
+                goto error_slots;
+
+        swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
         return pool;
 
+error_slots:
+        swiotlb_free_tlb(page_address(tlb), tlb_size);
 error_tlb:
         kfree(pool);
 error:
         return NULL;
 }
 
+/**
+ * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
+ * @work:       Pointer to dyn_alloc in struct io_tlb_mem.
+ */
+static void swiotlb_dyn_alloc(struct work_struct *work)
+{
+        struct io_tlb_mem *mem =
+                container_of(work, struct io_tlb_mem, dyn_alloc);
+        struct io_tlb_pool *pool;
+
+        pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
+                                  default_nareas, mem->phys_limit, GFP_KERNEL);
+        if (!pool) {
+                pr_warn_ratelimited("Failed to allocate new pool");
+                return;
+        }
+
+        add_mem_pool(mem, pool);
+
+        /* Pairs with smp_rmb() in swiotlb_find_pool(). */
+        smp_wmb();
+}
+
 /**
  * swiotlb_dyn_free() - RCU callback to free a memory pool
  * @rcu:        RCU head in the corresponding struct io_tlb_pool.
  */
 static void swiotlb_dyn_free(struct rcu_head *rcu)
 {
         struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
+        size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
         size_t tlb_size = pool->end - pool->start;
 
+        free_pages((unsigned long)pool->slots, get_order(slots_size));
         swiotlb_free_tlb(pool->vaddr, tlb_size);
         kfree(pool);
 }
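
The retry loop added to swiotlb_alloc_pool() is the key behavioural change: start from the desired size, halve it (and re-limit the area count) on failure, and give up only once the minimum is reached. A compilable user-space sketch of the same shrink-until-minimum idea, with made-up names (try_alloc, MIN_BYTES) and malloc() standing in for swiotlb_alloc_tlb():

#include <stdio.h>
#include <stdlib.h>

#define MIN_BYTES (64 * 1024)           /* the floor, like @minslabs */

static void *try_alloc(size_t *len)
{
        size_t want = *len;
        void *buf;

        while (!(buf = malloc(want))) {
                if (want <= MIN_BYTES)
                        return NULL;    /* even the minimum failed */
                want /= 2;              /* the kernel also re-aligns to
                                           IO_TLB_SEGSIZE here */
        }
        *len = want;                    /* report what was actually obtained */
        return buf;
}

int main(void)
{
        size_t len = 128u << 20;        /* ask for 128 MiB */
        void *buf = try_alloc(&len);

        if (buf)
                printf("got %zu bytes\n", len);
        else
                printf("allocation failed even at the minimum\n");
        free(buf);
        return 0;
}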
@@ -689,15 +762,19 @@ static void swiotlb_dyn_free(struct rcu_head *rcu)
 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
 {
         struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
-        struct io_tlb_pool *pool = &mem->defpool;
-
-        if (paddr >= pool->start && paddr < pool->end)
-                return pool;
+        struct io_tlb_pool *pool;
 
-        /* Pairs with smp_wmb() in swiotlb_find_slots(). */
+        /* Pairs with smp_wmb() in swiotlb_find_slots() and
+         * swiotlb_dyn_alloc(), which modify the RCU lists.
+         */
         smp_rmb();
 
         rcu_read_lock();
+        list_for_each_entry_rcu(pool, &mem->pools, node) {
+                if (paddr >= pool->start && paddr < pool->end)
+                        goto out;
+        }
+
         list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
                 if (paddr >= pool->start && paddr < pool->end)
                         goto out;
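
The rewritten comment above pins down the ordering contract: a pool must be fully initialized and linked before a reader can observe it, which is why the writers end with smp_wmb() and swiotlb_find_pool() starts with smp_rmb(). A user-space analogue of that publish/observe pairing using C11 release/acquire (struct pool, publish_pool() and find_pool() are hypothetical; this is not the kernel's RCU machinery, and writers are assumed serialized the way mem->lock serializes them in the patch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pool {
        unsigned long start, end;       /* written before publication */
        struct pool *next;
};

static _Atomic(struct pool *) pools;    /* head of the published list */

static void publish_pool(unsigned long start, unsigned long end)
{
        struct pool *p = malloc(sizeof(*p));

        if (!p)
                return;
        p->start = start;
        p->end = end;
        p->next = atomic_load_explicit(&pools, memory_order_relaxed);
        /* Release: all stores above are visible to acquiring readers. */
        atomic_store_explicit(&pools, p, memory_order_release);
}

static struct pool *find_pool(unsigned long addr)
{
        /* Acquire pairs with the release store in publish_pool(). */
        struct pool *p = atomic_load_explicit(&pools, memory_order_acquire);

        for (; p; p = p->next)
                if (addr >= p->start && addr < p->end)
                        return p;
        return NULL;
}

int main(void)
{
        publish_pool(0x1000, 0x2000);
        printf("found: %d\n", find_pool(0x1800) != NULL);
        return 0;
}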
@@ -1046,18 +1123,24 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
         u64 phys_limit;
         int index;
 
-        pool = &mem->defpool;
-        index = swiotlb_pool_find_slots(dev, pool, orig_addr,
-                                        alloc_size, alloc_align_mask);
-        if (index >= 0)
-                goto found;
-
+        rcu_read_lock();
+        list_for_each_entry_rcu(pool, &mem->pools, node) {
+                index = swiotlb_pool_find_slots(dev, pool, orig_addr,
+                                                alloc_size, alloc_align_mask);
+                if (index >= 0) {
+                        rcu_read_unlock();
+                        goto found;
+                }
+        }
+        rcu_read_unlock();
         if (!mem->can_grow)
                 return -1;
 
+        schedule_work(&mem->dyn_alloc);
+
         nslabs = nr_slots(alloc_size);
         phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
-        pool = swiotlb_alloc_pool(dev, nslabs, phys_limit,
+        pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
                                   GFP_NOWAIT | __GFP_NOWARN);
         if (!pool)
                 return -1;
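
Two things happen above when no existing pool has room: schedule_work() kicks the swiotlb_dyn_alloc() worker so a full-sized pool is added in the background with GFP_KERNEL, while the current mapping is satisfied immediately from a small transient pool allocated with GFP_NOWAIT. A pthread-based sketch of that kick-and-carry-on pattern (grower(), request_grow() and the condition variable are illustrative; the kernel uses a workqueue, not a dedicated thread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static bool grow_requested;

static void *grower(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!grow_requested)
                pthread_cond_wait(&kick, &lock);
        pthread_mutex_unlock(&lock);
        /* May sleep: allocate a full-sized pool (GFP_KERNEL in the kernel). */
        puts("grower: adding a full-sized pool");
        return NULL;
}

static void request_grow(void)
{
        /* Never sleeps: record the request and wake the grower. */
        pthread_mutex_lock(&lock);
        grow_requested = true;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, grower, NULL);
        request_grow();         /* fast path: kick and carry on */
        puts("fast path: using a small transient pool meanwhile");
        pthread_join(t, NULL);
        return 0;
}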
@@ -1141,7 +1224,19 @@ static unsigned long mem_pool_used(struct io_tlb_pool *pool)
  */
 static unsigned long mem_used(struct io_tlb_mem *mem)
 {
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+        struct io_tlb_pool *pool;
+        unsigned long used = 0;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(pool, &mem->pools, node)
+                used += mem_pool_used(pool);
+        rcu_read_unlock();
+
+        return used;
+#else
         return mem_pool_used(&mem->defpool);
+#endif
 }
 
 #endif /* CONFIG_DEBUG_FS */
@@ -1562,7 +1657,10 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
                                  false, nareas);
         mem->force_bounce = true;
         mem->for_alloc = true;
-        mem->nslabs = nslabs;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+        spin_lock_init(&mem->lock);
+#endif
+        add_mem_pool(mem, pool);
 
         rmem->priv = mem;
 