
Commit 8f9a0d8

mm/memory_hotplug: Shrink zones when offlining memory
We currently try to shrink a single zone when removing memory. We use
the zone of the first page of the memory we are removing. If that
memmap was never initialized (e.g., memory was never onlined), we will
read garbage and can trigger kernel BUGs (due to a stale pointer):

:/# [ 23.912993] BUG: unable to handle page fault for address: 000000000000353d
[ 23.914219] #PF: supervisor write access in kernel mode
[ 23.915199] #PF: error_code(0x0002) - not-present page
[ 23.916160] PGD 0 P4D 0
[ 23.916627] Oops: 0002 [#1] SMP PTI
[ 23.917256] CPU: 1 PID: 7 Comm: kworker/u8:0 Not tainted 5.3.0-rc5-next-20190820+ #317
[ 23.918900] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.4
[ 23.921194] Workqueue: kacpi_hotplug acpi_hotplug_work_fn
[ 23.922249] RIP: 0010:clear_zone_contiguous+0x5/0x10
[ 23.923173] Code: 48 89 c6 48 89 c3 e8 2a fe ff ff 48 85 c0 75 cf 5b 5d c3 c6 85 fd 05 00 00 01 5b 5d c3 0f 1f 840
[ 23.926876] RSP: 0018:ffffad2400043c98 EFLAGS: 00010246
[ 23.927928] RAX: 0000000000000000 RBX: 0000000200000000 RCX: 0000000000000000
[ 23.929458] RDX: 0000000000200000 RSI: 0000000000140000 RDI: 0000000000002f40
[ 23.930899] RBP: 0000000140000000 R08: 0000000000000000 R09: 0000000000000001
[ 23.932362] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000140000
[ 23.933603] R13: 0000000000140000 R14: 0000000000002f40 R15: ffff9e3e7aff3680
[ 23.934913] FS:  0000000000000000(0000) GS:ffff9e3e7bb00000(0000) knlGS:0000000000000000
[ 23.936294] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 23.937481] CR2: 000000000000353d CR3: 0000000058610000 CR4: 00000000000006e0
[ 23.938687] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 23.939889] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 23.941168] Call Trace:
[ 23.941580]  __remove_pages+0x4b/0x640
[ 23.942303]  ? mark_held_locks+0x49/0x70
[ 23.943149]  arch_remove_memory+0x63/0x8d
[ 23.943921]  try_remove_memory+0xdb/0x130
[ 23.944766]  ? walk_memory_blocks+0x7f/0x9e
[ 23.945616]  __remove_memory+0xa/0x11
[ 23.946274]  acpi_memory_device_remove+0x70/0x100
[ 23.947308]  acpi_bus_trim+0x55/0x90
[ 23.947914]  acpi_device_hotplug+0x227/0x3a0
[ 23.948714]  acpi_hotplug_work_fn+0x1a/0x30
[ 23.949433]  process_one_work+0x221/0x550
[ 23.950190]  worker_thread+0x50/0x3b0
[ 23.950993]  kthread+0x105/0x140
[ 23.951644]  ? process_one_work+0x550/0x550
[ 23.952508]  ? kthread_park+0x80/0x80
[ 23.953367]  ret_from_fork+0x3a/0x50
[ 23.954025] Modules linked in:
[ 23.954613] CR2: 000000000000353d
[ 23.955248] ---[ end trace 93d982b1fb3e1a69 ]---

Instead, shrink the zones when offlining memory or when onlining failed.
Introduce and use remove_pfn_range_from_zone() for that. We now properly
shrink the zones, even if we have DIMMs whereby
- Some memory blocks fall into no zone (never onlined)
- Some memory blocks fall into multiple zones (offlined+re-onlined)
- Multiple memory blocks fall into different zones

Drop the zone parameter (with a potentially dubious value) from
__remove_pages() and __remove_section().
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Fenghua Yu <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Yoshinori Sato <[email protected]>
Cc: Rich Felker <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: [email protected]
Cc: Andrew Morton <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Steve Capper <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Jun Yao <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: "Matthew Wilcox (Oracle)" <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: "Aneesh Kumar K.V" <[email protected]>
Cc: Pavel Tatashin <[email protected]>
Cc: Gerald Schaefer <[email protected]>
Cc: Halil Pasic <[email protected]>
Cc: Tom Lendacky <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Masahiro Yamada <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Wei Yang <[email protected]>
Cc: Qian Cai <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Logan Gunthorpe <[email protected]>
Cc: Ira Weiny <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Fixes: d0dc12e ("mm/memory_hotplug: optimize memory hotplug")
Signed-off-by: David Hildenbrand <[email protected]>
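
For context (a sketch, not part of the commit itself): the per-file diffs below remove the following pattern from the arch_remove_memory() implementations. The variable names mirror the ones used in the diffs; the comments spell out why the old pattern is unsafe.

/* Pre-patch pattern in arch_remove_memory() (see the diffs below). */
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages  = size >> PAGE_SHIFT;

/*
 * If this memory was never onlined, the memmap backing start_pfn was
 * never initialized, so page_zone() decodes garbage from page->flags
 * and returns a stale/bogus zone pointer ...
 */
struct zone *zone = page_zone(pfn_to_page(start_pfn));

/*
 * ... which the old __remove_pages() then dereferenced via
 * clear_zone_contiguous(zone), the faulting function in the oops above.
 */
__remove_pages(zone, start_pfn, nr_pages, altmap);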
1 parent a6f1cbe commit 8f9a0d8

10 files changed: 29 additions and 39 deletions


arch/arm64/mm/mmu.c (+1, -3)

@@ -1069,7 +1069,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
 
 	/*
 	 * FIXME: Cleanup page tables (also in arch_add_memory() in case
@@ -1078,7 +1077,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 	 * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
 	 * unlocked yet.
 	 */
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif

arch/ia64/mm/init.c (+1, -3)

@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif

arch/powerpc/mm/mem.c (+1, -2)

@@ -130,10 +130,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
 	int ret;
 
-	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 
 	/* Remove htab bolted mappings for this section of memory */
 	start = (unsigned long)__va(start);

arch/s390/mm/init.c (+1, -3)

@@ -291,10 +291,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 	vmem_remove_mapping(start, size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */

arch/sh/mm/init.c (+1, -3)

@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */

arch/x86/mm/init_32.c (+1, -3)

@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
 

arch/x86/mm/init_64.c (+1, -3)

@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
-	struct zone *zone = page_zone(page);
 
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 	kernel_physical_mapping_remove(start, start + size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */

include/linux/memory_hotplug.h (+5, -2)

@@ -123,8 +123,8 @@ static inline bool movable_node_is_enabled(void)
 
 extern void arch_remove_memory(int nid, u64 start, u64 size,
 			       struct vmem_altmap *altmap);
-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
-			   unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+			   struct vmem_altmap *altmap);
 
 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -343,6 +343,9 @@ extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+				       unsigned long start_pfn,
+				       unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern int sparse_add_section(int nid, unsigned long pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);

mm/memory_hotplug.c (+16, -15)

@@ -455,8 +455,9 @@ static void update_pgdat_span(struct pglist_data *pgdat)
 	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }
 
-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
-			  unsigned long nr_pages)
+void __ref remove_pfn_range_from_zone(struct zone *zone,
+				      unsigned long start_pfn,
+				      unsigned long nr_pages)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	unsigned long flags;
@@ -469,28 +470,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
 	if (zone_idx(zone) == ZONE_DEVICE)
 		return;
 
+	clear_zone_contiguous(zone);
+
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
 	update_pgdat_span(pgdat);
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
+	set_zone_contiguous(zone);
 }
 
-static void __remove_section(struct zone *zone, unsigned long pfn,
-			     unsigned long nr_pages, unsigned long map_offset,
-			     struct vmem_altmap *altmap)
+static void __remove_section(unsigned long pfn, unsigned long nr_pages,
+			     unsigned long map_offset,
+			     struct vmem_altmap *altmap)
 {
 	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
 
 	if (WARN_ON_ONCE(!valid_section(ms)))
 		return;
 
-	__remove_zone(zone, pfn, nr_pages);
 	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
 }
 
 /**
- * __remove_pages() - remove sections of pages from a zone
- * @zone: zone from which pages need to be removed
+ * __remove_pages() - remove sections of pages
  * @pfn: starting pageframe (must be aligned to start of a section)
  * @nr_pages: number of pages to remove (must be multiple of section size)
  * @altmap: alternative device page map or %NULL if default memmap is used
@@ -500,16 +503,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
  * sure that pages are marked reserved and zones are adjust properly by
  * calling offline_pages().
  */
-void __remove_pages(struct zone *zone, unsigned long pfn,
-		    unsigned long nr_pages, struct vmem_altmap *altmap)
+void __remove_pages(unsigned long pfn, unsigned long nr_pages,
+		    struct vmem_altmap *altmap)
 {
 	unsigned long map_offset = 0;
 	unsigned long nr, start_sec, end_sec;
 
 	map_offset = vmem_altmap_offset(altmap);
 
-	clear_zone_contiguous(zone);
-
 	if (check_pfn_span(pfn, nr_pages, "remove"))
 		return;
 
@@ -521,13 +522,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
 		cond_resched();
 		pfns = min(nr_pages, PAGES_PER_SECTION
 				- (pfn & ~PAGE_SECTION_MASK));
-		__remove_section(zone, pfn, pfns, map_offset, altmap);
+		__remove_section(pfn, pfns, map_offset, altmap);
 		pfn += pfns;
 		nr_pages -= pfns;
 		map_offset = 0;
 	}
-
-	set_zone_contiguous(zone);
 }
 
 int set_online_page_callback(online_page_callback_t callback)
@@ -839,6 +838,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 		 (unsigned long long) pfn << PAGE_SHIFT,
 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
 	memory_notify(MEM_CANCEL_ONLINE, &arg);
+	remove_pfn_range_from_zone(zone, pfn, nr_pages);
 	mem_hotplug_done();
 	return ret;
 }
@@ -1585,6 +1585,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	writeback_set_ratelimit();
 
 	memory_notify(MEM_OFFLINE, &arg);
+	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
 	mem_hotplug_done();
 	return 0;
 

mm/memremap.c (+1, -2)

@@ -140,8 +140,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 
 	mem_hotplug_begin();
 	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-		__remove_pages(page_zone(pfn_to_page(start_pfn)), start_pfn,
-			       nr_pages, NULL);
+		__remove_pages(start_pfn, nr_pages, NULL);
 	} else {
 		arch_remove_memory(nid, res->start, resource_size(res),
 				   pgmap_altmap(pgmap));