Skip to content

Commit 41e94a8

Browse files
Christoph Hellwig authored and djbw (Dan Williams) committed
add devm_memremap_pages
This behaves like devm_memremap except that it ensures we have page
structures available that can back the region.

Signed-off-by: Christoph Hellwig <[email protected]>
[djbw: catch attempts to remap RAM, drop flags]
Signed-off-by: Dan Williams <[email protected]>
1 parent 033fbae commit 41e94a8

File tree

2 files changed

+73
-0
lines changed

2 files changed

+73
-0
lines changed

include/linux/io.h

+20
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,13 @@
2020

2121
#include <linux/types.h>
2222
#include <linux/init.h>
23+
#include <linux/bug.h>
24+
#include <linux/err.h>
2325
#include <asm/io.h>
2426
#include <asm/page.h>
2527

2628
struct device;
29+
struct resource;
2730

2831
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
2932
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -84,6 +87,23 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
8487
size_t size, unsigned long flags);
8588
void devm_memunmap(struct device *dev, void *addr);
8689

/*
 * NOTE(review): __devm_memremap_pages is declared here but no definition is
 * visible in this change — confirm a definition exists elsewhere in the tree.
 */
void *__devm_memremap_pages(struct device *dev, struct resource *res);

#ifdef CONFIG_ZONE_DEVICE
/*
 * Remap @res like devm_memremap(), additionally ensuring struct pages are
 * available to back the region.  Device-managed: torn down automatically
 * when @dev is released.  Returns a kernel virtual address or ERR_PTR().
 */
void *devm_memremap_pages(struct device *dev, struct resource *res);
#else
static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled, this requires callers to fall
	 * back to plain devm_memremap() based on config
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}
#endif
87107
/*
88108
* Some systems do not have legacy ISA devices.
89109
* /dev/port is not a valid interface on these systems.

kernel/memremap.c

+53
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/types.h>
1515
#include <linux/io.h>
1616
#include <linux/mm.h>
17+
#include <linux/memory_hotplug.h>
1718

1819
#ifndef ioremap_cache
1920
/* temporary while we convert existing ioremap_cache users to memremap */
@@ -135,3 +136,55 @@ void devm_memunmap(struct device *dev, void *addr)
135136
memunmap(addr);
136137
}
137138
EXPORT_SYMBOL(devm_memunmap);
139+
140+
#ifdef CONFIG_ZONE_DEVICE
141+
/*
 * devres payload for devm_memremap_pages(): a private copy of the remapped
 * resource, kept so the release callback can undo the arch mapping after
 * the caller's @res may no longer be valid.
 */
struct page_map {
	struct resource res;
};
144+
145+
/*
 * devres release callback for devm_memremap_pages().  Runs when @dev is
 * released; @dev itself is unused.  @res is the devres data, i.e. the
 * struct page_map allocated in devm_memremap_pages().
 */
static void devm_memremap_pages_release(struct device *dev, void *res)
{
	struct page_map *page_map = res;

	/* pages are dead and unused, undo the arch mapping */
	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
}
152+
153+
void *devm_memremap_pages(struct device *dev, struct resource *res)
154+
{
155+
int is_ram = region_intersects(res->start, resource_size(res),
156+
"System RAM");
157+
struct page_map *page_map;
158+
int error, nid;
159+
160+
if (is_ram == REGION_MIXED) {
161+
WARN_ONCE(1, "%s attempted on mixed region %pr\n",
162+
__func__, res);
163+
return ERR_PTR(-ENXIO);
164+
}
165+
166+
if (is_ram == REGION_INTERSECTS)
167+
return __va(res->start);
168+
169+
page_map = devres_alloc(devm_memremap_pages_release,
170+
sizeof(*page_map), GFP_KERNEL);
171+
if (!page_map)
172+
return ERR_PTR(-ENOMEM);
173+
174+
memcpy(&page_map->res, res, sizeof(*res));
175+
176+
nid = dev_to_node(dev);
177+
if (nid < 0)
178+
nid = 0;
179+
180+
error = arch_add_memory(nid, res->start, resource_size(res), true);
181+
if (error) {
182+
devres_free(page_map);
183+
return ERR_PTR(error);
184+
}
185+
186+
devres_add(dev, page_map);
187+
return __va(res->start);
188+
}
189+
EXPORT_SYMBOL(devm_memremap_pages);
190+
#endif /* CONFIG_ZONE_DEVICE */

0 commit comments

Comments
 (0)