Commit d9db383

labbott authored and sfrothwell committed
mm/debug-pagealloc.c: split out page poisoning from debug page_alloc
This is an implementation of page poisoning/sanitization for all arches. It takes advantage of the existing implementation for !ARCH_SUPPORTS_DEBUG_PAGEALLOC arches. This is a different approach than what the Grsecurity patches were taking, but should provide equivalent functionality.

For those who aren't familiar with this, the goal of sanitization is to reduce the severity of use-after-free and uninitialized-data bugs. Memory is cleared on free so any sensitive data is no longer available.

Discussion of sanitization was brought up in a thread about CVEs (lkml.kernel.org/g/<20160119112812.GA10818@mwanda>).

I eventually expect the Kconfig names will want to be changed and/or moved if this is going to be used for security, but that can happen later.

Credit to Mathias Krause for the version in grsecurity.

This patch (of 3):

For architectures that do not have debug page_alloc (!ARCH_SUPPORTS_DEBUG_PAGEALLOC), page poisoning is used instead. Even architectures that do have DEBUG_PAGEALLOC may want to take advantage of the poisoning feature. Separate out page poisoning into its own file. This does not change the default behavior for !ARCH_SUPPORTS_DEBUG_PAGEALLOC.

Credit to Mathias Krause and grsecurity for the original work.

Signed-off-by: Laura Abbott <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Mathias Krause <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Jianyu Zhan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
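To make the intended use of the split-out helpers concrete, the following is a minimal, hypothetical sketch of how an allocator's free and allocation paths could call them. The wrapper names are illustrative only and are not part of this patch; the actual wiring into the buddy allocator is done by the later patches in this series.

    /*
     * Illustrative sketch only -- not code from this patch.  It assumes the
     * prototypes added to <linux/mm.h> below; the wrapper names are
     * hypothetical.
     */
    #include <linux/mm.h>

    static inline void free_path_poison(struct page *page, int numpages)
    {
            if (!page_poisoning_enabled())
                    return;
            /* Fill the freed pages with PAGE_POISON so stale data is wiped. */
            poison_pages(page, numpages);
    }

    static inline void alloc_path_check(struct page *page, int numpages)
    {
            if (!page_poisoning_enabled())
                    return;
            /* Verify the poison pattern survived; warn if it was modified. */
            unpoison_pages(page, numpages);
    }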
1 parent b8d733a commit d9db383

5 files changed: 164 additions and 121 deletions

Documentation/kernel-parameters.txt

+5 -0
@@ -2731,6 +2731,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			we can turn it on.
 			on: enable the feature
 
+	page_poison=	[KNL] Boot-time parameter changing the state of
+			poisoning on the buddy allocator.
+			off: turn off poisoning
+			on: turn on poisoning
+
 	panic=		[KNL] Kernel behaviour on panic: delay <timeout>
 			timeout > 0: seconds before rebooting
 			timeout = 0: wait forever
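Since the parameter is handled via early_param() (see mm/page_poison.c below), enabling poisoning on a kernel built with CONFIG_PAGE_POISONING is just a matter of appending it to the kernel command line, for example:

    page_poison=on

With this patch the default follows !ARCH_SUPPORTS_DEBUG_PAGEALLOC, so architectures without DEBUG_PAGEALLOC support keep poisoning wanted by default unless page_poison=off is passed; the exact bootloader configuration used to pass the parameter will vary.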

include/linux/mm.h

+10 -0
@@ -2175,6 +2175,16 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
 			       unsigned long size, pte_fn_t fn, void *data);
 
 
+#ifdef CONFIG_PAGE_POISONING
+extern void poison_pages(struct page *page, int n);
+extern void unpoison_pages(struct page *page, int n);
+extern bool page_poisoning_enabled(void);
+#else
+static inline void poison_pages(struct page *page, int n) { }
+static inline void unpoison_pages(struct page *page, int n) { }
+static inline bool page_poisoning_enabled(void) { return false; }
+#endif
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern bool _debug_pagealloc_enabled;
 extern void __kernel_map_pages(struct page *page, int numpages, int enable);

mm/Makefile

+4 -1
@@ -48,7 +48,10 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
-obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
+ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
+obj-$(CONFIG_DEBUG_PAGEALLOC) += debug-pagealloc.o
+endif
+obj-$(CONFIG_PAGE_POISONING) += page_poison.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
 obj-$(CONFIG_KMEMCHECK) += kmemcheck.o

mm/debug-pagealloc.c

+1 -120
@@ -6,128 +6,9 @@
 #include <linux/poison.h>
 #include <linux/ratelimit.h>
 
-static bool page_poisoning_enabled __read_mostly;
-
-static bool need_page_poisoning(void)
-{
-	if (!debug_pagealloc_enabled())
-		return false;
-
-	return true;
-}
-
-static void init_page_poisoning(void)
-{
-	if (!debug_pagealloc_enabled())
-		return;
-
-	page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
-	.need = need_page_poisoning,
-	.init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline bool page_poison(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static void poison_page(struct page *page)
-{
-	void *addr = kmap_atomic(page);
-
-	set_page_poison(page);
-	memset(addr, PAGE_POISON, PAGE_SIZE);
-	kunmap_atomic(addr);
-}
-
-static void poison_pages(struct page *page, int n)
-{
-	int i;
-
-	for (i = 0; i < n; i++)
-		poison_page(page + i);
-}
-
-static bool single_bit_flip(unsigned char a, unsigned char b)
-{
-	unsigned char error = a ^ b;
-
-	return error && !(error & (error - 1));
-}
-
-static void check_poison_mem(unsigned char *mem, size_t bytes)
-{
-	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
-	unsigned char *start;
-	unsigned char *end;
-
-	start = memchr_inv(mem, PAGE_POISON, bytes);
-	if (!start)
-		return;
-
-	for (end = mem + bytes - 1; end > start; end--) {
-		if (*end != PAGE_POISON)
-			break;
-	}
-
-	if (!__ratelimit(&ratelimit))
-		return;
-	else if (start == end && single_bit_flip(*start, PAGE_POISON))
-		printk(KERN_ERR "pagealloc: single bit error\n");
-	else
-		printk(KERN_ERR "pagealloc: memory corruption\n");
-
-	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
-			end - start + 1, 1);
-	dump_stack();
-}
-
-static void unpoison_page(struct page *page)
-{
-	void *addr;
-
-	if (!page_poison(page))
-		return;
-
-	addr = kmap_atomic(page);
-	check_poison_mem(addr, PAGE_SIZE);
-	clear_page_poison(page);
-	kunmap_atomic(addr);
-}
-
-static void unpoison_pages(struct page *page, int n)
-{
-	int i;
-
-	for (i = 0; i < n; i++)
-		unpoison_page(page + i);
-}
-
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	if (!page_poisoning_enabled)
+	if (!page_poisoning_enabled())
 		return;
 
 	if (enable)

mm/page_poison.c

+144 -0
@@ -0,0 +1,144 @@
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/page_ext.h>
+#include <linux/poison.h>
+#include <linux/ratelimit.h>
+
+static bool __page_poisoning_enabled __read_mostly;
+static bool want_page_poisoning __read_mostly =
+	!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC);
+
+static int early_page_poison_param(char *buf)
+{
+	if (!buf)
+		return -EINVAL;
+
+	if (strcmp(buf, "on") == 0)
+		want_page_poisoning = true;
+	else if (strcmp(buf, "off") == 0)
+		want_page_poisoning = false;
+
+	return 0;
+}
+early_param("page_poison", early_page_poison_param);
+
+bool page_poisoning_enabled(void)
+{
+	return __page_poisoning_enabled;
+}
+
+static bool need_page_poisoning(void)
+{
+	return want_page_poisoning;
+}
+
+static void init_page_poisoning(void)
+{
+	if (!want_page_poisoning)
+		return;
+
+	__page_poisoning_enabled = true;
+}
+
+struct page_ext_operations page_poisoning_ops = {
+	.need = need_page_poisoning,
+	.init = init_page_poisoning,
+};
+
+static inline void set_page_poison(struct page *page)
+{
+	struct page_ext *page_ext;
+
+	page_ext = lookup_page_ext(page);
+	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+}
+
+static inline void clear_page_poison(struct page *page)
+{
+	struct page_ext *page_ext;
+
+	page_ext = lookup_page_ext(page);
+	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+}
+
+static inline bool page_poison(struct page *page)
+{
+	struct page_ext *page_ext;
+
+	page_ext = lookup_page_ext(page);
+	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+}
+
+static void poison_page(struct page *page)
+{
+	void *addr = kmap_atomic(page);
+
+	set_page_poison(page);
+	memset(addr, PAGE_POISON, PAGE_SIZE);
+	kunmap_atomic(addr);
+}
+
+void poison_pages(struct page *page, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		poison_page(page + i);
+}
+
+static bool single_bit_flip(unsigned char a, unsigned char b)
+{
+	unsigned char error = a ^ b;
+
+	return error && !(error & (error - 1));
+}
+
+static void check_poison_mem(unsigned char *mem, size_t bytes)
+{
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
+	unsigned char *start;
+	unsigned char *end;
+
+	start = memchr_inv(mem, PAGE_POISON, bytes);
+	if (!start)
+		return;
+
+	for (end = mem + bytes - 1; end > start; end--) {
+		if (*end != PAGE_POISON)
+			break;
+	}
+
+	if (!__ratelimit(&ratelimit))
+		return;
+	else if (start == end && single_bit_flip(*start, PAGE_POISON))
+		printk(KERN_ERR "pagealloc: single bit error\n");
+	else
+		printk(KERN_ERR "pagealloc: memory corruption\n");
+
+	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
+			end - start + 1, 1);
+	dump_stack();
+}
+
+static void unpoison_page(struct page *page)
+{
+	void *addr;
+
+	if (!page_poison(page))
+		return;
+
+	addr = kmap_atomic(page);
+	check_poison_mem(addr, PAGE_SIZE);
+	clear_page_poison(page);
+	kunmap_atomic(addr);
+}
+
+void unpoison_pages(struct page *page, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		unpoison_page(page + i);
+}
