blob: 5ea5da9e8a8d836ae96d9d3f3265bbb463c36291
1 | /* |
2 | * CFI (Control Flow Integrity) error and slowpath handling |
3 | * |
4 | * Copyright (C) 2017 Google, Inc. |
5 | */ |
6 | |
7 | #include <linux/gfp.h> |
8 | #include <linux/module.h> |
9 | #include <linux/printk.h> |
10 | #include <linux/ratelimit.h> |
11 | #include <linux/rcupdate.h> |
12 | #include <linux/spinlock.h> |
13 | #include <asm/bug.h> |
14 | #include <asm/cacheflush.h> |
15 | #include <asm/memory.h> |
16 | |
/*
 * Compiler-defined handler names.
 *
 * The Clang CFI instrumentation emits calls to fixed symbol names; which
 * set it emits depends on whether CFI runs in permissive (diagnostic) or
 * enforcing (abort) mode, so the kernel-side implementations below are
 * given the matching name via these macros.
 */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler __ubsan_handle_cfi_check_fail
#define cfi_slowpath_handler __cfi_slowpath_diag
#else /* enforcing */
#define cfi_failure_handler __ubsan_handle_cfi_check_fail_abort
#define cfi_slowpath_handler __cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */
25 | |
/*
 * Report a control-flow-integrity violation whose call target was @ptr.
 *
 * Permissive mode: log a ratelimited warning and continue execution.
 * Enforcing mode: log the target and halt via BUG().
 */
static inline void handle_cfi_failure(void *ptr)
{
#ifdef CONFIG_CFI_PERMISSIVE
	WARN_RATELIMIT(1, "CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
#else
	pr_err("CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
	BUG();
#endif
}
35 | |
36 | #ifdef CONFIG_MODULES |
37 | #ifdef CONFIG_CFI_CLANG_SHADOW |
/*
 * Address range currently covered by the shadow, kept in both raw
 * address and page-number form so lookups avoid repeated shifting.
 */
struct shadow_range {
	/* Module address range */
	unsigned long mod_min_addr;
	unsigned long mod_max_addr;
	/* Module page range */
	unsigned long min_page;
	unsigned long max_page;
};
46 | |
/* Allocation order for the shadow: SHADOW_PAGES contiguous pages */
#define SHADOW_ORDER 1
#define SHADOW_PAGES (1 << SHADOW_ORDER)
/*
 * Number of u16 slots that fit in the allocation after the
 * struct shadow_range header at the front of struct cfi_shadow.
 */
#define SHADOW_SIZE \
	((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
/* Marker for a slot with no recorded __cfi_check page */
#define SHADOW_INVALID 0xFFFF
52 | |
/*
 * The shadow itself: a header describing the covered range followed by
 * one u16 per covered page. Each valid slot holds the shadow index of
 * the page containing the owning module's __cfi_check function.
 */
struct cfi_shadow {
	/* Page range covered by the shadow */
	struct shadow_range r;
	/* Page offsets to __cfi_check functions in modules */
	u16 shadow[SHADOW_SIZE];
};
59 | |
/* Serializes shadow rebuilds triggered by module add/remove */
static DEFINE_SPINLOCK(shadow_update_lock);
/* Currently published shadow; readers dereference it under RCU */
static struct cfi_shadow __rcu *cfi_shadow __read_mostly = NULL;
62 | |
63 | static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr) |
64 | { |
65 | unsigned long index; |
66 | unsigned long page = ptr >> PAGE_SHIFT; |
67 | |
68 | if (unlikely(page < s->r.min_page)) |
69 | return -1; /* Outside of module area */ |
70 | |
71 | index = page - s->r.min_page; |
72 | |
73 | if (index >= SHADOW_SIZE) |
74 | return -1; /* Cannot be addressed with shadow */ |
75 | |
76 | return (int)index; |
77 | } |
78 | |
79 | static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s, |
80 | int index) |
81 | { |
82 | BUG_ON(index < 0 || index >= SHADOW_SIZE); |
83 | |
84 | if (unlikely(s->shadow[index] == SHADOW_INVALID)) |
85 | return 0; |
86 | |
87 | return (s->r.min_page + s->shadow[index]) << PAGE_SHIFT; |
88 | } |
89 | |
90 | static inline unsigned long shadow_to_page(const struct cfi_shadow *s, |
91 | int index) |
92 | { |
93 | BUG_ON(index < 0 || index >= SHADOW_SIZE); |
94 | |
95 | return (s->r.min_page + index) << PAGE_SHIFT; |
96 | } |
97 | |
/*
 * Initialize @next and carry over still-valid entries from @prev.
 *
 * @prev: currently published shadow, or NULL if none exists yet
 * @next: freshly allocated shadow with next->r already filled in
 *
 * Caller must hold shadow_update_lock so @prev cannot be replaced
 * while we copy from it.
 */
static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
				struct cfi_shadow *next)
{
	int i, index, check;

	/* Mark everything invalid */
	memset(next->shadow, 0xFF, sizeof(next->shadow));

	if (!prev)
		return; /* No previous shadow */

	/* If the base address didn't change, update is not needed */
	if (prev->r.min_page == next->r.min_page) {
		memcpy(next->shadow, prev->shadow, sizeof(next->shadow));
		return;
	}

	/* Convert the previous shadow to the new address range */
	for (i = 0; i < SHADOW_SIZE; ++i) {
		if (prev->shadow[i] == SHADOW_INVALID)
			continue;

		/* Slot in @next for the page that slot i covered in @prev */
		index = ptr_to_shadow(next, shadow_to_page(prev, i));
		if (index < 0)
			continue;

		/*
		 * Recover the check function's address via @prev and re-encode
		 * it relative to @next's base. This works because the page
		 * containing __cfi_check maps to itself in the shadow (see
		 * add_module_to_shadow), so shadow_to_ptr() on the stored
		 * slot value yields the check function's page address.
		 */
		check = ptr_to_shadow(next,
				shadow_to_ptr(prev, prev->shadow[i]));
		if (check < 0)
			continue;

		next->shadow[index] = (u16)check;
	}
}
132 | |
/*
 * Record @mod's __cfi_check function in @s for every page of the
 * module's core text, so lookups can jump from any target address in
 * the module straight to the correct check function.
 */
static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
{
	unsigned long ptr;
	unsigned long min_page_addr;
	unsigned long max_page_addr;
	unsigned long check = (unsigned long)mod->cfi_check;
	int check_index = ptr_to_shadow(s, check);

	BUG_ON((check & PAGE_MASK) != check); /* Must be page aligned */

	if (check_index < 0)
		return; /* Module not addressable with shadow */

	/* Page-aligned bounds of the module's core text */
	min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK;
	max_page_addr = (unsigned long)mod->core_layout.base +
		mod->core_layout.text_size;
	max_page_addr &= PAGE_MASK;

	/* For each page, store the check function index in the shadow */
	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
		int index = ptr_to_shadow(s, ptr);
		if (index >= 0) {
			/* Assume a page only contains code for one module */
			BUG_ON(s->shadow[index] != SHADOW_INVALID);
			s->shadow[index] = (u16)check_index;
		}
	}
}
161 | |
162 | static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod) |
163 | { |
164 | unsigned long ptr; |
165 | unsigned long min_page_addr; |
166 | unsigned long max_page_addr; |
167 | |
168 | min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK; |
169 | max_page_addr = (unsigned long)mod->core_layout.base + |
170 | mod->core_layout.text_size; |
171 | max_page_addr &= PAGE_MASK; |
172 | |
173 | for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) { |
174 | int index = ptr_to_shadow(s, ptr); |
175 | if (index >= 0) |
176 | s->shadow[index] = SHADOW_INVALID; |
177 | } |
178 | } |
179 | |
/* Operation applied to the new shadow while it is still writable */
typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *);

/*
 * Rebuild the shadow for the module region [@min_addr, @max_addr],
 * apply @fn (add or remove) for @mod, and publish the result via RCU.
 *
 * The new shadow is made read-only before publication. The previous
 * shadow is freed only after synchronize_rcu(), so lockless readers in
 * find_cfi_check() never see freed memory.
 *
 * NOTE(review): allocation failure is fatal here (BUG_ON) -- presumably
 * acceptable during module load/unload, but worth confirming.
 */
static void update_shadow(struct module *mod, unsigned long min_addr,
			  unsigned long max_addr, update_shadow_fn fn)
{
	struct cfi_shadow *prev;
	struct cfi_shadow *next = (struct cfi_shadow *)
		__get_free_pages(GFP_KERNEL, SHADOW_ORDER);

	BUG_ON(!next);

	/* Describe the range the new shadow will cover */
	next->r.mod_min_addr = min_addr;
	next->r.mod_max_addr = max_addr;
	next->r.min_page = min_addr >> PAGE_SHIFT;
	next->r.max_page = max_addr >> PAGE_SHIFT;

	spin_lock(&shadow_update_lock);
	prev = rcu_dereference_protected(cfi_shadow, 1);
	prepare_next_shadow(prev, next);

	fn(next, mod);
	/* Write-protect the shadow before readers can reach it */
	set_memory_ro((unsigned long)next, SHADOW_PAGES);
	rcu_assign_pointer(cfi_shadow, next);

	spin_unlock(&shadow_update_lock);
	/* Wait for all readers of the old shadow to finish */
	synchronize_rcu();

	if (prev) {
		/* Restore write access so the pages can be freed */
		set_memory_rw((unsigned long)prev, SHADOW_PAGES);
		free_pages((unsigned long)prev, SHADOW_ORDER);
	}
}
212 | |
/*
 * Add @mod to the CFI shadow covering the module region
 * [@min_addr, @max_addr].
 */
void cfi_module_add(struct module *mod, unsigned long min_addr,
		    unsigned long max_addr)
{
	update_shadow(mod, min_addr, max_addr, add_module_to_shadow);
}
EXPORT_SYMBOL(cfi_module_add);
219 | |
/*
 * Remove @mod from the CFI shadow covering the module region
 * [@min_addr, @max_addr].
 */
void cfi_module_remove(struct module *mod, unsigned long min_addr,
		       unsigned long max_addr)
{
	update_shadow(mod, min_addr, max_addr, remove_module_from_shadow);
}
EXPORT_SYMBOL(cfi_module_remove);
226 | |
227 | static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s, |
228 | unsigned long ptr) |
229 | { |
230 | int index; |
231 | |
232 | if (unlikely(!s)) |
233 | return NULL; /* No shadow available */ |
234 | |
235 | if (ptr < s->r.mod_min_addr || ptr > s->r.mod_max_addr) |
236 | return NULL; /* Not in a mapped module */ |
237 | |
238 | index = ptr_to_shadow(s, ptr); |
239 | if (index < 0) |
240 | return NULL; /* Cannot be addressed with shadow */ |
241 | |
242 | return (cfi_check_fn)shadow_to_ptr(s, index); |
243 | } |
244 | #endif /* CONFIG_CFI_CLANG_SHADOW */ |
245 | |
246 | static inline cfi_check_fn find_module_cfi_check(void *ptr) |
247 | { |
248 | struct module *mod; |
249 | |
250 | preempt_disable(); |
251 | mod = __module_address((unsigned long)ptr); |
252 | preempt_enable(); |
253 | |
254 | if (mod) |
255 | return mod->cfi_check; |
256 | |
257 | return CFI_CHECK_FN; |
258 | } |
259 | |
/*
 * Find the __cfi_check function responsible for @ptr, preferring the
 * O(1) shadow lookup when available and falling back to the module
 * list walk otherwise.
 */
static inline cfi_check_fn find_cfi_check(void *ptr)
{
#ifdef CONFIG_CFI_CLANG_SHADOW
	cfi_check_fn f;

	if (!rcu_access_pointer(cfi_shadow))
		return CFI_CHECK_FN; /* No loaded modules */

	/* Look up the __cfi_check function to use */
	rcu_read_lock();
	f = ptr_to_check_fn(rcu_dereference(cfi_shadow), (unsigned long)ptr);
	rcu_read_unlock();

	if (f)
		return f;

	/*
	 * Fall back to find_module_cfi_check, which works also for a larger
	 * module address space, but is slower.
	 */
#endif /* CONFIG_CFI_CLANG_SHADOW */

	return find_module_cfi_check(ptr);
}
284 | |
285 | void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag) |
286 | { |
287 | cfi_check_fn check = find_cfi_check(ptr); |
288 | |
289 | if (likely(check)) |
290 | check(id, ptr, diag); |
291 | else /* Don't allow unchecked modules */ |
292 | handle_cfi_failure(ptr); |
293 | } |
294 | EXPORT_SYMBOL(cfi_slowpath_handler); |
295 | #endif /* CONFIG_MODULES */ |
296 | |
/*
 * Compiler-emitted entry point for a failed CFI check (the actual
 * symbol name depends on CONFIG_CFI_PERMISSIVE; see the macros at the
 * top of this file). @data and @vtable are diagnostic arguments from
 * the compiler and are unused here; only the target @ptr is reported.
 */
void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
	handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);
302 | |
/*
 * Failure entry point called from a module's __cfi_check function when
 * the target @ptr does not match the expected type. @data is a
 * compiler-supplied diagnostic argument and is unused here.
 */
void __cfi_check_fail(void *data, void *ptr)
{
	handle_cfi_failure(ptr);
}
307 |