/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
		/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
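
/*
 * Note: sorting by key means all entries for a given key end up contiguous
 * in the table, so __jump_label_update() can walk forward from a key's
 * first entry and stop at the first entry that belongs to a different key.
 */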

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it is
 * fine to have it be a function here. The same goes for 'static_key_enable()'
 * and 'static_key_disable()', which require bug.h. This should allow
 * jump_label.h to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_enable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (!count)
		static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (count)
		static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);

void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
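
/*
 * Usage sketch (not part of this file): a static key gates a fast path that
 * is patched at runtime instead of being tested on every call.  The names
 * my_feature_key, my_hot_path and do_extra_work() are illustrative only:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			do_extra_work();	// patched in only when enabled
 *	}
 *
 *	// elsewhere, typically from slow-path setup/teardown code:
 *	static_branch_enable(&my_feature_key);
 *	static_branch_disable(&my_feature_key);
 */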

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}
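
/*
 * Note on the rate-limited path above: the count is bumped back up and the
 * actual jump_label_update() is deferred to a delayed work item, so keys
 * that are toggled rapidly (see jump_label_rate_limit()) do not force a
 * text-patching pass on every transition to zero.
 */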

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
	    entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
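
/*
 * addr_conflict() treats the patched instruction as the byte range
 * [entry->code, entry->code + JUMP_LABEL_NOP_SIZE) and reports a conflict
 * whenever that range touches [start, end] (@end is inclusive here).
 */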

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}
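
/*
 * The scan above is linear: the jump table is sorted by key (for the
 * benefit of jump_label_update()), not by code address, so the sort order
 * cannot be exploited when searching by address.
 */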

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}
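
/*
 * The helpers above rely on pointer alignment to steal low bits:
 * key->entries carries the key's initial-true/false type in its low
 * JUMP_TYPE_MASK bits, and entry->key carries the branch polarity
 * (likely vs. unlikely) in bit 0.  The encoding is set up in
 * linux/jump_label.h, and the masks here must match it.
 */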

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve the type bits
		 * (JUMP_TYPE_MASK) already stored there.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
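
/*
 * A key defined in one module (or in the core kernel) can be referenced by
 * jump entries in other modules.  Each such user gets a static_key_mod
 * record hung off key->next, so jump_label_update() can patch every
 * module's entries, not just the table the key itself lives in.
 */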

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = key->next; mod; mod = mod->next) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}
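
/*
 * jump_label_apply_nops() runs before the module code is live, so the
 * cheaper arch_jump_label_transform_static() (see its __weak definition
 * above) is sufficient; no heavyweight synchronization for modifying
 * running code is needed here.
 */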

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			/*
			 * Set key->entries to iter, but preserve the type
			 * bits (JUMP_TYPE_MASK) already stored there.
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}
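
/*
 * Two cases above: keys defined by this module have their entries pointer
 * initialized directly (the key and its entries unload together), while
 * keys owned elsewhere get a static_key_mod link so the owner can find and
 * patch this module's entries.  Patching is only needed when the key's
 * current state differs from the state the entries were compiled for.
 */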

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if any text address between @start and @end overlaps with
 * any of the jump label patch addresses.  Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
	struct module *mod;

	__jump_label_mod_update(key);

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}
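
/*
 * Note: static_key_entries(key) points into the jump table of whatever
 * object defines the key, so when the key lives in a module the walk must
 * stop at that module's table end rather than at __stop___jump_table.
 * Entries in *other* modules were already handled by
 * __jump_label_mod_update() above.
 */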

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */
619 |