1 | /* |
2 | * Kernel Probes (KProbes) |
3 | * kernel/kprobes.c |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. |
9 | * |
10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
18 | * |
19 | * Copyright (C) IBM Corporation, 2002, 2004 |
20 | * |
21 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel |
22 | * Probes initial implementation (includes suggestions from |
23 | * Rusty Russell). |
24 | * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with |
25 | * hlists and exceptions notifier as suggested by Andi Kleen. |
26 | * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes |
27 | * interface to access function arguments. |
28 | * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes |
29 | * exceptions notifier to be first on the priority list. |
30 | * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston |
31 | * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi |
32 | * <prasanna@in.ibm.com> added function-return probes. |
33 | */ |
34 | #include <linux/kprobes.h> |
35 | #include <linux/hash.h> |
36 | #include <linux/init.h> |
37 | #include <linux/slab.h> |
38 | #include <linux/stddef.h> |
39 | #include <linux/export.h> |
40 | #include <linux/moduleloader.h> |
41 | #include <linux/kallsyms.h> |
42 | #include <linux/freezer.h> |
43 | #include <linux/seq_file.h> |
44 | #include <linux/debugfs.h> |
45 | #include <linux/sysctl.h> |
46 | #include <linux/kdebug.h> |
47 | #include <linux/memory.h> |
48 | #include <linux/ftrace.h> |
49 | #include <linux/cpu.h> |
50 | #include <linux/jump_label.h> |
51 | |
52 | #include <asm/sections.h> |
53 | #include <asm/cacheflush.h> |
54 | #include <asm/errno.h> |
55 | #include <asm/uaccess.h> |
56 | |
57 | #define KPROBE_HASH_BITS 6 |
58 | #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) |
59 | |
60 | |
61 | /* |
62 | * Some oddball architectures like 64bit powerpc have function descriptors |
63 | * so this must be overridable. |
64 | */ |
65 | #ifndef kprobe_lookup_name |
66 | #define kprobe_lookup_name(name, addr) \ |
67 | addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) |
68 | #endif |
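/*
 * Illustrative sketch (not part of the original file): resolving a probe
 * address from a symbol name with the macro above. The symbol "vfs_read"
 * is only a placeholder; on most architectures the macro reduces to a
 * plain kallsyms_lookup_name() call. Never built, hence the #if 0.
 */
#if 0
static kprobe_opcode_t *example_lookup(void)
{
	kprobe_opcode_t *addr;

	kprobe_lookup_name("vfs_read", addr);	/* sets addr, possibly NULL */
	return addr;				/* NULL if the symbol is unknown */
}
#endif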
69 | |
70 | static int kprobes_initialized; |
71 | static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; |
72 | static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; |
73 | |
74 | /* NOTE: change this value only with kprobe_mutex held */ |
75 | static bool kprobes_all_disarmed; |
76 | |
77 | /* This protects kprobe_table and optimizing_list */ |
78 | static DEFINE_MUTEX(kprobe_mutex); |
79 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; |
80 | static struct { |
81 | raw_spinlock_t lock ____cacheline_aligned_in_smp; |
82 | } kretprobe_table_locks[KPROBE_TABLE_SIZE]; |
83 | |
84 | static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) |
85 | { |
86 | return &(kretprobe_table_locks[hash].lock); |
87 | } |
88 | |
89 | /* Blacklist -- list of struct kprobe_blacklist_entry */ |
90 | static LIST_HEAD(kprobe_blacklist); |
91 | |
92 | #ifdef __ARCH_WANT_KPROBES_INSN_SLOT |
93 | /* |
94 | * kprobe->ainsn.insn points to the copy of the instruction to be |
95 | * single-stepped. x86_64, POWER4 and above have no-exec support and |
96 | * stepping on the instruction on a vmalloced/kmalloced/data page |
97 | * is a recipe for disaster |
98 | */ |
99 | struct kprobe_insn_page { |
100 | struct list_head list; |
101 | kprobe_opcode_t *insns; /* Page of instruction slots */ |
102 | struct kprobe_insn_cache *cache; |
103 | int nused; |
104 | int ngarbage; |
105 | char slot_used[]; |
106 | }; |
107 | |
108 | #define KPROBE_INSN_PAGE_SIZE(slots) \ |
109 | (offsetof(struct kprobe_insn_page, slot_used) + \ |
110 | (sizeof(char) * (slots))) |
111 | |
112 | static int slots_per_page(struct kprobe_insn_cache *c) |
113 | { |
114 | return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); |
115 | } |
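/*
 * Worked example (assuming typical x86 values, PAGE_SIZE = 4096,
 * MAX_INSN_SIZE = 16 and a one-byte kprobe_opcode_t): slots_per_page()
 * for kprobe_insn_slots yields 4096 / 16 = 256 instruction slots per
 * executable page.
 */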
116 | |
117 | enum kprobe_slot_state { |
118 | SLOT_CLEAN = 0, |
119 | SLOT_DIRTY = 1, |
120 | SLOT_USED = 2, |
121 | }; |
122 | |
123 | static void *alloc_insn_page(void) |
124 | { |
125 | return module_alloc(PAGE_SIZE); |
126 | } |
127 | |
128 | void __weak free_insn_page(void *page) |
129 | { |
130 | module_memfree(page); |
131 | } |
132 | |
133 | struct kprobe_insn_cache kprobe_insn_slots = { |
134 | .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), |
135 | .alloc = alloc_insn_page, |
136 | .free = free_insn_page, |
137 | .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), |
138 | .insn_size = MAX_INSN_SIZE, |
139 | .nr_garbage = 0, |
140 | }; |
141 | static int collect_garbage_slots(struct kprobe_insn_cache *c); |
142 | |
143 | /** |
144 | * __get_insn_slot() - Find a slot on an executable page for an instruction. |
145 | * We allocate an executable page if there's no room on existing ones. |
146 | */ |
147 | kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) |
148 | { |
149 | struct kprobe_insn_page *kip; |
150 | kprobe_opcode_t *slot = NULL; |
151 | |
152 | mutex_lock(&c->mutex); |
153 | retry: |
154 | list_for_each_entry(kip, &c->pages, list) { |
155 | if (kip->nused < slots_per_page(c)) { |
156 | int i; |
157 | for (i = 0; i < slots_per_page(c); i++) { |
158 | if (kip->slot_used[i] == SLOT_CLEAN) { |
159 | kip->slot_used[i] = SLOT_USED; |
160 | kip->nused++; |
161 | slot = kip->insns + (i * c->insn_size); |
162 | goto out; |
163 | } |
164 | } |
165 | /* kip->nused is broken. Fix it. */ |
166 | kip->nused = slots_per_page(c); |
167 | WARN_ON(1); |
168 | } |
169 | } |
170 | |
171 | /* If there are any garbage slots, collect them and try again. */ |
172 | if (c->nr_garbage && collect_garbage_slots(c) == 0) |
173 | goto retry; |
174 | |
175 | /* All out of space. Need to allocate a new page. */ |
176 | kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); |
177 | if (!kip) |
178 | goto out; |
179 | |
180 | /* |
181 | * Use module_alloc so this page is within +/- 2GB of where the |
182 | * kernel image and loaded module images reside. This is required |
183 | * so x86_64 can correctly handle the %rip-relative fixups. |
184 | */ |
185 | kip->insns = c->alloc(); |
186 | if (!kip->insns) { |
187 | kfree(kip); |
188 | goto out; |
189 | } |
190 | INIT_LIST_HEAD(&kip->list); |
191 | memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); |
192 | kip->slot_used[0] = SLOT_USED; |
193 | kip->nused = 1; |
194 | kip->ngarbage = 0; |
195 | kip->cache = c; |
196 | list_add(&kip->list, &c->pages); |
197 | slot = kip->insns; |
198 | out: |
199 | mutex_unlock(&c->mutex); |
200 | return slot; |
201 | } |
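/*
 * Usage sketch (illustrative, simplified from typical arch code such as
 * x86's arch_prepare_kprobe(); not part of the original file): the
 * get_insn_slot()/free_insn_slot() wrappers from <linux/kprobes.h> route
 * into this cache, and ainsn.insn is the x86-style slot pointer. Never
 * built, hence the #if 0.
 */
#if 0
static int example_prepare(struct kprobe *p)
{
	p->ainsn.insn = get_insn_slot();	/* __get_insn_slot(&kprobe_insn_slots) */
	if (!p->ainsn.insn)
		return -ENOMEM;
	/* copy the original instruction into the executable slot */
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	return 0;
}
#endif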
202 | |
203 | /* Return 1 if all garbage slots on the page are collected, otherwise 0. */ |
204 | static int collect_one_slot(struct kprobe_insn_page *kip, int idx) |
205 | { |
206 | kip->slot_used[idx] = SLOT_CLEAN; |
207 | kip->nused--; |
208 | if (kip->nused == 0) { |
209 | /* |
210 | * Page is no longer in use. Free it unless |
211 | * it's the last one. We keep the last one |
212 | * so as not to have to set it up again the |
213 | * next time somebody inserts a probe. |
214 | */ |
215 | if (!list_is_singular(&kip->list)) { |
216 | list_del(&kip->list); |
217 | kip->cache->free(kip->insns); |
218 | kfree(kip); |
219 | } |
220 | return 1; |
221 | } |
222 | return 0; |
223 | } |
224 | |
225 | static int collect_garbage_slots(struct kprobe_insn_cache *c) |
226 | { |
227 | struct kprobe_insn_page *kip, *next; |
228 | |
229 | /* Ensure no one is still running in any of the garbage slots */ |
230 | synchronize_sched(); |
231 | |
232 | list_for_each_entry_safe(kip, next, &c->pages, list) { |
233 | int i; |
234 | if (kip->ngarbage == 0) |
235 | continue; |
236 | kip->ngarbage = 0; /* we will collect all garbage slots */ |
237 | for (i = 0; i < slots_per_page(c); i++) { |
238 | if (kip->slot_used[i] == SLOT_DIRTY && |
239 | collect_one_slot(kip, i)) |
240 | break; |
241 | } |
242 | } |
243 | c->nr_garbage = 0; |
244 | return 0; |
245 | } |
246 | |
247 | void __free_insn_slot(struct kprobe_insn_cache *c, |
248 | kprobe_opcode_t *slot, int dirty) |
249 | { |
250 | struct kprobe_insn_page *kip; |
251 | |
252 | mutex_lock(&c->mutex); |
253 | list_for_each_entry(kip, &c->pages, list) { |
254 | long idx = ((long)slot - (long)kip->insns) / |
255 | (c->insn_size * sizeof(kprobe_opcode_t)); |
256 | if (idx >= 0 && idx < slots_per_page(c)) { |
257 | WARN_ON(kip->slot_used[idx] != SLOT_USED); |
258 | if (dirty) { |
259 | kip->slot_used[idx] = SLOT_DIRTY; |
260 | kip->ngarbage++; |
261 | if (++c->nr_garbage > slots_per_page(c)) |
262 | collect_garbage_slots(c); |
263 | } else |
264 | collect_one_slot(kip, idx); |
265 | goto out; |
266 | } |
267 | } |
268 | /* Could not free this slot. */ |
269 | WARN_ON(1); |
270 | out: |
271 | mutex_unlock(&c->mutex); |
272 | } |
273 | |
274 | #ifdef CONFIG_OPTPROBES |
275 | /* For optimized_kprobe buffer */ |
276 | struct kprobe_insn_cache kprobe_optinsn_slots = { |
277 | .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), |
278 | .alloc = alloc_insn_page, |
279 | .free = free_insn_page, |
280 | .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), |
281 | /* .insn_size is initialized later */ |
282 | .nr_garbage = 0, |
283 | }; |
284 | #endif |
285 | #endif |
286 | |
287 | /* We have preemption disabled, so it is safe to use the __ versions */ |
288 | static inline void set_kprobe_instance(struct kprobe *kp) |
289 | { |
290 | __this_cpu_write(kprobe_instance, kp); |
291 | } |
292 | |
293 | static inline void reset_kprobe_instance(void) |
294 | { |
295 | __this_cpu_write(kprobe_instance, NULL); |
296 | } |
297 | |
298 | /* |
299 | * This routine is called either: |
300 | * - under the kprobe_mutex - during kprobe_[un]register() |
301 | * OR |
302 | * - with preemption disabled - from arch/xxx/kernel/kprobes.c |
303 | */ |
304 | struct kprobe *get_kprobe(void *addr) |
305 | { |
306 | struct hlist_head *head; |
307 | struct kprobe *p; |
308 | |
309 | head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; |
310 | hlist_for_each_entry_rcu(p, head, hlist) { |
311 | if (p->addr == addr) |
312 | return p; |
313 | } |
314 | |
315 | return NULL; |
316 | } |
317 | NOKPROBE_SYMBOL(get_kprobe); |
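/*
 * Dispatch sketch (illustrative, not part of the original file): arch
 * breakpoint handlers look up the hit address with get_kprobe() while
 * preemption is disabled, as the comment above requires. Real handlers
 * do considerably more bookkeeping. Never built, hence the #if 0.
 */
#if 0
static void example_dispatch(struct pt_regs *regs)
{
	struct kprobe *p;

	preempt_disable();
	p = get_kprobe((void *)instruction_pointer(regs));
	if (p && p->pre_handler)
		p->pre_handler(p, regs);
	preempt_enable();
}
#endif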
318 | |
319 | static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); |
320 | |
321 | /* Return true if the kprobe is an aggregator */ |
322 | static inline int kprobe_aggrprobe(struct kprobe *p) |
323 | { |
324 | return p->pre_handler == aggr_pre_handler; |
325 | } |
326 | |
327 | /* Return true(!0) if the kprobe is unused */ |
328 | static inline int kprobe_unused(struct kprobe *p) |
329 | { |
330 | return kprobe_aggrprobe(p) && kprobe_disabled(p) && |
331 | list_empty(&p->list); |
332 | } |
333 | |
334 | /* |
335 | * Keep all fields in the kprobe consistent |
336 | */ |
337 | static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) |
338 | { |
339 | memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); |
340 | memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); |
341 | } |
342 | |
343 | #ifdef CONFIG_OPTPROBES |
344 | /* NOTE: change this value only with kprobe_mutex held */ |
345 | static bool kprobes_allow_optimization; |
346 | |
347 | /* |
348 | * Call all pre_handlers on the list, but ignore their return values. |
349 | * This must be called from the arch-dependent optimized caller. |
350 | */ |
351 | void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) |
352 | { |
353 | struct kprobe *kp; |
354 | |
355 | list_for_each_entry_rcu(kp, &p->list, list) { |
356 | if (kp->pre_handler && likely(!kprobe_disabled(kp))) { |
357 | set_kprobe_instance(kp); |
358 | kp->pre_handler(kp, regs); |
359 | } |
360 | reset_kprobe_instance(); |
361 | } |
362 | } |
363 | NOKPROBE_SYMBOL(opt_pre_handler); |
364 | |
365 | /* Free optimized instructions and optimized_kprobe */ |
366 | static void free_aggr_kprobe(struct kprobe *p) |
367 | { |
368 | struct optimized_kprobe *op; |
369 | |
370 | op = container_of(p, struct optimized_kprobe, kp); |
371 | arch_remove_optimized_kprobe(op); |
372 | arch_remove_kprobe(p); |
373 | kfree(op); |
374 | } |
375 | |
376 | /* Return true(!0) if the kprobe is ready for optimization. */ |
377 | static inline int kprobe_optready(struct kprobe *p) |
378 | { |
379 | struct optimized_kprobe *op; |
380 | |
381 | if (kprobe_aggrprobe(p)) { |
382 | op = container_of(p, struct optimized_kprobe, kp); |
383 | return arch_prepared_optinsn(&op->optinsn); |
384 | } |
385 | |
386 | return 0; |
387 | } |
388 | |
389 | /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ |
390 | static inline int kprobe_disarmed(struct kprobe *p) |
391 | { |
392 | struct optimized_kprobe *op; |
393 | |
394 | /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ |
395 | if (!kprobe_aggrprobe(p)) |
396 | return kprobe_disabled(p); |
397 | |
398 | op = container_of(p, struct optimized_kprobe, kp); |
399 | |
400 | return kprobe_disabled(p) && list_empty(&op->list); |
401 | } |
402 | |
403 | /* Return true(!0) if the probe is queued on (un)optimizing lists */ |
404 | static int kprobe_queued(struct kprobe *p) |
405 | { |
406 | struct optimized_kprobe *op; |
407 | |
408 | if (kprobe_aggrprobe(p)) { |
409 | op = container_of(p, struct optimized_kprobe, kp); |
410 | if (!list_empty(&op->list)) |
411 | return 1; |
412 | } |
413 | return 0; |
414 | } |
415 | |
416 | /* |
417 | * Return an optimized kprobe whose optimizing code replaces |
418 | * instructions including addr (excluding the breakpoint itself). |
419 | */ |
420 | static struct kprobe *get_optimized_kprobe(unsigned long addr) |
421 | { |
422 | int i; |
423 | struct kprobe *p = NULL; |
424 | struct optimized_kprobe *op; |
425 | |
426 | /* Don't check i == 0, since that is a breakpoint case. */ |
427 | for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) |
428 | p = get_kprobe((void *)(addr - i)); |
429 | |
430 | if (p && kprobe_optready(p)) { |
431 | op = container_of(p, struct optimized_kprobe, kp); |
432 | if (arch_within_optimized_kprobe(op, addr)) |
433 | return p; |
434 | } |
435 | |
436 | return NULL; |
437 | } |
438 | |
439 | /* Optimization staging list, protected by kprobe_mutex */ |
440 | static LIST_HEAD(optimizing_list); |
441 | static LIST_HEAD(unoptimizing_list); |
442 | static LIST_HEAD(freeing_list); |
443 | |
444 | static void kprobe_optimizer(struct work_struct *work); |
445 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); |
446 | #define OPTIMIZE_DELAY 5 |
447 | |
448 | /* |
449 | * Optimize (replace a breakpoint with a jump) kprobes listed on |
450 | * optimizing_list. |
451 | */ |
452 | static void do_optimize_kprobes(void) |
453 | { |
454 | /* Optimization is never done while all kprobes are disarmed */ |
455 | if (kprobes_all_disarmed || !kprobes_allow_optimization || |
456 | list_empty(&optimizing_list)) |
457 | return; |
458 | |
459 | /* |
460 | * Optimization and unoptimization refer to online_cpus via |
461 | * stop_machine(), while cpu-hotplug modifies online_cpus. |
462 | * At the same time, text_mutex is held both in cpu-hotplug and here. |
463 | * This combination can cause a deadlock (cpu-hotplug tries to take |
464 | * text_mutex while stop_machine() cannot proceed because online_cpus |
465 | * has changed). |
466 | * To avoid this deadlock, we need to call get_online_cpus() to keep |
467 | * cpu-hotplug from running while text_mutex is held. |
468 | */ |
469 | get_online_cpus(); |
470 | mutex_lock(&text_mutex); |
471 | arch_optimize_kprobes(&optimizing_list); |
472 | mutex_unlock(&text_mutex); |
473 | put_online_cpus(); |
474 | } |
475 | |
476 | /* |
477 | * Unoptimize (replace a jump with a breakpoint and remove the breakpoint |
478 | * if needed) kprobes listed on unoptimizing_list. |
479 | */ |
480 | static void do_unoptimize_kprobes(void) |
481 | { |
482 | struct optimized_kprobe *op, *tmp; |
483 | |
484 | /* Unoptimization must always be carried out, even when kprobes are disarmed */ |
485 | if (list_empty(&unoptimizing_list)) |
486 | return; |
487 | |
488 | /* Ditto for do_optimize_kprobes() */ |
489 | get_online_cpus(); |
490 | mutex_lock(&text_mutex); |
491 | arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); |
492 | /* Loop over freeing_list for disarming */ |
493 | list_for_each_entry_safe(op, tmp, &freeing_list, list) { |
494 | /* Disarm probes if marked disabled */ |
495 | if (kprobe_disabled(&op->kp)) |
496 | arch_disarm_kprobe(&op->kp); |
497 | if (kprobe_unused(&op->kp)) { |
498 | /* |
499 | * Remove unused probes from hash list. After waiting |
500 | * for synchronization, these probes are reclaimed. |
501 | * (reclaiming is done by do_free_cleaned_kprobes.) |
502 | */ |
503 | hlist_del_rcu(&op->kp.hlist); |
504 | } else |
505 | list_del_init(&op->list); |
506 | } |
507 | mutex_unlock(&text_mutex); |
508 | put_online_cpus(); |
509 | } |
510 | |
511 | /* Reclaim all kprobes on freeing_list */ |
512 | static void do_free_cleaned_kprobes(void) |
513 | { |
514 | struct optimized_kprobe *op, *tmp; |
515 | |
516 | list_for_each_entry_safe(op, tmp, &freeing_list, list) { |
517 | BUG_ON(!kprobe_unused(&op->kp)); |
518 | list_del_init(&op->list); |
519 | free_aggr_kprobe(&op->kp); |
520 | } |
521 | } |
522 | |
523 | /* Start optimizer after OPTIMIZE_DELAY passed */ |
524 | static void kick_kprobe_optimizer(void) |
525 | { |
526 | schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); |
527 | } |
528 | |
529 | /* Kprobe jump optimizer */ |
530 | static void kprobe_optimizer(struct work_struct *work) |
531 | { |
532 | mutex_lock(&kprobe_mutex); |
533 | /* Lock modules while optimizing kprobes */ |
534 | mutex_lock(&module_mutex); |
535 | |
536 | /* |
537 | * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) |
538 | * kprobes before waiting for the quiescence period. |
539 | */ |
540 | do_unoptimize_kprobes(); |
541 | |
542 | /* |
543 | * Step 2: Wait for a quiescence period to ensure all running interrupts |
544 | * are done. Because an optprobe may modify multiple instructions, |
545 | * there is a chance that the Nth instruction is interrupted. In that |
546 | * case, the running interrupt can return into the 2nd-Nth byte of the |
547 | * jump instruction. This wait avoids that. |
548 | */ |
549 | synchronize_sched(); |
550 | |
551 | /* Step 3: Optimize kprobes after the quiescence period */ |
552 | do_optimize_kprobes(); |
553 | |
554 | /* Step 4: Free cleaned kprobes after the quiescence period */ |
555 | do_free_cleaned_kprobes(); |
556 | |
557 | mutex_unlock(&module_mutex); |
558 | mutex_unlock(&kprobe_mutex); |
559 | |
560 | /* Step 5: Kick optimizer again if needed */ |
561 | if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) |
562 | kick_kprobe_optimizer(); |
563 | } |
564 | |
565 | /* Wait for completing optimization and unoptimization */ |
566 | void wait_for_kprobe_optimizer(void) |
567 | { |
568 | mutex_lock(&kprobe_mutex); |
569 | |
570 | while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { |
571 | mutex_unlock(&kprobe_mutex); |
572 | |
573 | /* this will also make optimizing_work execute immediately */ |
574 | flush_delayed_work(&optimizing_work); |
575 | /* @optimizing_work might not have been queued yet, relax */ |
576 | cpu_relax(); |
577 | |
578 | mutex_lock(&kprobe_mutex); |
579 | } |
580 | |
581 | mutex_unlock(&kprobe_mutex); |
582 | } |
583 | |
584 | /* Optimize kprobe if p is ready to be optimized */ |
585 | static void optimize_kprobe(struct kprobe *p) |
586 | { |
587 | struct optimized_kprobe *op; |
588 | |
589 | /* Check if the kprobe is disabled or not ready for optimization. */ |
590 | if (!kprobe_optready(p) || !kprobes_allow_optimization || |
591 | (kprobe_disabled(p) || kprobes_all_disarmed)) |
592 | return; |
593 | |
594 | /* Neither break_handler nor post_handler is supported. */ |
595 | if (p->break_handler || p->post_handler) |
596 | return; |
597 | |
598 | op = container_of(p, struct optimized_kprobe, kp); |
599 | |
600 | /* Check that there are no other kprobes at the instructions to be optimized */ |
601 | if (arch_check_optimized_kprobe(op) < 0) |
602 | return; |
603 | |
604 | /* Check if it is already optimized. */ |
605 | if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) |
606 | return; |
607 | op->kp.flags |= KPROBE_FLAG_OPTIMIZED; |
608 | |
609 | if (!list_empty(&op->list)) |
610 | /* This probe is being unoptimized. Just dequeue it */ |
611 | list_del_init(&op->list); |
612 | else { |
613 | list_add(&op->list, &optimizing_list); |
614 | kick_kprobe_optimizer(); |
615 | } |
616 | } |
617 | |
618 | /* Shortcut for unoptimizing a kprobe directly */ |
619 | static void force_unoptimize_kprobe(struct optimized_kprobe *op) |
620 | { |
621 | get_online_cpus(); |
622 | arch_unoptimize_kprobe(op); |
623 | put_online_cpus(); |
624 | if (kprobe_disabled(&op->kp)) |
625 | arch_disarm_kprobe(&op->kp); |
626 | } |
627 | |
628 | /* Unoptimize a kprobe if p is optimized */ |
629 | static void unoptimize_kprobe(struct kprobe *p, bool force) |
630 | { |
631 | struct optimized_kprobe *op; |
632 | |
633 | if (!kprobe_aggrprobe(p) || kprobe_disarmed(p)) |
634 | return; /* This is not an optprobe nor optimized */ |
635 | |
636 | op = container_of(p, struct optimized_kprobe, kp); |
637 | if (!kprobe_optimized(p)) { |
638 | /* Unoptimized or unoptimizing case */ |
639 | if (force && !list_empty(&op->list)) { |
640 | /* |
641 | * Only if this is unoptimizing kprobe and forced, |
642 | * forcibly unoptimize it. (No need to unoptimize |
643 | * unoptimized kprobe again :) |
644 | */ |
645 | list_del_init(&op->list); |
646 | force_unoptimize_kprobe(op); |
647 | } |
648 | return; |
649 | } |
650 | |
651 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
652 | if (!list_empty(&op->list)) { |
653 | /* Dequeue from the optimization queue */ |
654 | list_del_init(&op->list); |
655 | return; |
656 | } |
657 | /* Optimized kprobe case */ |
658 | if (force) |
659 | /* Forcibly update the code: this is a special case */ |
660 | force_unoptimize_kprobe(op); |
661 | else { |
662 | list_add(&op->list, &unoptimizing_list); |
663 | kick_kprobe_optimizer(); |
664 | } |
665 | } |
666 | |
667 | /* Cancel unoptimizing so the probe can be reused */ |
668 | static int reuse_unused_kprobe(struct kprobe *ap) |
669 | { |
670 | struct optimized_kprobe *op; |
671 | |
672 | BUG_ON(!kprobe_unused(ap)); |
673 | /* |
674 | * An unused kprobe MUST be in the middle of delayed unoptimizing (which |
675 | * means there is still a relative jump in place) and disabled. |
676 | */ |
677 | op = container_of(ap, struct optimized_kprobe, kp); |
678 | if (unlikely(list_empty(&op->list))) |
679 | printk(KERN_WARNING "Warning: found a stray unused " |
680 | "aggrprobe@%p\n", ap->addr); |
681 | /* Enable the probe again */ |
682 | ap->flags &= ~KPROBE_FLAG_DISABLED; |
683 | /* Optimize it again (remove from op->list) */ |
684 | if (!kprobe_optready(ap)) |
685 | return -EINVAL; |
686 | |
687 | optimize_kprobe(ap); |
688 | return 0; |
689 | } |
690 | |
691 | /* Remove optimized instructions */ |
692 | static void kill_optimized_kprobe(struct kprobe *p) |
693 | { |
694 | struct optimized_kprobe *op; |
695 | |
696 | op = container_of(p, struct optimized_kprobe, kp); |
697 | if (!list_empty(&op->list)) |
698 | /* Dequeue from the (un)optimization queue */ |
699 | list_del_init(&op->list); |
700 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
701 | |
702 | if (kprobe_unused(p)) { |
703 | /* Enqueue if it is unused */ |
704 | list_add(&op->list, &freeing_list); |
705 | /* |
706 | * Remove unused probes from the hash list. After waiting |
707 | * for synchronization, this probe is reclaimed. |
708 | * (reclaiming is done by do_free_cleaned_kprobes().) |
709 | */ |
710 | hlist_del_rcu(&op->kp.hlist); |
711 | } |
712 | |
713 | /* Don't touch the code, because it is already freed. */ |
714 | arch_remove_optimized_kprobe(op); |
715 | } |
716 | |
717 | /* Try to prepare optimized instructions */ |
718 | static void prepare_optimized_kprobe(struct kprobe *p) |
719 | { |
720 | struct optimized_kprobe *op; |
721 | |
722 | op = container_of(p, struct optimized_kprobe, kp); |
723 | arch_prepare_optimized_kprobe(op, p); |
724 | } |
725 | |
726 | /* Allocate new optimized_kprobe and try to prepare optimized instructions */ |
727 | static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) |
728 | { |
729 | struct optimized_kprobe *op; |
730 | |
731 | op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); |
732 | if (!op) |
733 | return NULL; |
734 | |
735 | INIT_LIST_HEAD(&op->list); |
736 | op->kp.addr = p->addr; |
737 | arch_prepare_optimized_kprobe(op, p); |
738 | |
739 | return &op->kp; |
740 | } |
741 | |
742 | static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); |
743 | |
744 | /* |
745 | * Prepare an optimized_kprobe and optimize it |
746 | * NOTE: p must be a normal registered kprobe |
747 | */ |
748 | static void try_to_optimize_kprobe(struct kprobe *p) |
749 | { |
750 | struct kprobe *ap; |
751 | struct optimized_kprobe *op; |
752 | |
753 | /* Impossible to optimize ftrace-based kprobe */ |
754 | if (kprobe_ftrace(p)) |
755 | return; |
756 | |
757 | /* For preparing optimization, jump_label_text_reserved() is called */ |
758 | jump_label_lock(); |
759 | mutex_lock(&text_mutex); |
760 | |
761 | ap = alloc_aggr_kprobe(p); |
762 | if (!ap) |
763 | goto out; |
764 | |
765 | op = container_of(ap, struct optimized_kprobe, kp); |
766 | if (!arch_prepared_optinsn(&op->optinsn)) { |
767 | /* If preparing the optimized instructions failed, fall back to a plain kprobe */ |
768 | arch_remove_optimized_kprobe(op); |
769 | kfree(op); |
770 | goto out; |
771 | } |
772 | |
773 | init_aggr_kprobe(ap, p); |
774 | optimize_kprobe(ap); /* This just kicks optimizer thread */ |
775 | |
776 | out: |
777 | mutex_unlock(&text_mutex); |
778 | jump_label_unlock(); |
779 | } |
780 | |
781 | #ifdef CONFIG_SYSCTL |
782 | static void optimize_all_kprobes(void) |
783 | { |
784 | struct hlist_head *head; |
785 | struct kprobe *p; |
786 | unsigned int i; |
787 | |
788 | mutex_lock(&kprobe_mutex); |
789 | /* If optimization is already allowed, just return */ |
790 | if (kprobes_allow_optimization) |
791 | goto out; |
792 | |
793 | kprobes_allow_optimization = true; |
794 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
795 | head = &kprobe_table[i]; |
796 | hlist_for_each_entry_rcu(p, head, hlist) |
797 | if (!kprobe_disabled(p)) |
798 | optimize_kprobe(p); |
799 | } |
800 | printk(KERN_INFO "Kprobes globally optimized\n"); |
801 | out: |
802 | mutex_unlock(&kprobe_mutex); |
803 | } |
804 | |
805 | static void unoptimize_all_kprobes(void) |
806 | { |
807 | struct hlist_head *head; |
808 | struct kprobe *p; |
809 | unsigned int i; |
810 | |
811 | mutex_lock(&kprobe_mutex); |
812 | /* If optimization is already prohibited, just return */ |
813 | if (!kprobes_allow_optimization) { |
814 | mutex_unlock(&kprobe_mutex); |
815 | return; |
816 | } |
817 | |
818 | kprobes_allow_optimization = false; |
819 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
820 | head = &kprobe_table[i]; |
821 | hlist_for_each_entry_rcu(p, head, hlist) { |
822 | if (!kprobe_disabled(p)) |
823 | unoptimize_kprobe(p, false); |
824 | } |
825 | } |
826 | mutex_unlock(&kprobe_mutex); |
827 | |
828 | /* Wait for unoptimizing completion */ |
829 | wait_for_kprobe_optimizer(); |
830 | printk(KERN_INFO "Kprobes globally unoptimized\n"); |
831 | } |
832 | |
833 | static DEFINE_MUTEX(kprobe_sysctl_mutex); |
834 | int sysctl_kprobes_optimization; |
835 | int proc_kprobes_optimization_handler(struct ctl_table *table, int write, |
836 | void __user *buffer, size_t *length, |
837 | loff_t *ppos) |
838 | { |
839 | int ret; |
840 | |
841 | mutex_lock(&kprobe_sysctl_mutex); |
842 | sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; |
843 | ret = proc_dointvec_minmax(table, write, buffer, length, ppos); |
844 | |
845 | if (sysctl_kprobes_optimization) |
846 | optimize_all_kprobes(); |
847 | else |
848 | unoptimize_all_kprobes(); |
849 | mutex_unlock(&kprobe_sysctl_mutex); |
850 | |
851 | return ret; |
852 | } |
853 | #endif /* CONFIG_SYSCTL */ |
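/*
 * User-visible control for the handler above (illustrative note, not part
 * of the original file): the knob is registered as debug.kprobes-optimization,
 * so jump optimization can be toggled from userspace:
 *
 *	# echo 0 > /proc/sys/debug/kprobes-optimization		(unoptimize all)
 *	# echo 1 > /proc/sys/debug/kprobes-optimization		(optimize again)
 */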
854 | |
855 | /* Put a breakpoint for a probe. Must be called with text_mutex locked */ |
856 | static void __arm_kprobe(struct kprobe *p) |
857 | { |
858 | struct kprobe *_p; |
859 | |
860 | /* Check collision with other optimized kprobes */ |
861 | _p = get_optimized_kprobe((unsigned long)p->addr); |
862 | if (unlikely(_p)) |
863 | /* Fallback to unoptimized kprobe */ |
864 | unoptimize_kprobe(_p, true); |
865 | |
866 | arch_arm_kprobe(p); |
867 | optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ |
868 | } |
869 | |
870 | /* Remove the breakpoint of a probe. Must be called with text_mutex locked */ |
871 | static void __disarm_kprobe(struct kprobe *p, bool reopt) |
872 | { |
873 | struct kprobe *_p; |
874 | |
875 | /* Try to unoptimize */ |
876 | unoptimize_kprobe(p, kprobes_all_disarmed); |
877 | |
878 | if (!kprobe_queued(p)) { |
879 | arch_disarm_kprobe(p); |
880 | /* If another kprobe was blocked, optimize it. */ |
881 | _p = get_optimized_kprobe((unsigned long)p->addr); |
882 | if (unlikely(_p) && reopt) |
883 | optimize_kprobe(_p); |
884 | } |
885 | /* TODO: reoptimize others after unoptimized this probe */ |
886 | } |
887 | |
888 | #else /* !CONFIG_OPTPROBES */ |
889 | |
890 | #define optimize_kprobe(p) do {} while (0) |
891 | #define unoptimize_kprobe(p, f) do {} while (0) |
892 | #define kill_optimized_kprobe(p) do {} while (0) |
893 | #define prepare_optimized_kprobe(p) do {} while (0) |
894 | #define try_to_optimize_kprobe(p) do {} while (0) |
895 | #define __arm_kprobe(p) arch_arm_kprobe(p) |
896 | #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) |
897 | #define kprobe_disarmed(p) kprobe_disabled(p) |
898 | #define wait_for_kprobe_optimizer() do {} while (0) |
899 | |
900 | static int reuse_unused_kprobe(struct kprobe *ap) |
901 | { |
902 | /* |
903 | * If optimized kprobes are NOT supported, the aggr kprobe is |
904 | * released at the same time that the last aggregated kprobe is |
905 | * unregistered. |
906 | * Thus there should be no chance to reuse an unused kprobe. |
907 | */ |
908 | printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); |
909 | return -EINVAL; |
910 | } |
911 | |
912 | static void free_aggr_kprobe(struct kprobe *p) |
913 | { |
914 | arch_remove_kprobe(p); |
915 | kfree(p); |
916 | } |
917 | |
918 | static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) |
919 | { |
920 | return kzalloc(sizeof(struct kprobe), GFP_KERNEL); |
921 | } |
922 | #endif /* CONFIG_OPTPROBES */ |
923 | |
924 | #ifdef CONFIG_KPROBES_ON_FTRACE |
925 | static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { |
926 | .func = kprobe_ftrace_handler, |
927 | .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY, |
928 | }; |
929 | static int kprobe_ftrace_enabled; |
930 | |
931 | /* Must ensure p->addr is really on ftrace */ |
932 | static int prepare_kprobe(struct kprobe *p) |
933 | { |
934 | if (!kprobe_ftrace(p)) |
935 | return arch_prepare_kprobe(p); |
936 | |
937 | return arch_prepare_kprobe_ftrace(p); |
938 | } |
939 | |
940 | /* Caller must lock kprobe_mutex */ |
941 | static void arm_kprobe_ftrace(struct kprobe *p) |
942 | { |
943 | int ret; |
944 | |
945 | ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, |
946 | (unsigned long)p->addr, 0, 0); |
947 | WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); |
948 | kprobe_ftrace_enabled++; |
949 | if (kprobe_ftrace_enabled == 1) { |
950 | ret = register_ftrace_function(&kprobe_ftrace_ops); |
951 | WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); |
952 | } |
953 | } |
954 | |
955 | /* Caller must lock kprobe_mutex */ |
956 | static void disarm_kprobe_ftrace(struct kprobe *p) |
957 | { |
958 | int ret; |
959 | |
960 | kprobe_ftrace_enabled--; |
961 | if (kprobe_ftrace_enabled == 0) { |
962 | ret = unregister_ftrace_function(&kprobe_ftrace_ops); |
963 | WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret); |
964 | } |
965 | ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, |
966 | (unsigned long)p->addr, 1, 0); |
967 | WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); |
968 | } |
969 | #else /* !CONFIG_KPROBES_ON_FTRACE */ |
970 | #define prepare_kprobe(p) arch_prepare_kprobe(p) |
971 | #define arm_kprobe_ftrace(p) do {} while (0) |
972 | #define disarm_kprobe_ftrace(p) do {} while (0) |
973 | #endif |
974 | |
975 | /* Arm a kprobe with text_mutex */ |
976 | static void arm_kprobe(struct kprobe *kp) |
977 | { |
978 | if (unlikely(kprobe_ftrace(kp))) { |
979 | arm_kprobe_ftrace(kp); |
980 | return; |
981 | } |
982 | /* |
983 | * Here, since __arm_kprobe() doesn't use stop_machine(), |
984 | * this doesn't cause deadlock on text_mutex. So, we don't |
985 | * need get_online_cpus(). |
986 | */ |
987 | mutex_lock(&text_mutex); |
988 | __arm_kprobe(kp); |
989 | mutex_unlock(&text_mutex); |
990 | } |
991 | |
992 | /* Disarm a kprobe with text_mutex */ |
993 | static void disarm_kprobe(struct kprobe *kp, bool reopt) |
994 | { |
995 | if (unlikely(kprobe_ftrace(kp))) { |
996 | disarm_kprobe_ftrace(kp); |
997 | return; |
998 | } |
999 | /* Ditto */ |
1000 | mutex_lock(&text_mutex); |
1001 | __disarm_kprobe(kp, reopt); |
1002 | mutex_unlock(&text_mutex); |
1003 | } |
1004 | |
1005 | /* |
1006 | * Aggregate handlers for multiple kprobes support - these handlers |
1007 | * take care of invoking the individual kprobe handlers on p->list |
1008 | */ |
1009 | static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) |
1010 | { |
1011 | struct kprobe *kp; |
1012 | |
1013 | list_for_each_entry_rcu(kp, &p->list, list) { |
1014 | if (kp->pre_handler && likely(!kprobe_disabled(kp))) { |
1015 | set_kprobe_instance(kp); |
1016 | if (kp->pre_handler(kp, regs)) |
1017 | return 1; |
1018 | } |
1019 | reset_kprobe_instance(); |
1020 | } |
1021 | return 0; |
1022 | } |
1023 | NOKPROBE_SYMBOL(aggr_pre_handler); |
1024 | |
1025 | static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, |
1026 | unsigned long flags) |
1027 | { |
1028 | struct kprobe *kp; |
1029 | |
1030 | list_for_each_entry_rcu(kp, &p->list, list) { |
1031 | if (kp->post_handler && likely(!kprobe_disabled(kp))) { |
1032 | set_kprobe_instance(kp); |
1033 | kp->post_handler(kp, regs, flags); |
1034 | reset_kprobe_instance(); |
1035 | } |
1036 | } |
1037 | } |
1038 | NOKPROBE_SYMBOL(aggr_post_handler); |
1039 | |
1040 | static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, |
1041 | int trapnr) |
1042 | { |
1043 | struct kprobe *cur = __this_cpu_read(kprobe_instance); |
1044 | |
1045 | /* |
1046 | * If we faulted "during" the execution of a user-specified |
1047 | * probe handler, invoke just that probe's fault handler. |
1048 | */ |
1049 | if (cur && cur->fault_handler) { |
1050 | if (cur->fault_handler(cur, regs, trapnr)) |
1051 | return 1; |
1052 | } |
1053 | return 0; |
1054 | } |
1055 | NOKPROBE_SYMBOL(aggr_fault_handler); |
1056 | |
1057 | static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs) |
1058 | { |
1059 | struct kprobe *cur = __this_cpu_read(kprobe_instance); |
1060 | int ret = 0; |
1061 | |
1062 | if (cur && cur->break_handler) { |
1063 | if (cur->break_handler(cur, regs)) |
1064 | ret = 1; |
1065 | } |
1066 | reset_kprobe_instance(); |
1067 | return ret; |
1068 | } |
1069 | NOKPROBE_SYMBOL(aggr_break_handler); |
1070 | |
1071 | /* Walks the list and increments nmissed count for multiprobe case */ |
1072 | void kprobes_inc_nmissed_count(struct kprobe *p) |
1073 | { |
1074 | struct kprobe *kp; |
1075 | if (!kprobe_aggrprobe(p)) { |
1076 | p->nmissed++; |
1077 | } else { |
1078 | list_for_each_entry_rcu(kp, &p->list, list) |
1079 | kp->nmissed++; |
1080 | } |
1081 | return; |
1082 | } |
1083 | NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); |
1084 | |
1085 | void recycle_rp_inst(struct kretprobe_instance *ri, |
1086 | struct hlist_head *head) |
1087 | { |
1088 | struct kretprobe *rp = ri->rp; |
1089 | |
1090 | /* remove rp inst off the rprobe_inst_table */ |
1091 | hlist_del(&ri->hlist); |
1092 | INIT_HLIST_NODE(&ri->hlist); |
1093 | if (likely(rp)) { |
1094 | raw_spin_lock(&rp->lock); |
1095 | hlist_add_head(&ri->hlist, &rp->free_instances); |
1096 | raw_spin_unlock(&rp->lock); |
1097 | } else |
1098 | /* Unregistering */ |
1099 | hlist_add_head(&ri->hlist, head); |
1100 | } |
1101 | NOKPROBE_SYMBOL(recycle_rp_inst); |
1102 | |
1103 | void kretprobe_hash_lock(struct task_struct *tsk, |
1104 | struct hlist_head **head, unsigned long *flags) |
1105 | __acquires(hlist_lock) |
1106 | { |
1107 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
1108 | raw_spinlock_t *hlist_lock; |
1109 | |
1110 | *head = &kretprobe_inst_table[hash]; |
1111 | hlist_lock = kretprobe_table_lock_ptr(hash); |
1112 | raw_spin_lock_irqsave(hlist_lock, *flags); |
1113 | } |
1114 | NOKPROBE_SYMBOL(kretprobe_hash_lock); |
1115 | |
1116 | static void kretprobe_table_lock(unsigned long hash, |
1117 | unsigned long *flags) |
1118 | __acquires(hlist_lock) |
1119 | { |
1120 | raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
1121 | raw_spin_lock_irqsave(hlist_lock, *flags); |
1122 | } |
1123 | NOKPROBE_SYMBOL(kretprobe_table_lock); |
1124 | |
1125 | void kretprobe_hash_unlock(struct task_struct *tsk, |
1126 | unsigned long *flags) |
1127 | __releases(hlist_lock) |
1128 | { |
1129 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
1130 | raw_spinlock_t *hlist_lock; |
1131 | |
1132 | hlist_lock = kretprobe_table_lock_ptr(hash); |
1133 | raw_spin_unlock_irqrestore(hlist_lock, *flags); |
1134 | } |
1135 | NOKPROBE_SYMBOL(kretprobe_hash_unlock); |
1136 | |
1137 | static void kretprobe_table_unlock(unsigned long hash, |
1138 | unsigned long *flags) |
1139 | __releases(hlist_lock) |
1140 | { |
1141 | raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
1142 | raw_spin_unlock_irqrestore(hlist_lock, *flags); |
1143 | } |
1144 | NOKPROBE_SYMBOL(kretprobe_table_unlock); |
1145 | |
1146 | /* |
1147 | * This function is called from finish_task_switch when task tk becomes dead, |
1148 | * so that we can recycle any function-return probe instances associated |
1149 | * with this task. These left over instances represent probed functions |
1150 | * that have been called but will never return. |
1151 | */ |
1152 | void kprobe_flush_task(struct task_struct *tk) |
1153 | { |
1154 | struct kretprobe_instance *ri; |
1155 | struct hlist_head *head, empty_rp; |
1156 | struct hlist_node *tmp; |
1157 | unsigned long hash, flags = 0; |
1158 | |
1159 | if (unlikely(!kprobes_initialized)) |
1160 | /* Early boot. kretprobe_table_locks not yet initialized. */ |
1161 | return; |
1162 | |
1163 | INIT_HLIST_HEAD(&empty_rp); |
1164 | hash = hash_ptr(tk, KPROBE_HASH_BITS); |
1165 | head = &kretprobe_inst_table[hash]; |
1166 | kretprobe_table_lock(hash, &flags); |
1167 | hlist_for_each_entry_safe(ri, tmp, head, hlist) { |
1168 | if (ri->task == tk) |
1169 | recycle_rp_inst(ri, &empty_rp); |
1170 | } |
1171 | kretprobe_table_unlock(hash, &flags); |
1172 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { |
1173 | hlist_del(&ri->hlist); |
1174 | kfree(ri); |
1175 | } |
1176 | } |
1177 | NOKPROBE_SYMBOL(kprobe_flush_task); |
1178 | |
1179 | static inline void free_rp_inst(struct kretprobe *rp) |
1180 | { |
1181 | struct kretprobe_instance *ri; |
1182 | struct hlist_node *next; |
1183 | |
1184 | hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) { |
1185 | hlist_del(&ri->hlist); |
1186 | kfree(ri); |
1187 | } |
1188 | } |
1189 | |
1190 | static void cleanup_rp_inst(struct kretprobe *rp) |
1191 | { |
1192 | unsigned long flags, hash; |
1193 | struct kretprobe_instance *ri; |
1194 | struct hlist_node *next; |
1195 | struct hlist_head *head; |
1196 | |
1197 | /* No race here */ |
1198 | for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { |
1199 | kretprobe_table_lock(hash, &flags); |
1200 | head = &kretprobe_inst_table[hash]; |
1201 | hlist_for_each_entry_safe(ri, next, head, hlist) { |
1202 | if (ri->rp == rp) |
1203 | ri->rp = NULL; |
1204 | } |
1205 | kretprobe_table_unlock(hash, &flags); |
1206 | } |
1207 | free_rp_inst(rp); |
1208 | } |
1209 | NOKPROBE_SYMBOL(cleanup_rp_inst); |
1210 | |
1211 | /* |
1212 | * Add the new probe to ap->list. Fail if this is the |
1213 | * second jprobe at the address - two jprobes can't coexist |
1214 | */ |
1215 | static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) |
1216 | { |
1217 | BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); |
1218 | |
1219 | if (p->break_handler || p->post_handler) |
1220 | unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ |
1221 | |
1222 | if (p->break_handler) { |
1223 | if (ap->break_handler) |
1224 | return -EEXIST; |
1225 | list_add_tail_rcu(&p->list, &ap->list); |
1226 | ap->break_handler = aggr_break_handler; |
1227 | } else |
1228 | list_add_rcu(&p->list, &ap->list); |
1229 | if (p->post_handler && !ap->post_handler) |
1230 | ap->post_handler = aggr_post_handler; |
1231 | |
1232 | return 0; |
1233 | } |
1234 | |
1235 | /* |
1236 | * Fill in the required fields of the "manager kprobe". Replace the |
1237 | * earlier kprobe in the hlist with the manager kprobe |
1238 | */ |
1239 | static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) |
1240 | { |
1241 | /* Copy p's insn slot to ap */ |
1242 | copy_kprobe(p, ap); |
1243 | flush_insn_slot(ap); |
1244 | ap->addr = p->addr; |
1245 | ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; |
1246 | ap->pre_handler = aggr_pre_handler; |
1247 | ap->fault_handler = aggr_fault_handler; |
1248 | /* We don't care about a kprobe which has gone. */ |
1249 | if (p->post_handler && !kprobe_gone(p)) |
1250 | ap->post_handler = aggr_post_handler; |
1251 | if (p->break_handler && !kprobe_gone(p)) |
1252 | ap->break_handler = aggr_break_handler; |
1253 | |
1254 | INIT_LIST_HEAD(&ap->list); |
1255 | INIT_HLIST_NODE(&ap->hlist); |
1256 | |
1257 | list_add_rcu(&p->list, &ap->list); |
1258 | hlist_replace_rcu(&p->hlist, &ap->hlist); |
1259 | } |
1260 | |
1261 | /* |
1262 | * This is the second or subsequent kprobe at the address - handle |
1263 | * the intricacies |
1264 | */ |
1265 | static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) |
1266 | { |
1267 | int ret = 0; |
1268 | struct kprobe *ap = orig_p; |
1269 | |
1270 | /* For preparing optimization, jump_label_text_reserved() is called */ |
1271 | jump_label_lock(); |
1272 | /* |
1273 | * Get online CPUs to avoid a text_mutex deadlock with stop_machine(), |
1274 | * which is invoked by unoptimize_kprobe() in add_new_kprobe(). |
1275 | */ |
1276 | get_online_cpus(); |
1277 | mutex_lock(&text_mutex); |
1278 | |
1279 | if (!kprobe_aggrprobe(orig_p)) { |
1280 | /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ |
1281 | ap = alloc_aggr_kprobe(orig_p); |
1282 | if (!ap) { |
1283 | ret = -ENOMEM; |
1284 | goto out; |
1285 | } |
1286 | init_aggr_kprobe(ap, orig_p); |
1287 | } else if (kprobe_unused(ap)) { |
1288 | /* This probe is going to die. Rescue it */ |
1289 | ret = reuse_unused_kprobe(ap); |
1290 | if (ret) |
1291 | goto out; |
1292 | } |
1293 | |
1294 | if (kprobe_gone(ap)) { |
1295 | /* |
1296 | * We are attempting to insert a new probe at the same location as |
1297 | * a probe that lived in a module vaddr area which has already been |
1298 | * freed. So the instruction slot has already been |
1299 | * released; we need a new slot for the new probe. |
1300 | */ |
1301 | ret = arch_prepare_kprobe(ap); |
1302 | if (ret) |
1303 | /* |
1304 | * Even if we fail to allocate a new slot, we don't need to |
1305 | * free the aggr_kprobe. It will be used next time, or |
1306 | * freed by unregister_kprobe(). |
1307 | */ |
1308 | goto out; |
1309 | |
1310 | /* Prepare optimized instructions if possible. */ |
1311 | prepare_optimized_kprobe(ap); |
1312 | |
1313 | /* |
1314 | * Clear gone flag to prevent allocating new slot again, and |
1315 | * set disabled flag because it is not armed yet. |
1316 | */ |
1317 | ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) |
1318 | | KPROBE_FLAG_DISABLED; |
1319 | } |
1320 | |
1321 | /* Copy ap's insn slot to p */ |
1322 | copy_kprobe(ap, p); |
1323 | ret = add_new_kprobe(ap, p); |
1324 | |
1325 | out: |
1326 | mutex_unlock(&text_mutex); |
1327 | put_online_cpus(); |
1328 | jump_label_unlock(); |
1329 | |
1330 | if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { |
1331 | ap->flags &= ~KPROBE_FLAG_DISABLED; |
1332 | if (!kprobes_all_disarmed) |
1333 | /* Arm the breakpoint again. */ |
1334 | arm_kprobe(ap); |
1335 | } |
1336 | return ret; |
1337 | } |
1338 | |
1339 | bool __weak arch_within_kprobe_blacklist(unsigned long addr) |
1340 | { |
1341 | /* The __kprobes marked functions and entry code must not be probed */ |
1342 | return addr >= (unsigned long)__kprobes_text_start && |
1343 | addr < (unsigned long)__kprobes_text_end; |
1344 | } |
1345 | |
1346 | bool within_kprobe_blacklist(unsigned long addr) |
1347 | { |
1348 | struct kprobe_blacklist_entry *ent; |
1349 | |
1350 | if (arch_within_kprobe_blacklist(addr)) |
1351 | return true; |
1352 | /* |
1353 | * If there exists a kprobe_blacklist, verify and |
1354 | * fail any probe registration in the prohibited area |
1355 | */ |
1356 | list_for_each_entry(ent, &kprobe_blacklist, list) { |
1357 | if (addr >= ent->start_addr && addr < ent->end_addr) |
1358 | return true; |
1359 | } |
1360 | |
1361 | return false; |
1362 | } |
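/*
 * Sketch of how entries reach the two checks above (illustrative, not part
 * of the original file): functions annotated __kprobes land in the
 * .kprobes.text section covered by arch_within_kprobe_blacklist(), while
 * NOKPROBE_SYMBOL() records an entry that is later copied into
 * kprobe_blacklist. Never built, hence the #if 0.
 */
#if 0
static int __kprobes example_section_blacklisted(void)
{
	return 0;			/* lands in .kprobes.text */
}

static int example_list_blacklisted(void)
{
	return 0;
}
NOKPROBE_SYMBOL(example_list_blacklisted);	/* lands on kprobe_blacklist */
#endif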
1363 | |
1364 | /* |
1365 | * If we have a symbol_name argument, look it up and add the offset field |
1366 | * to it. This way, we can specify a relative address to a symbol. |
1367 | * This returns an encoded error if it fails to look up the symbol or if |
1368 | * the combination of parameters is invalid. |
1369 | */ |
1370 | static kprobe_opcode_t *kprobe_addr(struct kprobe *p) |
1371 | { |
1372 | kprobe_opcode_t *addr = p->addr; |
1373 | |
1374 | if ((p->symbol_name && p->addr) || |
1375 | (!p->symbol_name && !p->addr)) |
1376 | goto invalid; |
1377 | |
1378 | if (p->symbol_name) { |
1379 | kprobe_lookup_name(p->symbol_name, addr); |
1380 | if (!addr) |
1381 | return ERR_PTR(-ENOENT); |
1382 | } |
1383 | |
1384 | addr = (kprobe_opcode_t *)(((char *)addr) + p->offset); |
1385 | if (addr) |
1386 | return addr; |
1387 | |
1388 | invalid: |
1389 | return ERR_PTR(-EINVAL); |
1390 | } |
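/*
 * Example of the symbol_name/offset convention resolved above (illustrative,
 * not part of the original file; symbol and offset are placeholders):
 * exactly one of .addr and .symbol_name may be set, and .offset is added to
 * the resolved address. Never built, hence the #if 0.
 */
#if 0
static struct kprobe example_offset_probe = {
	.symbol_name	= "vfs_read",	/* resolved via kprobe_lookup_name() */
	.offset		= 0x4,		/* probe 4 bytes past the symbol */
};
#endif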
1391 | |
1392 | /* Check passed kprobe is valid and return kprobe in kprobe_table. */ |
1393 | static struct kprobe *__get_valid_kprobe(struct kprobe *p) |
1394 | { |
1395 | struct kprobe *ap, *list_p; |
1396 | |
1397 | ap = get_kprobe(p->addr); |
1398 | if (unlikely(!ap)) |
1399 | return NULL; |
1400 | |
1401 | if (p != ap) { |
1402 | list_for_each_entry_rcu(list_p, &ap->list, list) |
1403 | if (list_p == p) |
1404 | /* kprobe p is a valid probe */ |
1405 | goto valid; |
1406 | return NULL; |
1407 | } |
1408 | valid: |
1409 | return ap; |
1410 | } |
1411 | |
1412 | /* Return error if the kprobe is being re-registered */ |
1413 | static inline int check_kprobe_rereg(struct kprobe *p) |
1414 | { |
1415 | int ret = 0; |
1416 | |
1417 | mutex_lock(&kprobe_mutex); |
1418 | if (__get_valid_kprobe(p)) |
1419 | ret = -EINVAL; |
1420 | mutex_unlock(&kprobe_mutex); |
1421 | |
1422 | return ret; |
1423 | } |
1424 | |
1425 | int __weak arch_check_ftrace_location(struct kprobe *p) |
1426 | { |
1427 | unsigned long ftrace_addr; |
1428 | |
1429 | ftrace_addr = ftrace_location((unsigned long)p->addr); |
1430 | if (ftrace_addr) { |
1431 | #ifdef CONFIG_KPROBES_ON_FTRACE |
1432 | /* The given address is not on an instruction boundary */ |
1433 | if ((unsigned long)p->addr != ftrace_addr) |
1434 | return -EILSEQ; |
1435 | p->flags |= KPROBE_FLAG_FTRACE; |
1436 | #else /* !CONFIG_KPROBES_ON_FTRACE */ |
1437 | return -EINVAL; |
1438 | #endif |
1439 | } |
1440 | return 0; |
1441 | } |
1442 | |
1443 | static int check_kprobe_address_safe(struct kprobe *p, |
1444 | struct module **probed_mod) |
1445 | { |
1446 | int ret; |
1447 | |
1448 | ret = arch_check_ftrace_location(p); |
1449 | if (ret) |
1450 | return ret; |
1451 | jump_label_lock(); |
1452 | preempt_disable(); |
1453 | |
1454 | /* Ensure it is not in reserved area nor out of text */ |
1455 | if (!kernel_text_address((unsigned long) p->addr) || |
1456 | within_kprobe_blacklist((unsigned long) p->addr) || |
1457 | jump_label_text_reserved(p->addr, p->addr)) { |
1458 | ret = -EINVAL; |
1459 | goto out; |
1460 | } |
1461 | |
1462 | /* Check if we are probing a module */ |
1463 | *probed_mod = __module_text_address((unsigned long) p->addr); |
1464 | if (*probed_mod) { |
1465 | /* |
1466 | * We must hold a refcount of the probed module while updating |
1467 | * its code to prohibit unexpected unloading. |
1468 | */ |
1469 | if (unlikely(!try_module_get(*probed_mod))) { |
1470 | ret = -ENOENT; |
1471 | goto out; |
1472 | } |
1473 | |
1474 | /* |
1475 | * If the module has already freed .init.text, we can't insert |
1476 | * kprobes there. |
1477 | */ |
1478 | if (within_module_init((unsigned long)p->addr, *probed_mod) && |
1479 | (*probed_mod)->state != MODULE_STATE_COMING) { |
1480 | module_put(*probed_mod); |
1481 | *probed_mod = NULL; |
1482 | ret = -ENOENT; |
1483 | } |
1484 | } |
1485 | out: |
1486 | preempt_enable(); |
1487 | jump_label_unlock(); |
1488 | |
1489 | return ret; |
1490 | } |
1491 | |
1492 | int register_kprobe(struct kprobe *p) |
1493 | { |
1494 | int ret; |
1495 | struct kprobe *old_p; |
1496 | struct module *probed_mod; |
1497 | kprobe_opcode_t *addr; |
1498 | |
1499 | /* Adjust probe address from symbol */ |
1500 | addr = kprobe_addr(p); |
1501 | if (IS_ERR(addr)) |
1502 | return PTR_ERR(addr); |
1503 | p->addr = addr; |
1504 | |
1505 | ret = check_kprobe_rereg(p); |
1506 | if (ret) |
1507 | return ret; |
1508 | |
1509 | /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ |
1510 | p->flags &= KPROBE_FLAG_DISABLED; |
1511 | p->nmissed = 0; |
1512 | INIT_LIST_HEAD(&p->list); |
1513 | |
1514 | ret = check_kprobe_address_safe(p, &probed_mod); |
1515 | if (ret) |
1516 | return ret; |
1517 | |
1518 | mutex_lock(&kprobe_mutex); |
1519 | |
1520 | old_p = get_kprobe(p->addr); |
1521 | if (old_p) { |
1522 | /* Since this may unoptimize old_p, text_mutex is taken inside. */ |
1523 | ret = register_aggr_kprobe(old_p, p); |
1524 | goto out; |
1525 | } |
1526 | |
1527 | mutex_lock(&text_mutex); /* Avoiding text modification */ |
1528 | ret = prepare_kprobe(p); |
1529 | mutex_unlock(&text_mutex); |
1530 | if (ret) |
1531 | goto out; |
1532 | |
1533 | INIT_HLIST_NODE(&p->hlist); |
1534 | hlist_add_head_rcu(&p->hlist, |
1535 | &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); |
1536 | |
1537 | if (!kprobes_all_disarmed && !kprobe_disabled(p)) |
1538 | arm_kprobe(p); |
1539 | |
1540 | /* Try to optimize kprobe */ |
1541 | try_to_optimize_kprobe(p); |
1542 | |
1543 | out: |
1544 | mutex_unlock(&kprobe_mutex); |
1545 | |
1546 | if (probed_mod) |
1547 | module_put(probed_mod); |
1548 | |
1549 | return ret; |
1550 | } |
1551 | EXPORT_SYMBOL_GPL(register_kprobe); |
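/*
 * Minimal usage sketch for the API exported above (illustrative, in the
 * spirit of samples/kprobes/kprobe_example.c; the probed symbol "do_fork"
 * and the handler body are placeholders). Never built, hence the #if 0.
 */
#if 0
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre_handler: probe hit at %p\n", p->addr);
	return 0;	/* 0 lets the original instruction be single-stepped */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_pre_handler,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);	/* arms the breakpoint */
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}
#endif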
1552 | |
1553 | /* Check if all probes on the aggrprobe are disabled */ |
1554 | static int aggr_kprobe_disabled(struct kprobe *ap) |
1555 | { |
1556 | struct kprobe *kp; |
1557 | |
1558 | list_for_each_entry_rcu(kp, &ap->list, list) |
1559 | if (!kprobe_disabled(kp)) |
1560 | /* |
1561 | * There is an active probe on the list. |
1562 | * We can't disable this ap. |
1563 | */ |
1564 | return 0; |
1565 | |
1566 | return 1; |
1567 | } |
1568 | |
1569 | /* Disable one kprobe: Must be called with kprobe_mutex held */ |
1570 | static struct kprobe *__disable_kprobe(struct kprobe *p) |
1571 | { |
1572 | struct kprobe *orig_p; |
1573 | |
1574 | /* Get an original kprobe for return */ |
1575 | orig_p = __get_valid_kprobe(p); |
1576 | if (unlikely(orig_p == NULL)) |
1577 | return NULL; |
1578 | |
1579 | if (!kprobe_disabled(p)) { |
1580 | /* Disable probe if it is a child probe */ |
1581 | if (p != orig_p) |
1582 | p->flags |= KPROBE_FLAG_DISABLED; |
1583 | |
1584 | /* Try to disarm and disable this/parent probe */ |
1585 | if (p == orig_p || aggr_kprobe_disabled(orig_p)) { |
1586 | /* |
1587 | * If kprobes_all_disarmed is set, orig_p |
1588 | * should have already been disarmed, so |
1589 | * skip the unneeded disarming process. |
1590 | */ |
1591 | if (!kprobes_all_disarmed) |
1592 | disarm_kprobe(orig_p, true); |
1593 | orig_p->flags |= KPROBE_FLAG_DISABLED; |
1594 | } |
1595 | } |
1596 | |
1597 | return orig_p; |
1598 | } |
1599 | |
1600 | /* |
1601 | * Unregister a kprobe without a scheduler synchronization. |
1602 | */ |
1603 | static int __unregister_kprobe_top(struct kprobe *p) |
1604 | { |
1605 | struct kprobe *ap, *list_p; |
1606 | |
1607 | /* Disable kprobe. This will disarm it if needed. */ |
1608 | ap = __disable_kprobe(p); |
1609 | if (ap == NULL) |
1610 | return -EINVAL; |
1611 | |
1612 | if (ap == p) |
1613 | /* |
1614 | * This probe is an independent (and non-optimized) kprobe |
1615 | * (not an aggrprobe). Remove from the hash list. |
1616 | */ |
1617 | goto disarmed; |
1618 | |
1619 | /* Following process expects this probe is an aggrprobe */ |
1620 | WARN_ON(!kprobe_aggrprobe(ap)); |
1621 | |
1622 | if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) |
1623 | /* |
1624 | * !disarmed can happen if the probe is under delayed |
1625 | * unoptimizing. |
1626 | */ |
1627 | goto disarmed; |
1628 | else { |
1629 | /* If the probe being disabled has special handlers, update the aggrprobe */ |
1630 | if (p->break_handler && !kprobe_gone(p)) |
1631 | ap->break_handler = NULL; |
1632 | if (p->post_handler && !kprobe_gone(p)) { |
1633 | list_for_each_entry_rcu(list_p, &ap->list, list) { |
1634 | if ((list_p != p) && (list_p->post_handler)) |
1635 | goto noclean; |
1636 | } |
1637 | ap->post_handler = NULL; |
1638 | } |
1639 | noclean: |
1640 | /* |
1641 | * Remove from the aggrprobe: this path will do nothing in |
1642 | * __unregister_kprobe_bottom(). |
1643 | */ |
1644 | list_del_rcu(&p->list); |
1645 | if (!kprobe_disabled(ap) && !kprobes_all_disarmed) |
1646 | /* |
1647 | * Try to optimize this probe again, because post |
1648 | * handler may have been changed. |
1649 | */ |
1650 | optimize_kprobe(ap); |
1651 | } |
1652 | return 0; |
1653 | |
1654 | disarmed: |
1655 | BUG_ON(!kprobe_disarmed(ap)); |
1656 | hlist_del_rcu(&ap->hlist); |
1657 | return 0; |
1658 | } |
1659 | |
1660 | static void __unregister_kprobe_bottom(struct kprobe *p) |
1661 | { |
1662 | struct kprobe *ap; |
1663 | |
1664 | if (list_empty(&p->list)) |
1665 | /* This is an independent kprobe */ |
1666 | arch_remove_kprobe(p); |
1667 | else if (list_is_singular(&p->list)) { |
1668 | /* This is the last child of an aggrprobe */ |
1669 | ap = list_entry(p->list.next, struct kprobe, list); |
1670 | list_del(&p->list); |
1671 | free_aggr_kprobe(ap); |
1672 | } |
1673 | /* Otherwise, do nothing. */ |
1674 | } |
1675 | |
1676 | int register_kprobes(struct kprobe **kps, int num) |
1677 | { |
1678 | int i, ret = 0; |
1679 | |
1680 | if (num <= 0) |
1681 | return -EINVAL; |
1682 | for (i = 0; i < num; i++) { |
1683 | ret = register_kprobe(kps[i]); |
1684 | if (ret < 0) { |
1685 | if (i > 0) |
1686 | unregister_kprobes(kps, i); |
1687 | break; |
1688 | } |
1689 | } |
1690 | return ret; |
1691 | } |
1692 | EXPORT_SYMBOL_GPL(register_kprobes); |
1693 | |
1694 | void unregister_kprobe(struct kprobe *p) |
1695 | { |
1696 | unregister_kprobes(&p, 1); |
1697 | } |
1698 | EXPORT_SYMBOL_GPL(unregister_kprobe); |
1699 | |
1700 | void unregister_kprobes(struct kprobe **kps, int num) |
1701 | { |
1702 | int i; |
1703 | |
1704 | if (num <= 0) |
1705 | return; |
1706 | mutex_lock(&kprobe_mutex); |
1707 | for (i = 0; i < num; i++) |
1708 | if (__unregister_kprobe_top(kps[i]) < 0) |
1709 | kps[i]->addr = NULL; |
1710 | mutex_unlock(&kprobe_mutex); |
1711 | |
1712 | synchronize_sched(); |
1713 | for (i = 0; i < num; i++) |
1714 | if (kps[i]->addr) |
1715 | __unregister_kprobe_bottom(kps[i]); |
1716 | } |
1717 | EXPORT_SYMBOL_GPL(unregister_kprobes); |
1718 | |
1719 | static struct notifier_block kprobe_exceptions_nb = { |
1720 | .notifier_call = kprobe_exceptions_notify, |
1721 | .priority = 0x7fffffff /* we need to be notified first */ |
1722 | }; |
1723 | |
1724 | unsigned long __weak arch_deref_entry_point(void *entry) |
1725 | { |
1726 | return (unsigned long)entry; |
1727 | } |
1728 | |
1729 | int register_jprobes(struct jprobe **jps, int num) |
1730 | { |
1731 | struct jprobe *jp; |
1732 | int ret = 0, i; |
1733 | |
1734 | if (num <= 0) |
1735 | return -EINVAL; |
1736 | for (i = 0; i < num; i++) { |
1737 | unsigned long addr, offset; |
1738 | jp = jps[i]; |
1739 | addr = arch_deref_entry_point(jp->entry); |
1740 | |
1741 | /* Verify probepoint is a function entry point */ |
1742 | if (kallsyms_lookup_size_offset(addr, NULL, &offset) && |
1743 | offset == 0) { |
1744 | jp->kp.pre_handler = setjmp_pre_handler; |
1745 | jp->kp.break_handler = longjmp_break_handler; |
1746 | ret = register_kprobe(&jp->kp); |
1747 | } else |
1748 | ret = -EINVAL; |
1749 | |
1750 | if (ret < 0) { |
1751 | if (i > 0) |
1752 | unregister_jprobes(jps, i); |
1753 | break; |
1754 | } |
1755 | } |
1756 | return ret; |
1757 | } |
1758 | EXPORT_SYMBOL_GPL(register_jprobes); |
1759 | |
1760 | int register_jprobe(struct jprobe *jp) |
1761 | { |
1762 | return register_jprobes(&jp, 1); |
1763 | } |
1764 | EXPORT_SYMBOL_GPL(register_jprobe); |
1765 | |
1766 | void unregister_jprobe(struct jprobe *jp) |
1767 | { |
1768 | unregister_jprobes(&jp, 1); |
1769 | } |
1770 | EXPORT_SYMBOL_GPL(unregister_jprobe); |
1771 | |
1772 | void unregister_jprobes(struct jprobe **jps, int num) |
1773 | { |
1774 | int i; |
1775 | |
1776 | if (num <= 0) |
1777 | return; |
1778 | mutex_lock(&kprobe_mutex); |
1779 | for (i = 0; i < num; i++) |
1780 | if (__unregister_kprobe_top(&jps[i]->kp) < 0) |
1781 | jps[i]->kp.addr = NULL; |
1782 | mutex_unlock(&kprobe_mutex); |
1783 | |
1784 | synchronize_sched(); |
1785 | for (i = 0; i < num; i++) { |
1786 | if (jps[i]->kp.addr) |
1787 | __unregister_kprobe_bottom(&jps[i]->kp); |
1788 | } |
1789 | } |
1790 | EXPORT_SYMBOL_GPL(unregister_jprobes); |
1791 | |
1792 | #ifdef CONFIG_KRETPROBES |
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it sets up the return probe.
 */
1797 | static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) |
1798 | { |
1799 | struct kretprobe *rp = container_of(p, struct kretprobe, kp); |
1800 | unsigned long hash, flags = 0; |
1801 | struct kretprobe_instance *ri; |
1802 | |
	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts;
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened.
	 */
1809 | if (unlikely(in_nmi())) { |
1810 | rp->nmissed++; |
1811 | return 0; |
1812 | } |
1813 | |
	/* TODO: consider swapping the RA only after the last pre_handler has fired */
1815 | hash = hash_ptr(current, KPROBE_HASH_BITS); |
1816 | raw_spin_lock_irqsave(&rp->lock, flags); |
1817 | if (!hlist_empty(&rp->free_instances)) { |
1818 | ri = hlist_entry(rp->free_instances.first, |
1819 | struct kretprobe_instance, hlist); |
1820 | hlist_del(&ri->hlist); |
1821 | raw_spin_unlock_irqrestore(&rp->lock, flags); |
1822 | |
1823 | ri->rp = rp; |
1824 | ri->task = current; |
1825 | |
1826 | if (rp->entry_handler && rp->entry_handler(ri, regs)) { |
1827 | raw_spin_lock_irqsave(&rp->lock, flags); |
1828 | hlist_add_head(&ri->hlist, &rp->free_instances); |
1829 | raw_spin_unlock_irqrestore(&rp->lock, flags); |
1830 | return 0; |
1831 | } |
1832 | |
1833 | arch_prepare_kretprobe(ri, regs); |
1834 | |
1835 | /* XXX(hch): why is there no hlist_move_head? */ |
1836 | INIT_HLIST_NODE(&ri->hlist); |
1837 | kretprobe_table_lock(hash, &flags); |
1838 | hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]); |
1839 | kretprobe_table_unlock(hash, &flags); |
1840 | } else { |
1841 | rp->nmissed++; |
1842 | raw_spin_unlock_irqrestore(&rp->lock, flags); |
1843 | } |
1844 | return 0; |
1845 | } |
1846 | NOKPROBE_SYMBOL(pre_handler_kretprobe); |
1847 | |
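/*
 * Register a return probe. The caller sets up rp->kp (probed symbol or
 * address), rp->handler, and optionally rp->entry_handler, rp->data_size
 * and rp->maxactive; the remaining fields are initialized here.
 *
 * Illustrative sketch only -- the symbol and handler names below are
 * placeholders, not part of this file:
 *
 *	static struct kretprobe my_kretprobe = {
 *		.kp.symbol_name	= "some_traced_function",
 *		.handler	= my_return_handler,
 *		.maxactive	= 20,
 *	};
 *	ret = register_kretprobe(&my_kretprobe);
 */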
1848 | int register_kretprobe(struct kretprobe *rp) |
1849 | { |
1850 | int ret = 0; |
1851 | struct kretprobe_instance *inst; |
1852 | int i; |
1853 | void *addr; |
1854 | |
1855 | if (kretprobe_blacklist_size) { |
1856 | addr = kprobe_addr(&rp->kp); |
1857 | if (IS_ERR(addr)) |
1858 | return PTR_ERR(addr); |
1859 | |
1860 | for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { |
1861 | if (kretprobe_blacklist[i].addr == addr) |
1862 | return -EINVAL; |
1863 | } |
1864 | } |
1865 | |
1866 | rp->kp.pre_handler = pre_handler_kretprobe; |
1867 | rp->kp.post_handler = NULL; |
1868 | rp->kp.fault_handler = NULL; |
1869 | rp->kp.break_handler = NULL; |
1870 | |
1871 | /* Pre-allocate memory for max kretprobe instances */ |
1872 | if (rp->maxactive <= 0) { |
1873 | #ifdef CONFIG_PREEMPT |
1874 | rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); |
1875 | #else |
1876 | rp->maxactive = num_possible_cpus(); |
1877 | #endif |
1878 | } |
1879 | raw_spin_lock_init(&rp->lock); |
1880 | INIT_HLIST_HEAD(&rp->free_instances); |
1881 | for (i = 0; i < rp->maxactive; i++) { |
1882 | inst = kmalloc(sizeof(struct kretprobe_instance) + |
1883 | rp->data_size, GFP_KERNEL); |
1884 | if (inst == NULL) { |
1885 | free_rp_inst(rp); |
1886 | return -ENOMEM; |
1887 | } |
1888 | INIT_HLIST_NODE(&inst->hlist); |
1889 | hlist_add_head(&inst->hlist, &rp->free_instances); |
1890 | } |
1891 | |
1892 | rp->nmissed = 0; |
1893 | /* Establish function entry probe point */ |
1894 | ret = register_kprobe(&rp->kp); |
1895 | if (ret != 0) |
1896 | free_rp_inst(rp); |
1897 | return ret; |
1898 | } |
1899 | EXPORT_SYMBOL_GPL(register_kretprobe); |
1900 | |
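/*
 * Register an array of kretprobes. On the first failure the kretprobes
 * registered so far by this call are unregistered and the error is
 * returned.
 */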
1901 | int register_kretprobes(struct kretprobe **rps, int num) |
1902 | { |
1903 | int ret = 0, i; |
1904 | |
1905 | if (num <= 0) |
1906 | return -EINVAL; |
1907 | for (i = 0; i < num; i++) { |
1908 | ret = register_kretprobe(rps[i]); |
1909 | if (ret < 0) { |
1910 | if (i > 0) |
1911 | unregister_kretprobes(rps, i); |
1912 | break; |
1913 | } |
1914 | } |
1915 | return ret; |
1916 | } |
1917 | EXPORT_SYMBOL_GPL(register_kretprobes); |
1918 | |
1919 | void unregister_kretprobe(struct kretprobe *rp) |
1920 | { |
1921 | unregister_kretprobes(&rp, 1); |
1922 | } |
1923 | EXPORT_SYMBOL_GPL(unregister_kretprobe); |
1924 | |
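/*
 * Unregister an array of kretprobes. As with unregister_kprobes(), the
 * probes are unlinked first and freed only after synchronize_sched();
 * in addition, any outstanding kretprobe instances are cleaned up.
 */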
1925 | void unregister_kretprobes(struct kretprobe **rps, int num) |
1926 | { |
1927 | int i; |
1928 | |
1929 | if (num <= 0) |
1930 | return; |
1931 | mutex_lock(&kprobe_mutex); |
1932 | for (i = 0; i < num; i++) |
1933 | if (__unregister_kprobe_top(&rps[i]->kp) < 0) |
1934 | rps[i]->kp.addr = NULL; |
1935 | mutex_unlock(&kprobe_mutex); |
1936 | |
1937 | synchronize_sched(); |
1938 | for (i = 0; i < num; i++) { |
1939 | if (rps[i]->kp.addr) { |
1940 | __unregister_kprobe_bottom(&rps[i]->kp); |
1941 | cleanup_rp_inst(rps[i]); |
1942 | } |
1943 | } |
1944 | } |
1945 | EXPORT_SYMBOL_GPL(unregister_kretprobes); |
1946 | |
1947 | #else /* CONFIG_KRETPROBES */ |
1948 | int register_kretprobe(struct kretprobe *rp) |
1949 | { |
1950 | return -ENOSYS; |
1951 | } |
1952 | EXPORT_SYMBOL_GPL(register_kretprobe); |
1953 | |
1954 | int register_kretprobes(struct kretprobe **rps, int num) |
1955 | { |
1956 | return -ENOSYS; |
1957 | } |
1958 | EXPORT_SYMBOL_GPL(register_kretprobes); |
1959 | |
1960 | void unregister_kretprobe(struct kretprobe *rp) |
1961 | { |
1962 | } |
1963 | EXPORT_SYMBOL_GPL(unregister_kretprobe); |
1964 | |
1965 | void unregister_kretprobes(struct kretprobe **rps, int num) |
1966 | { |
1967 | } |
1968 | EXPORT_SYMBOL_GPL(unregister_kretprobes); |
1969 | |
1970 | static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) |
1971 | { |
1972 | return 0; |
1973 | } |
1974 | NOKPROBE_SYMBOL(pre_handler_kretprobe); |
1975 | |
1976 | #endif /* CONFIG_KRETPROBES */ |
1977 | |
1978 | /* Set the kprobe gone and remove its instruction buffer. */ |
1979 | static void kill_kprobe(struct kprobe *p) |
1980 | { |
1981 | struct kprobe *kp; |
1982 | |
1983 | p->flags |= KPROBE_FLAG_GONE; |
1984 | if (kprobe_aggrprobe(p)) { |
1985 | /* |
1986 | * If this is an aggr_kprobe, we have to list all the |
1987 | * chained probes and mark them GONE. |
1988 | */ |
1989 | list_for_each_entry_rcu(kp, &p->list, list) |
1990 | kp->flags |= KPROBE_FLAG_GONE; |
1991 | p->post_handler = NULL; |
1992 | p->break_handler = NULL; |
1993 | kill_optimized_kprobe(p); |
1994 | } |
1995 | /* |
1996 | * Here, we can remove insn_slot safely, because no thread calls |
1997 | * the original probed function (which will be freed soon) any more. |
1998 | */ |
1999 | arch_remove_kprobe(p); |
2000 | } |
2001 | |
2002 | /* Disable one kprobe */ |
2003 | int disable_kprobe(struct kprobe *kp) |
2004 | { |
2005 | int ret = 0; |
2006 | |
2007 | mutex_lock(&kprobe_mutex); |
2008 | |
2009 | /* Disable this kprobe */ |
2010 | if (__disable_kprobe(kp) == NULL) |
2011 | ret = -EINVAL; |
2012 | |
2013 | mutex_unlock(&kprobe_mutex); |
2014 | return ret; |
2015 | } |
2016 | EXPORT_SYMBOL_GPL(disable_kprobe); |
2017 | |
2018 | /* Enable one kprobe */ |
2019 | int enable_kprobe(struct kprobe *kp) |
2020 | { |
2021 | int ret = 0; |
2022 | struct kprobe *p; |
2023 | |
2024 | mutex_lock(&kprobe_mutex); |
2025 | |
2026 | /* Check whether specified probe is valid. */ |
2027 | p = __get_valid_kprobe(kp); |
2028 | if (unlikely(p == NULL)) { |
2029 | ret = -EINVAL; |
2030 | goto out; |
2031 | } |
2032 | |
2033 | if (kprobe_gone(kp)) { |
		/* This kprobe has gone; we can't enable it. */
2035 | ret = -EINVAL; |
2036 | goto out; |
2037 | } |
2038 | |
2039 | if (p != kp) |
2040 | kp->flags &= ~KPROBE_FLAG_DISABLED; |
2041 | |
2042 | if (!kprobes_all_disarmed && kprobe_disabled(p)) { |
2043 | p->flags &= ~KPROBE_FLAG_DISABLED; |
2044 | arm_kprobe(p); |
2045 | } |
2046 | out: |
2047 | mutex_unlock(&kprobe_mutex); |
2048 | return ret; |
2049 | } |
2050 | EXPORT_SYMBOL_GPL(enable_kprobe); |
2051 | |
2052 | void dump_kprobe(struct kprobe *kp) |
2053 | { |
2054 | printk(KERN_WARNING "Dumping kprobe:\n"); |
2055 | printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n", |
2056 | kp->symbol_name, kp->addr, kp->offset); |
2057 | } |
2058 | NOKPROBE_SYMBOL(dump_kprobe); |
2059 | |
2060 | /* |
2061 | * Lookup and populate the kprobe_blacklist. |
2062 | * |
2063 | * Unlike the kretprobe blacklist, we'll need to determine |
2064 | * the range of addresses that belong to the said functions, |
2065 | * since a kprobe need not necessarily be at the beginning |
2066 | * of a function. |
2067 | */ |
2068 | static int __init populate_kprobe_blacklist(unsigned long *start, |
2069 | unsigned long *end) |
2070 | { |
2071 | unsigned long *iter; |
2072 | struct kprobe_blacklist_entry *ent; |
2073 | unsigned long entry, offset = 0, size = 0; |
2074 | |
2075 | for (iter = start; iter < end; iter++) { |
2076 | entry = arch_deref_entry_point((void *)*iter); |
2077 | |
2078 | if (!kernel_text_address(entry) || |
2079 | !kallsyms_lookup_size_offset(entry, &size, &offset)) { |
			pr_err("Failed to find blacklist entry at %p\n",
				(void *)entry);
2082 | continue; |
2083 | } |
2084 | |
2085 | ent = kmalloc(sizeof(*ent), GFP_KERNEL); |
2086 | if (!ent) |
2087 | return -ENOMEM; |
2088 | ent->start_addr = entry; |
2089 | ent->end_addr = entry + size; |
2090 | INIT_LIST_HEAD(&ent->list); |
2091 | list_add_tail(&ent->list, &kprobe_blacklist); |
2092 | } |
2093 | return 0; |
2094 | } |
2095 | |
2096 | /* Module notifier call back, checking kprobes on the module */ |
2097 | static int kprobes_module_callback(struct notifier_block *nb, |
2098 | unsigned long val, void *data) |
2099 | { |
2100 | struct module *mod = data; |
2101 | struct hlist_head *head; |
2102 | struct kprobe *p; |
2103 | unsigned int i; |
2104 | int checkcore = (val == MODULE_STATE_GOING); |
2105 | |
2106 | if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) |
2107 | return NOTIFY_DONE; |
2108 | |
	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable the kprobes which have been inserted in those sections.
	 */
2115 | mutex_lock(&kprobe_mutex); |
2116 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2117 | head = &kprobe_table[i]; |
2118 | hlist_for_each_entry_rcu(p, head, hlist) |
2119 | if (within_module_init((unsigned long)p->addr, mod) || |
2120 | (checkcore && |
2121 | within_module_core((unsigned long)p->addr, mod))) { |
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
2127 | kill_kprobe(p); |
2128 | } |
2129 | } |
2130 | mutex_unlock(&kprobe_mutex); |
2131 | return NOTIFY_DONE; |
2132 | } |
2133 | |
2134 | static struct notifier_block kprobe_module_nb = { |
2135 | .notifier_call = kprobes_module_callback, |
2136 | .priority = 0 |
2137 | }; |
2138 | |
2139 | /* Markers of _kprobe_blacklist section */ |
2140 | extern unsigned long __start_kprobe_blacklist[]; |
2141 | extern unsigned long __stop_kprobe_blacklist[]; |
2142 | |
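/*
 * Subsystem initialization: set up the hash tables and their locks,
 * populate the kprobe blacklist, resolve the kretprobe blacklist
 * symbols, run arch_init_kprobes(), and register the die and module
 * notifiers. Invoked via module_init() below.
 */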
2143 | static int __init init_kprobes(void) |
2144 | { |
2145 | int i, err = 0; |
2146 | |
2147 | /* FIXME allocate the probe table, currently defined statically */ |
2148 | /* initialize all list heads */ |
2149 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2150 | INIT_HLIST_HEAD(&kprobe_table[i]); |
2151 | INIT_HLIST_HEAD(&kretprobe_inst_table[i]); |
2152 | raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); |
2153 | } |
2154 | |
2155 | err = populate_kprobe_blacklist(__start_kprobe_blacklist, |
2156 | __stop_kprobe_blacklist); |
2157 | if (err) { |
2158 | pr_err("kprobes: failed to populate blacklist: %d\n", err); |
		pr_err("Please take care when using kprobes.\n");
2160 | } |
2161 | |
2162 | if (kretprobe_blacklist_size) { |
2163 | /* lookup the function address from its name */ |
2164 | for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { |
2165 | kprobe_lookup_name(kretprobe_blacklist[i].name, |
2166 | kretprobe_blacklist[i].addr); |
2167 | if (!kretprobe_blacklist[i].addr) |
2168 | printk("kretprobe: lookup failed: %s\n", |
2169 | kretprobe_blacklist[i].name); |
2170 | } |
2171 | } |
2172 | |
2173 | #if defined(CONFIG_OPTPROBES) |
2174 | #if defined(__ARCH_WANT_KPROBES_INSN_SLOT) |
2175 | /* Init kprobe_optinsn_slots */ |
2176 | kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; |
2177 | #endif |
2178 | /* By default, kprobes can be optimized */ |
2179 | kprobes_allow_optimization = true; |
2180 | #endif |
2181 | |
2182 | /* By default, kprobes are armed */ |
2183 | kprobes_all_disarmed = false; |
2184 | |
2185 | err = arch_init_kprobes(); |
2186 | if (!err) |
2187 | err = register_die_notifier(&kprobe_exceptions_nb); |
2188 | if (!err) |
2189 | err = register_module_notifier(&kprobe_module_nb); |
2190 | |
2191 | kprobes_initialized = (err == 0); |
2192 | |
2193 | if (!err) |
2194 | init_test_probes(); |
2195 | return err; |
2196 | } |
2197 | |
2198 | #ifdef CONFIG_DEBUG_FS |
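/*
 * Emit one probe as a line of the debugfs "list" file: address, type
 * ('k' for kprobes, 'r' for kretprobes, 'j' for jprobes), location, and
 * status flags such as [GONE], [DISABLED], [OPTIMIZED] and [FTRACE].
 */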
2199 | static void report_probe(struct seq_file *pi, struct kprobe *p, |
2200 | const char *sym, int offset, char *modname, struct kprobe *pp) |
2201 | { |
2202 | char *kprobe_type; |
2203 | |
2204 | if (p->pre_handler == pre_handler_kretprobe) |
2205 | kprobe_type = "r"; |
2206 | else if (p->pre_handler == setjmp_pre_handler) |
2207 | kprobe_type = "j"; |
2208 | else |
2209 | kprobe_type = "k"; |
2210 | |
2211 | if (sym) |
2212 | seq_printf(pi, "%p %s %s+0x%x %s ", |
2213 | p->addr, kprobe_type, sym, offset, |
2214 | (modname ? modname : " ")); |
2215 | else |
2216 | seq_printf(pi, "%p %s %p ", |
2217 | p->addr, kprobe_type, p->addr); |
2218 | |
2219 | if (!pp) |
2220 | pp = p; |
2221 | seq_printf(pi, "%s%s%s%s\n", |
2222 | (kprobe_gone(p) ? "[GONE]" : ""), |
2223 | ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), |
2224 | (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), |
2225 | (kprobe_ftrace(pp) ? "[FTRACE]" : "")); |
2226 | } |
2227 | |
2228 | static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) |
2229 | { |
2230 | return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; |
2231 | } |
2232 | |
2233 | static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) |
2234 | { |
2235 | (*pos)++; |
2236 | if (*pos >= KPROBE_TABLE_SIZE) |
2237 | return NULL; |
2238 | return pos; |
2239 | } |
2240 | |
2241 | static void kprobe_seq_stop(struct seq_file *f, void *v) |
2242 | { |
2243 | /* Nothing to do */ |
2244 | } |
2245 | |
2246 | static int show_kprobe_addr(struct seq_file *pi, void *v) |
2247 | { |
2248 | struct hlist_head *head; |
2249 | struct kprobe *p, *kp; |
2250 | const char *sym = NULL; |
2251 | unsigned int i = *(loff_t *) v; |
2252 | unsigned long offset = 0; |
2253 | char *modname, namebuf[KSYM_NAME_LEN]; |
2254 | |
2255 | head = &kprobe_table[i]; |
2256 | preempt_disable(); |
2257 | hlist_for_each_entry_rcu(p, head, hlist) { |
2258 | sym = kallsyms_lookup((unsigned long)p->addr, NULL, |
2259 | &offset, &modname, namebuf); |
2260 | if (kprobe_aggrprobe(p)) { |
2261 | list_for_each_entry_rcu(kp, &p->list, list) |
2262 | report_probe(pi, kp, sym, offset, modname, p); |
2263 | } else |
2264 | report_probe(pi, p, sym, offset, modname, NULL); |
2265 | } |
2266 | preempt_enable(); |
2267 | return 0; |
2268 | } |
2269 | |
2270 | static const struct seq_operations kprobes_seq_ops = { |
2271 | .start = kprobe_seq_start, |
2272 | .next = kprobe_seq_next, |
2273 | .stop = kprobe_seq_stop, |
2274 | .show = show_kprobe_addr |
2275 | }; |
2276 | |
2277 | static int kprobes_open(struct inode *inode, struct file *filp) |
2278 | { |
2279 | return seq_open(filp, &kprobes_seq_ops); |
2280 | } |
2281 | |
2282 | static const struct file_operations debugfs_kprobes_operations = { |
2283 | .open = kprobes_open, |
2284 | .read = seq_read, |
2285 | .llseek = seq_lseek, |
2286 | .release = seq_release, |
2287 | }; |
2288 | |
/* kprobes/blacklist -- shows which functions cannot be probed */
2290 | static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) |
2291 | { |
2292 | return seq_list_start(&kprobe_blacklist, *pos); |
2293 | } |
2294 | |
2295 | static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) |
2296 | { |
2297 | return seq_list_next(v, &kprobe_blacklist, pos); |
2298 | } |
2299 | |
2300 | static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) |
2301 | { |
2302 | struct kprobe_blacklist_entry *ent = |
2303 | list_entry(v, struct kprobe_blacklist_entry, list); |
2304 | |
2305 | seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr, |
2306 | (void *)ent->end_addr, (void *)ent->start_addr); |
2307 | return 0; |
2308 | } |
2309 | |
2310 | static const struct seq_operations kprobe_blacklist_seq_ops = { |
2311 | .start = kprobe_blacklist_seq_start, |
2312 | .next = kprobe_blacklist_seq_next, |
2313 | .stop = kprobe_seq_stop, /* Reuse void function */ |
2314 | .show = kprobe_blacklist_seq_show, |
2315 | }; |
2316 | |
2317 | static int kprobe_blacklist_open(struct inode *inode, struct file *filp) |
2318 | { |
2319 | return seq_open(filp, &kprobe_blacklist_seq_ops); |
2320 | } |
2321 | |
2322 | static const struct file_operations debugfs_kprobe_blacklist_ops = { |
2323 | .open = kprobe_blacklist_open, |
2324 | .read = seq_read, |
2325 | .llseek = seq_lseek, |
2326 | .release = seq_release, |
2327 | }; |
2328 | |
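/* Re-arm every registered kprobe that is not individually disabled. */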
2329 | static void arm_all_kprobes(void) |
2330 | { |
2331 | struct hlist_head *head; |
2332 | struct kprobe *p; |
2333 | unsigned int i; |
2334 | |
2335 | mutex_lock(&kprobe_mutex); |
2336 | |
2337 | /* If kprobes are armed, just return */ |
2338 | if (!kprobes_all_disarmed) |
2339 | goto already_enabled; |
2340 | |
2341 | /* |
2342 | * optimize_kprobe() called by arm_kprobe() checks |
2343 | * kprobes_all_disarmed, so set kprobes_all_disarmed before |
	 * calling arm_kprobe().
2345 | */ |
2346 | kprobes_all_disarmed = false; |
	/* Arming kprobes doesn't optimize the kprobes themselves */
2348 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2349 | head = &kprobe_table[i]; |
2350 | hlist_for_each_entry_rcu(p, head, hlist) |
2351 | if (!kprobe_disabled(p)) |
2352 | arm_kprobe(p); |
2353 | } |
2354 | |
2355 | printk(KERN_INFO "Kprobes globally enabled\n"); |
2356 | |
2357 | already_enabled: |
2358 | mutex_unlock(&kprobe_mutex); |
2359 | return; |
2360 | } |
2361 | |
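/*
 * Disarm every kprobe except arch trampoline probes and those already
 * disabled, then wait for the optimizer to finish the delayed work.
 */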
2362 | static void disarm_all_kprobes(void) |
2363 | { |
2364 | struct hlist_head *head; |
2365 | struct kprobe *p; |
2366 | unsigned int i; |
2367 | |
2368 | mutex_lock(&kprobe_mutex); |
2369 | |
2370 | /* If kprobes are already disarmed, just return */ |
2371 | if (kprobes_all_disarmed) { |
2372 | mutex_unlock(&kprobe_mutex); |
2373 | return; |
2374 | } |
2375 | |
2376 | kprobes_all_disarmed = true; |
2377 | printk(KERN_INFO "Kprobes globally disabled\n"); |
2378 | |
2379 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2380 | head = &kprobe_table[i]; |
2381 | hlist_for_each_entry_rcu(p, head, hlist) { |
2382 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) |
2383 | disarm_kprobe(p, false); |
2384 | } |
2385 | } |
2386 | mutex_unlock(&kprobe_mutex); |
2387 | |
	/* Wait for the optimizer to finish disarming all kprobes */
2389 | wait_for_kprobe_optimizer(); |
2390 | } |
2391 | |
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * it becomes available.
 */
2397 | static ssize_t read_enabled_file_bool(struct file *file, |
2398 | char __user *user_buf, size_t count, loff_t *ppos) |
2399 | { |
2400 | char buf[3]; |
2401 | |
2402 | if (!kprobes_all_disarmed) |
2403 | buf[0] = '1'; |
2404 | else |
2405 | buf[0] = '0'; |
2406 | buf[1] = '\n'; |
2407 | buf[2] = 0x00; |
2408 | return simple_read_from_buffer(user_buf, count, ppos, buf, 2); |
2409 | } |
2410 | |
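/*
 * Writing '1'/'y'/'Y' arms all kprobes and '0'/'n'/'N' disarms them.
 * From user space this is typically driven through debugfs, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 */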
2411 | static ssize_t write_enabled_file_bool(struct file *file, |
2412 | const char __user *user_buf, size_t count, loff_t *ppos) |
2413 | { |
2414 | char buf[32]; |
2415 | size_t buf_size; |
2416 | |
2417 | buf_size = min(count, (sizeof(buf)-1)); |
2418 | if (copy_from_user(buf, user_buf, buf_size)) |
2419 | return -EFAULT; |
2420 | |
2421 | buf[buf_size] = '\0'; |
2422 | switch (buf[0]) { |
2423 | case 'y': |
2424 | case 'Y': |
2425 | case '1': |
2426 | arm_all_kprobes(); |
2427 | break; |
2428 | case 'n': |
2429 | case 'N': |
2430 | case '0': |
2431 | disarm_all_kprobes(); |
2432 | break; |
2433 | default: |
2434 | return -EINVAL; |
2435 | } |
2436 | |
2437 | return count; |
2438 | } |
2439 | |
2440 | static const struct file_operations fops_kp = { |
2441 | .read = read_enabled_file_bool, |
2442 | .write = write_enabled_file_bool, |
2443 | .llseek = default_llseek, |
2444 | }; |
2445 | |
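/*
 * Create the debugfs interface under <debugfs>/kprobes: "list" (all
 * registered probes), "enabled" (global arm/disarm switch) and
 * "blacklist" (unprobeable address ranges).
 */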
2446 | static int __init debugfs_kprobe_init(void) |
2447 | { |
2448 | struct dentry *dir, *file; |
2449 | unsigned int value = 1; |
2450 | |
2451 | dir = debugfs_create_dir("kprobes", NULL); |
2452 | if (!dir) |
2453 | return -ENOMEM; |
2454 | |
2455 | file = debugfs_create_file("list", 0400, dir, NULL, |
2456 | &debugfs_kprobes_operations); |
2457 | if (!file) |
2458 | goto error; |
2459 | |
2460 | file = debugfs_create_file("enabled", 0600, dir, |
2461 | &value, &fops_kp); |
2462 | if (!file) |
2463 | goto error; |
2464 | |
2465 | file = debugfs_create_file("blacklist", 0400, dir, NULL, |
2466 | &debugfs_kprobe_blacklist_ops); |
2467 | if (!file) |
2468 | goto error; |
2469 | |
2470 | return 0; |
2471 | |
2472 | error: |
2473 | debugfs_remove(dir); |
2474 | return -ENOMEM; |
2475 | } |
2476 | |
2477 | late_initcall(debugfs_kprobe_init); |
2478 | #endif /* CONFIG_DEBUG_FS */ |
2479 | |
2480 | module_init(init_kprobes); |
2481 | |
2482 | /* defined in arch/.../kernel/kprobes.c */ |
2483 | EXPORT_SYMBOL_GPL(jprobe_return); |
2484 |