/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

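/*
 * Pending hits are buffered per cpu in page-sized, open-addressed
 * hashtables of profile_hit entries, probed in groups of
 * PROFILE_GRPSZ (8) consecutive slots; see do_profile_hits() below.
 */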
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP && CONFIG_PROC_FS */

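/*
 * Parses the "profile=" kernel boot parameter: a bare number enables
 * CPU-time profiling, while a "schedule", "sleep" or "kvm" prefix
 * selects the corresponding event; the optional number after the
 * comma sets prof_shift, so each bucket covers 1 << prof_shift bytes
 * of kernel text.
 */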
int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel sleep profiling enabled (shift: %lu)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel schedule profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel KVM profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %lu)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);

int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len * sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

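	/*
	 * Fall back through progressively less demanding allocators:
	 * kzalloc needs physically contiguous memory from kmalloc,
	 * alloc_pages_exact still needs contiguous pages but avoids
	 * kmalloc's size limit, and vzalloc only needs virtually
	 * contiguous space.
	 */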
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

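/*
 * Returns 1 if a registered handler returned NOTIFY_OK and thereby
 * claimed responsibility for freeing @task, in which case the caller
 * must not free it itself.
 */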
int profile_handoff_task(struct task_struct *task)
{
	int ret;

	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

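/*
 * Flip every cpu to its other hit buffer, then flush the now-idle
 * buffers into prof_buffer. 'j' is sampled before the flip, so it
 * names the buffer writers have just stopped using.
 */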
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];

		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];

		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

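	/*
	 * Map the pc to a profile buffer slot, then derive the primary
	 * group index and a pc-dependent secondary stride. The
	 * ~(pc << 1) construction forces the stride to an odd multiple
	 * of the group size, so repeated probing visits every group
	 * before wrapping back to the primary one.
	 */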
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

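/*
 * CPU hotplug callbacks: profile_prepare_cpu() allocates one zeroed
 * page per flip buffer on the cpu's memory node, and
 * profile_dead_cpu() drops the cpu from prof_cpu_mask and frees both
 * pages again.
 */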
static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (prof_cpu_mask != NULL)
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (prof_cpu_mask != NULL)
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

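/*
 * On UP there is no cross-cpu cacheline contention to amortize, so
 * hits are accounted straight into the global buffer.
 */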
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

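/*
 * Called from the timer interrupt: record one hit at the interrupted
 * pc, but only for kernel-mode samples on cpus present in
 * prof_cpu_mask.
 */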
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

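	/*
	 * The first sizeof(unsigned int) bytes of the file hold the
	 * sample step; copy them out byte by byte if the read starts
	 * inside that header, then fall through to the counter array.
	 */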
	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

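/*
 * Register the cpu hotplug callbacks (on SMP) and create
 * /proc/profile. Runs as a subsys initcall, after "profile=" has been
 * parsed and profile_init() has allocated the buffer.
 */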
int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif
	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry) {
		err = -ENOMEM;
		goto err_state_onl;
	}
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */
564 |