/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed-size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |  |     |    |
 * |  |     |    +----> the stringified backtrace
 * |  |     +---------> The maximum latency for this entry in microseconds
 * |  +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the
 * count, i.e. the number of times the entry was hit)
 */
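
/*
 * Example usage (assumes CONFIG_LATENCYTOP=y; collection is switched on
 * through the latencytop sysctl and the accumulated statistics are reset
 * by writing to the /proc file):
 *
 *	# echo 1 > /proc/sys/kernel/latencytop
 *	# cat /proc/latency_stats
 *	# echo > /proc/latency_stats
 */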

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

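/* Toggled through the latencytop sysctl; see sysctl_latencytop() below. */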
int latencytop_enabled;

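/*
 * Reset the per-task latency records of @p. Does nothing while latency
 * tracking is disabled.
 */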
void clear_all_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	if (!latencytop_enabled)
		return;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

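/* Reset the system-wide table; triggered by writing to /proc/latency_stats. */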
static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

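/*
 * Fold one latency record into the system-wide table: accumulate into an
 * existing entry with the same backtrace, or take the first free slot.
 * Called with latency_lock held.
 */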
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	if (!latencytop_enabled)
		return;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR - 1)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/*
 * Store a backtrace for the given task into a latency record entry
 */
static inline void store_stacktrace(struct task_struct *tsk,
				    struct latency_record *lat)
{
	struct stack_trace trace;

	memset(&trace, 0, sizeof(trace));
	trace.max_entries = LT_BACKTRACEDEPTH;
	trace.entries = &lat->backtrace[0];
	save_stack_trace_tsk(tsk, &trace);
}

/**
 * __account_scheduler_latency - record a latency that just occurred
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;
	store_stacktrace(tsk, &lat);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

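	/* Try to merge into an existing per-task record with the same backtrace: */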
	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once we have LT_SAVECOUNT records we stop;
	 * in the future we should recycle old entries instead.
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

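/*
 * seq_file show method for /proc/latency_stats; prints every populated entry
 * of the global table in the format documented at the top of this file.
 */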
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];
				if (!bt)
					break;
				if (bt == ULONG_MAX)
					break;
				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

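/* Any write to /proc/latency_stats resets the global statistics. */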
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

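/* Create /proc/latency_stats at boot (see device_initcall() below). */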
static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_fops);
	return 0;
}

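/*
 * proc handler for the latencytop sysctl: latency accounting depends on
 * schedstat data, so make sure schedstats are enabled whenever latencytop
 * is switched on.
 */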
int sysctl_latencytop(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}
device_initcall(init_lstats_procfs);