/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function allows an mmu_notifier ->release callback to defer a
 * call to a function that will free the appropriate resources. The
 * deferred function must be quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
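
/*
 * Example (a sketch; "struct my_notifier", "my_mn_free" and "my_release"
 * are hypothetical driver code, not part of this file): a ->release
 * callback can defer freeing a notifier that embeds a struct rcu_head
 * until all SRCU read sides are done with it:
 *
 *	struct my_notifier {
 *		struct mmu_notifier mn;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_mn_free(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct my_notifier, rcu));
 *	}
 *
 *	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		struct my_notifier *p = container_of(mn, struct my_notifier, mn);
 *
 *		mmu_notifier_call_srcu(&p->rcu, my_mn_free);
 *	}
 */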

void mmu_notifier_synchronize(void)
{
	/*
	 * Wait for any queued SRCU callbacks (such as those posted via
	 * mmu_notifier_call_srcu) to finish.
	 */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

/*
 * This function can't run concurrently with mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

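/*
 * Lighter-weight variant of __mmu_notifier_clear_flush_young(): the
 * ->clear_young callback clears the accessed bit in the secondary MMU
 * without requiring the secondary TLB to be flushed. The return values
 * of all registered callbacks are OR'ed together.
 */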
int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

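/*
 * Return 1 as soon as any registered notifier reports the page at
 * @address as young (accessed) in its secondary MMU, 0 otherwise.
 */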
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

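/*
 * Notify all registered notifiers that the pte at @address was updated
 * to @pte, so a secondary MMU can update its mapping in place instead
 * of having to invalidate it.
 */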
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

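/*
 * Notify all registered notifiers that the page mapped at @address is
 * being invalidated in the primary MMU, so they can tear down their own
 * mappings of that page.
 */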
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

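/*
 * Called before the primary MMU starts changing or unmapping pages in
 * [start, end). Must always be paired with a later call to
 * __mmu_notifier_invalidate_range_end() on the same range.
 */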
void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too, so that a subsystem that
		 * already registers invalidate_range does not also have to
		 * register an invalidate_range_end callback. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 */
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);

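/*
 * Called when the primary page tables for [start, end) have changed and
 * the secondary TLBs must be flushed before the pages can be freed or
 * reused, for subsystems (IOMMUs, for instance) that mirror the primary
 * page tables in hardware.
 */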
void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

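/*
 * Common implementation of mmu_notifier_register() and
 * __mmu_notifier_register(): allocates mm->mmu_notifier_mm on first
 * use, then links @mn into the notifier list under mm_take_all_locks()
 * so no notifier method can run concurrently with the update.
 * @take_mmap_sem tells whether mmap_sem must be write-locked here or is
 * already held by the caller.
 */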
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
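
/*
 * Example registration (a minimal sketch; "my_notifier", "my_ops" and
 * the callbacks are hypothetical driver code, not part of this file):
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 *
 *	my_notifier.ops = &my_ops;
 *	ret = mmu_notifier_register(&my_notifier, current->mm);
 *	if (ret)
 *		return ret;
 */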

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() has returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run any more.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Cannot use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

/*
 * Same as mmu_notifier_unregister, but with no ->release callback and no
 * SRCU synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Cannot use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}
subsys_initcall(mmu_notifier_init);
414 |