1 | /* |
2 | * fs/userfaultfd.c |
3 | * |
4 | * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> |
5 | * Copyright (C) 2008-2009 Red Hat, Inc. |
6 | * Copyright (C) 2015 Red Hat, Inc. |
7 | * |
8 | * This work is licensed under the terms of the GNU GPL, version 2. See |
9 | * the COPYING file in the top-level directory. |
10 | * |
11 | * Some parts derived from fs/eventfd.c (anon inode setup) and |
12 | * mm/ksm.c (mm hashing). |
13 | */ |
14 | |
15 | #include <linux/hashtable.h> |
16 | #include <linux/sched.h> |
17 | #include <linux/mm.h> |
18 | #include <linux/poll.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/seq_file.h> |
21 | #include <linux/file.h> |
22 | #include <linux/bug.h> |
23 | #include <linux/anon_inodes.h> |
24 | #include <linux/syscalls.h> |
25 | #include <linux/userfaultfd_k.h> |
26 | #include <linux/mempolicy.h> |
27 | #include <linux/ioctl.h> |
28 | #include <linux/security.h> |
29 | |
30 | static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly; |
31 | |
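| /* |
| * Lifecycle of a userfaultfd: it starts in UFFD_STATE_WAIT_API until |
| * the UFFDIO_API handshake succeeds and then moves to |
| * UFFD_STATE_RUNNING, where the other ioctls, poll and read are allowed. |
| */ |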
32 | enum userfaultfd_state { |
33 | UFFD_STATE_WAIT_API, |
34 | UFFD_STATE_RUNNING, |
35 | }; |
36 | |
37 | /* |
38 | * Start with fault_pending_wqh and fault_wqh so they're more likely |
39 | * to be in the same cacheline. |
40 | */ |
41 | struct userfaultfd_ctx { |
42 | /* waitqueue head for the pending (i.e. not read) userfaults */ |
43 | wait_queue_head_t fault_pending_wqh; |
44 | /* waitqueue head for the userfaults */ |
45 | wait_queue_head_t fault_wqh; |
46 | /* waitqueue head for the pseudo fd to wakeup poll/read */ |
47 | wait_queue_head_t fd_wqh; |
48 | /* a refile sequence protected by fault_pending_wqh lock */ |
49 | struct seqcount refile_seq; |
50 | /* pseudo fd refcounting */ |
51 | atomic_t refcount; |
52 | /* userfaultfd syscall flags */ |
53 | unsigned int flags; |
54 | /* state machine */ |
55 | enum userfaultfd_state state; |
56 | /* released */ |
57 | bool released; |
58 | /* mm with one or more vmas attached to this userfaultfd_ctx */ |
59 | struct mm_struct *mm; |
60 | }; |
61 | |
62 | struct userfaultfd_wait_queue { |
63 | struct uffd_msg msg; |
64 | wait_queue_t wq; |
65 | struct userfaultfd_ctx *ctx; |
66 | bool waken; |
67 | }; |
68 | |
69 | struct userfaultfd_wake_range { |
70 | unsigned long start; |
71 | unsigned long len; |
72 | }; |
73 | |
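| /* |
| * Wake function used for the uwq entries queued in fault_pending_wqh and |
| * fault_wqh: it only wakes waiters whose faulting address falls inside |
| * the key range (len == 0 means wake all) and it autoremoves the entry |
| * once the task has actually been woken. |
| */ |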
74 | static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, |
75 | int wake_flags, void *key) |
76 | { |
77 | struct userfaultfd_wake_range *range = key; |
78 | int ret; |
79 | struct userfaultfd_wait_queue *uwq; |
80 | unsigned long start, len; |
81 | |
82 | uwq = container_of(wq, struct userfaultfd_wait_queue, wq); |
83 | ret = 0; |
84 | /* len == 0 means wake all */ |
85 | start = range->start; |
86 | len = range->len; |
87 | if (len && (start > uwq->msg.arg.pagefault.address || |
88 | start + len <= uwq->msg.arg.pagefault.address)) |
89 | goto out; |
90 | WRITE_ONCE(uwq->waken, true); |
91 | /* |
92 | * The implicit smp_mb__before_spinlock in try_to_wake_up() |
93 | * renders uwq->waken visible to other CPUs before the task is |
94 | * woken. |
95 | */ |
96 | ret = wake_up_state(wq->private, mode); |
97 | if (ret) |
98 | /* |
99 | * Wake only once, autoremove behavior. |
100 | * |
101 | * After the effect of list_del_init is visible to the |
102 | * other CPUs, the waitqueue may disappear from under |
103 | * us, see the !list_empty_careful() in |
104 | * handle_userfault(). try_to_wake_up() has an |
105 | * implicit smp_mb__before_spinlock, and the |
106 | * wq->private is read before calling the extern |
107 | * function "wake_up_state" (which in turn calls |
108 | * try_to_wake_up). While the spin_lock;spin_unlock; |
109 | * wouldn't be enough, the smp_mb__before_spinlock is |
110 | * enough to avoid an explicit smp_mb() here. |
111 | */ |
112 | list_del_init(&wq->task_list); |
113 | out: |
114 | return ret; |
115 | } |
116 | |
117 | /** |
118 | * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd |
119 | * context. |
120 | * @ctx: [in] Pointer to the userfaultfd context. |
121 | * |
122 | * The context refcount must already be held (non-zero) by the caller. |
123 | */ |
124 | static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) |
125 | { |
126 | if (!atomic_inc_not_zero(&ctx->refcount)) |
127 | BUG(); |
128 | } |
129 | |
130 | /** |
131 | * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd |
132 | * context. |
133 | * @ctx: [in] Pointer to userfaultfd context. |
134 | * |
135 | * The userfaultfd context reference must have been previously acquired either |
136 | * with userfaultfd_ctx_get(). |
137 | */ |
138 | static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) |
139 | { |
140 | if (atomic_dec_and_test(&ctx->refcount)) { |
141 | VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); |
142 | VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); |
143 | VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); |
144 | VM_BUG_ON(waitqueue_active(&ctx->fault_wqh)); |
145 | VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock)); |
146 | VM_BUG_ON(waitqueue_active(&ctx->fd_wqh)); |
147 | mmdrop(ctx->mm); |
148 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); |
149 | } |
150 | } |
151 | |
152 | static inline void msg_init(struct uffd_msg *msg) |
153 | { |
154 | BUILD_BUG_ON(sizeof(struct uffd_msg) != 32); |
155 | /* |
156 | * Must use memset to zero out the padding or kernel data is |
157 | * leaked to userland. |
158 | */ |
159 | memset(msg, 0, sizeof(struct uffd_msg)); |
160 | } |
161 | |
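| /* |
| * Build the UFFD_EVENT_PAGEFAULT message delivered to the reader of the |
| * userfaultfd for a fault at @address. |
| */ |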
162 | static inline struct uffd_msg userfault_msg(unsigned long address, |
163 | unsigned int flags, |
164 | unsigned long reason) |
165 | { |
166 | struct uffd_msg msg; |
167 | msg_init(&msg); |
168 | msg.event = UFFD_EVENT_PAGEFAULT; |
169 | msg.arg.pagefault.address = address; |
170 | if (flags & FAULT_FLAG_WRITE) |
171 | /* |
172 | * If UFFD_FEATURE_PAGEFAULT_FLAG_WRITE was set in the |
173 | * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE |
174 | * was not set in a UFFD_EVENT_PAGEFAULT, it means it |
175 | * was a read fault, otherwise if set it means it's |
176 | * a write fault. |
177 | */ |
178 | msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE; |
179 | if (reason & VM_UFFD_WP) |
180 | /* |
181 | * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the |
182 | * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was |
183 | * not set in a UFFD_EVENT_PAGEFAULT, it means it was |
184 | * a missing fault, otherwise if set it means it's a |
185 | * write protect fault. |
186 | */ |
187 | msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP; |
188 | return msg; |
189 | } |
190 | |
191 | /* |
192 | * Verify the pagetables are still not ok after having registered into |
193 | * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any |
194 | * userfault that has already been resolved, if userfaultfd_read and |
195 | * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different |
196 | * threads. |
197 | */ |
198 | static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, |
199 | unsigned long address, |
200 | unsigned long flags, |
201 | unsigned long reason) |
202 | { |
203 | struct mm_struct *mm = ctx->mm; |
204 | pgd_t *pgd; |
205 | pud_t *pud; |
206 | pmd_t *pmd, _pmd; |
207 | pte_t *pte; |
208 | bool ret = true; |
209 | |
210 | VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); |
211 | |
212 | pgd = pgd_offset(mm, address); |
213 | if (!pgd_present(*pgd)) |
214 | goto out; |
215 | pud = pud_offset(pgd, address); |
216 | if (!pud_present(*pud)) |
217 | goto out; |
218 | pmd = pmd_offset(pud, address); |
219 | /* |
220 | * READ_ONCE must function as a barrier with narrower scope |
221 | * and it must be equivalent to: |
222 | * _pmd = *pmd; barrier(); |
223 | * |
224 | * This is to deal with the instability (as in |
225 | * pmd_trans_unstable) of the pmd. |
226 | */ |
227 | _pmd = READ_ONCE(*pmd); |
228 | if (!pmd_present(_pmd)) |
229 | goto out; |
230 | |
231 | ret = false; |
232 | if (pmd_trans_huge(_pmd)) |
233 | goto out; |
234 | |
235 | /* |
236 | * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it |
237 | * and use the standard pte_offset_map() instead of parsing _pmd. |
238 | */ |
239 | pte = pte_offset_map(pmd, address); |
240 | /* |
241 | * Lockless access: we're in a wait_event so it's ok if it |
242 | * changes under us. |
243 | */ |
244 | if (pte_none(*pte)) |
245 | ret = true; |
246 | pte_unmap(pte); |
247 | |
248 | out: |
249 | return ret; |
250 | } |
251 | |
252 | /* |
253 | * The locking rules involved in returning VM_FAULT_RETRY depending on |
254 | * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and |
255 | * FAULT_FLAG_KILLABLE are not straightforward. The "Caution" |
256 | * recommendation in __lock_page_or_retry is not an understatement. |
257 | * |
258 | * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released |
259 | * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is |
260 | * not set. |
261 | * |
262 | * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not |
263 | * set, VM_FAULT_RETRY can still be returned if and only if there are |
264 | * fatal_signal_pending()s, and the mmap_sem must be released before |
265 | * returning it. |
266 | */ |
267 | int handle_userfault(struct fault_env *fe, unsigned long reason) |
268 | { |
269 | struct mm_struct *mm = fe->vma->vm_mm; |
270 | struct userfaultfd_ctx *ctx; |
271 | struct userfaultfd_wait_queue uwq; |
272 | int ret; |
273 | bool must_wait, return_to_userland; |
274 | long blocking_state; |
275 | |
276 | BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); |
277 | |
278 | ret = VM_FAULT_SIGBUS; |
279 | ctx = fe->vma->vm_userfaultfd_ctx.ctx; |
280 | if (!ctx) |
281 | goto out; |
282 | |
283 | BUG_ON(ctx->mm != mm); |
284 | |
285 | VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP)); |
286 | VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP)); |
287 | |
288 | /* |
289 | * If it's already released don't get it. This avoids looping |
290 | * in __get_user_pages if userfaultfd_release waits on the |
291 | * caller of handle_userfault to release the mmap_sem. |
292 | */ |
293 | if (unlikely(ACCESS_ONCE(ctx->released))) |
294 | goto out; |
295 | |
296 | /* |
297 | * We don't do userfault handling for the final child pid update. |
298 | */ |
299 | if (current->flags & PF_EXITING) |
300 | goto out; |
301 | |
302 | /* |
303 | * Check that we can return VM_FAULT_RETRY. |
304 | * |
305 | * NOTE: it should become possible to return VM_FAULT_RETRY |
306 | * even if FAULT_FLAG_TRIED is set without leading to gup() |
307 | * -EBUSY failures, if the userfaultfd is to be extended for |
308 | * VM_UFFD_WP tracking and we intend to arm the userfault |
309 | * without first stopping userland access to the memory. For |
310 | * VM_UFFD_MISSING userfaults this is enough for now. |
311 | */ |
312 | if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) { |
313 | /* |
314 | * Validate the invariant that nowait must allow retry |
315 | * to be sure not to return SIGBUS erroneously on |
316 | * nowait invocations. |
317 | */ |
318 | BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT); |
319 | #ifdef CONFIG_DEBUG_VM |
320 | if (printk_ratelimit()) { |
321 | printk(KERN_WARNING |
322 | "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags); |
323 | dump_stack(); |
324 | } |
325 | #endif |
326 | goto out; |
327 | } |
328 | |
329 | /* |
330 | * Handle nowait, not much to do other than tell it to retry |
331 | * and wait. |
332 | */ |
333 | ret = VM_FAULT_RETRY; |
334 | if (fe->flags & FAULT_FLAG_RETRY_NOWAIT) |
335 | goto out; |
336 | |
337 | /* take the reference before dropping the mmap_sem */ |
338 | userfaultfd_ctx_get(ctx); |
339 | |
340 | init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); |
341 | uwq.wq.private = current; |
342 | uwq.msg = userfault_msg(fe->address, fe->flags, reason); |
343 | uwq.ctx = ctx; |
344 | uwq.waken = false; |
345 | |
346 | return_to_userland = |
347 | (fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == |
348 | (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); |
349 | blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : |
350 | TASK_KILLABLE; |
351 | |
352 | spin_lock(&ctx->fault_pending_wqh.lock); |
353 | /* |
354 | * After the __add_wait_queue the uwq is visible to userland |
355 | * through poll/read(). |
356 | */ |
357 | __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); |
358 | /* |
359 | * The smp_mb() after __set_current_state prevents the reads |
360 | * following the spin_unlock from happening before the list_add in |
361 | * __add_wait_queue. |
362 | */ |
363 | set_current_state(blocking_state); |
364 | spin_unlock(&ctx->fault_pending_wqh.lock); |
365 | |
366 | must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason); |
367 | up_read(&mm->mmap_sem); |
368 | |
369 | if (likely(must_wait && !ACCESS_ONCE(ctx->released) && |
370 | (return_to_userland ? !signal_pending(current) : |
371 | !fatal_signal_pending(current)))) { |
372 | wake_up_poll(&ctx->fd_wqh, POLLIN); |
373 | schedule(); |
374 | ret |= VM_FAULT_MAJOR; |
375 | |
376 | /* |
377 | * False wakeups can originate even from rwsem before |
378 | * up_read() however userfaults will wait either for a |
379 | * targeted wakeup on the specific uwq waitqueue from |
380 | * wake_userfault() or for signals or for uffd |
381 | * release. |
382 | */ |
383 | while (!READ_ONCE(uwq.waken)) { |
384 | /* |
385 | * This needs the full smp_store_mb() |
386 | * guarantee as the state write must be |
387 | * visible to other CPUs before reading |
388 | * uwq.waken from other CPUs. |
389 | */ |
390 | set_current_state(blocking_state); |
391 | if (READ_ONCE(uwq.waken) || |
392 | READ_ONCE(ctx->released) || |
393 | (return_to_userland ? signal_pending(current) : |
394 | fatal_signal_pending(current))) |
395 | break; |
396 | schedule(); |
397 | } |
398 | } |
399 | |
400 | __set_current_state(TASK_RUNNING); |
401 | |
402 | if (return_to_userland) { |
403 | if (signal_pending(current) && |
404 | !fatal_signal_pending(current)) { |
405 | /* |
406 | * If we got a SIGSTOP or SIGCONT and this is |
407 | * a normal userland page fault, just let |
408 | * userland return so the signal will be |
409 | * handled and gdb debugging works. The page |
410 | * fault code immediately after we return from |
411 | * this function is going to release the |
412 | * mmap_sem and it's not depending on it |
413 | * (unlike gup would if we were not to return |
414 | * VM_FAULT_RETRY). |
415 | * |
416 | * If a fatal signal is pending we still take |
417 | * the streamlined VM_FAULT_RETRY failure path |
418 | * and there's no need to retake the mmap_sem |
419 | * in such case. |
420 | */ |
421 | down_read(&mm->mmap_sem); |
422 | ret = VM_FAULT_NOPAGE; |
423 | } |
424 | } |
425 | |
426 | /* |
427 | * Here we race with the list_del; list_add in |
428 | * userfaultfd_ctx_read(), however because we don't ever run |
429 | * list_del_init() to refile across the two lists, the prev |
430 | * and next pointers will never point to self. list_add also |
431 | * would never let either of the two pointers point to |
432 | * self. So list_empty_careful won't risk seeing both pointers |
433 | * pointing to self at any time during the list refile. The |
434 | * only case where list_del_init() is called is the full |
435 | * removal in the wake function and there we don't re-list_add |
436 | * and it's fine not to block on the spinlock. The uwq on this |
437 | * kernel stack can be released after the list_del_init. |
438 | */ |
439 | if (!list_empty_careful(&uwq.wq.task_list)) { |
440 | spin_lock(&ctx->fault_pending_wqh.lock); |
441 | /* |
442 | * No need of list_del_init(), the uwq on the stack |
443 | * will be freed shortly anyway. |
444 | */ |
445 | list_del(&uwq.wq.task_list); |
446 | spin_unlock(&ctx->fault_pending_wqh.lock); |
447 | } |
448 | |
449 | /* |
450 | * ctx may go away after this if the userfault pseudo fd is |
451 | * already released. |
452 | */ |
453 | userfaultfd_ctx_put(ctx); |
454 | |
455 | out: |
456 | return ret; |
457 | } |
458 | |
459 | static int userfaultfd_release(struct inode *inode, struct file *file) |
460 | { |
461 | struct userfaultfd_ctx *ctx = file->private_data; |
462 | struct mm_struct *mm = ctx->mm; |
463 | struct vm_area_struct *vma, *prev; |
464 | /* len == 0 means wake all */ |
465 | struct userfaultfd_wake_range range = { .len = 0, }; |
466 | unsigned long new_flags; |
467 | |
468 | ACCESS_ONCE(ctx->released) = true; |
469 | |
470 | if (!mmget_not_zero(mm)) |
471 | goto wakeup; |
472 | |
473 | /* |
474 | * Flush page faults out of all CPUs. NOTE: all page faults |
475 | * must be retried without returning VM_FAULT_SIGBUS if |
476 | * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx |
477 | * changes while handle_userfault released the mmap_sem. So |
478 | * it's critical that released is set to true (above), before |
479 | * taking the mmap_sem for writing. |
480 | */ |
481 | down_write(&mm->mmap_sem); |
482 | prev = NULL; |
483 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
484 | cond_resched(); |
485 | BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ |
486 | !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); |
487 | if (vma->vm_userfaultfd_ctx.ctx != ctx) { |
488 | prev = vma; |
489 | continue; |
490 | } |
491 | new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); |
492 | prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, |
493 | new_flags, vma->anon_vma, |
494 | vma->vm_file, vma->vm_pgoff, |
495 | vma_policy(vma), |
496 | NULL_VM_UFFD_CTX, |
497 | vma_get_anon_name(vma)); |
498 | if (prev) |
499 | vma = prev; |
500 | else |
501 | prev = vma; |
502 | vma->vm_flags = new_flags; |
503 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; |
504 | } |
505 | up_write(&mm->mmap_sem); |
506 | mmput(mm); |
507 | wakeup: |
508 | /* |
509 | * After no new page faults can wait on this fault_*wqh, flush |
510 | * the last page faults that may have been already waiting on |
511 | * the fault_*wqh. |
512 | */ |
513 | spin_lock(&ctx->fault_pending_wqh.lock); |
514 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); |
515 | __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range); |
516 | spin_unlock(&ctx->fault_pending_wqh.lock); |
517 | |
518 | wake_up_poll(&ctx->fd_wqh, POLLHUP); |
519 | userfaultfd_ctx_put(ctx); |
520 | return 0; |
521 | } |
522 | |
523 | /* fault_pending_wqh.lock must be held by the caller */ |
524 | static inline struct userfaultfd_wait_queue *find_userfault( |
525 | struct userfaultfd_ctx *ctx) |
526 | { |
527 | wait_queue_t *wq; |
528 | struct userfaultfd_wait_queue *uwq; |
529 | |
530 | VM_BUG_ON(!spin_is_locked(&ctx->fault_pending_wqh.lock)); |
531 | |
532 | uwq = NULL; |
533 | if (!waitqueue_active(&ctx->fault_pending_wqh)) |
534 | goto out; |
535 | /* walk in reverse to provide FIFO behavior to read userfaults */ |
536 | wq = list_last_entry(&ctx->fault_pending_wqh.task_list, |
537 | typeof(*wq), task_list); |
538 | uwq = container_of(wq, struct userfaultfd_wait_queue, wq); |
539 | out: |
540 | return uwq; |
541 | } |
542 | |
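| /* |
| * ->poll() of the uffd: POLLERR until the UFFDIO_API handshake completes |
| * (or if the fd was not opened with O_NONBLOCK), POLLIN whenever there |
| * are pending (i.e. not yet read) userfaults. |
| */ |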
543 | static unsigned int userfaultfd_poll(struct file *file, poll_table *wait) |
544 | { |
545 | struct userfaultfd_ctx *ctx = file->private_data; |
546 | unsigned int ret; |
547 | |
548 | poll_wait(file, &ctx->fd_wqh, wait); |
549 | |
550 | switch (ctx->state) { |
551 | case UFFD_STATE_WAIT_API: |
552 | return POLLERR; |
553 | case UFFD_STATE_RUNNING: |
554 | /* |
555 | * poll() never guarantees that read won't block. |
556 | * userfaults can be woken before they're read(). |
557 | */ |
558 | if (unlikely(!(file->f_flags & O_NONBLOCK))) |
559 | return POLLERR; |
560 | /* |
561 | * Lockless access to see if there are pending faults. |
562 | * __pollwait's last action is the add_wait_queue but |
563 | * the spin_unlock would allow the waitqueue_active to |
564 | * pass above the actual list_add inside |
565 | * add_wait_queue critical section. So use a full |
566 | * memory barrier to serialize the list_add write of |
567 | * add_wait_queue() with the waitqueue_active read |
568 | * below. |
569 | */ |
570 | ret = 0; |
571 | smp_mb(); |
572 | if (waitqueue_active(&ctx->fault_pending_wqh)) |
573 | ret = POLLIN; |
574 | return ret; |
575 | default: |
576 | BUG(); |
577 | } |
578 | } |
579 | |
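| /* |
| * Wait for and dequeue a single pending userfault: the uwq is refiled |
| * from fault_pending_wqh to fault_wqh and its message is copied into |
| * *msg. Returns 0 on success, -EAGAIN if no_wait and nothing is pending, |
| * or -ERESTARTSYS if a signal is pending. |
| */ |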
580 | static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, |
581 | struct uffd_msg *msg) |
582 | { |
583 | ssize_t ret; |
584 | DECLARE_WAITQUEUE(wait, current); |
585 | struct userfaultfd_wait_queue *uwq; |
586 | |
587 | /* always take the fd_wqh lock before the fault_pending_wqh lock */ |
588 | spin_lock(&ctx->fd_wqh.lock); |
589 | __add_wait_queue(&ctx->fd_wqh, &wait); |
590 | for (;;) { |
591 | set_current_state(TASK_INTERRUPTIBLE); |
592 | spin_lock(&ctx->fault_pending_wqh.lock); |
593 | uwq = find_userfault(ctx); |
594 | if (uwq) { |
595 | /* |
596 | * Use a seqcount to repeat the lockless check |
597 | * in wake_userfault() to avoid missing |
598 | * wakeups because during the refile both |
599 | * waitqueues could become empty if this is the |
600 | * only userfault. |
601 | */ |
602 | write_seqcount_begin(&ctx->refile_seq); |
603 | |
604 | /* |
605 | * The fault_pending_wqh.lock prevents the uwq |
606 | * from disappearing from under us. |
607 | * |
608 | * Refile this userfault from |
609 | * fault_pending_wqh to fault_wqh, it's not |
610 | * pending anymore after we read it. |
611 | * |
612 | * Use list_del() by hand (as |
613 | * userfaultfd_wake_function also uses |
614 | * list_del_init() by hand) to be sure nobody |
615 | * changes __remove_wait_queue() to use |
616 | * list_del_init() in turn breaking the |
617 | * !list_empty_careful() check in |
618 | * handle_userfault(). The uwq->wq.task_list |
619 | * must never be empty at any time during the |
620 | * refile, or the waitqueue could disappear |
621 | * from under us. The "wait_queue_head_t" |
622 | * parameter of __remove_wait_queue() is unused |
623 | * anyway. |
624 | */ |
625 | list_del(&uwq->wq.task_list); |
626 | __add_wait_queue(&ctx->fault_wqh, &uwq->wq); |
627 | |
628 | write_seqcount_end(&ctx->refile_seq); |
629 | |
630 | /* careful to always initialize msg if ret == 0 */ |
631 | *msg = uwq->msg; |
632 | spin_unlock(&ctx->fault_pending_wqh.lock); |
633 | ret = 0; |
634 | break; |
635 | } |
636 | spin_unlock(&ctx->fault_pending_wqh.lock); |
637 | if (signal_pending(current)) { |
638 | ret = -ERESTARTSYS; |
639 | break; |
640 | } |
641 | if (no_wait) { |
642 | ret = -EAGAIN; |
643 | break; |
644 | } |
645 | spin_unlock(&ctx->fd_wqh.lock); |
646 | schedule(); |
647 | spin_lock(&ctx->fd_wqh.lock); |
648 | } |
649 | __remove_wait_queue(&ctx->fd_wqh, &wait); |
650 | __set_current_state(TASK_RUNNING); |
651 | spin_unlock(&ctx->fd_wqh.lock); |
652 | |
653 | return ret; |
654 | } |
655 | |
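| /* |
| * ->read() of the uffd: returns one or more struct uffd_msg, blocking |
| * only while waiting for the very first one unless O_NONBLOCK is set. |
| */ |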
656 | static ssize_t userfaultfd_read(struct file *file, char __user *buf, |
657 | size_t count, loff_t *ppos) |
658 | { |
659 | struct userfaultfd_ctx *ctx = file->private_data; |
660 | ssize_t _ret, ret = 0; |
661 | struct uffd_msg msg; |
662 | int no_wait = file->f_flags & O_NONBLOCK; |
663 | |
664 | if (ctx->state == UFFD_STATE_WAIT_API) |
665 | return -EINVAL; |
666 | |
667 | for (;;) { |
668 | if (count < sizeof(msg)) |
669 | return ret ? ret : -EINVAL; |
670 | _ret = userfaultfd_ctx_read(ctx, no_wait, &msg); |
671 | if (_ret < 0) |
672 | return ret ? ret : _ret; |
673 | if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg))) |
674 | return ret ? ret : -EFAULT; |
675 | ret += sizeof(msg); |
676 | buf += sizeof(msg); |
677 | count -= sizeof(msg); |
678 | /* |
679 | * Allow reading more than one fault at a time but only |
680 | * block while waiting for the very first one. |
681 | */ |
682 | no_wait = O_NONBLOCK; |
683 | } |
684 | } |
685 | |
686 | static void __wake_userfault(struct userfaultfd_ctx *ctx, |
687 | struct userfaultfd_wake_range *range) |
688 | { |
689 | unsigned long start, end; |
690 | |
691 | start = range->start; |
692 | end = range->start + range->len; |
693 | |
694 | spin_lock(&ctx->fault_pending_wqh.lock); |
695 | /* wake all in the range and autoremove */ |
696 | if (waitqueue_active(&ctx->fault_pending_wqh)) |
697 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, |
698 | range); |
699 | if (waitqueue_active(&ctx->fault_wqh)) |
700 | __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range); |
701 | spin_unlock(&ctx->fault_pending_wqh.lock); |
702 | } |
703 | |
704 | static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, |
705 | struct userfaultfd_wake_range *range) |
706 | { |
707 | unsigned seq; |
708 | bool need_wakeup; |
709 | |
710 | /* |
711 | * To be sure waitqueue_active() is not reordered by the CPU |
712 | * before the pagetable update, use an explicit SMP memory |
713 | * barrier here. PT lock release or up_read(mmap_sem) still |
714 | * have release semantics that can allow the |
715 | * waitqueue_active() to be reordered before the pte update. |
716 | */ |
717 | smp_mb(); |
718 | |
719 | /* |
720 | * Use waitqueue_active because it's very frequent to |
721 | * change the address space atomically even if there are no |
722 | * userfaults yet. So we take the spinlock only when we're |
723 | * sure we have userfaults to wake. |
724 | */ |
725 | do { |
726 | seq = read_seqcount_begin(&ctx->refile_seq); |
727 | need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || |
728 | waitqueue_active(&ctx->fault_wqh); |
729 | cond_resched(); |
730 | } while (read_seqcount_retry(&ctx->refile_seq, seq)); |
731 | if (need_wakeup) |
732 | __wake_userfault(ctx, range); |
733 | } |
734 | |
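| /* |
| * Sanity check a userland supplied range: it must be page aligned, |
| * non-empty and entirely below mm->task_size (and above mmap_min_addr). |
| */ |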
735 | static __always_inline int validate_range(struct mm_struct *mm, |
736 | __u64 start, __u64 len) |
737 | { |
738 | __u64 task_size = mm->task_size; |
739 | |
740 | if (start & ~PAGE_MASK) |
741 | return -EINVAL; |
742 | if (len & ~PAGE_MASK) |
743 | return -EINVAL; |
744 | if (!len) |
745 | return -EINVAL; |
746 | if (start < mmap_min_addr) |
747 | return -EINVAL; |
748 | if (start >= task_size) |
749 | return -EINVAL; |
750 | if (len > task_size - start) |
751 | return -EINVAL; |
752 | return 0; |
753 | } |
754 | |
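| /* |
| * UFFDIO_REGISTER: attach this userfaultfd to all anonymous vmas in the |
| * requested range, splitting or merging vmas as needed, and report back |
| * to userland the ioctls guaranteed to work on the registered range. |
| */ |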
755 | static int userfaultfd_register(struct userfaultfd_ctx *ctx, |
756 | unsigned long arg) |
757 | { |
758 | struct mm_struct *mm = ctx->mm; |
759 | struct vm_area_struct *vma, *prev, *cur; |
760 | int ret; |
761 | struct uffdio_register uffdio_register; |
762 | struct uffdio_register __user *user_uffdio_register; |
763 | unsigned long vm_flags, new_flags; |
764 | bool found; |
765 | unsigned long start, end, vma_end; |
766 | |
767 | user_uffdio_register = (struct uffdio_register __user *) arg; |
768 | |
769 | ret = -EFAULT; |
770 | if (copy_from_user(&uffdio_register, user_uffdio_register, |
771 | sizeof(uffdio_register)-sizeof(__u64))) |
772 | goto out; |
773 | |
774 | ret = -EINVAL; |
775 | if (!uffdio_register.mode) |
776 | goto out; |
777 | if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING| |
778 | UFFDIO_REGISTER_MODE_WP)) |
779 | goto out; |
780 | vm_flags = 0; |
781 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) |
782 | vm_flags |= VM_UFFD_MISSING; |
783 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { |
784 | vm_flags |= VM_UFFD_WP; |
785 | /* |
786 | * FIXME: remove the below error constraint by |
787 | * implementing the wprotect tracking mode. |
788 | */ |
789 | ret = -EINVAL; |
790 | goto out; |
791 | } |
792 | |
793 | ret = validate_range(mm, uffdio_register.range.start, |
794 | uffdio_register.range.len); |
795 | if (ret) |
796 | goto out; |
797 | |
798 | start = uffdio_register.range.start; |
799 | end = start + uffdio_register.range.len; |
800 | |
801 | ret = -ENOMEM; |
802 | if (!mmget_not_zero(mm)) |
803 | goto out; |
804 | |
805 | down_write(&mm->mmap_sem); |
806 | vma = find_vma_prev(mm, start, &prev); |
807 | if (!vma) |
808 | goto out_unlock; |
809 | |
810 | /* check that there's at least one vma in the range */ |
811 | ret = -EINVAL; |
812 | if (vma->vm_start >= end) |
813 | goto out_unlock; |
814 | |
815 | /* |
816 | * Search for incompatible vmas. |
817 | * |
818 | * FIXME: this shall be relaxed later so that it doesn't fail |
819 | * on tmpfs backed vmas (in addition to the current allowance |
820 | * on anonymous vmas). |
821 | */ |
822 | found = false; |
823 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { |
824 | cond_resched(); |
825 | |
826 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ |
827 | !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); |
828 | |
829 | /* check not compatible vmas */ |
830 | ret = -EINVAL; |
831 | if (cur->vm_ops) |
832 | goto out_unlock; |
833 | |
834 | /* |
835 | * UFFDIO_COPY will fill file holes even without |
836 | * PROT_WRITE. This check enforces that if this is a |
837 | * MAP_SHARED, the process has write permission to the backing |
838 | * file. If VM_MAYWRITE is set it also enforces that on a |
839 | * MAP_SHARED vma: there is no F_SEAL_WRITE seal and no further |
840 | * F_SEAL_WRITE can be taken until the vma is destroyed. |
841 | */ |
842 | ret = -EPERM; |
843 | if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) |
844 | goto out_unlock; |
845 | |
846 | /* |
847 | * Check that this vma isn't already owned by a |
848 | * different userfaultfd. We can't allow more than one |
849 | * userfaultfd to own a single vma simultaneously or we |
850 | * wouldn't know which one to deliver the userfaults to. |
851 | */ |
852 | ret = -EBUSY; |
853 | if (cur->vm_userfaultfd_ctx.ctx && |
854 | cur->vm_userfaultfd_ctx.ctx != ctx) |
855 | goto out_unlock; |
856 | |
857 | found = true; |
858 | } |
859 | BUG_ON(!found); |
860 | |
861 | if (vma->vm_start < start) |
862 | prev = vma; |
863 | |
864 | ret = 0; |
865 | do { |
866 | cond_resched(); |
867 | |
868 | BUG_ON(vma->vm_ops); |
869 | BUG_ON(vma->vm_userfaultfd_ctx.ctx && |
870 | vma->vm_userfaultfd_ctx.ctx != ctx); |
871 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
872 | |
873 | /* |
874 | * Nothing to do: this vma is already registered into this |
875 | * userfaultfd and with the right tracking mode too. |
876 | */ |
877 | if (vma->vm_userfaultfd_ctx.ctx == ctx && |
878 | (vma->vm_flags & vm_flags) == vm_flags) |
879 | goto skip; |
880 | |
881 | if (vma->vm_start > start) |
882 | start = vma->vm_start; |
883 | vma_end = min(end, vma->vm_end); |
884 | |
885 | new_flags = (vma->vm_flags & ~vm_flags) | vm_flags; |
886 | prev = vma_merge(mm, prev, start, vma_end, new_flags, |
887 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, |
888 | vma_policy(vma), |
889 | ((struct vm_userfaultfd_ctx){ ctx }), |
890 | vma_get_anon_name(vma)); |
891 | if (prev) { |
892 | vma = prev; |
893 | goto next; |
894 | } |
895 | if (vma->vm_start < start) { |
896 | ret = split_vma(mm, vma, start, 1); |
897 | if (ret) |
898 | break; |
899 | } |
900 | if (vma->vm_end > end) { |
901 | ret = split_vma(mm, vma, end, 0); |
902 | if (ret) |
903 | break; |
904 | } |
905 | next: |
906 | /* |
907 | * In the vma_merge() successful mprotect-like case 8: |
908 | * the next vma was merged into the current one and |
909 | * the current one has not been updated yet. |
910 | */ |
911 | vma->vm_flags = new_flags; |
912 | vma->vm_userfaultfd_ctx.ctx = ctx; |
913 | |
914 | skip: |
915 | prev = vma; |
916 | start = vma->vm_end; |
917 | vma = vma->vm_next; |
918 | } while (vma && vma->vm_start < end); |
919 | out_unlock: |
920 | up_write(&mm->mmap_sem); |
921 | mmput(mm); |
922 | if (!ret) { |
923 | /* |
924 | * Now that we scanned all vmas we can already tell |
925 | * userland which ioctl methods are guaranteed to |
926 | * succeed on this range. |
927 | */ |
928 | if (put_user(UFFD_API_RANGE_IOCTLS, |
929 | &user_uffdio_register->ioctls)) |
930 | ret = -EFAULT; |
931 | } |
932 | out: |
933 | return ret; |
934 | } |
935 | |
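| /* |
| * UFFDIO_UNREGISTER: detach any userfaultfd from the vmas in the |
| * requested range, again splitting or merging vmas as needed. |
| */ |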
936 | static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, |
937 | unsigned long arg) |
938 | { |
939 | struct mm_struct *mm = ctx->mm; |
940 | struct vm_area_struct *vma, *prev, *cur; |
941 | int ret; |
942 | struct uffdio_range uffdio_unregister; |
943 | unsigned long new_flags; |
944 | bool found; |
945 | unsigned long start, end, vma_end; |
946 | const void __user *buf = (void __user *)arg; |
947 | |
948 | ret = -EFAULT; |
949 | if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) |
950 | goto out; |
951 | |
952 | ret = validate_range(mm, uffdio_unregister.start, |
953 | uffdio_unregister.len); |
954 | if (ret) |
955 | goto out; |
956 | |
957 | start = uffdio_unregister.start; |
958 | end = start + uffdio_unregister.len; |
959 | |
960 | ret = -ENOMEM; |
961 | if (!mmget_not_zero(mm)) |
962 | goto out; |
963 | |
964 | down_write(&mm->mmap_sem); |
965 | vma = find_vma_prev(mm, start, &prev); |
966 | if (!vma) |
967 | goto out_unlock; |
968 | |
969 | /* check that there's at least one vma in the range */ |
970 | ret = -EINVAL; |
971 | if (vma->vm_start >= end) |
972 | goto out_unlock; |
973 | |
974 | /* |
975 | * Search for incompatible vmas. |
976 | * |
977 | * FIXME: this shall be relaxed later so that it doesn't fail |
978 | * on tmpfs backed vmas (in addition to the current allowance |
979 | * on anonymous vmas). |
980 | */ |
981 | found = false; |
982 | ret = -EINVAL; |
983 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { |
984 | cond_resched(); |
985 | |
986 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ |
987 | !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); |
988 | |
989 | /* |
990 | * Check for incompatible vmas; not strictly required |
991 | * here as incompatible vmas cannot have a |
992 | * userfaultfd_ctx registered on them, but this |
993 | * provides stricter behavior to catch |
994 | * unregistration errors. |
995 | */ |
996 | if (cur->vm_ops) |
997 | goto out_unlock; |
998 | |
999 | found = true; |
1000 | } |
1001 | BUG_ON(!found); |
1002 | |
1003 | if (vma->vm_start < start) |
1004 | prev = vma; |
1005 | |
1006 | ret = 0; |
1007 | do { |
1008 | cond_resched(); |
1009 | |
1010 | BUG_ON(vma->vm_ops); |
1011 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
1012 | |
1013 | /* |
1014 | * Nothing to do: this vma is not registered with any |
1015 | * userfaultfd, so there is nothing to unregister here. |
1016 | */ |
1017 | if (!vma->vm_userfaultfd_ctx.ctx) |
1018 | goto skip; |
1019 | |
1020 | if (vma->vm_start > start) |
1021 | start = vma->vm_start; |
1022 | vma_end = min(end, vma->vm_end); |
1023 | |
1024 | new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); |
1025 | prev = vma_merge(mm, prev, start, vma_end, new_flags, |
1026 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, |
1027 | vma_policy(vma), |
1028 | NULL_VM_UFFD_CTX, |
1029 | vma_get_anon_name(vma)); |
1030 | if (prev) { |
1031 | vma = prev; |
1032 | goto next; |
1033 | } |
1034 | if (vma->vm_start < start) { |
1035 | ret = split_vma(mm, vma, start, 1); |
1036 | if (ret) |
1037 | break; |
1038 | } |
1039 | if (vma->vm_end > end) { |
1040 | ret = split_vma(mm, vma, end, 0); |
1041 | if (ret) |
1042 | break; |
1043 | } |
1044 | next: |
1045 | /* |
1046 | * In the vma_merge() successful mprotect-like case 8: |
1047 | * the next vma was merged into the current one and |
1048 | * the current one has not been updated yet. |
1049 | */ |
1050 | vma->vm_flags = new_flags; |
1051 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; |
1052 | |
1053 | skip: |
1054 | prev = vma; |
1055 | start = vma->vm_end; |
1056 | vma = vma->vm_next; |
1057 | } while (vma && vma->vm_start < end); |
1058 | out_unlock: |
1059 | up_write(&mm->mmap_sem); |
1060 | mmput(mm); |
1061 | out: |
1062 | return ret; |
1063 | } |
1064 | |
1065 | /* |
1066 | * userfaultfd_wake may be used in combination with the |
1067 | * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches. |
1068 | */ |
1069 | static int userfaultfd_wake(struct userfaultfd_ctx *ctx, |
1070 | unsigned long arg) |
1071 | { |
1072 | int ret; |
1073 | struct uffdio_range uffdio_wake; |
1074 | struct userfaultfd_wake_range range; |
1075 | const void __user *buf = (void __user *)arg; |
1076 | |
1077 | ret = -EFAULT; |
1078 | if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) |
1079 | goto out; |
1080 | |
1081 | ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); |
1082 | if (ret) |
1083 | goto out; |
1084 | |
1085 | range.start = uffdio_wake.start; |
1086 | range.len = uffdio_wake.len; |
1087 | |
1088 | /* |
1089 | * len == 0 means wake all and we don't want to wake all here, |
1090 | * so check it again to be sure. |
1091 | */ |
1092 | VM_BUG_ON(!range.len); |
1093 | |
1094 | wake_userfault(ctx, &range); |
1095 | ret = 0; |
1096 | |
1097 | out: |
1098 | return ret; |
1099 | } |
1100 | |
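| /* |
| * UFFDIO_COPY: atomically fill the requested range with data copied from |
| * uffdio_copy.src and, unless DONTWAKE is set, wake the userfaults that |
| * are blocked on the filled range. |
| */ |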
1101 | static int userfaultfd_copy(struct userfaultfd_ctx *ctx, |
1102 | unsigned long arg) |
1103 | { |
1104 | __s64 ret; |
1105 | struct uffdio_copy uffdio_copy; |
1106 | struct uffdio_copy __user *user_uffdio_copy; |
1107 | struct userfaultfd_wake_range range; |
1108 | |
1109 | user_uffdio_copy = (struct uffdio_copy __user *) arg; |
1110 | |
1111 | ret = -EFAULT; |
1112 | if (copy_from_user(&uffdio_copy, user_uffdio_copy, |
1113 | /* don't copy "copy" last field */ |
1114 | sizeof(uffdio_copy)-sizeof(__s64))) |
1115 | goto out; |
1116 | |
1117 | ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); |
1118 | if (ret) |
1119 | goto out; |
1120 | /* |
1121 | * double check for wraparound just in case. copy_from_user() |
1122 | * will later check uffdio_copy.src + uffdio_copy.len to fit |
1123 | * in the userland range. |
1124 | */ |
1125 | ret = -EINVAL; |
1126 | if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) |
1127 | goto out; |
1128 | if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE) |
1129 | goto out; |
1130 | if (mmget_not_zero(ctx->mm)) { |
1131 | ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, |
1132 | uffdio_copy.len); |
1133 | mmput(ctx->mm); |
1134 | } |
1135 | if (unlikely(put_user(ret, &user_uffdio_copy->copy))) |
1136 | return -EFAULT; |
1137 | if (ret < 0) |
1138 | goto out; |
1139 | BUG_ON(!ret); |
1140 | /* len == 0 would wake all */ |
1141 | range.len = ret; |
1142 | if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { |
1143 | range.start = uffdio_copy.dst; |
1144 | wake_userfault(ctx, &range); |
1145 | } |
1146 | ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; |
1147 | out: |
1148 | return ret; |
1149 | } |
1150 | |
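| /* |
| * UFFDIO_ZEROPAGE: like UFFDIO_COPY but fills the requested range with |
| * zero pages instead of copying data from userland. |
| */ |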
1151 | static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, |
1152 | unsigned long arg) |
1153 | { |
1154 | __s64 ret; |
1155 | struct uffdio_zeropage uffdio_zeropage; |
1156 | struct uffdio_zeropage __user *user_uffdio_zeropage; |
1157 | struct userfaultfd_wake_range range; |
1158 | |
1159 | user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; |
1160 | |
1161 | ret = -EFAULT; |
1162 | if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, |
1163 | /* don't copy "zeropage" last field */ |
1164 | sizeof(uffdio_zeropage)-sizeof(__s64))) |
1165 | goto out; |
1166 | |
1167 | ret = validate_range(ctx->mm, uffdio_zeropage.range.start, |
1168 | uffdio_zeropage.range.len); |
1169 | if (ret) |
1170 | goto out; |
1171 | ret = -EINVAL; |
1172 | if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) |
1173 | goto out; |
1174 | |
1175 | if (mmget_not_zero(ctx->mm)) { |
1176 | ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, |
1177 | uffdio_zeropage.range.len); |
1178 | mmput(ctx->mm); |
1179 | } |
1180 | if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) |
1181 | return -EFAULT; |
1182 | if (ret < 0) |
1183 | goto out; |
1184 | /* len == 0 would wake all */ |
1185 | BUG_ON(!ret); |
1186 | range.len = ret; |
1187 | if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { |
1188 | range.start = uffdio_zeropage.range.start; |
1189 | wake_userfault(ctx, &range); |
1190 | } |
1191 | ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN; |
1192 | out: |
1193 | return ret; |
1194 | } |
1195 | |
1196 | /* |
1197 | * userland asks for a certain API version and we return which bits |
1198 | * and ioctl commands are implemented in this kernel for such API |
1199 | * version or -EINVAL if unknown. |
1200 | */ |
1201 | static int userfaultfd_api(struct userfaultfd_ctx *ctx, |
1202 | unsigned long arg) |
1203 | { |
1204 | struct uffdio_api uffdio_api; |
1205 | void __user *buf = (void __user *)arg; |
1206 | int ret; |
1207 | |
1208 | ret = -EINVAL; |
1209 | if (ctx->state != UFFD_STATE_WAIT_API) |
1210 | goto out; |
1211 | ret = -EFAULT; |
1212 | if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) |
1213 | goto out; |
1214 | if (uffdio_api.api != UFFD_API || uffdio_api.features) { |
1215 | memset(&uffdio_api, 0, sizeof(uffdio_api)); |
1216 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) |
1217 | goto out; |
1218 | ret = -EINVAL; |
1219 | goto out; |
1220 | } |
1221 | uffdio_api.features = UFFD_API_FEATURES; |
1222 | uffdio_api.ioctls = UFFD_API_IOCTLS; |
1223 | ret = -EFAULT; |
1224 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) |
1225 | goto out; |
1226 | ctx->state = UFFD_STATE_RUNNING; |
1227 | ret = 0; |
1228 | out: |
1229 | return ret; |
1230 | } |
1231 | |
1232 | static long userfaultfd_ioctl(struct file *file, unsigned cmd, |
1233 | unsigned long arg) |
1234 | { |
1235 | int ret = -EINVAL; |
1236 | struct userfaultfd_ctx *ctx = file->private_data; |
1237 | |
1238 | if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API) |
1239 | return -EINVAL; |
1240 | |
1241 | switch(cmd) { |
1242 | case UFFDIO_API: |
1243 | ret = userfaultfd_api(ctx, arg); |
1244 | break; |
1245 | case UFFDIO_REGISTER: |
1246 | ret = userfaultfd_register(ctx, arg); |
1247 | break; |
1248 | case UFFDIO_UNREGISTER: |
1249 | ret = userfaultfd_unregister(ctx, arg); |
1250 | break; |
1251 | case UFFDIO_WAKE: |
1252 | ret = userfaultfd_wake(ctx, arg); |
1253 | break; |
1254 | case UFFDIO_COPY: |
1255 | ret = userfaultfd_copy(ctx, arg); |
1256 | break; |
1257 | case UFFDIO_ZEROPAGE: |
1258 | ret = userfaultfd_zeropage(ctx, arg); |
1259 | break; |
1260 | } |
1261 | return ret; |
1262 | } |
1263 | |
1264 | #ifdef CONFIG_PROC_FS |
1265 | static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) |
1266 | { |
1267 | struct userfaultfd_ctx *ctx = f->private_data; |
1268 | wait_queue_t *wq; |
1269 | struct userfaultfd_wait_queue *uwq; |
1270 | unsigned long pending = 0, total = 0; |
1271 | |
1272 | spin_lock(&ctx->fault_pending_wqh.lock); |
1273 | list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) { |
1274 | uwq = container_of(wq, struct userfaultfd_wait_queue, wq); |
1275 | pending++; |
1276 | total++; |
1277 | } |
1278 | list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) { |
1279 | uwq = container_of(wq, struct userfaultfd_wait_queue, wq); |
1280 | total++; |
1281 | } |
1282 | spin_unlock(&ctx->fault_pending_wqh.lock); |
1283 | |
1284 | /* |
1285 | * If more protocols are added in the future, they will all be shown |
1286 | * separated by a space, like this: |
1287 | * protocols: aa:... bb:... |
1288 | */ |
1289 | seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", |
1290 | pending, total, UFFD_API, UFFD_API_FEATURES, |
1291 | UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); |
1292 | } |
1293 | #endif |
1294 | |
1295 | static const struct file_operations userfaultfd_fops = { |
1296 | #ifdef CONFIG_PROC_FS |
1297 | .show_fdinfo = userfaultfd_show_fdinfo, |
1298 | #endif |
1299 | .release = userfaultfd_release, |
1300 | .poll = userfaultfd_poll, |
1301 | .read = userfaultfd_read, |
1302 | .unlocked_ioctl = userfaultfd_ioctl, |
1303 | .compat_ioctl = userfaultfd_ioctl, |
1304 | .llseek = noop_llseek, |
1305 | }; |
1306 | |
1307 | static void init_once_userfaultfd_ctx(void *mem) |
1308 | { |
1309 | struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; |
1310 | |
1311 | init_waitqueue_head(&ctx->fault_pending_wqh); |
1312 | init_waitqueue_head(&ctx->fault_wqh); |
1313 | init_waitqueue_head(&ctx->fd_wqh); |
1314 | seqcount_init(&ctx->refile_seq); |
1315 | } |
1316 | |
1317 | /** |
1318 | * userfaultfd_file_create - Creates a userfaultfd file pointer. |
1319 | * @flags: Flags for the userfaultfd file. |
1320 | * |
1321 | * This function creates a userfaultfd file pointer, without installing |
1322 | * it into the fd table. This is useful when the userfaultfd file is |
1323 | * used during the initialization of data structures that require |
1324 | * extra setup after the userfaultfd creation. So the userfaultfd |
1325 | * creation is split into the file pointer creation phase, and the |
1326 | * file descriptor installation phase. In this way races with |
1327 | * userspace closing the newly installed file descriptor can be |
1328 | * avoided. Returns a userfaultfd file pointer, or a proper error |
1329 | * pointer. |
1330 | */ |
1331 | static struct file *userfaultfd_file_create(int flags) |
1332 | { |
1333 | struct file *file; |
1334 | struct userfaultfd_ctx *ctx; |
1335 | |
1336 | BUG_ON(!current->mm); |
1337 | |
1338 | /* Check the UFFD_* constants for consistency. */ |
1339 | BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); |
1340 | BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); |
1341 | |
1342 | file = ERR_PTR(-EINVAL); |
1343 | if (flags & ~UFFD_SHARED_FCNTL_FLAGS) |
1344 | goto out; |
1345 | |
1346 | file = ERR_PTR(-ENOMEM); |
1347 | ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); |
1348 | if (!ctx) |
1349 | goto out; |
1350 | |
1351 | atomic_set(&ctx->refcount, 1); |
1352 | ctx->flags = flags; |
1353 | ctx->state = UFFD_STATE_WAIT_API; |
1354 | ctx->released = false; |
1355 | ctx->mm = current->mm; |
1356 | /* prevent the mm struct from being freed */ |
1357 | atomic_inc(&ctx->mm->mm_count); |
1358 | |
1359 | file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, |
1360 | O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); |
1361 | if (IS_ERR(file)) { |
1362 | mmdrop(ctx->mm); |
1363 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); |
1364 | } |
1365 | out: |
1366 | return file; |
1367 | } |
1368 | |
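| /* |
| * Illustrative userland sequence (a sketch only, not compiled here; |
| * "addr" and "len" are placeholders and the raw syscall is used as |
| * there may be no libc wrapper): |
| * |
| * int uffd = syscall(__NR_userfaultfd, O_CLOEXEC|O_NONBLOCK); |
| * struct uffdio_api api = { .api = UFFD_API }; |
| * ioctl(uffd, UFFDIO_API, &api); |
| * struct uffdio_register reg = { |
| * .range = { .start = addr, .len = len }, |
| * .mode = UFFDIO_REGISTER_MODE_MISSING, |
| * }; |
| * ioctl(uffd, UFFDIO_REGISTER, &reg); |
| * |
| * then poll()/read() a struct uffd_msg from uffd and resolve the fault |
| * with UFFDIO_COPY or UFFDIO_ZEROPAGE, optionally followed by |
| * UFFDIO_WAKE if the DONTWAKE modes were used. |
| */ |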
1369 | SYSCALL_DEFINE1(userfaultfd, int, flags) |
1370 | { |
1371 | int fd, error; |
1372 | struct file *file; |
1373 | |
1374 | error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS); |
1375 | if (error < 0) |
1376 | return error; |
1377 | fd = error; |
1378 | |
1379 | file = userfaultfd_file_create(flags); |
1380 | if (IS_ERR(file)) { |
1381 | error = PTR_ERR(file); |
1382 | goto err_put_unused_fd; |
1383 | } |
1384 | fd_install(fd, file); |
1385 | |
1386 | return fd; |
1387 | |
1388 | err_put_unused_fd: |
1389 | put_unused_fd(fd); |
1390 | |
1391 | return error; |
1392 | } |
1393 | |
1394 | static int __init userfaultfd_init(void) |
1395 | { |
1396 | userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", |
1397 | sizeof(struct userfaultfd_ctx), |
1398 | 0, |
1399 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
1400 | init_once_userfaultfd_ctx); |
1401 | return 0; |
1402 | } |
1403 | __initcall(userfaultfd_init); |
1404 |