/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>
#include <linux/fault-inject.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"
/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wakeup scenarios where no tasks are blocked on the futex, we
 * can avoid taking the hb spinlock and simply return. For this
 * optimization to work, ordering guarantees must exist so that a waiter
 * being added to the list is observed by a waker concurrently checking
 * the list, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when the wait call
 * may later return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued to another
 * address, we always increment the waiters count for the destination bucket
 * before acquiring the lock and decrement it again after releasing it; the
 * code that actually moves the futex(es) between hash buckets (requeue_futex)
 * does the additional required waiter count housekeeping. This is done by
 * double_lock_hb() and double_unlock_hb(), respectively.
 */
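
/*
 * Illustrative sketch, not part of this file (hence compiled out): a
 * minimal userspace wait/wake pair following the protocol documented
 * above. The helper names and the raw syscall(2) usage are assumptions
 * for illustration only.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static void example_wait(atomic_uint *futex, unsigned int val)
{
	/*
	 * The kernel re-reads *futex under the hash bucket lock and only
	 * blocks us if it still contains val (see futex_wait()).
	 */
	syscall(SYS_futex, futex, FUTEX_WAIT, val, NULL, NULL, 0);
}

static void example_wake(atomic_uint *futex, unsigned int newval)
{
	/* Publish the new value first, then wake one waiter. */
	atomic_store(futex, newval);
	syscall(SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif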

#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU systems do not have a per-process address space. Let the compiler
 * optimize the code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};
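
/*
 * Illustrative sketch, not part of this file (hence compiled out): the
 * userspace side of the PI protocol that this state backs. The lock word
 * holds the owner TID; contended paths enter the kernel, which creates
 * and maintains the futex_pi_state. Helper names are assumptions for
 * illustration only.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static void example_pi_lock(atomic_uint *futex)
{
	unsigned int expected = 0;

	/* Fast path: 0 -> TID. Slow path: the kernel boosts the owner. */
	if (!atomic_compare_exchange_strong(futex, &expected, gettid()))
		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void example_pi_unlock(atomic_uint *futex)
{
	unsigned int expected = gettid();

	/* Fast path: TID -> 0. Fails if FUTEX_WAITERS got set. */
	if (!atomic_compare_exchange_strong(futex, &expected, 0))
		syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
#endif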

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-private", mode, dir,
				 &fail_futex.ignore_private)) {
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

static inline void futex_get_mm(union futex_key *key)
{
	atomic_inc(&key->private.mm->mm_count);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}


/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU-less systems futexes are always "private" as there is no
	 * per-process address space. We still need the memory barrier
	 * nevertheless - yes, arch/blackfin has MMU-less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *		VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs the
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping are looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but let's be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 *
 * Must be called with the hb lock held.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]  Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]  Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]  Invalid. The waiter is queued on a non-PI futex.
 *
 * [4]  Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]  The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list().
 *
 * [6]  Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]  pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]  Owner and user space value match.
 *
 * [9]  There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4].
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 */

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	WARN_ON(!atomic_read(&pi_state->refcount));

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				return -EINVAL;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_state;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_state;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			return -EINVAL;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		return -EINVAL;
out_state:
	atomic_inc(&pi_state->refcount);
	*ps = pi_state;
	return 0;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *match = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;

	/* If the user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for. This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *match;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	match = futex_top_waiter(hb, key);
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * No waiter and the user TID is 0. We are here because the
	 * waiters or the owner died bit is set, we were called from
	 * requeue_cmp_pi, or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourselves to
	 * the owner. If the owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add() grabs reference to p.
	 */
	wake_q_add(wake_q, p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
			 struct futex_hash_bucket *hb)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
	WAKE_Q(wake_q);
	bool deboost;
	int ret = 0;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. The WAITERS bit is always
	 * kept enabled while there is PI state around. We cleanup the
	 * owner died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
		ret = -EFAULT;
	} else if (curval != uval) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}
	if (ret) {
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		return ret;
	}

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

	/*
	 * First, unlock the hash bucket so the waiter does not spin on it
	 * once it is woken up. Second, wake up the waiter before the
	 * priority is adjusted. If we deboost first (and lose our higher
	 * priority), then the task might get scheduled away before the
	 * wake up can take place.
	 */
	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
	if (deboost)
		rt_mutex_adjust_prio(current);

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}
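
/*
 * Illustrative sketch, not part of this file (hence compiled out): how
 * userspace exercises the bitset matching performed above, waking only
 * waiters whose wait bitset intersects the wake bitset. The helper name
 * and variables are assumptions for illustration only.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>

static void example_bitset(unsigned int *word, unsigned int val)
{
	/* Waiter: tag ourselves with bit 0 (val3 is the bitset). */
	syscall(SYS_futex, word, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);

	/* Waker: wake only waiters whose bitset intersects 0x1. */
	syscall(SYS_futex, word, FUTEX_WAKE_BITSET, INT_MAX, NULL, NULL, 0x1);
}
#endif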

static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					    get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}
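
/*
 * Illustrative sketch, not part of this file (hence compiled out): how
 * userspace packs encoded_op for FUTEX_WAKE_OP with the uapi FUTEX_OP()
 * macro, matching the field layout decoded above. The helper name and
 * variables are assumptions for illustration only.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long example_wake_op(unsigned int *uaddr1, unsigned int *uaddr2,
			    int nr_wake, int nr_wake2)
{
	/*
	 * "*uaddr2 += 1, then also wake waiters on uaddr2 if the old
	 * value was > 0": op=ADD, oparg=1, cmp=CMP_GT, cmparg=0.
	 */
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	/* nr_wake2 travels in the timeout slot of the syscall. */
	return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, nr_wake,
		       nr_wake2, uaddr2, encoded_op);
}
#endif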

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal. Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later. Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}
1662 | |
1663 | /** |
1664 | * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter |
1665 | * @pifutex: the user address of the to futex |
1666 | * @hb1: the from futex hash bucket, must be locked by the caller |
1667 | * @hb2: the to futex hash bucket, must be locked by the caller |
1668 | * @key1: the from futex key |
1669 | * @key2: the to futex key |
1670 | * @ps: address to store the pi_state pointer |
1671 | * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
1672 | * |
1673 | * Try and get the lock on behalf of the top waiter if we can do it atomically. |
1674 | * Wake the top waiter if we succeed. If the caller specified set_waiters, |
1675 | * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. |
1676 | * hb1 and hb2 must be held by the caller. |
1677 | * |
1678 | * Return: |
1679 | * 0 - failed to acquire the lock atomically; |
1680 | * >0 - acquired the lock, return value is vpid of the top_waiter |
1681 | * <0 - error |
1682 | */ |
1683 | static int futex_proxy_trylock_atomic(u32 __user *pifutex, |
1684 | struct futex_hash_bucket *hb1, |
1685 | struct futex_hash_bucket *hb2, |
1686 | union futex_key *key1, union futex_key *key2, |
1687 | struct futex_pi_state **ps, int set_waiters) |
1688 | { |
1689 | struct futex_q *top_waiter = NULL; |
1690 | u32 curval; |
1691 | int ret, vpid; |
1692 | |
1693 | if (get_futex_value_locked(&curval, pifutex)) |
1694 | return -EFAULT; |
1695 | |
1696 | if (unlikely(should_fail_futex(true))) |
1697 | return -EFAULT; |
1698 | |
1699 | /* |
1700 | * Find the top_waiter and determine if there are additional waiters. |
1701 | * If the caller intends to requeue more than 1 waiter to pifutex, |
1702 | * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, |
1703 | * as we have means to handle the possible fault. If not, don't set |
1704 | * the bit unnecessarily as it will force the subsequent unlock to enter
1705 | * the kernel. |
1706 | */ |
1707 | top_waiter = futex_top_waiter(hb1, key1); |
1708 | |
1709 | /* There are no waiters, nothing for us to do. */ |
1710 | if (!top_waiter) |
1711 | return 0; |
1712 | |
1713 | /* Ensure we requeue to the expected futex. */ |
1714 | if (!match_futex(top_waiter->requeue_pi_key, key2)) |
1715 | return -EINVAL; |
1716 | |
1717 | /* |
1718 | * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in |
1719 | * the contended case or if set_waiters is 1. The pi_state is returned |
1720 | * in ps in contended cases. |
1721 | */ |
1722 | vpid = task_pid_vnr(top_waiter->task); |
1723 | ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, |
1724 | set_waiters); |
1725 | if (ret == 1) { |
1726 | requeue_pi_wake_futex(top_waiter, key2, hb2); |
1727 | return vpid; |
1728 | } |
1729 | return ret; |
1730 | } |
1731 | |
1732 | /** |
1733 | * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 |
1734 | * @uaddr1: source futex user address |
1735 | * @flags: futex flags (FLAGS_SHARED, etc.) |
1736 | * @uaddr2: target futex user address |
1737 | * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) |
1738 | * @nr_requeue: number of waiters to requeue (0-INT_MAX) |
1739 | * @cmpval: @uaddr1 expected value (or %NULL) |
1740 | * @requeue_pi: if we are attempting to requeue from a non-pi futex to a |
1741 | * pi futex (pi to pi requeue is not supported) |
1742 | * |
1743 | * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire |
1744 | * uaddr2 atomically on behalf of the top waiter. |
1745 | * |
1746 | * Return: |
1747 | * >=0 - on success, the number of tasks requeued or woken; |
1748 | * <0 - on error |
1749 | */ |
1750 | static int futex_requeue(u32 __user *uaddr1, unsigned int flags, |
1751 | u32 __user *uaddr2, int nr_wake, int nr_requeue, |
1752 | u32 *cmpval, int requeue_pi) |
1753 | { |
1754 | union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
1755 | int drop_count = 0, task_count = 0, ret; |
1756 | struct futex_pi_state *pi_state = NULL; |
1757 | struct futex_hash_bucket *hb1, *hb2; |
1758 | struct futex_q *this, *next; |
1759 | WAKE_Q(wake_q); |
1760 | |
1761 | if (nr_wake < 0 || nr_requeue < 0) |
1762 | return -EINVAL; |
1763 | |
1764 | if (requeue_pi) { |
1765 | /* |
1766 | * Requeue PI only works on two distinct uaddrs. This |
1767 | * check is only valid for private futexes. See below. |
1768 | */ |
1769 | if (uaddr1 == uaddr2) |
1770 | return -EINVAL; |
1771 | |
1772 | /* |
1773 | * requeue_pi requires a pi_state, try to allocate it now |
1774 | * without any locks in case it fails. |
1775 | */ |
1776 | if (refill_pi_state_cache()) |
1777 | return -ENOMEM; |
1778 | /* |
1779 | * requeue_pi must wake as many tasks as it can, up to nr_wake |
1780 | * + nr_requeue, since it acquires the rt_mutex prior to |
1781 | * returning to userspace, so as to not leave the rt_mutex with |
1782 | * waiters and no owner. However, second and third wake-ups |
1783 | * cannot be predicted as they involve race conditions with the |
1784 | * first wake and a fault while looking up the pi_state. Both |
1785 | * pthread_cond_signal() and pthread_cond_broadcast() should |
1786 | * use nr_wake=1. |
1787 | */ |
1788 | if (nr_wake != 1) |
1789 | return -EINVAL; |
1790 | } |
1791 | |
1792 | retry: |
1793 | ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); |
1794 | if (unlikely(ret != 0)) |
1795 | goto out; |
1796 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, |
1797 | requeue_pi ? VERIFY_WRITE : VERIFY_READ); |
1798 | if (unlikely(ret != 0)) |
1799 | goto out_put_key1; |
1800 | |
1801 | /* |
1802 | * The check above which compares uaddrs is not sufficient for |
1803 | * shared futexes. We need to compare the keys: |
1804 | */ |
1805 | if (requeue_pi && match_futex(&key1, &key2)) { |
1806 | ret = -EINVAL; |
1807 | goto out_put_keys; |
1808 | } |
1809 | |
1810 | hb1 = hash_futex(&key1); |
1811 | hb2 = hash_futex(&key2); |
1812 | |
1813 | retry_private: |
1814 | hb_waiters_inc(hb2); |
1815 | double_lock_hb(hb1, hb2); |
1816 | |
1817 | if (likely(cmpval != NULL)) { |
1818 | u32 curval; |
1819 | |
1820 | ret = get_futex_value_locked(&curval, uaddr1); |
1821 | |
1822 | if (unlikely(ret)) { |
1823 | double_unlock_hb(hb1, hb2); |
1824 | hb_waiters_dec(hb2); |
1825 | |
1826 | ret = get_user(curval, uaddr1); |
1827 | if (ret) |
1828 | goto out_put_keys; |
1829 | |
1830 | if (!(flags & FLAGS_SHARED)) |
1831 | goto retry_private; |
1832 | |
1833 | put_futex_key(&key2); |
1834 | put_futex_key(&key1); |
1835 | goto retry; |
1836 | } |
1837 | if (curval != *cmpval) { |
1838 | ret = -EAGAIN; |
1839 | goto out_unlock; |
1840 | } |
1841 | } |
1842 | |
1843 | if (requeue_pi && (task_count - nr_wake < nr_requeue)) { |
1844 | /* |
1845 | * Attempt to acquire uaddr2 and wake the top waiter. If we |
1846 | * intend to requeue waiters, force setting the FUTEX_WAITERS |
1847 | * bit. We force this here where we are able to easily handle |
1848 | * faults rather than in the requeue loop below.
1849 | */ |
1850 | ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, |
1851 | &key2, &pi_state, nr_requeue); |
1852 | |
1853 | /* |
1854 | * At this point the top_waiter has either taken uaddr2 or is |
1855 | * waiting on it. If the former, then the pi_state will not |
1856 | * exist yet, look it up one more time to ensure we have a |
1857 | * reference to it. If the lock was taken, ret contains the |
1858 | * vpid of the top waiter task. |
1859 | * If the lock was not taken, we have pi_state and an initial |
1860 | * refcount on it. In case of an error we have nothing. |
1861 | */ |
1862 | if (ret > 0) { |
1863 | WARN_ON(pi_state); |
1864 | drop_count++; |
1865 | task_count++; |
1866 | /* |
1867 | * If we acquired the lock, then the user space value |
1868 | * of uaddr2 should be vpid. It cannot be changed by |
1869 | * the top waiter as it is blocked on hb2 lock if it |
1870 | * tries to do so. If something fiddled with it behind |
1871 | * our back the pi state lookup might unearth it. So |
1872 | * we use the known value rather than rereading and
1873 | * handing potential garbage to lookup_pi_state().
1874 | * |
1875 | * If that call succeeds then we have pi_state and an |
1876 | * initial refcount on it. |
1877 | */ |
1878 | ret = lookup_pi_state(ret, hb2, &key2, &pi_state); |
1879 | } |
1880 | |
1881 | switch (ret) { |
1882 | case 0: |
1883 | /* We hold a reference on the pi state. */ |
1884 | break; |
1885 | |
1886 | /* If the above failed, then pi_state is NULL */ |
1887 | case -EFAULT: |
1888 | double_unlock_hb(hb1, hb2); |
1889 | hb_waiters_dec(hb2); |
1890 | put_futex_key(&key2); |
1891 | put_futex_key(&key1); |
1892 | ret = fault_in_user_writeable(uaddr2); |
1893 | if (!ret) |
1894 | goto retry; |
1895 | goto out; |
1896 | case -EAGAIN: |
1897 | /* |
1898 | * Two reasons for this: |
1899 | * - Owner is exiting and we just wait for the |
1900 | * exit to complete. |
1901 | * - The user space value changed. |
1902 | */ |
1903 | double_unlock_hb(hb1, hb2); |
1904 | hb_waiters_dec(hb2); |
1905 | put_futex_key(&key2); |
1906 | put_futex_key(&key1); |
1907 | cond_resched(); |
1908 | goto retry; |
1909 | default: |
1910 | goto out_unlock; |
1911 | } |
1912 | } |
1913 | |
1914 | plist_for_each_entry_safe(this, next, &hb1->chain, list) { |
1915 | if (task_count - nr_wake >= nr_requeue) |
1916 | break; |
1917 | |
1918 | if (!match_futex(&this->key, &key1)) |
1919 | continue; |
1920 | |
1921 | /* |
1922 | * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always |
1923 | * be paired with each other and no other futex ops. |
1924 | * |
1925 | * We should never be requeueing a futex_q with a pi_state, |
1926 | * which is awaiting a futex_unlock_pi(). |
1927 | */ |
1928 | if ((requeue_pi && !this->rt_waiter) || |
1929 | (!requeue_pi && this->rt_waiter) || |
1930 | this->pi_state) { |
1931 | ret = -EINVAL; |
1932 | break; |
1933 | } |
1934 | |
1935 | /* |
1936 | * Wake nr_wake waiters. For requeue_pi, if we acquired the |
1937 | * lock, we already woke the top_waiter. If not, it will be |
1938 | * woken by futex_unlock_pi(). |
1939 | */ |
1940 | if (++task_count <= nr_wake && !requeue_pi) { |
1941 | mark_wake_futex(&wake_q, this); |
1942 | continue; |
1943 | } |
1944 | |
1945 | /* Ensure we requeue to the expected futex for requeue_pi. */ |
1946 | if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { |
1947 | ret = -EINVAL; |
1948 | break; |
1949 | } |
1950 | |
1951 | /* |
1952 | * Requeue nr_requeue waiters and possibly one more in the case |
1953 | * of requeue_pi if we couldn't acquire the lock atomically. |
1954 | */ |
1955 | if (requeue_pi) { |
1956 | /* |
1957 | * Prepare the waiter to take the rt_mutex. Take a |
1958 | * refcount on the pi_state and store the pointer in |
1959 | * the futex_q object of the waiter. |
1960 | */ |
1961 | atomic_inc(&pi_state->refcount); |
1962 | this->pi_state = pi_state; |
1963 | ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, |
1964 | this->rt_waiter, |
1965 | this->task); |
1966 | if (ret == 1) { |
1967 | /* |
1968 | * We got the lock. We do neither drop the |
1969 | * refcount on pi_state nor clear |
1970 | * this->pi_state because the waiter needs the |
1971 | * pi_state for cleaning up the user space |
1972 | * value. It will drop the refcount after |
1973 | * doing so. |
1974 | */ |
1975 | requeue_pi_wake_futex(this, &key2, hb2); |
1976 | drop_count++; |
1977 | continue; |
1978 | } else if (ret) { |
1979 | /* |
1980 | * rt_mutex_start_proxy_lock() detected a |
1981 | * potential deadlock when we tried to queue |
1982 | * that waiter. Drop the pi_state reference |
1983 | * which we took above and remove the pointer |
1984 | * to the state from the waiters futex_q |
1985 | * object. |
1986 | */ |
1987 | this->pi_state = NULL; |
1988 | put_pi_state(pi_state); |
1989 | /* |
1990 | * We stop queueing more waiters and let user |
1991 | * space deal with the mess. |
1992 | */ |
1993 | break; |
1994 | } |
1995 | } |
1996 | requeue_futex(this, hb1, hb2, &key2); |
1997 | drop_count++; |
1998 | } |
1999 | |
2000 | /* |
2001 | * We took an extra initial reference to the pi_state either |
2002 | * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We |
2003 | * need to drop it here again. |
2004 | */ |
2005 | put_pi_state(pi_state); |
2006 | |
2007 | out_unlock: |
2008 | double_unlock_hb(hb1, hb2); |
2009 | wake_up_q(&wake_q); |
2010 | hb_waiters_dec(hb2); |
2011 | |
2012 | /* |
2013 | * drop_futex_key_refs() must be called outside the spinlocks. During |
2014 | * the requeue we moved futex_q's from the hash bucket at key1 to the |
2015 | * one at key2 and updated their key pointer. We no longer need to |
2016 | * hold the references to key1. |
2017 | */ |
2018 | while (--drop_count >= 0) |
2019 | drop_futex_key_refs(&key1); |
2020 | |
2021 | out_put_keys: |
2022 | put_futex_key(&key2); |
2023 | out_put_key1: |
2024 | put_futex_key(&key1); |
2025 | out: |
2026 | return ret ? ret : task_count; |
2027 | } |
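
/*
 * Illustrative user space sketch (names are hypothetical, not a real
 * API): a condvar broadcast that wakes one waiter and requeues the rest
 * onto the mutex futex instead of waking them all into a thundering
 * herd:
 *
 *	cond->seq++;
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE, 1, INT_MAX,
 *		&mutex->lock, cond->seq);
 *
 * At most one waiter is woken; up to INT_MAX others are moved onto
 * mutex->lock and will be woken one at a time as the mutex is released.
 * -EAGAIN is returned if cond->seq no longer matches the passed value,
 * mirroring the cmpval check in futex_requeue() above.
 */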
2028 | |
2029 | /* The key must be already stored in q->key. */ |
2030 | static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) |
2031 | __acquires(&hb->lock) |
2032 | { |
2033 | struct futex_hash_bucket *hb; |
2034 | |
2035 | hb = hash_futex(&q->key); |
2036 | |
2037 | /* |
2038 | * Increment the counter before taking the lock so that |
2039 | * a potential waker won't miss a task that is about to sleep but is
2040 | * still waiting for the spinlock. This is safe as all queue_lock()
2041 | * users end up calling queue_me(). Similarly, for housekeeping, |
2042 | * decrement the counter at queue_unlock() when some error has |
2043 | * occurred and we don't end up adding the task to the list. |
2044 | */ |
2045 | hb_waiters_inc(hb); |
2046 | |
2047 | q->lock_ptr = &hb->lock; |
2048 | |
2049 | spin_lock(&hb->lock); /* implies smp_mb(); (A) */ |
2050 | return hb; |
2051 | } |
2052 | |
2053 | static inline void |
2054 | queue_unlock(struct futex_hash_bucket *hb) |
2055 | __releases(&hb->lock) |
2056 | { |
2057 | spin_unlock(&hb->lock); |
2058 | hb_waiters_dec(hb); |
2059 | } |
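
/*
 * Rough sketch of how the waiter counter pairs with the waker side (the
 * full ordering argument lives in the comment block at the top of this
 * file):
 *
 *	waiter				waker
 *	------				-----
 *	hb_waiters_inc(hb);		*uaddr = new_val;
 *	spin_lock(&hb->lock);		if (!hb_waiters_pending(hb))
 *	read *uaddr, enqueue			return;	(nobody to wake)
 *
 * Because the increment happens before the futex value is re-read, a
 * waker that observes a zero count is guaranteed that the waiter will
 * see the updated value and not go to sleep on the stale one.
 */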
2060 | |
2061 | /** |
2062 | * queue_me() - Enqueue the futex_q on the futex_hash_bucket |
2063 | * @q: The futex_q to enqueue |
2064 | * @hb: The destination hash bucket |
2065 | * |
2066 | * The hb->lock must be held by the caller, and is released here. A call to |
2067 | * queue_me() is typically paired with exactly one call to unqueue_me(). The |
2068 | * exceptions involve the PI related operations, which may use unqueue_me_pi() |
2069 | * or nothing if the unqueue is done as part of the wake process and the unqueue |
2070 | * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
2071 | * an example). |
2072 | */ |
2073 | static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
2074 | __releases(&hb->lock) |
2075 | { |
2076 | int prio; |
2077 | |
2078 | /* |
2079 | * The priority used to register this element is |
2080 | * - either the real thread-priority for the real-time threads |
2081 | * (i.e. threads with a priority lower than MAX_RT_PRIO) |
2082 | * - or MAX_RT_PRIO for non-RT threads. |
2083 | * Thus, all RT-threads are woken first in priority order, and |
2084 | * the others are woken last, in FIFO order. |
2085 | */ |
2086 | prio = min(current->normal_prio, MAX_RT_PRIO); |
2087 | |
2088 | plist_node_init(&q->list, prio); |
2089 | plist_add(&q->list, &hb->chain); |
2090 | q->task = current; |
2091 | spin_unlock(&hb->lock); |
2092 | } |
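
/*
 * Worked example of the priority rule above, assuming the default
 * MAX_RT_PRIO of 100: a SCHED_FIFO waiter with normal_prio 10 is queued
 * at plist priority 10, while a nice-0 SCHED_OTHER waiter (normal_prio
 * 120) is clamped to min(120, 100) = 100. The RT waiter therefore sits
 * ahead of every non-RT waiter, and all non-RT waiters tie at 100 and
 * wake in FIFO order.
 */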
2093 | |
2094 | /** |
2095 | * unqueue_me() - Remove the futex_q from its futex_hash_bucket |
2096 | * @q: The futex_q to unqueue |
2097 | * |
2098 | * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must |
2099 | * be paired with exactly one earlier call to queue_me(). |
2100 | * |
2101 | * Return: |
2102 | * 1 - if the futex_q was still queued (and we removed it);
2103 | * 0 - if the futex_q was already removed by the waking thread |
2104 | */ |
2105 | static int unqueue_me(struct futex_q *q) |
2106 | { |
2107 | spinlock_t *lock_ptr; |
2108 | int ret = 0; |
2109 | |
2110 | /* In the common case we don't take the spinlock, which is nice. */ |
2111 | retry: |
2112 | /* |
2113 | * q->lock_ptr can change between this read and the following spin_lock. |
2114 | * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and |
2115 | * optimizing lock_ptr out of the logic below. |
2116 | */ |
2117 | lock_ptr = READ_ONCE(q->lock_ptr); |
2118 | if (lock_ptr != NULL) { |
2119 | spin_lock(lock_ptr); |
2120 | /* |
2121 | * q->lock_ptr can change between reading it and |
2122 | * spin_lock(), causing us to take the wrong lock. This |
2123 | * corrects the race condition. |
2124 | * |
2125 | * Reasoning goes like this: if we have the wrong lock, |
2126 | * q->lock_ptr must have changed (maybe several times) |
2127 | * between reading it and the spin_lock(). It can |
2128 | * change again after the spin_lock() but only if it was |
2129 | * already changed before the spin_lock(). It cannot, |
2130 | * however, change back to the original value. Therefore |
2131 | * we can detect whether we acquired the correct lock. |
2132 | */ |
2133 | if (unlikely(lock_ptr != q->lock_ptr)) { |
2134 | spin_unlock(lock_ptr); |
2135 | goto retry; |
2136 | } |
2137 | __unqueue_futex(q); |
2138 | |
2139 | BUG_ON(q->pi_state); |
2140 | |
2141 | spin_unlock(lock_ptr); |
2142 | ret = 1; |
2143 | } |
2144 | |
2145 | drop_futex_key_refs(&q->key); |
2146 | return ret; |
2147 | } |
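
/*
 * Timeline sketch of the lock_ptr race that the retry loop above
 * handles (illustrative):
 *
 *	unqueue_me()			futex_requeue()
 *	------------			---------------
 *	lock_ptr = q->lock_ptr;		(reads &hb1->lock)
 *					q->lock_ptr = &hb2->lock;
 *	spin_lock(lock_ptr);		(locks hb1 - the wrong lock)
 *	lock_ptr != q->lock_ptr
 *	  -> unlock and retry
 *	lock_ptr = q->lock_ptr;		(now &hb2->lock)
 *	spin_lock(lock_ptr);		(correct lock, proceed)
 */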
2148 | |
2149 | /* |
2150 | * PI futexes cannot be requeued and must remove themselves from the
2151 | * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry |
2152 | * and dropped here. |
2153 | */ |
2154 | static void unqueue_me_pi(struct futex_q *q) |
2155 | __releases(q->lock_ptr) |
2156 | { |
2157 | __unqueue_futex(q); |
2158 | |
2159 | BUG_ON(!q->pi_state); |
2160 | put_pi_state(q->pi_state); |
2161 | q->pi_state = NULL; |
2162 | |
2163 | spin_unlock(q->lock_ptr); |
2164 | } |
2165 | |
2166 | /* |
2167 | * Fixup the pi_state owner with the new owner. |
2168 | * |
2169 | * Must be called with the hash bucket lock held and mm->sem held for
2170 | * non-private futexes.
2171 | */ |
2172 | static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
2173 | struct task_struct *newowner) |
2174 | { |
2175 | u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; |
2176 | struct futex_pi_state *pi_state = q->pi_state; |
2177 | struct task_struct *oldowner = pi_state->owner; |
2178 | u32 uval, uninitialized_var(curval), newval; |
2179 | int ret; |
2180 | |
2181 | /* Owner died? */ |
2182 | if (!pi_state->owner) |
2183 | newtid |= FUTEX_OWNER_DIED; |
2184 | |
2185 | /* |
2186 | * We are here either because we stole the rtmutex from the |
2187 | * previous highest priority waiter or we are the highest priority |
2188 | * waiter but failed to get the rtmutex the first time. |
2189 | * We have to replace the newowner TID in the user space variable. |
2190 | * This must be atomic as we have to preserve the owner died bit here. |
2191 | * |
2192 | * Note: We write the user space value _before_ changing the pi_state |
2193 | * because we can fault here. Imagine swapped out pages or a fork |
2194 | * that marked all the anonymous memory readonly for cow. |
2195 | * |
2196 | * Modifying pi_state _before_ the user space value would |
2197 | * leave the pi_state in an inconsistent state when we fault |
2198 | * here, because we need to drop the hash bucket lock to |
2199 | * handle the fault. This might be observed in the PID check |
2200 | * in lookup_pi_state. |
2201 | */ |
2202 | retry: |
2203 | if (get_futex_value_locked(&uval, uaddr)) |
2204 | goto handle_fault; |
2205 | |
2206 | while (1) { |
2207 | newval = (uval & FUTEX_OWNER_DIED) | newtid; |
2208 | |
2209 | if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) |
2210 | goto handle_fault; |
2211 | if (curval == uval) |
2212 | break; |
2213 | uval = curval; |
2214 | } |
2215 | |
2216 | /* |
2217 | * We fixed up user space. Now we need to fix the pi_state |
2218 | * itself. |
2219 | */ |
2220 | if (pi_state->owner != NULL) { |
2221 | raw_spin_lock_irq(&pi_state->owner->pi_lock); |
2222 | WARN_ON(list_empty(&pi_state->list)); |
2223 | list_del_init(&pi_state->list); |
2224 | raw_spin_unlock_irq(&pi_state->owner->pi_lock); |
2225 | } |
2226 | |
2227 | pi_state->owner = newowner; |
2228 | |
2229 | raw_spin_lock_irq(&newowner->pi_lock); |
2230 | WARN_ON(!list_empty(&pi_state->list)); |
2231 | list_add(&pi_state->list, &newowner->pi_state_list); |
2232 | raw_spin_unlock_irq(&newowner->pi_lock); |
2233 | return 0; |
2234 | |
2235 | /* |
2236 | * To handle the page fault we need to drop the hash bucket |
2237 | * lock here. That gives the other task (either the highest priority |
2238 | * waiter itself or the task which stole the rtmutex) the |
2239 | * chance to try the fixup of the pi_state. So once we are |
2240 | * back from handling the fault we need to check the pi_state |
2241 | * after reacquiring the hash bucket lock and before trying to |
2242 | * do another fixup. When the fixup has been done already we |
2243 | * simply return. |
2244 | */ |
2245 | handle_fault: |
2246 | spin_unlock(q->lock_ptr); |
2247 | |
2248 | ret = fault_in_user_writeable(uaddr); |
2249 | |
2250 | spin_lock(q->lock_ptr); |
2251 | |
2252 | /* |
2253 | * Check if someone else fixed it for us: |
2254 | */ |
2255 | if (pi_state->owner != oldowner) |
2256 | return 0; |
2257 | |
2258 | if (ret) |
2259 | return ret; |
2260 | |
2261 | goto retry; |
2262 | } |
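
/*
 * For reference, the futex word being rewritten above has this layout
 * (values from the uapi futex header):
 *
 *	bit  31		FUTEX_WAITERS	 (waiters exist in the kernel)
 *	bit  30		FUTEX_OWNER_DIED (owner exited holding the lock)
 *	bits 29..0	FUTEX_TID_MASK	 (TID of the owner)
 *
 * The cmpxchg loop preserves FUTEX_OWNER_DIED and always sets
 * FUTEX_WAITERS, e.g. 0x8000002a (waiters + owner TID 42) becomes
 * 0x80000037 when ownership moves to TID 55.
 */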
2263 | |
2264 | static long futex_wait_restart(struct restart_block *restart); |
2265 | |
2266 | /** |
2267 | * fixup_owner() - Post lock pi_state and corner case management |
2268 | * @uaddr: user address of the futex |
2269 | * @q: futex_q (contains pi_state and access to the rt_mutex) |
2270 | * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) |
2271 | * |
2272 | * After attempting to lock an rt_mutex, this function is called to cleanup |
2273 | * the pi_state owner as well as handle race conditions that may allow us to |
2274 | * acquire the lock. Must be called with the hb lock held. |
2275 | * |
2276 | * Return: |
2277 | * 1 - success, lock taken; |
2278 | * 0 - success, lock not taken; |
2279 | * <0 - on error (-EFAULT) |
2280 | */ |
2281 | static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
2282 | { |
2283 | struct task_struct *owner; |
2284 | int ret = 0; |
2285 | |
2286 | if (locked) { |
2287 | /* |
2288 | * Got the lock. We might not be the anticipated owner if we |
2289 | * did a lock-steal - fix up the PI-state in that case: |
2290 | */ |
2291 | if (q->pi_state->owner != current) |
2292 | ret = fixup_pi_state_owner(uaddr, q, current); |
2293 | goto out; |
2294 | } |
2295 | |
2296 | /* |
2297 | * Catch the rare case where the lock was released while we were on the
2298 | * way back before we locked the hash bucket. |
2299 | */ |
2300 | if (q->pi_state->owner == current) { |
2301 | /* |
2302 | * Try to get the rt_mutex now. This might fail as some other |
2303 | * task acquired the rt_mutex after we removed ourself from the |
2304 | * rt_mutex waiters list. |
2305 | */ |
2306 | if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { |
2307 | locked = 1; |
2308 | goto out; |
2309 | } |
2310 | |
2311 | /* |
2312 | * pi_state is incorrect, some other task did a lock steal and |
2313 | * we returned due to timeout or signal without taking the |
2314 | * rt_mutex. Too late. |
2315 | */ |
2316 | raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock); |
2317 | owner = rt_mutex_owner(&q->pi_state->pi_mutex); |
2318 | if (!owner) |
2319 | owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); |
2320 | raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock); |
2321 | ret = fixup_pi_state_owner(uaddr, q, owner); |
2322 | goto out; |
2323 | } |
2324 | |
2325 | /* |
2326 | * Paranoia check. If we did not take the lock, then we should not be |
2327 | * the owner of the rt_mutex. |
2328 | */ |
2329 | if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) |
2330 | printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " |
2331 | "pi-state %p\n", ret, |
2332 | q->pi_state->pi_mutex.owner, |
2333 | q->pi_state->owner); |
2334 | |
2335 | out: |
2336 | return ret ? ret : locked; |
2337 | } |
2338 | |
2339 | /** |
2340 | * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal |
2341 | * @hb: the futex hash bucket, must be locked by the caller |
2342 | * @q: the futex_q to queue up on |
2343 | * @timeout: the prepared hrtimer_sleeper, or null for no timeout |
2344 | */ |
2345 | static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, |
2346 | struct hrtimer_sleeper *timeout) |
2347 | { |
2348 | /* |
2349 | * The task state is guaranteed to be set before another task can |
2350 | * wake it. set_current_state() is implemented using smp_store_mb() and |
2351 | * queue_me() calls spin_unlock() upon completion, both serializing |
2352 | * access to the hash list and forcing another memory barrier. |
2353 | */ |
2354 | set_current_state(TASK_INTERRUPTIBLE); |
2355 | queue_me(q, hb); |
2356 | |
2357 | /* Arm the timer */ |
2358 | if (timeout) |
2359 | hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); |
2360 | |
2361 | /* |
2362 | * If we have been removed from the hash list, then another task |
2363 | * has tried to wake us, and we can skip the call to schedule(). |
2364 | */ |
2365 | if (likely(!plist_node_empty(&q->list))) { |
2366 | /* |
2367 | * If the timer has already expired, current will already be |
2368 | * flagged for rescheduling. Only call schedule if there |
2369 | * is no timeout, or if it has yet to expire. |
2370 | */ |
2371 | if (!timeout || timeout->task) |
2372 | freezable_schedule(); |
2373 | } |
2374 | __set_current_state(TASK_RUNNING); |
2375 | } |
2376 | |
2377 | /** |
2378 | * futex_wait_setup() - Prepare to wait on a futex |
2379 | * @uaddr: the futex userspace address |
2380 | * @val: the expected value |
2381 | * @flags: futex flags (FLAGS_SHARED, etc.) |
2382 | * @q: the associated futex_q |
2383 | * @hb: storage for hash_bucket pointer to be returned to caller |
2384 | * |
2385 | * Setup the futex_q and locate the hash_bucket. Get the futex value and |
2386 | * compare it with the expected value. Handle atomic faults internally. |
2387 | * Return with the hb lock held and a q.key reference on success, and unlocked |
2388 | * with no q.key reference on failure. |
2389 | * |
2390 | * Return: |
2391 | * 0 - uaddr contains val and hb has been locked; |
2392 | * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2393 | */ |
2394 | static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, |
2395 | struct futex_q *q, struct futex_hash_bucket **hb) |
2396 | { |
2397 | u32 uval; |
2398 | int ret; |
2399 | |
2400 | /* |
2401 | * Access the page AFTER the hash-bucket is locked. |
2402 | * Order is important: |
2403 | * |
2404 | * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); |
2405 | * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); } |
2406 | * |
2407 | * The basic logical guarantee of a futex is that it blocks ONLY |
2408 | * if cond(var) is known to be true at the time of blocking, for |
2409 | * any cond. If we locked the hash-bucket after testing *uaddr, that |
2410 | * would open a race condition where we could block indefinitely with |
2411 | * cond(var) false, which would violate the guarantee. |
2412 | * |
2413 | * On the other hand, we insert q and release the hash-bucket only |
2414 | * after testing *uaddr. This guarantees that futex_wait() will NOT |
2415 | * absorb a wakeup if *uaddr does not match the desired values |
2416 | * while the syscall executes. |
2417 | */ |
2418 | retry: |
2419 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ); |
2420 | if (unlikely(ret != 0)) |
2421 | return ret; |
2422 | |
2423 | retry_private: |
2424 | *hb = queue_lock(q); |
2425 | |
2426 | ret = get_futex_value_locked(&uval, uaddr); |
2427 | |
2428 | if (ret) { |
2429 | queue_unlock(*hb); |
2430 | |
2431 | ret = get_user(uval, uaddr); |
2432 | if (ret) |
2433 | goto out; |
2434 | |
2435 | if (!(flags & FLAGS_SHARED)) |
2436 | goto retry_private; |
2437 | |
2438 | put_futex_key(&q->key); |
2439 | goto retry; |
2440 | } |
2441 | |
2442 | if (uval != val) { |
2443 | queue_unlock(*hb); |
2444 | ret = -EWOULDBLOCK; |
2445 | } |
2446 | |
2447 | out: |
2448 | if (ret) |
2449 | put_futex_key(&q->key); |
2450 | return ret; |
2451 | } |
2452 | |
2453 | static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, |
2454 | ktime_t *abs_time, u32 bitset) |
2455 | { |
2456 | struct hrtimer_sleeper timeout, *to = NULL; |
2457 | struct restart_block *restart; |
2458 | struct futex_hash_bucket *hb; |
2459 | struct futex_q q = futex_q_init; |
2460 | int ret; |
2461 | |
2462 | if (!bitset) |
2463 | return -EINVAL; |
2464 | q.bitset = bitset; |
2465 | |
2466 | if (abs_time) { |
2467 | to = &timeout; |
2468 | |
2469 | hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? |
2470 | CLOCK_REALTIME : CLOCK_MONOTONIC, |
2471 | HRTIMER_MODE_ABS); |
2472 | hrtimer_init_sleeper(to, current); |
2473 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
2474 | current->timer_slack_ns); |
2475 | } |
2476 | |
2477 | retry: |
2478 | /* |
2479 | * Prepare to wait on uaddr. On success, holds hb lock and increments |
2480 | * q.key refs. |
2481 | */ |
2482 | ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
2483 | if (ret) |
2484 | goto out; |
2485 | |
2486 | /* queue_me and wait for wakeup, timeout, or a signal. */ |
2487 | futex_wait_queue_me(hb, &q, to); |
2488 | |
2489 | /* If we were woken (and unqueued), we succeeded, whatever. */ |
2490 | ret = 0; |
2491 | /* unqueue_me() drops q.key ref */ |
2492 | if (!unqueue_me(&q)) |
2493 | goto out; |
2494 | ret = -ETIMEDOUT; |
2495 | if (to && !to->task) |
2496 | goto out; |
2497 | |
2498 | /* |
2499 | * We expect signal_pending(current), but we might be the |
2500 | * victim of a spurious wakeup as well. |
2501 | */ |
2502 | if (!signal_pending(current)) |
2503 | goto retry; |
2504 | |
2505 | ret = -ERESTARTSYS; |
2506 | if (!abs_time) |
2507 | goto out; |
2508 | |
2509 | restart = ¤t->restart_block; |
2510 | restart->fn = futex_wait_restart; |
2511 | restart->futex.uaddr = uaddr; |
2512 | restart->futex.val = val; |
2513 | restart->futex.time = abs_time->tv64; |
2514 | restart->futex.bitset = bitset; |
2515 | restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; |
2516 | |
2517 | ret = -ERESTART_RESTARTBLOCK; |
2518 | |
2519 | out: |
2520 | if (to) { |
2521 | hrtimer_cancel(&to->timer); |
2522 | destroy_hrtimer_on_stack(&to->timer); |
2523 | } |
2524 | return ret; |
2525 | } |
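
/*
 * Illustrative user space counterpart (a sketch, not kernel code):
 * waiting until a flag futex becomes non-zero, the pattern that
 * futex_wait() above serves:
 *
 *	while (__atomic_load_n(&futex, __ATOMIC_ACQUIRE) == 0)
 *		syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, 0,
 *			NULL, NULL, 0);
 *
 * A -EWOULDBLOCK return just means the value was no longer 0 by the
 * time the kernel re-checked it under the hash bucket lock; the loop
 * re-reads and exits. -EINTR and spurious wakeups are handled the same
 * way.
 */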
2526 | |
2527 | |
2528 | static long futex_wait_restart(struct restart_block *restart) |
2529 | { |
2530 | u32 __user *uaddr = restart->futex.uaddr; |
2531 | ktime_t t, *tp = NULL; |
2532 | |
2533 | if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { |
2534 | t.tv64 = restart->futex.time; |
2535 | tp = &t; |
2536 | } |
2537 | restart->fn = do_no_restart_syscall; |
2538 | |
2539 | return (long)futex_wait(uaddr, restart->futex.flags, |
2540 | restart->futex.val, tp, restart->futex.bitset); |
2541 | } |
2542 | |
2543 | |
2544 | /* |
2545 | * Userspace tried a 0 -> TID atomic transition of the futex value |
2546 | * and failed. The kernel side here does the whole locking operation: |
2547 | * if there are waiters then it will block as a consequence of relying |
2548 | * on rt-mutexes, it does PI, etc. (Due to races the kernel might see |
2549 | * a 0 value of the futex too.). |
2550 | * |
2551 | * Also serves as the futex trylock_pi() slow path, with the corresponding semantics.
2552 | */ |
2553 | static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, |
2554 | ktime_t *time, int trylock) |
2555 | { |
2556 | struct hrtimer_sleeper timeout, *to = NULL; |
2557 | struct futex_hash_bucket *hb; |
2558 | struct futex_q q = futex_q_init; |
2559 | int res, ret; |
2560 | |
2561 | if (refill_pi_state_cache()) |
2562 | return -ENOMEM; |
2563 | |
2564 | if (time) { |
2565 | to = &timeout; |
2566 | hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, |
2567 | HRTIMER_MODE_ABS); |
2568 | hrtimer_init_sleeper(to, current); |
2569 | hrtimer_set_expires(&to->timer, *time); |
2570 | } |
2571 | |
2572 | retry: |
2573 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE); |
2574 | if (unlikely(ret != 0)) |
2575 | goto out; |
2576 | |
2577 | retry_private: |
2578 | hb = queue_lock(&q); |
2579 | |
2580 | ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); |
2581 | if (unlikely(ret)) { |
2582 | /* |
2583 | * Atomic work succeeded and we got the lock, |
2584 | * or failed. Either way, we do _not_ block. |
2585 | */ |
2586 | switch (ret) { |
2587 | case 1: |
2588 | /* We got the lock. */ |
2589 | ret = 0; |
2590 | goto out_unlock_put_key; |
2591 | case -EFAULT: |
2592 | goto uaddr_faulted; |
2593 | case -EAGAIN: |
2594 | /* |
2595 | * Two reasons for this: |
2596 | * - Task is exiting and we just wait for the |
2597 | * exit to complete. |
2598 | * - The user space value changed. |
2599 | */ |
2600 | queue_unlock(hb); |
2601 | put_futex_key(&q.key); |
2602 | cond_resched(); |
2603 | goto retry; |
2604 | default: |
2605 | goto out_unlock_put_key; |
2606 | } |
2607 | } |
2608 | |
2609 | /* |
2610 | * Only actually queue now that the atomic ops are done: |
2611 | */ |
2612 | queue_me(&q, hb); |
2613 | |
2614 | WARN_ON(!q.pi_state); |
2615 | /* |
2616 | * Block on the PI mutex: |
2617 | */ |
2618 | if (!trylock) { |
2619 | ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to); |
2620 | } else { |
2621 | ret = rt_mutex_trylock(&q.pi_state->pi_mutex); |
2622 | /* Fixup the trylock return value: */ |
2623 | ret = ret ? 0 : -EWOULDBLOCK; |
2624 | } |
2625 | |
2626 | spin_lock(q.lock_ptr); |
2627 | /* |
2628 | * Fixup the pi_state owner and possibly acquire the lock if we |
2629 | * haven't already. |
2630 | */ |
2631 | res = fixup_owner(uaddr, &q, !ret); |
2632 | /* |
2633 | * If fixup_owner() returned an error, propagate that. If it acquired
2634 | * the lock, clear our -ETIMEDOUT or -EINTR. |
2635 | */ |
2636 | if (res) |
2637 | ret = (res < 0) ? res : 0; |
2638 | |
2639 | /* |
2640 | * If fixup_owner() faulted and was unable to handle the fault, unlock |
2641 | * the rt_mutex and return the fault to userspace.
2642 | */ |
2643 | if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) |
2644 | rt_mutex_unlock(&q.pi_state->pi_mutex); |
2645 | |
2646 | /* Unqueue and drop the lock */ |
2647 | unqueue_me_pi(&q); |
2648 | |
2649 | goto out_put_key; |
2650 | |
2651 | out_unlock_put_key: |
2652 | queue_unlock(hb); |
2653 | |
2654 | out_put_key: |
2655 | put_futex_key(&q.key); |
2656 | out: |
2657 | if (to) |
2658 | destroy_hrtimer_on_stack(&to->timer); |
2659 | return ret != -EINTR ? ret : -ERESTARTNOINTR; |
2660 | |
2661 | uaddr_faulted: |
2662 | queue_unlock(hb); |
2663 | |
2664 | ret = fault_in_user_writeable(uaddr); |
2665 | if (ret) |
2666 | goto out_put_key; |
2667 | |
2668 | if (!(flags & FLAGS_SHARED)) |
2669 | goto retry_private; |
2670 | |
2671 | put_futex_key(&q.key); |
2672 | goto retry; |
2673 | } |
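
/*
 * User space fast path that leads here (an illustrative sketch; the
 * variable names are assumptions, not glibc API): a PI lock tries the
 * 0 -> TID transition in user space and enters the kernel only on
 * contention:
 *
 *	u32 zero = 0, tid = syscall(SYS_gettid);
 *
 *	if (!__atomic_compare_exchange_n(&futex, &zero, tid, 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 * On return the caller owns the lock and the futex word holds its TID,
 * possibly with FUTEX_WAITERS set by the kernel.
 */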
2674 | |
2675 | /* |
2676 | * Userspace attempted a TID -> 0 atomic transition, and failed. |
2677 | * This is the in-kernel slowpath: we look up the PI state (if any), |
2678 | * and do the rt-mutex unlock. |
2679 | */ |
2680 | static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) |
2681 | { |
2682 | u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); |
2683 | union futex_key key = FUTEX_KEY_INIT; |
2684 | struct futex_hash_bucket *hb; |
2685 | struct futex_q *match; |
2686 | int ret; |
2687 | |
2688 | retry: |
2689 | if (get_user(uval, uaddr)) |
2690 | return -EFAULT; |
2691 | /* |
2692 | * We release only a lock we actually own: |
2693 | */ |
2694 | if ((uval & FUTEX_TID_MASK) != vpid) |
2695 | return -EPERM; |
2696 | |
2697 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE); |
2698 | if (ret) |
2699 | return ret; |
2700 | |
2701 | hb = hash_futex(&key); |
2702 | spin_lock(&hb->lock); |
2703 | |
2704 | /* |
2705 | * Check waiters first. We do not trust user space values at |
2706 | * all and we at least want to know if user space fiddled |
2707 | * with the futex value instead of blindly unlocking. |
2708 | */ |
2709 | match = futex_top_waiter(hb, &key); |
2710 | if (match) { |
2711 | ret = wake_futex_pi(uaddr, uval, match, hb); |
2712 | /* |
2713 | * In case of success wake_futex_pi dropped the hash |
2714 | * bucket lock. |
2715 | */ |
2716 | if (!ret) |
2717 | goto out_putkey; |
2718 | /* |
2719 | * The atomic access to the futex value generated a |
2720 | * pagefault, so retry the user-access and the wakeup: |
2721 | */ |
2722 | if (ret == -EFAULT) |
2723 | goto pi_faulted; |
2724 | /* |
2725 | * An unconditional UNLOCK_PI op raced against a waiter
2726 | * setting the FUTEX_WAITERS bit. Try again. |
2727 | */ |
2728 | if (ret == -EAGAIN) { |
2729 | spin_unlock(&hb->lock); |
2730 | put_futex_key(&key); |
2731 | goto retry; |
2732 | } |
2733 | /* |
2734 | * wake_futex_pi has detected invalid state. Tell user |
2735 | * space. |
2736 | */ |
2737 | goto out_unlock; |
2738 | } |
2739 | |
2740 | /* |
2741 | * We have no kernel internal state, i.e. no waiters in the |
2742 | * kernel. Waiters which are about to queue themselves are stuck |
2743 | * on hb->lock. So we can safely ignore them. We preserve
2744 | * neither the WAITERS bit nor the OWNER_DIED one. We are the
2745 | * owner. |
2746 | */ |
2747 | if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) |
2748 | goto pi_faulted; |
2749 | |
2750 | /* |
2751 | * If uval has changed, let user space handle it. |
2752 | */ |
2753 | ret = (curval == uval) ? 0 : -EAGAIN; |
2754 | |
2755 | out_unlock: |
2756 | spin_unlock(&hb->lock); |
2757 | out_putkey: |
2758 | put_futex_key(&key); |
2759 | return ret; |
2760 | |
2761 | pi_faulted: |
2762 | spin_unlock(&hb->lock); |
2763 | put_futex_key(&key); |
2764 | |
2765 | ret = fault_in_user_writeable(uaddr); |
2766 | if (!ret) |
2767 | goto retry; |
2768 | |
2769 | return ret; |
2770 | } |
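
/*
 * Matching user space unlock sketch (illustrative): attempt the
 * TID -> 0 transition; when FUTEX_WAITERS is set the futex word is not
 * a bare TID, the cmpxchg fails, and the slow path above must hand the
 * lock to the top waiter:
 *
 *	u32 tid = syscall(SYS_gettid);
 *
 *	if (!__atomic_compare_exchange_n(&futex, &tid, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */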
2771 | |
2772 | /** |
2773 | * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex |
2774 | * @hb: the hash_bucket futex_q was originally enqueued on
2775 | * @q: the futex_q woken while waiting to be requeued |
2776 | * @key2: the futex_key of the requeue target futex |
2777 | * @timeout: the timeout associated with the wait (NULL if none) |
2778 | * |
2779 | * Detect if the task was woken on the initial futex as opposed to the requeue |
2780 | * target futex. If so, determine if it was a timeout or a signal that caused |
2781 | * the wakeup and return the appropriate error code to the caller. Must be |
2782 | * called with the hb lock held. |
2783 | * |
2784 | * Return: |
2785 | * 0 - no early wakeup detected;
2786 | * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2787 | */ |
2788 | static inline |
2789 | int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, |
2790 | struct futex_q *q, union futex_key *key2, |
2791 | struct hrtimer_sleeper *timeout) |
2792 | { |
2793 | int ret = 0; |
2794 | |
2795 | /* |
2796 | * With the hb lock held, we avoid races while we process the wakeup. |
2797 | * We only need to hold hb (and not hb2) to ensure atomicity as the |
2798 | * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. |
2799 | * It can't be requeued from uaddr2 to something else since we don't |
2800 | * support a PI aware source futex for requeue. |
2801 | */ |
2802 | if (!match_futex(&q->key, key2)) { |
2803 | WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); |
2804 | /* |
2805 | * We were woken prior to requeue by a timeout or a signal. |
2806 | * Unqueue the futex_q and determine which it was. |
2807 | */ |
2808 | plist_del(&q->list, &hb->chain); |
2809 | hb_waiters_dec(hb); |
2810 | |
2811 | /* Handle spurious wakeups gracefully */ |
2812 | ret = -EWOULDBLOCK; |
2813 | if (timeout && !timeout->task) |
2814 | ret = -ETIMEDOUT; |
2815 | else if (signal_pending(current)) |
2816 | ret = -ERESTARTNOINTR; |
2817 | } |
2818 | return ret; |
2819 | } |
2820 | |
2821 | /** |
2822 | * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 |
2823 | * @uaddr: the futex we initially wait on (non-pi) |
2824 | * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be |
2825 | * the same type, no requeueing from private to shared, etc. |
2826 | * @val: the expected value of uaddr |
2827 | * @abs_time: absolute timeout |
2828 | * @bitset: 32 bit wakeup bitset set by userspace, defaults to all |
2829 | * @uaddr2: the pi futex we will take prior to returning to user-space |
2830 | * |
2831 | * The caller will wait on uaddr and will be requeued by futex_requeue() to |
2832 | * uaddr2 which must be PI aware and distinct from uaddr. Normal wakeup will wake
2833 | * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to |
2834 | * userspace. This ensures the rt_mutex maintains an owner when it has waiters; |
2835 | * without one, the pi logic would not know which task to boost/deboost, if |
2836 | * there was a need to. |
2837 | * |
2838 | * We call schedule in futex_wait_queue_me() when we enqueue and return there |
2839 | * via the following:
2840 | * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() |
2841 | * 2) wakeup on uaddr2 after a requeue |
2842 | * 3) signal |
2843 | * 4) timeout |
2844 | * |
2845 | * If 3, clean up and return -ERESTARTNOINTR.
2846 | * |
2847 | * If 2, we may then block on trying to take the rt_mutex and return via: |
2848 | * 5) successful lock |
2849 | * 6) signal |
2850 | * 7) timeout |
2851 | * 8) other lock acquisition failure |
2852 | * |
2853 | * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). |
2854 | * |
2855 | * If 4 or 7, we clean up and return -ETIMEDOUT.
2856 | * |
2857 | * Return: |
2858 | * 0 - On success; |
2859 | * <0 - On error |
2860 | */ |
2861 | static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
2862 | u32 val, ktime_t *abs_time, u32 bitset, |
2863 | u32 __user *uaddr2) |
2864 | { |
2865 | struct hrtimer_sleeper timeout, *to = NULL; |
2866 | struct rt_mutex_waiter rt_waiter; |
2867 | struct futex_hash_bucket *hb; |
2868 | union futex_key key2 = FUTEX_KEY_INIT; |
2869 | struct futex_q q = futex_q_init; |
2870 | int res, ret; |
2871 | |
2872 | if (uaddr == uaddr2) |
2873 | return -EINVAL; |
2874 | |
2875 | if (!bitset) |
2876 | return -EINVAL; |
2877 | |
2878 | if (abs_time) { |
2879 | to = &timeout; |
2880 | hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? |
2881 | CLOCK_REALTIME : CLOCK_MONOTONIC, |
2882 | HRTIMER_MODE_ABS); |
2883 | hrtimer_init_sleeper(to, current); |
2884 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
2885 | current->timer_slack_ns); |
2886 | } |
2887 | |
2888 | /* |
2889 | * The waiter is allocated on our stack, manipulated by the requeue |
2890 | * code while we sleep on uaddr. |
2891 | */ |
2892 | debug_rt_mutex_init_waiter(&rt_waiter); |
2893 | RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); |
2894 | RB_CLEAR_NODE(&rt_waiter.tree_entry); |
2895 | rt_waiter.task = NULL; |
2896 | |
2897 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); |
2898 | if (unlikely(ret != 0)) |
2899 | goto out; |
2900 | |
2901 | q.bitset = bitset; |
2902 | q.rt_waiter = &rt_waiter; |
2903 | q.requeue_pi_key = &key2; |
2904 | |
2905 | /* |
2906 | * Prepare to wait on uaddr. On success, increments q.key (key1) ref |
2907 | * count. |
2908 | */ |
2909 | ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
2910 | if (ret) |
2911 | goto out_key2; |
2912 | |
2913 | /* |
2914 | * The check above which compares uaddrs is not sufficient for |
2915 | * shared futexes. We need to compare the keys: |
2916 | */ |
2917 | if (match_futex(&q.key, &key2)) { |
2918 | queue_unlock(hb); |
2919 | ret = -EINVAL; |
2920 | goto out_put_keys; |
2921 | } |
2922 | |
2923 | /* Queue the futex_q, drop the hb lock, wait for wakeup. */ |
2924 | futex_wait_queue_me(hb, &q, to); |
2925 | |
2926 | spin_lock(&hb->lock); |
2927 | ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); |
2928 | spin_unlock(&hb->lock); |
2929 | if (ret) |
2930 | goto out_put_keys; |
2931 | |
2932 | /* |
2933 | * In order for us to be here, we know our q.key == key2, and since |
2934 | * we took the hb->lock above, we also know that futex_requeue() has |
2935 | * completed and we no longer have to concern ourselves with a wakeup |
2936 | * race with the atomic proxy lock acquisition by the requeue code. The |
2937 | * futex_requeue dropped our key1 reference and incremented our key2 |
2938 | * reference count. |
2939 | */ |
2940 | |
2941 | /* Check if the requeue code acquired the second futex for us. */ |
2942 | if (!q.rt_waiter) { |
2943 | /* |
2944 | * Got the lock. We might not be the anticipated owner if we |
2945 | * did a lock-steal - fix up the PI-state in that case. |
2946 | */ |
2947 | if (q.pi_state && (q.pi_state->owner != current)) { |
2948 | spin_lock(q.lock_ptr); |
2949 | ret = fixup_pi_state_owner(uaddr2, &q, current); |
2950 | if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) |
2951 | rt_mutex_unlock(&q.pi_state->pi_mutex); |
2952 | /* |
2953 | * Drop the reference to the pi state which |
2954 | * the requeue_pi() code acquired for us. |
2955 | */ |
2956 | put_pi_state(q.pi_state); |
2957 | spin_unlock(q.lock_ptr); |
2958 | } |
2959 | } else { |
2960 | struct rt_mutex *pi_mutex; |
2961 | |
2962 | /* |
2963 | * We have been woken up by futex_unlock_pi(), a timeout, or a |
2964 | * signal. futex_unlock_pi() will not destroy the lock_ptr nor |
2965 | * the pi_state. |
2966 | */ |
2967 | WARN_ON(!q.pi_state); |
2968 | pi_mutex = &q.pi_state->pi_mutex; |
2969 | ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); |
2970 | |
2971 | spin_lock(q.lock_ptr); |
2972 | if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) |
2973 | ret = 0; |
2974 | |
2975 | debug_rt_mutex_free_waiter(&rt_waiter); |
2976 | /* |
2977 | * Fixup the pi_state owner and possibly acquire the lock if we |
2978 | * haven't already. |
2979 | */ |
2980 | res = fixup_owner(uaddr2, &q, !ret); |
2981 | /* |
2982 | * If fixup_owner() returned an error, propagate that. If it
2983 | * acquired the lock, clear -ETIMEDOUT or -EINTR. |
2984 | */ |
2985 | if (res) |
2986 | ret = (res < 0) ? res : 0; |
2987 | |
2988 | /* |
2989 | * If fixup_pi_state_owner() faulted and was unable to handle |
2990 | * the fault, unlock the rt_mutex and return the fault to |
2991 | * userspace. |
2992 | */ |
2993 | if (ret && rt_mutex_owner(pi_mutex) == current) |
2994 | rt_mutex_unlock(pi_mutex); |
2995 | |
2996 | /* Unqueue and drop the lock. */ |
2997 | unqueue_me_pi(&q); |
2998 | } |
2999 | |
3000 | if (ret == -EINTR) { |
3001 | /* |
3002 | * We've already been requeued, but cannot restart by calling |
3003 | * futex_lock_pi() directly. We could restart this syscall, but |
3004 | * it would detect that the user space "val" changed and return |
3005 | * -EWOULDBLOCK. Save the overhead of the restart and return |
3006 | * -EWOULDBLOCK directly. |
3007 | */ |
3008 | ret = -EWOULDBLOCK; |
3009 | } |
3010 | |
3011 | out_put_keys: |
3012 | put_futex_key(&q.key); |
3013 | out_key2: |
3014 | put_futex_key(&key2); |
3015 | |
3016 | out: |
3017 | if (to) { |
3018 | hrtimer_cancel(&to->timer); |
3019 | destroy_hrtimer_on_stack(&to->timer); |
3020 | } |
3021 | return ret; |
3022 | } |
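
/*
 * Putting the pieces together, a PI-aware condition variable built on
 * this pairing looks roughly as follows (hypothetical names; glibc uses
 * a similar scheme for PTHREAD_PRIO_INHERIT condvars):
 *
 *	waiter:	seq = cond->seq; pthread_mutex_unlock(&m);
 *		syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI,
 *			seq, timeout, &m.futex, 0);
 *		(returns with m owned, rt_mutex fixups done above)
 *
 *	waker:	cond->seq++;
 *		syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI,
 *			1, INT_MAX, &m.futex, cond->seq);
 */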
3023 | |
3024 | /* |
3025 | * Support for robust futexes: the kernel cleans up held futexes at |
3026 | * thread exit time. |
3027 | * |
3028 | * Implementation: user-space maintains a per-thread list of locks it |
3029 | * is holding. Upon do_exit(), the kernel carefully walks this list, |
3030 | * and marks all locks that are owned by this thread with the |
3031 | * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is |
3032 | * always manipulated with the lock held, so the list is private and |
3033 | * per-thread. Userspace also maintains a per-thread 'list_op_pending' |
3034 | * field, to allow the kernel to clean up if the thread dies after |
3035 | * acquiring the lock, but just before it could have added itself to |
3036 | * the list. There can only be one such pending lock. |
3037 | */ |
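
/*
 * For reference, the user space structures walked below (declared in
 * the uapi futex header):
 *
 *	struct robust_list { struct robust_list *next; };
 *
 *	struct robust_list_head {
 *		struct robust_list list;	(circular list of held locks)
 *		long futex_offset;		(entry -> futex word offset)
 *		struct robust_list *list_op_pending; (lock being acquired)
 *	};
 *
 * Each held lock embeds a struct robust_list entry, and the futex word
 * itself lives futex_offset bytes from that entry - that is how
 * handle_futex_death() finds the u32 to mark with FUTEX_OWNER_DIED.
 */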
3038 | |
3039 | /** |
3040 | * sys_set_robust_list() - Set the robust-futex list head of a task |
3041 | * @head: pointer to the list-head |
3042 | * @len: length of the list-head, as userspace expects |
3043 | */ |
3044 | SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, |
3045 | size_t, len) |
3046 | { |
3047 | if (!futex_cmpxchg_enabled) |
3048 | return -ENOSYS; |
3049 | /* |
3050 | * The kernel knows only one size for now: |
3051 | */ |
3052 | if (unlikely(len != sizeof(*head))) |
3053 | return -EINVAL; |
3054 | |
3055 | current->robust_list = head; |
3056 | |
3057 | return 0; |
3058 | } |
3059 | |
3060 | /** |
3061 | * sys_get_robust_list() - Get the robust-futex list head of a task |
3062 | * @pid: pid of the process [zero for current task] |
3063 | * @head_ptr: pointer to a list-head pointer, the kernel fills it in |
3064 | * @len_ptr: pointer to a length field, the kernel fills in the header size |
3065 | */ |
3066 | SYSCALL_DEFINE3(get_robust_list, int, pid, |
3067 | struct robust_list_head __user * __user *, head_ptr, |
3068 | size_t __user *, len_ptr) |
3069 | { |
3070 | struct robust_list_head __user *head; |
3071 | unsigned long ret; |
3072 | struct task_struct *p; |
3073 | |
3074 | if (!futex_cmpxchg_enabled) |
3075 | return -ENOSYS; |
3076 | |
3077 | rcu_read_lock(); |
3078 | |
3079 | ret = -ESRCH; |
3080 | if (!pid) |
3081 | p = current; |
3082 | else { |
3083 | p = find_task_by_vpid(pid); |
3084 | if (!p) |
3085 | goto err_unlock; |
3086 | } |
3087 | |
3088 | ret = -EPERM; |
3089 | if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
3090 | goto err_unlock; |
3091 | |
3092 | head = p->robust_list; |
3093 | rcu_read_unlock(); |
3094 | |
3095 | if (put_user(sizeof(*head), len_ptr)) |
3096 | return -EFAULT; |
3097 | return put_user(head, head_ptr); |
3098 | |
3099 | err_unlock: |
3100 | rcu_read_unlock(); |
3101 | |
3102 | return ret; |
3103 | } |
3104 | |
3105 | /* |
3106 | * Process a futex-list entry, check whether it's owned by the |
3107 | * dying task, and do notification if so: |
3108 | */ |
3109 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
3110 | { |
3111 | u32 uval, uninitialized_var(nval), mval; |
3112 | |
3113 | /* Futex address must be 32-bit aligned */
3114 | if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0) |
3115 | return -1; |
3116 | |
3117 | retry: |
3118 | if (get_user(uval, uaddr)) |
3119 | return -1; |
3120 | |
3121 | if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { |
3122 | /* |
3123 | * Ok, this dying thread is truly holding a futex |
3124 | * of interest. Set the OWNER_DIED bit atomically |
3125 | * via cmpxchg, and if the value had FUTEX_WAITERS |
3126 | * set, wake up a waiter (if any). (We have to do a |
3127 | * futex_wake() even if OWNER_DIED is already set - |
3128 | * to handle the rare but possible case of recursive |
3129 | * thread-death.) The rest of the cleanup is done in |
3130 | * userspace. |
3131 | */ |
3132 | mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; |
3133 | /* |
3134 | * We are not holding a lock here, but we want to have |
3135 | * the pagefault_disable/enable() protection because |
3136 | * we want to handle the fault gracefully. If the |
3137 | * access fails we try to fault in the futex with R/W |
3138 | * verification via get_user_pages. get_user() above |
3139 | * does not guarantee R/W access. If that fails we |
3140 | * give up and leave the futex locked. |
3141 | */ |
3142 | if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) { |
3143 | if (fault_in_user_writeable(uaddr)) |
3144 | return -1; |
3145 | goto retry; |
3146 | } |
3147 | if (nval != uval) |
3148 | goto retry; |
3149 | |
3150 | /* |
3151 | * Wake robust non-PI futexes here. The wakeup of |
3152 | * PI futexes happens in exit_pi_state(): |
3153 | */ |
3154 | if (!pi && (uval & FUTEX_WAITERS)) |
3155 | futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
3156 | } |
3157 | return 0; |
3158 | } |
3159 | |
3160 | /* |
3161 | * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
3162 | */ |
3163 | static inline int fetch_robust_entry(struct robust_list __user **entry, |
3164 | struct robust_list __user * __user *head, |
3165 | unsigned int *pi) |
3166 | { |
3167 | unsigned long uentry; |
3168 | |
3169 | if (get_user(uentry, (unsigned long __user *)head)) |
3170 | return -EFAULT; |
3171 | |
3172 | *entry = (void __user *)(uentry & ~1UL); |
3173 | *pi = uentry & 1; |
3174 | |
3175 | return 0; |
3176 | } |
3177 | |
3178 | /* |
3179 | * Walk curr->robust_list (very carefully, it's a userspace list!) |
3180 | * and mark any locks found there dead, and notify any waiters. |
3181 | * |
3182 | * We silently return on any sign of list-walking problem. |
3183 | */ |
3184 | void exit_robust_list(struct task_struct *curr) |
3185 | { |
3186 | struct robust_list_head __user *head = curr->robust_list; |
3187 | struct robust_list __user *entry, *next_entry, *pending; |
3188 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
3189 | unsigned int uninitialized_var(next_pi); |
3190 | unsigned long futex_offset; |
3191 | int rc; |
3192 | |
3193 | if (!futex_cmpxchg_enabled) |
3194 | return; |
3195 | |
3196 | /* |
3197 | * Fetch the list head (which was registered earlier, via |
3198 | * sys_set_robust_list()): |
3199 | */ |
3200 | if (fetch_robust_entry(&entry, &head->list.next, &pi)) |
3201 | return; |
3202 | /* |
3203 | * Fetch the relative futex offset: |
3204 | */ |
3205 | if (get_user(futex_offset, &head->futex_offset)) |
3206 | return; |
3207 | /* |
3208 | * Fetch any possibly pending lock-add first, and handle it |
3209 | * if it exists: |
3210 | */ |
3211 | if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
3212 | return; |
3213 | |
3214 | next_entry = NULL; /* avoid warning with gcc */ |
3215 | while (entry != &head->list) { |
3216 | /* |
3217 | * Fetch the next entry in the list before calling |
3218 | * handle_futex_death: |
3219 | */ |
3220 | rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); |
3221 | /* |
3222 | * A pending lock might already be on the list, so |
3223 | * don't process it twice: |
3224 | */ |
3225 | if (entry != pending) |
3226 | if (handle_futex_death((void __user *)entry + futex_offset, |
3227 | curr, pi)) |
3228 | return; |
3229 | if (rc) |
3230 | return; |
3231 | entry = next_entry; |
3232 | pi = next_pi; |
3233 | /* |
3234 | * Avoid excessively long or circular lists: |
3235 | */ |
3236 | if (!--limit) |
3237 | break; |
3238 | |
3239 | cond_resched(); |
3240 | } |
3241 | |
3242 | if (pending) |
3243 | handle_futex_death((void __user *)pending + futex_offset, |
3244 | curr, pip); |
3245 | } |
3246 | |
3247 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
3248 | u32 __user *uaddr2, u32 val2, u32 val3) |
3249 | { |
3250 | int cmd = op & FUTEX_CMD_MASK; |
3251 | unsigned int flags = 0; |
3252 | |
3253 | if (!(op & FUTEX_PRIVATE_FLAG)) |
3254 | flags |= FLAGS_SHARED; |
3255 | |
3256 | if (op & FUTEX_CLOCK_REALTIME) { |
3257 | flags |= FLAGS_CLOCKRT; |
3258 | if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3259 | cmd != FUTEX_WAIT_REQUEUE_PI) |
3260 | return -ENOSYS; |
3261 | } |
3262 | |
3263 | switch (cmd) { |
3264 | case FUTEX_LOCK_PI: |
3265 | case FUTEX_UNLOCK_PI: |
3266 | case FUTEX_TRYLOCK_PI: |
3267 | case FUTEX_WAIT_REQUEUE_PI: |
3268 | case FUTEX_CMP_REQUEUE_PI: |
3269 | if (!futex_cmpxchg_enabled) |
3270 | return -ENOSYS; |
3271 | } |
3272 | |
3273 | switch (cmd) { |
3274 | case FUTEX_WAIT: |
3275 | val3 = FUTEX_BITSET_MATCH_ANY; |
3276 | case FUTEX_WAIT_BITSET: |
3277 | return futex_wait(uaddr, flags, val, timeout, val3); |
3278 | case FUTEX_WAKE: |
3279 | val3 = FUTEX_BITSET_MATCH_ANY; |
3280 | case FUTEX_WAKE_BITSET: |
3281 | return futex_wake(uaddr, flags, val, val3); |
3282 | case FUTEX_REQUEUE: |
3283 | return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0); |
3284 | case FUTEX_CMP_REQUEUE: |
3285 | return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0); |
3286 | case FUTEX_WAKE_OP: |
3287 | return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); |
3288 | case FUTEX_LOCK_PI: |
3289 | return futex_lock_pi(uaddr, flags, timeout, 0); |
3290 | case FUTEX_UNLOCK_PI: |
3291 | return futex_unlock_pi(uaddr, flags); |
3292 | case FUTEX_TRYLOCK_PI: |
3293 | return futex_lock_pi(uaddr, flags, NULL, 1); |
3294 | case FUTEX_WAIT_REQUEUE_PI: |
3295 | val3 = FUTEX_BITSET_MATCH_ANY; |
3296 | return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, |
3297 | uaddr2); |
3298 | case FUTEX_CMP_REQUEUE_PI: |
3299 | return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1); |
3300 | } |
3301 | return -ENOSYS; |
3302 | } |
3303 | |
3304 | |
3305 | SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
3306 | struct timespec __user *, utime, u32 __user *, uaddr2, |
3307 | u32, val3) |
3308 | { |
3309 | struct timespec ts; |
3310 | ktime_t t, *tp = NULL; |
3311 | u32 val2 = 0; |
3312 | int cmd = op & FUTEX_CMD_MASK; |
3313 | |
3314 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
3315 | cmd == FUTEX_WAIT_BITSET || |
3316 | cmd == FUTEX_WAIT_REQUEUE_PI)) { |
3317 | if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) |
3318 | return -EFAULT; |
3319 | if (copy_from_user(&ts, utime, sizeof(ts)) != 0) |
3320 | return -EFAULT; |
3321 | if (!timespec_valid(&ts)) |
3322 | return -EINVAL; |
3323 | |
3324 | t = timespec_to_ktime(ts); |
3325 | if (cmd == FUTEX_WAIT) |
3326 | t = ktime_add_safe(ktime_get(), t); |
3327 | tp = &t; |
3328 | } |
3329 | /* |
3330 | * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*. |
3331 | * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. |
3332 | */ |
3333 | if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || |
3334 | cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) |
3335 | val2 = (u32) (unsigned long) utime; |
3336 | |
3337 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
3338 | } |
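
/*
 * glibc intentionally provides no futex() wrapper, so user space
 * reaches this entry point via the raw syscall (illustrative):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	long r = syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE,
 *			 1, NULL, NULL, 0);
 *
 * which wakes at most one waiter on a process-private futex and returns
 * the number woken.
 */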
3339 | |
3340 | static void __init futex_detect_cmpxchg(void) |
3341 | { |
3342 | #ifndef CONFIG_HAVE_FUTEX_CMPXCHG |
3343 | u32 curval; |
3344 | |
3345 | /* |
3346 | * This will fail and we want it. Some arch implementations do |
3347 | * runtime detection of the futex_atomic_cmpxchg_inatomic() |
3348 | * functionality. We want to know that before we call in any |
3349 | * of the complex code paths. Also we want to prevent |
3350 | * registration of robust lists in that case. NULL is |
3351 | * guaranteed to fault and we get -EFAULT on functional |
3352 | * implementation, the non-functional ones will return |
3353 | * -ENOSYS. |
3354 | */ |
3355 | if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) |
3356 | futex_cmpxchg_enabled = 1; |
3357 | #endif |
3358 | } |
3359 | |
3360 | static int __init futex_init(void) |
3361 | { |
3362 | unsigned int futex_shift; |
3363 | unsigned long i; |
3364 | |
3365 | #if CONFIG_BASE_SMALL |
3366 | futex_hashsize = 16; |
3367 | #else |
3368 | futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus()); |
3369 | #endif |
3370 | |
3371 | futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), |
3372 | futex_hashsize, 0, |
3373 | futex_hashsize < 256 ? HASH_SMALL : 0, |
3374 | &futex_shift, NULL, |
3375 | futex_hashsize, futex_hashsize); |
3376 | futex_hashsize = 1UL << futex_shift; |
3377 | |
3378 | futex_detect_cmpxchg(); |
3379 | |
3380 | for (i = 0; i < futex_hashsize; i++) { |
3381 | atomic_set(&futex_queues[i].waiters, 0); |
3382 | plist_head_init(&futex_queues[i].chain); |
3383 | spin_lock_init(&futex_queues[i].lock); |
3384 | } |
3385 | |
3386 | return 0; |
3387 | } |
3388 | core_initcall(futex_init); |
3389 |