1 | /* |
2 | * An async IO implementation for Linux |
3 | * Written by Benjamin LaHaise <bcrl@kvack.org> |
4 | * |
5 | * Implements an efficient asynchronous io interface. |
6 | * |
7 | * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. |
8 | * |
9 | * See ../COPYING for licensing terms. |
10 | */ |
11 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
12 | |
13 | #include <linux/kernel.h> |
14 | #include <linux/init.h> |
15 | #include <linux/errno.h> |
16 | #include <linux/time.h> |
17 | #include <linux/aio_abi.h> |
18 | #include <linux/export.h> |
19 | #include <linux/syscalls.h> |
20 | #include <linux/backing-dev.h> |
21 | #include <linux/uio.h> |
22 | |
23 | #include <linux/sched.h> |
24 | #include <linux/fs.h> |
25 | #include <linux/file.h> |
26 | #include <linux/mm.h> |
27 | #include <linux/mman.h> |
28 | #include <linux/mmu_context.h> |
29 | #include <linux/percpu.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/timer.h> |
32 | #include <linux/aio.h> |
33 | #include <linux/highmem.h> |
34 | #include <linux/workqueue.h> |
35 | #include <linux/security.h> |
36 | #include <linux/eventfd.h> |
37 | #include <linux/blkdev.h> |
38 | #include <linux/compat.h> |
39 | #include <linux/migrate.h> |
40 | #include <linux/ramfs.h> |
41 | #include <linux/percpu-refcount.h> |
42 | #include <linux/mount.h> |
43 | #include <linux/nospec.h> |
44 | |
45 | #include <asm/kmap_types.h> |
46 | #include <asm/uaccess.h> |
47 | |
48 | #include "internal.h" |
49 | |
50 | #define AIO_RING_MAGIC 0xa10a10a1 |
51 | #define AIO_RING_COMPAT_FEATURES 1 |
52 | #define AIO_RING_INCOMPAT_FEATURES 0 |
53 | struct aio_ring { |
54 | unsigned id; /* kernel internal index number */ |
55 | unsigned nr; /* number of io_events */ |
56 | unsigned head; /* Written to by userland or under ring_lock |
57 | * mutex by aio_read_events_ring(). */ |
58 | unsigned tail; |
59 | |
60 | unsigned magic; |
61 | unsigned compat_features; |
62 | unsigned incompat_features; |
63 | unsigned header_length; /* size of aio_ring */ |
64 | |
65 | |
66 | struct io_event io_events[0]; |
67 | }; /* 128 bytes + ring size */ |
68 | |
69 | #define AIO_RING_PAGES 8 |
70 | |
71 | struct kioctx_table { |
72 | struct rcu_head rcu; |
73 | unsigned nr; |
74 | struct kioctx __rcu *table[]; |
75 | }; |
76 | |
77 | struct kioctx_cpu { |
78 | unsigned reqs_available; |
79 | }; |
80 | |
81 | struct ctx_rq_wait { |
82 | struct completion comp; |
83 | atomic_t count; |
84 | }; |
85 | |
86 | struct kioctx { |
87 | struct percpu_ref users; |
88 | atomic_t dead; |
89 | |
90 | struct percpu_ref reqs; |
91 | |
92 | unsigned long user_id; |
93 | |
94 | struct __percpu kioctx_cpu *cpu; |
95 | |
96 | /* |
97 | * For percpu reqs_available, number of slots we move to/from global |
98 | * counter at a time: |
99 | */ |
100 | unsigned req_batch; |
101 | /* |
102 | * This is what userspace passed to io_setup(); it's not used for |
103 | * anything but counting against the global max_reqs quota. |
104 | * |
105 | * The real limit is nr_events - 1, which will be larger (see |
106 | * aio_setup_ring()) |
107 | */ |
108 | unsigned max_reqs; |
109 | |
110 | /* Size of ringbuffer, in units of struct io_event */ |
111 | unsigned nr_events; |
112 | |
113 | unsigned long mmap_base; |
114 | unsigned long mmap_size; |
115 | |
116 | struct page **ring_pages; |
117 | long nr_pages; |
118 | |
119 | struct rcu_head free_rcu; |
120 | struct work_struct free_work; /* see free_ioctx() */ |
121 | |
122 | /* |
123 | * signals when all in-flight requests are done |
124 | */ |
125 | struct ctx_rq_wait *rq_wait; |
126 | |
127 | struct { |
128 | /* |
129 | * This counts the number of available slots in the ringbuffer, |
130 | * so we avoid overflowing it: it's decremented (if positive) |
131 | * when allocating a kiocb and incremented when the resulting |
132 | * io_event is pulled off the ringbuffer. |
133 | * |
134 | * We batch accesses to it with a percpu version. |
135 | */ |
136 | atomic_t reqs_available; |
137 | } ____cacheline_aligned_in_smp; |
138 | |
139 | struct { |
140 | spinlock_t ctx_lock; |
141 | struct list_head active_reqs; /* used for cancellation */ |
142 | } ____cacheline_aligned_in_smp; |
143 | |
144 | struct { |
145 | struct mutex ring_lock; |
146 | wait_queue_head_t wait; |
147 | } ____cacheline_aligned_in_smp; |
148 | |
149 | struct { |
150 | unsigned tail; |
151 | unsigned completed_events; |
152 | spinlock_t completion_lock; |
153 | } ____cacheline_aligned_in_smp; |
154 | |
155 | struct page *internal_pages[AIO_RING_PAGES]; |
156 | struct file *aio_ring_file; |
157 | |
158 | unsigned id; |
159 | }; |
160 | |
161 | /* |
162 | * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either |
163 | * cancelled or completed (this makes a certain amount of sense because |
164 | * successful cancellation - io_cancel() - does deliver the completion to |
165 | * userspace). |
166 | * |
167 | * And since most things don't implement kiocb cancellation and we'd really like |
168 | * kiocb completion to be lockless when possible, we use ki_cancel to |
169 | * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED |
170 | * with xchg() or cmpxchg(), see kiocb_cancel(). |
171 | */ |
172 | #define KIOCB_CANCELLED ((void *) (~0ULL)) |
173 | |
174 | struct aio_kiocb { |
175 | struct kiocb common; |
176 | |
177 | struct kioctx *ki_ctx; |
178 | kiocb_cancel_fn *ki_cancel; |
179 | |
180 | struct iocb __user *ki_user_iocb; /* user's aiocb */ |
181 | __u64 ki_user_data; /* user's data for completion */ |
182 | |
183 | struct list_head ki_list; /* the aio core uses this |
184 | * for cancellation */ |
185 | |
186 | /* |
187 | * If the aio_resfd field of the userspace iocb is not zero, |
188 | * this is the underlying eventfd context to deliver events to. |
189 | */ |
190 | struct eventfd_ctx *ki_eventfd; |
191 | }; |
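/*
 * Illustrative userspace sketch (not part of this file; "fd" and "efd" are
 * assumed to be an open file and an eventfd() descriptor): to get eventfd
 * notification for a completion, the submitter sets, in addition to the
 * usual aio_buf/aio_nbytes,
 *
 *	struct iocb cb = { 0 };
 *
 *	cb.aio_fildes     = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_flags      = IOCB_FLAG_RESFD;
 *	cb.aio_resfd      = efd;
 *
 * io_submit_one() then resolves aio_resfd via eventfd_ctx_fdget() into
 * ki_eventfd, and aio_complete() signals it once per completed event.
 */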
192 | |
193 | /*------ sysctl variables----*/ |
194 | static DEFINE_SPINLOCK(aio_nr_lock); |
195 | unsigned long aio_nr; /* current system wide number of aio requests */ |
196 | unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ |
197 | /*----end sysctl variables---*/ |
198 | |
199 | static struct kmem_cache *kiocb_cachep; |
200 | static struct kmem_cache *kioctx_cachep; |
201 | |
202 | static struct vfsmount *aio_mnt; |
203 | |
204 | static const struct file_operations aio_ring_fops; |
205 | static const struct address_space_operations aio_ctx_aops; |
206 | |
207 | static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) |
208 | { |
209 | struct qstr this = QSTR_INIT("[aio]", 5); |
210 | struct file *file; |
211 | struct path path; |
212 | struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb); |
213 | if (IS_ERR(inode)) |
214 | return ERR_CAST(inode); |
215 | |
216 | inode->i_mapping->a_ops = &aio_ctx_aops; |
217 | inode->i_mapping->private_data = ctx; |
218 | inode->i_size = PAGE_SIZE * nr_pages; |
219 | |
220 | path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); |
221 | if (!path.dentry) { |
222 | iput(inode); |
223 | return ERR_PTR(-ENOMEM); |
224 | } |
225 | path.mnt = mntget(aio_mnt); |
226 | |
227 | d_instantiate(path.dentry, inode); |
228 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops); |
229 | if (IS_ERR(file)) { |
230 | path_put(&path); |
231 | return file; |
232 | } |
233 | |
234 | file->f_flags = O_RDWR; |
235 | return file; |
236 | } |
237 | |
238 | static struct dentry *aio_mount(struct file_system_type *fs_type, |
239 | int flags, const char *dev_name, void *data) |
240 | { |
241 | static const struct dentry_operations ops = { |
242 | .d_dname = simple_dname, |
243 | }; |
244 | struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops, |
245 | AIO_RING_MAGIC); |
246 | |
247 | if (!IS_ERR(root)) |
248 | root->d_sb->s_iflags |= SB_I_NOEXEC; |
249 | return root; |
250 | } |
251 | |
252 | /* aio_setup |
253 | * Creates the slab caches used by the aio routines and panics on |
254 | * failure, as this is done early during the boot sequence. |
255 | */ |
256 | static int __init aio_setup(void) |
257 | { |
258 | static struct file_system_type aio_fs = { |
259 | .name = "aio", |
260 | .mount = aio_mount, |
261 | .kill_sb = kill_anon_super, |
262 | }; |
263 | aio_mnt = kern_mount(&aio_fs); |
264 | if (IS_ERR(aio_mnt)) |
265 | panic("Failed to create aio fs mount."); |
266 | |
267 | kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
268 | kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
269 | |
270 | pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page)); |
271 | |
272 | return 0; |
273 | } |
274 | __initcall(aio_setup); |
275 | |
276 | static void put_aio_ring_file(struct kioctx *ctx) |
277 | { |
278 | struct file *aio_ring_file = ctx->aio_ring_file; |
279 | struct address_space *i_mapping; |
280 | |
281 | if (aio_ring_file) { |
282 | truncate_setsize(aio_ring_file->f_inode, 0); |
283 | |
284 | /* Prevent further access to the kioctx from migratepages */ |
285 | i_mapping = aio_ring_file->f_inode->i_mapping; |
286 | spin_lock(&i_mapping->private_lock); |
287 | i_mapping->private_data = NULL; |
288 | ctx->aio_ring_file = NULL; |
289 | spin_unlock(&i_mapping->private_lock); |
290 | |
291 | fput(aio_ring_file); |
292 | } |
293 | } |
294 | |
295 | static void aio_free_ring(struct kioctx *ctx) |
296 | { |
297 | int i; |
298 | |
299 | /* Disconnect the kioctx from the ring file. This prevents future |
300 | * accesses to the kioctx from page migration. |
301 | */ |
302 | put_aio_ring_file(ctx); |
303 | |
304 | for (i = 0; i < ctx->nr_pages; i++) { |
305 | struct page *page; |
306 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, |
307 | page_count(ctx->ring_pages[i])); |
308 | page = ctx->ring_pages[i]; |
309 | if (!page) |
310 | continue; |
311 | ctx->ring_pages[i] = NULL; |
312 | put_page(page); |
313 | } |
314 | |
315 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { |
316 | kfree(ctx->ring_pages); |
317 | ctx->ring_pages = NULL; |
318 | } |
319 | } |
320 | |
321 | static int aio_ring_mremap(struct vm_area_struct *vma) |
322 | { |
323 | struct file *file = vma->vm_file; |
324 | struct mm_struct *mm = vma->vm_mm; |
325 | struct kioctx_table *table; |
326 | int i, res = -EINVAL; |
327 | |
328 | spin_lock(&mm->ioctx_lock); |
329 | rcu_read_lock(); |
330 | table = rcu_dereference(mm->ioctx_table); |
331 | for (i = 0; i < table->nr; i++) { |
332 | struct kioctx *ctx; |
333 | |
334 | ctx = rcu_dereference(table->table[i]); |
335 | if (ctx && ctx->aio_ring_file == file) { |
336 | if (!atomic_read(&ctx->dead)) { |
337 | ctx->user_id = ctx->mmap_base = vma->vm_start; |
338 | res = 0; |
339 | } |
340 | break; |
341 | } |
342 | } |
343 | |
344 | rcu_read_unlock(); |
345 | spin_unlock(&mm->ioctx_lock); |
346 | return res; |
347 | } |
348 | |
349 | static const struct vm_operations_struct aio_ring_vm_ops = { |
350 | .mremap = aio_ring_mremap, |
351 | #if IS_ENABLED(CONFIG_MMU) |
352 | .fault = filemap_fault, |
353 | .map_pages = filemap_map_pages, |
354 | .page_mkwrite = filemap_page_mkwrite, |
355 | #endif |
356 | }; |
357 | |
358 | static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) |
359 | { |
360 | vma->vm_flags |= VM_DONTEXPAND; |
361 | vma->vm_ops = &aio_ring_vm_ops; |
362 | return 0; |
363 | } |
364 | |
365 | static const struct file_operations aio_ring_fops = { |
366 | .mmap = aio_ring_mmap, |
367 | }; |
368 | |
369 | #if IS_ENABLED(CONFIG_MIGRATION) |
370 | static int aio_migratepage(struct address_space *mapping, struct page *new, |
371 | struct page *old, enum migrate_mode mode) |
372 | { |
373 | struct kioctx *ctx; |
374 | unsigned long flags; |
375 | pgoff_t idx; |
376 | int rc; |
377 | |
378 | rc = 0; |
379 | |
380 | /* mapping->private_lock here protects against the kioctx teardown. */ |
381 | spin_lock(&mapping->private_lock); |
382 | ctx = mapping->private_data; |
383 | if (!ctx) { |
384 | rc = -EINVAL; |
385 | goto out; |
386 | } |
387 | |
388 | /* Take the ring_lock mutex. This prevents aio_read_events_ring() from |
389 | * writing to the ring's head, and prevents page migration from mucking |
390 | * with a partially initialized kioctx. |
391 | */ |
392 | if (!mutex_trylock(&ctx->ring_lock)) { |
393 | rc = -EAGAIN; |
394 | goto out; |
395 | } |
396 | |
397 | idx = old->index; |
398 | if (idx < (pgoff_t)ctx->nr_pages) { |
399 | /* Make sure the old page hasn't already been changed */ |
400 | if (ctx->ring_pages[idx] != old) |
401 | rc = -EAGAIN; |
402 | } else |
403 | rc = -EINVAL; |
404 | |
405 | if (rc != 0) |
406 | goto out_unlock; |
407 | |
408 | /* Writeback must be complete */ |
409 | BUG_ON(PageWriteback(old)); |
410 | get_page(new); |
411 | |
412 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); |
413 | if (rc != MIGRATEPAGE_SUCCESS) { |
414 | put_page(new); |
415 | goto out_unlock; |
416 | } |
417 | |
418 | /* Take completion_lock to prevent other writes to the ring buffer |
419 | * while the old page is copied to the new. This prevents new |
420 | * events from being lost. |
421 | */ |
422 | spin_lock_irqsave(&ctx->completion_lock, flags); |
423 | migrate_page_copy(new, old); |
424 | BUG_ON(ctx->ring_pages[idx] != old); |
425 | ctx->ring_pages[idx] = new; |
426 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
427 | |
428 | /* The old page is no longer accessible. */ |
429 | put_page(old); |
430 | |
431 | out_unlock: |
432 | mutex_unlock(&ctx->ring_lock); |
433 | out: |
434 | spin_unlock(&mapping->private_lock); |
435 | return rc; |
436 | } |
437 | #endif |
438 | |
439 | static const struct address_space_operations aio_ctx_aops = { |
440 | .set_page_dirty = __set_page_dirty_no_writeback, |
441 | #if IS_ENABLED(CONFIG_MIGRATION) |
442 | .migratepage = aio_migratepage, |
443 | #endif |
444 | }; |
445 | |
446 | static int aio_setup_ring(struct kioctx *ctx) |
447 | { |
448 | struct aio_ring *ring; |
449 | unsigned nr_events = ctx->max_reqs; |
450 | struct mm_struct *mm = current->mm; |
451 | unsigned long size, unused; |
452 | int nr_pages; |
453 | int i; |
454 | struct file *file; |
455 | |
456 | /* Compensate for the ring buffer's head/tail overlap entry */ |
457 | nr_events += 2; /* 1 is required, 2 for good luck */ |
458 | |
459 | size = sizeof(struct aio_ring); |
460 | size += sizeof(struct io_event) * nr_events; |
461 | |
462 | nr_pages = PFN_UP(size); |
463 | if (nr_pages < 0) |
464 | return -EINVAL; |
465 | |
466 | file = aio_private_file(ctx, nr_pages); |
467 | if (IS_ERR(file)) { |
468 | ctx->aio_ring_file = NULL; |
469 | return -ENOMEM; |
470 | } |
471 | |
472 | ctx->aio_ring_file = file; |
473 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) |
474 | / sizeof(struct io_event); |
475 | |
476 | ctx->ring_pages = ctx->internal_pages; |
477 | if (nr_pages > AIO_RING_PAGES) { |
478 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), |
479 | GFP_KERNEL); |
480 | if (!ctx->ring_pages) { |
481 | put_aio_ring_file(ctx); |
482 | return -ENOMEM; |
483 | } |
484 | } |
485 | |
486 | for (i = 0; i < nr_pages; i++) { |
487 | struct page *page; |
488 | page = find_or_create_page(file->f_inode->i_mapping, |
489 | i, GFP_HIGHUSER | __GFP_ZERO); |
490 | if (!page) |
491 | break; |
492 | pr_debug("pid(%d) page[%d]->count=%d\n", |
493 | current->pid, i, page_count(page)); |
494 | SetPageUptodate(page); |
495 | unlock_page(page); |
496 | |
497 | ctx->ring_pages[i] = page; |
498 | } |
499 | ctx->nr_pages = i; |
500 | |
501 | if (unlikely(i != nr_pages)) { |
502 | aio_free_ring(ctx); |
503 | return -ENOMEM; |
504 | } |
505 | |
506 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
507 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); |
508 | |
509 | if (down_write_killable(&mm->mmap_sem)) { |
510 | ctx->mmap_size = 0; |
511 | aio_free_ring(ctx); |
512 | return -EINTR; |
513 | } |
514 | |
515 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
516 | PROT_READ | PROT_WRITE, |
517 | MAP_SHARED, 0, &unused); |
518 | up_write(&mm->mmap_sem); |
519 | if (IS_ERR((void *)ctx->mmap_base)) { |
520 | ctx->mmap_size = 0; |
521 | aio_free_ring(ctx); |
522 | return -ENOMEM; |
523 | } |
524 | |
525 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
526 | |
527 | ctx->user_id = ctx->mmap_base; |
528 | ctx->nr_events = nr_events; /* trusted copy */ |
529 | |
530 | ring = kmap_atomic(ctx->ring_pages[0]); |
531 | ring->nr = nr_events; /* user copy */ |
532 | ring->id = ~0U; |
533 | ring->head = ring->tail = 0; |
534 | ring->magic = AIO_RING_MAGIC; |
535 | ring->compat_features = AIO_RING_COMPAT_FEATURES; |
536 | ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; |
537 | ring->header_length = sizeof(struct aio_ring); |
538 | kunmap_atomic(ring); |
539 | flush_dcache_page(ctx->ring_pages[0]); |
540 | |
541 | return 0; |
542 | } |
543 | |
544 | #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) |
545 | #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) |
546 | #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) |
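/*
 * Worked example, assuming 4 KiB pages and the 32-byte struct io_event:
 * AIO_EVENTS_PER_PAGE is 128 and, with struct aio_ring as declared above
 * (8 * sizeof(unsigned) = 32 bytes), AIO_EVENTS_FIRST_PAGE is 127 and
 * AIO_EVENTS_OFFSET is 1. Event slot i therefore lives at ring page
 * (i + 1) / 128, offset (i + 1) % 128, which is exactly the indexing
 * aio_complete() and aio_read_events_ring() perform below.
 */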
547 | |
548 | void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel) |
549 | { |
550 | struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common); |
551 | struct kioctx *ctx = req->ki_ctx; |
552 | unsigned long flags; |
553 | |
554 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
555 | |
556 | if (!req->ki_list.next) |
557 | list_add(&req->ki_list, &ctx->active_reqs); |
558 | |
559 | req->ki_cancel = cancel; |
560 | |
561 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
562 | } |
563 | EXPORT_SYMBOL(kiocb_set_cancel_fn); |
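/*
 * Hypothetical in-kernel usage sketch (the names "my_cancel" and "my_queue"
 * are made up): a driver whose ->read_iter()/->write_iter() queues the
 * request internally can make an aio-submitted kiocb cancellable from
 * io_cancel() like so:
 *
 *	static int my_cancel(struct kiocb *iocb)
 *	{
 *		// drop the request from my_queue; 0 means cancelled
 *		return 0;
 *	}
 *
 *	// from the driver's ->read_iter(struct kiocb *iocb, ...):
 *	kiocb_set_cancel_fn(iocb, my_cancel);
 */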
564 | |
565 | static int kiocb_cancel(struct aio_kiocb *kiocb) |
566 | { |
567 | kiocb_cancel_fn *old, *cancel; |
568 | |
569 | /* |
570 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it |
571 | * actually has a cancel function, hence the cmpxchg() |
572 | */ |
573 | |
574 | cancel = ACCESS_ONCE(kiocb->ki_cancel); |
575 | do { |
576 | if (!cancel || cancel == KIOCB_CANCELLED) |
577 | return -EINVAL; |
578 | |
579 | old = cancel; |
580 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); |
581 | } while (cancel != old); |
582 | |
583 | return cancel(&kiocb->common); |
584 | } |
585 | |
586 | /* |
587 | * free_ioctx() should be RCU delayed to synchronize against the RCU |
588 | * protected lookup_ioctx() and also needs process context to call |
589 | * aio_free_ring(), so the double bouncing through kioctx->free_rcu and |
590 | * ->free_work. |
591 | */ |
592 | static void free_ioctx(struct work_struct *work) |
593 | { |
594 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); |
595 | |
596 | pr_debug("freeing %p\n", ctx); |
597 | |
598 | aio_free_ring(ctx); |
599 | free_percpu(ctx->cpu); |
600 | percpu_ref_exit(&ctx->reqs); |
601 | percpu_ref_exit(&ctx->users); |
602 | kmem_cache_free(kioctx_cachep, ctx); |
603 | } |
604 | |
605 | static void free_ioctx_rcufn(struct rcu_head *head) |
606 | { |
607 | struct kioctx *ctx = container_of(head, struct kioctx, free_rcu); |
608 | |
609 | INIT_WORK(&ctx->free_work, free_ioctx); |
610 | schedule_work(&ctx->free_work); |
611 | } |
612 | |
613 | static void free_ioctx_reqs(struct percpu_ref *ref) |
614 | { |
615 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); |
616 | |
617 | /* At this point we know that there are no in-flight requests */ |
618 | if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) |
619 | complete(&ctx->rq_wait->comp); |
620 | |
621 | /* Synchronize against RCU protected table->table[] dereferences */ |
622 | call_rcu(&ctx->free_rcu, free_ioctx_rcufn); |
623 | } |
624 | |
625 | /* |
626 | * When this function runs, the kioctx has been removed from the "hash table" |
627 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - |
628 | * now it's safe to cancel any that need to be. |
629 | */ |
630 | static void free_ioctx_users(struct percpu_ref *ref) |
631 | { |
632 | struct kioctx *ctx = container_of(ref, struct kioctx, users); |
633 | struct aio_kiocb *req; |
634 | |
635 | spin_lock_irq(&ctx->ctx_lock); |
636 | |
637 | while (!list_empty(&ctx->active_reqs)) { |
638 | req = list_first_entry(&ctx->active_reqs, |
639 | struct aio_kiocb, ki_list); |
640 | kiocb_cancel(req); |
641 | list_del_init(&req->ki_list); |
642 | } |
643 | |
644 | spin_unlock_irq(&ctx->ctx_lock); |
645 | |
646 | percpu_ref_kill(&ctx->reqs); |
647 | percpu_ref_put(&ctx->reqs); |
648 | } |
649 | |
650 | static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) |
651 | { |
652 | unsigned i, new_nr; |
653 | struct kioctx_table *table, *old; |
654 | struct aio_ring *ring; |
655 | |
656 | spin_lock(&mm->ioctx_lock); |
657 | table = rcu_dereference_raw(mm->ioctx_table); |
658 | |
659 | while (1) { |
660 | if (table) |
661 | for (i = 0; i < table->nr; i++) |
662 | if (!rcu_access_pointer(table->table[i])) { |
663 | ctx->id = i; |
664 | rcu_assign_pointer(table->table[i], ctx); |
665 | spin_unlock(&mm->ioctx_lock); |
666 | |
667 | /* While kioctx setup is in progress, |
668 | * we are protected from page migration |
669 | * changing ring_pages by ->ring_lock. |
670 | */ |
671 | ring = kmap_atomic(ctx->ring_pages[0]); |
672 | ring->id = ctx->id; |
673 | kunmap_atomic(ring); |
674 | return 0; |
675 | } |
676 | |
677 | new_nr = (table ? table->nr : 1) * 4; |
678 | spin_unlock(&mm->ioctx_lock); |
679 | |
680 | table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * |
681 | new_nr, GFP_KERNEL); |
682 | if (!table) |
683 | return -ENOMEM; |
684 | |
685 | table->nr = new_nr; |
686 | |
687 | spin_lock(&mm->ioctx_lock); |
688 | old = rcu_dereference_raw(mm->ioctx_table); |
689 | |
690 | if (!old) { |
691 | rcu_assign_pointer(mm->ioctx_table, table); |
692 | } else if (table->nr > old->nr) { |
693 | memcpy(table->table, old->table, |
694 | old->nr * sizeof(struct kioctx *)); |
695 | |
696 | rcu_assign_pointer(mm->ioctx_table, table); |
697 | kfree_rcu(old, rcu); |
698 | } else { |
699 | kfree(table); |
700 | table = old; |
701 | } |
702 | } |
703 | } |
704 | |
705 | static void aio_nr_sub(unsigned nr) |
706 | { |
707 | spin_lock(&aio_nr_lock); |
708 | if (WARN_ON(aio_nr - nr > aio_nr)) |
709 | aio_nr = 0; |
710 | else |
711 | aio_nr -= nr; |
712 | spin_unlock(&aio_nr_lock); |
713 | } |
714 | |
715 | /* ioctx_alloc |
716 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. |
717 | */ |
718 | static struct kioctx *ioctx_alloc(unsigned nr_events) |
719 | { |
720 | struct mm_struct *mm = current->mm; |
721 | struct kioctx *ctx; |
722 | int err = -ENOMEM; |
723 | |
724 | /* |
725 | * We keep track of the number of available ringbuffer slots, to prevent |
726 | * overflow (reqs_available), and we also use percpu counters for this. |
727 | * |
728 | * Since up to half the slots might be on other CPUs' percpu counters |
729 | * and unavailable, double nr_events so userspace gets what it |
730 | * expected; additionally, we move req_batch slots to/from percpu |
731 | * counters at a time, so make sure that isn't 0: |
732 | */ |
733 | nr_events = max(nr_events, num_possible_cpus() * 4); |
734 | nr_events *= 2; |
735 | |
736 | /* Prevent overflows */ |
737 | if (nr_events > (0x10000000U / sizeof(struct io_event))) { |
738 | pr_debug("ENOMEM: nr_events too high\n"); |
739 | return ERR_PTR(-EINVAL); |
740 | } |
741 | |
742 | if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) |
743 | return ERR_PTR(-EAGAIN); |
744 | |
745 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
746 | if (!ctx) |
747 | return ERR_PTR(-ENOMEM); |
748 | |
749 | ctx->max_reqs = nr_events; |
750 | |
751 | spin_lock_init(&ctx->ctx_lock); |
752 | spin_lock_init(&ctx->completion_lock); |
753 | mutex_init(&ctx->ring_lock); |
754 | /* Protect against page migration throughout kioctx setup by keeping |
755 | * the ring_lock mutex held until setup is complete. */ |
756 | mutex_lock(&ctx->ring_lock); |
757 | init_waitqueue_head(&ctx->wait); |
758 | |
759 | INIT_LIST_HEAD(&ctx->active_reqs); |
760 | |
761 | if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) |
762 | goto err; |
763 | |
764 | if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) |
765 | goto err; |
766 | |
767 | ctx->cpu = alloc_percpu(struct kioctx_cpu); |
768 | if (!ctx->cpu) |
769 | goto err; |
770 | |
771 | err = aio_setup_ring(ctx); |
772 | if (err < 0) |
773 | goto err; |
774 | |
775 | atomic_set(&ctx->reqs_available, ctx->nr_events - 1); |
776 | ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); |
777 | if (ctx->req_batch < 1) |
778 | ctx->req_batch = 1; |
779 | |
780 | /* limit the number of system wide aios */ |
781 | spin_lock(&aio_nr_lock); |
782 | if (aio_nr + nr_events > (aio_max_nr * 2UL) || |
783 | aio_nr + nr_events < aio_nr) { |
784 | spin_unlock(&aio_nr_lock); |
785 | err = -EAGAIN; |
786 | goto err_ctx; |
787 | } |
788 | aio_nr += ctx->max_reqs; |
789 | spin_unlock(&aio_nr_lock); |
790 | |
791 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ |
792 | percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ |
793 | |
794 | err = ioctx_add_table(ctx, mm); |
795 | if (err) |
796 | goto err_cleanup; |
797 | |
798 | /* Release the ring_lock mutex now that all setup is complete. */ |
799 | mutex_unlock(&ctx->ring_lock); |
800 | |
801 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
802 | ctx, ctx->user_id, mm, ctx->nr_events); |
803 | return ctx; |
804 | |
805 | err_cleanup: |
806 | aio_nr_sub(ctx->max_reqs); |
807 | err_ctx: |
808 | atomic_set(&ctx->dead, 1); |
809 | if (ctx->mmap_size) |
810 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
811 | aio_free_ring(ctx); |
812 | err: |
813 | mutex_unlock(&ctx->ring_lock); |
814 | free_percpu(ctx->cpu); |
815 | percpu_ref_exit(&ctx->reqs); |
816 | percpu_ref_exit(&ctx->users); |
817 | kmem_cache_free(kioctx_cachep, ctx); |
818 | pr_debug("error allocating ioctx %d\n", err); |
819 | return ERR_PTR(err); |
820 | } |
821 | |
822 | /* kill_ioctx |
823 | * Cancels all outstanding aio requests on an aio context. Used |
824 | * when the processes owning a context have all exited to encourage |
825 | * the rapid destruction of the kioctx. |
826 | */ |
827 | static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, |
828 | struct ctx_rq_wait *wait) |
829 | { |
830 | struct kioctx_table *table; |
831 | |
832 | spin_lock(&mm->ioctx_lock); |
833 | if (atomic_xchg(&ctx->dead, 1)) { |
834 | spin_unlock(&mm->ioctx_lock); |
835 | return -EINVAL; |
836 | } |
837 | |
838 | table = rcu_dereference_raw(mm->ioctx_table); |
839 | WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); |
840 | RCU_INIT_POINTER(table->table[ctx->id], NULL); |
841 | spin_unlock(&mm->ioctx_lock); |
842 | |
843 | /* free_ioctx_reqs() will do the necessary RCU synchronization */ |
844 | wake_up_all(&ctx->wait); |
845 | |
846 | /* |
847 | * It'd be more correct to do this in free_ioctx(), after all |
848 | * the outstanding kiocbs have finished - but by then io_destroy |
849 | * has already returned, so io_setup() could potentially return |
850 | * -EAGAIN with no ioctxs actually in use (as far as userspace |
851 | * could tell). |
852 | */ |
853 | aio_nr_sub(ctx->max_reqs); |
854 | |
855 | if (ctx->mmap_size) |
856 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
857 | |
858 | ctx->rq_wait = wait; |
859 | percpu_ref_kill(&ctx->users); |
860 | return 0; |
861 | } |
862 | |
863 | /* |
864 | * exit_aio: called when the last user of mm goes away. At this point, there is |
865 | * no way for any new requests to be submitted or any of the io_* syscalls to be |
866 | * called on the context. |
867 | * |
868 | * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on |
869 | * them. |
870 | */ |
871 | void exit_aio(struct mm_struct *mm) |
872 | { |
873 | struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); |
874 | struct ctx_rq_wait wait; |
875 | int i, skipped; |
876 | |
877 | if (!table) |
878 | return; |
879 | |
880 | atomic_set(&wait.count, table->nr); |
881 | init_completion(&wait.comp); |
882 | |
883 | skipped = 0; |
884 | for (i = 0; i < table->nr; ++i) { |
885 | struct kioctx *ctx = |
886 | rcu_dereference_protected(table->table[i], true); |
887 | |
888 | if (!ctx) { |
889 | skipped++; |
890 | continue; |
891 | } |
892 | |
893 | /* |
894 | * We don't need to bother with munmap() here - exit_mmap(mm) |
895 | * is coming and it'll unmap everything. And we simply can't: |
896 | * this is not necessarily our ->mm. |
897 | * Since kill_ioctx() uses a non-zero ->mmap_size as an indicator |
898 | * that it needs to unmap the area, just set it to 0. |
899 | */ |
900 | ctx->mmap_size = 0; |
901 | kill_ioctx(mm, ctx, &wait); |
902 | } |
903 | |
904 | if (!atomic_sub_and_test(skipped, &wait.count)) { |
905 | /* Wait until all IO for the context is done. */ |
906 | wait_for_completion(&wait.comp); |
907 | } |
908 | |
909 | RCU_INIT_POINTER(mm->ioctx_table, NULL); |
910 | kfree(table); |
911 | } |
912 | |
913 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) |
914 | { |
915 | struct kioctx_cpu *kcpu; |
916 | unsigned long flags; |
917 | |
918 | local_irq_save(flags); |
919 | kcpu = this_cpu_ptr(ctx->cpu); |
920 | kcpu->reqs_available += nr; |
921 | |
922 | while (kcpu->reqs_available >= ctx->req_batch * 2) { |
923 | kcpu->reqs_available -= ctx->req_batch; |
924 | atomic_add(ctx->req_batch, &ctx->reqs_available); |
925 | } |
926 | |
927 | local_irq_restore(flags); |
928 | } |
929 | |
930 | static bool get_reqs_available(struct kioctx *ctx) |
931 | { |
932 | struct kioctx_cpu *kcpu; |
933 | bool ret = false; |
934 | unsigned long flags; |
935 | |
936 | local_irq_save(flags); |
937 | kcpu = this_cpu_ptr(ctx->cpu); |
938 | if (!kcpu->reqs_available) { |
939 | int old, avail = atomic_read(&ctx->reqs_available); |
940 | |
941 | do { |
942 | if (avail < ctx->req_batch) |
943 | goto out; |
944 | |
945 | old = avail; |
946 | avail = atomic_cmpxchg(&ctx->reqs_available, |
947 | avail, avail - ctx->req_batch); |
948 | } while (avail != old); |
949 | |
950 | kcpu->reqs_available += ctx->req_batch; |
951 | } |
952 | |
953 | ret = true; |
954 | kcpu->reqs_available--; |
955 | out: |
956 | local_irq_restore(flags); |
957 | return ret; |
958 | } |
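/*
 * Worked example of the batching above (numbers are illustrative): with
 * ctx->nr_events = 256 and 2 possible CPUs, req_batch = (256 - 1) / (2 * 4)
 * = 31. get_reqs_available() pulls 31 slots from ctx->reqs_available into
 * the local CPU's pool when it runs dry, and put_reqs_available() hands
 * slots back 31 at a time once the local pool grows past 2 * req_batch =
 * 62, so the global atomic is touched roughly once per 31 requests instead
 * of on every request.
 */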
959 | |
960 | /* refill_reqs_available |
961 | * Updates the reqs_available reference counts used for tracking the |
962 | * number of free slots in the completion ring. This can be called |
963 | * from aio_complete() (to optimistically update reqs_available) or |
964 | * from aio_get_req() (the "we're out of events" case). It must be |
965 | * called holding ctx->completion_lock. |
966 | */ |
967 | static void refill_reqs_available(struct kioctx *ctx, unsigned head, |
968 | unsigned tail) |
969 | { |
970 | unsigned events_in_ring, completed; |
971 | |
972 | /* Clamp head since userland can write to it. */ |
973 | head %= ctx->nr_events; |
974 | if (head <= tail) |
975 | events_in_ring = tail - head; |
976 | else |
977 | events_in_ring = ctx->nr_events - (head - tail); |
978 | |
979 | completed = ctx->completed_events; |
980 | if (events_in_ring < completed) |
981 | completed -= events_in_ring; |
982 | else |
983 | completed = 0; |
984 | |
985 | if (!completed) |
986 | return; |
987 | |
988 | ctx->completed_events -= completed; |
989 | put_reqs_available(ctx, completed); |
990 | } |
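/*
 * Worked example (illustrative numbers): with nr_events = 128, head = 100
 * and tail = 20, the ring holds 128 - (100 - 20) = 48 unconsumed events.
 * If ctx->completed_events is 60, then 60 - 48 = 12 of the completed slots
 * have already been consumed by userspace, so 12 slots are handed back via
 * put_reqs_available() and completed_events drops to 48.
 */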
991 | |
992 | /* user_refill_reqs_available |
993 | * Called to refill reqs_available when aio_get_req() runs out of |
994 | * space in the completion ring. |
995 | */ |
996 | static void user_refill_reqs_available(struct kioctx *ctx) |
997 | { |
998 | spin_lock_irq(&ctx->completion_lock); |
999 | if (ctx->completed_events) { |
1000 | struct aio_ring *ring; |
1001 | unsigned head; |
1002 | |
1003 | /* Access of ring->head may race with aio_read_events_ring() |
1004 | * here, but that's okay: whether we read the old version |
1005 | * or the new version, either will be valid. The important |
1006 | * part is that head cannot pass tail since we prevent |
1007 | * aio_complete() from updating tail by holding |
1008 | * ctx->completion_lock. Even if head is invalid, the check |
1009 | * against ctx->completed_events below will make sure we do the |
1010 | * safe/right thing. |
1011 | */ |
1012 | ring = kmap_atomic(ctx->ring_pages[0]); |
1013 | head = ring->head; |
1014 | kunmap_atomic(ring); |
1015 | |
1016 | refill_reqs_available(ctx, head, ctx->tail); |
1017 | } |
1018 | |
1019 | spin_unlock_irq(&ctx->completion_lock); |
1020 | } |
1021 | |
1022 | /* aio_get_req |
1023 | * Allocate a slot for an aio request. |
1024 | * Returns NULL if no requests are free. |
1025 | */ |
1026 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) |
1027 | { |
1028 | struct aio_kiocb *req; |
1029 | |
1030 | if (!get_reqs_available(ctx)) { |
1031 | user_refill_reqs_available(ctx); |
1032 | if (!get_reqs_available(ctx)) |
1033 | return NULL; |
1034 | } |
1035 | |
1036 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); |
1037 | if (unlikely(!req)) |
1038 | goto out_put; |
1039 | |
1040 | percpu_ref_get(&ctx->reqs); |
1041 | |
1042 | req->ki_ctx = ctx; |
1043 | return req; |
1044 | out_put: |
1045 | put_reqs_available(ctx, 1); |
1046 | return NULL; |
1047 | } |
1048 | |
1049 | static void kiocb_free(struct aio_kiocb *req) |
1050 | { |
1051 | if (req->common.ki_filp) |
1052 | fput(req->common.ki_filp); |
1053 | if (req->ki_eventfd != NULL) |
1054 | eventfd_ctx_put(req->ki_eventfd); |
1055 | kmem_cache_free(kiocb_cachep, req); |
1056 | } |
1057 | |
1058 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
1059 | { |
1060 | struct aio_ring __user *ring = (void __user *)ctx_id; |
1061 | struct mm_struct *mm = current->mm; |
1062 | struct kioctx *ctx, *ret = NULL; |
1063 | struct kioctx_table *table; |
1064 | unsigned id; |
1065 | |
1066 | if (get_user(id, &ring->id)) |
1067 | return NULL; |
1068 | |
1069 | rcu_read_lock(); |
1070 | table = rcu_dereference(mm->ioctx_table); |
1071 | |
1072 | if (!table || id >= table->nr) |
1073 | goto out; |
1074 | |
1075 | id = array_index_nospec(id, table->nr); |
1076 | ctx = rcu_dereference(table->table[id]); |
1077 | if (ctx && ctx->user_id == ctx_id) { |
1078 | if (percpu_ref_tryget_live(&ctx->users)) |
1079 | ret = ctx; |
1080 | } |
1081 | out: |
1082 | rcu_read_unlock(); |
1083 | return ret; |
1084 | } |
1085 | |
1086 | /* aio_complete |
1087 | * Called when the io request on the given iocb is complete. |
1088 | */ |
1089 | static void aio_complete(struct kiocb *kiocb, long res, long res2) |
1090 | { |
1091 | struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common); |
1092 | struct kioctx *ctx = iocb->ki_ctx; |
1093 | struct aio_ring *ring; |
1094 | struct io_event *ev_page, *event; |
1095 | unsigned tail, pos, head; |
1096 | unsigned long flags; |
1097 | |
1098 | if (kiocb->ki_flags & IOCB_WRITE) { |
1099 | struct file *file = kiocb->ki_filp; |
1100 | |
1101 | /* |
1102 | * Tell lockdep we inherited freeze protection from submission |
1103 | * thread. |
1104 | */ |
1105 | if (S_ISREG(file_inode(file)->i_mode)) |
1106 | __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); |
1107 | file_end_write(file); |
1108 | } |
1109 | |
1110 | /* |
1111 | * Special case handling for sync iocbs: |
1112 | * - events go directly into the iocb for fast handling |
1113 | * - the sync task with the iocb in its stack holds the single iocb |
1114 | * ref, no other paths have a way to get another ref |
1115 | * - the sync task helpfully left a reference to itself in the iocb |
1116 | */ |
1117 | BUG_ON(is_sync_kiocb(kiocb)); |
1118 | |
1119 | if (iocb->ki_list.next) { |
1120 | unsigned long flags; |
1121 | |
1122 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
1123 | list_del(&iocb->ki_list); |
1124 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
1125 | } |
1126 | |
1127 | /* |
1128 | * Add a completion event to the ring buffer. Must be done holding |
1129 | * ctx->completion_lock to prevent other code from messing with the tail |
1130 | * pointer since we might be called from irq context. |
1131 | */ |
1132 | spin_lock_irqsave(&ctx->completion_lock, flags); |
1133 | |
1134 | tail = ctx->tail; |
1135 | pos = tail + AIO_EVENTS_OFFSET; |
1136 | |
1137 | if (++tail >= ctx->nr_events) |
1138 | tail = 0; |
1139 | |
1140 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
1141 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
1142 | |
1143 | event->obj = (u64)(unsigned long)iocb->ki_user_iocb; |
1144 | event->data = iocb->ki_user_data; |
1145 | event->res = res; |
1146 | event->res2 = res2; |
1147 | |
1148 | kunmap_atomic(ev_page); |
1149 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
1150 | |
1151 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", |
1152 | ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, |
1153 | res, res2); |
1154 | |
1155 | /* after flagging the request as done, we |
1156 | * must never even look at it again |
1157 | */ |
1158 | smp_wmb(); /* make event visible before updating tail */ |
1159 | |
1160 | ctx->tail = tail; |
1161 | |
1162 | ring = kmap_atomic(ctx->ring_pages[0]); |
1163 | head = ring->head; |
1164 | ring->tail = tail; |
1165 | kunmap_atomic(ring); |
1166 | flush_dcache_page(ctx->ring_pages[0]); |
1167 | |
1168 | ctx->completed_events++; |
1169 | if (ctx->completed_events > 1) |
1170 | refill_reqs_available(ctx, head, tail); |
1171 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
1172 | |
1173 | pr_debug("added to ring %p at [%u]\n", iocb, tail); |
1174 | |
1175 | /* |
1176 | * Check if the user asked us to deliver the result through an |
1177 | * eventfd. The eventfd_signal() function is safe to be called |
1178 | * from IRQ context. |
1179 | */ |
1180 | if (iocb->ki_eventfd != NULL) |
1181 | eventfd_signal(iocb->ki_eventfd, 1); |
1182 | |
1183 | /* everything turned out well, dispose of the aiocb. */ |
1184 | kiocb_free(iocb); |
1185 | |
1186 | /* |
1187 | * We have to order our ring_info tail store above and test |
1188 | * of the wait list below outside the wait lock. This is |
1189 | * like in wake_up_bit() where clearing a bit has to be |
1190 | * ordered with the unlocked test. |
1191 | */ |
1192 | smp_mb(); |
1193 | |
1194 | if (waitqueue_active(&ctx->wait)) |
1195 | wake_up(&ctx->wait); |
1196 | |
1197 | percpu_ref_put(&ctx->reqs); |
1198 | } |
1199 | |
1200 | /* aio_read_events_ring |
1201 | * Pull events off the ioctx's event ring. Returns the number of |
1202 | * events fetched. |
1203 | */ |
1204 | static long aio_read_events_ring(struct kioctx *ctx, |
1205 | struct io_event __user *event, long nr) |
1206 | { |
1207 | struct aio_ring *ring; |
1208 | unsigned head, tail, pos; |
1209 | long ret = 0; |
1210 | int copy_ret; |
1211 | |
1212 | /* |
1213 | * The mutex can block and wake us up and that will cause |
1214 | * wait_event_interruptible_hrtimeout() to schedule without sleeping |
1215 | * and repeat. This should be rare enough that it doesn't cause |
1216 | * performance issues. See the comment in read_events() for more detail. |
1217 | */ |
1218 | sched_annotate_sleep(); |
1219 | mutex_lock(&ctx->ring_lock); |
1220 | |
1221 | /* Access to ->ring_pages here is protected by ctx->ring_lock. */ |
1222 | ring = kmap_atomic(ctx->ring_pages[0]); |
1223 | head = ring->head; |
1224 | tail = ring->tail; |
1225 | kunmap_atomic(ring); |
1226 | |
1227 | /* |
1228 | * Ensure that once we've read the current tail pointer, that |
1229 | * we also see the events that were stored up to the tail. |
1230 | */ |
1231 | smp_rmb(); |
1232 | |
1233 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); |
1234 | |
1235 | if (head == tail) |
1236 | goto out; |
1237 | |
1238 | head %= ctx->nr_events; |
1239 | tail %= ctx->nr_events; |
1240 | |
1241 | while (ret < nr) { |
1242 | long avail; |
1243 | struct io_event *ev; |
1244 | struct page *page; |
1245 | |
1246 | avail = (head <= tail ? tail : ctx->nr_events) - head; |
1247 | if (head == tail) |
1248 | break; |
1249 | |
1250 | avail = min(avail, nr - ret); |
1251 | avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - |
1252 | ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE)); |
1253 | |
1254 | pos = head + AIO_EVENTS_OFFSET; |
1255 | page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; |
1256 | pos %= AIO_EVENTS_PER_PAGE; |
1257 | |
1258 | ev = kmap(page); |
1259 | copy_ret = copy_to_user(event + ret, ev + pos, |
1260 | sizeof(*ev) * avail); |
1261 | kunmap(page); |
1262 | |
1263 | if (unlikely(copy_ret)) { |
1264 | ret = -EFAULT; |
1265 | goto out; |
1266 | } |
1267 | |
1268 | ret += avail; |
1269 | head += avail; |
1270 | head %= ctx->nr_events; |
1271 | } |
1272 | |
1273 | ring = kmap_atomic(ctx->ring_pages[0]); |
1274 | ring->head = head; |
1275 | kunmap_atomic(ring); |
1276 | flush_dcache_page(ctx->ring_pages[0]); |
1277 | |
1278 | pr_debug("%li h%u t%u\n", ret, head, tail); |
1279 | out: |
1280 | mutex_unlock(&ctx->ring_lock); |
1281 | |
1282 | return ret; |
1283 | } |
1284 | |
1285 | static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, |
1286 | struct io_event __user *event, long *i) |
1287 | { |
1288 | long ret = aio_read_events_ring(ctx, event + *i, nr - *i); |
1289 | |
1290 | if (ret > 0) |
1291 | *i += ret; |
1292 | |
1293 | if (unlikely(atomic_read(&ctx->dead))) |
1294 | ret = -EINVAL; |
1295 | |
1296 | if (!*i) |
1297 | *i = ret; |
1298 | |
1299 | return ret < 0 || *i >= min_nr; |
1300 | } |
1301 | |
1302 | static long read_events(struct kioctx *ctx, long min_nr, long nr, |
1303 | struct io_event __user *event, |
1304 | struct timespec __user *timeout) |
1305 | { |
1306 | ktime_t until = { .tv64 = KTIME_MAX }; |
1307 | long ret = 0; |
1308 | |
1309 | if (timeout) { |
1310 | struct timespec ts; |
1311 | |
1312 | if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) |
1313 | return -EFAULT; |
1314 | |
1315 | until = timespec_to_ktime(ts); |
1316 | } |
1317 | |
1318 | /* |
1319 | * Note that aio_read_events() is being called as the conditional - i.e. |
1320 | * we're calling it after prepare_to_wait() has set task state to |
1321 | * TASK_INTERRUPTIBLE. |
1322 | * |
1323 | * But aio_read_events() can block, and if it blocks it's going to flip |
1324 | * the task state back to TASK_RUNNING. |
1325 | * |
1326 | * This should be ok, provided it doesn't flip the state back to |
1327 | * TASK_RUNNING and return 0 too much - that causes us to spin. That |
1328 | * will only happen if the mutex_lock() call blocks, and we then find |
1329 | * the ringbuffer empty. So in practice we should be ok, but it's |
1330 | * something to be aware of when touching this code. |
1331 | */ |
1332 | if (until.tv64 == 0) |
1333 | aio_read_events(ctx, min_nr, nr, event, &ret); |
1334 | else |
1335 | wait_event_interruptible_hrtimeout(ctx->wait, |
1336 | aio_read_events(ctx, min_nr, nr, event, &ret), |
1337 | until); |
1338 | |
1339 | if (!ret && signal_pending(current)) |
1340 | ret = -EINTR; |
1341 | |
1342 | return ret; |
1343 | } |
1344 | |
1345 | /* sys_io_setup: |
1346 | * Create an aio_context capable of receiving at least nr_events. |
1347 | * ctxp must not point to an aio_context that already exists, and |
1348 | * must be initialized to 0 prior to the call. On successful |
1349 | * creation of the aio_context, *ctxp is filled in with the resulting |
1350 | * handle. May fail with -EINVAL if *ctxp is not initialized, |
1351 | * or if the specified nr_events exceeds internal limits. May fail |
1352 | * with -EAGAIN if the specified nr_events exceeds the user's limit |
1353 | * of available events. May fail with -ENOMEM if insufficient kernel |
1354 | * resources are available. May fail with -EFAULT if an invalid |
1355 | * pointer is passed for ctxp. Will fail with -ENOSYS if not |
1356 | * implemented. |
1357 | */ |
1358 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) |
1359 | { |
1360 | struct kioctx *ioctx = NULL; |
1361 | unsigned long ctx; |
1362 | long ret; |
1363 | |
1364 | ret = get_user(ctx, ctxp); |
1365 | if (unlikely(ret)) |
1366 | goto out; |
1367 | |
1368 | ret = -EINVAL; |
1369 | if (unlikely(ctx || nr_events == 0)) { |
1370 | pr_debug("EINVAL: ctx %lu nr_events %u\n", |
1371 | ctx, nr_events); |
1372 | goto out; |
1373 | } |
1374 | |
1375 | ioctx = ioctx_alloc(nr_events); |
1376 | ret = PTR_ERR(ioctx); |
1377 | if (!IS_ERR(ioctx)) { |
1378 | ret = put_user(ioctx->user_id, ctxp); |
1379 | if (ret) |
1380 | kill_ioctx(current->mm, ioctx, NULL); |
1381 | percpu_ref_put(&ioctx->users); |
1382 | } |
1383 | |
1384 | out: |
1385 | return ret; |
1386 | } |
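/*
 * Minimal userspace sketch (assumes <linux/aio_abi.h>, <sys/syscall.h> and
 * <unistd.h> are included; error handling elided):
 *
 *	aio_context_t ctx = 0;			// must start out zeroed
 *
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */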
1387 | |
1388 | /* sys_io_destroy: |
1389 | * Destroy the aio_context specified. May cancel any outstanding |
1390 | * AIOs and block on completion. Will fail with -ENOSYS if not |
1391 | * implemented. May fail with -EINVAL if the context pointed to |
1392 | * is invalid. |
1393 | */ |
1394 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1395 | { |
1396 | struct kioctx *ioctx = lookup_ioctx(ctx); |
1397 | if (likely(NULL != ioctx)) { |
1398 | struct ctx_rq_wait wait; |
1399 | int ret; |
1400 | |
1401 | init_completion(&wait.comp); |
1402 | atomic_set(&wait.count, 1); |
1403 | |
1404 | /* Pass the completion wait structure to kill_ioctx() where it can be |
1405 | * set in a thread-safe way. If we tried to set it here then we would |
1406 | * have a race condition if two io_destroy() calls ran simultaneously. |
1407 | */ |
1408 | ret = kill_ioctx(current->mm, ioctx, &wait); |
1409 | percpu_ref_put(&ioctx->users); |
1410 | |
1411 | /* Wait until all IO for the context is done. Otherwise the kernel |
1412 | * keeps using user-space buffers even if the user thinks the context |
1413 | * is destroyed. |
1414 | */ |
1415 | if (!ret) |
1416 | wait_for_completion(&wait.comp); |
1417 | |
1418 | return ret; |
1419 | } |
1420 | pr_debug("EINVAL: invalid context id\n"); |
1421 | return -EINVAL; |
1422 | } |
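/*
 * Userspace counterpart (sketch, reusing "ctx" from the io_setup() example
 * above): syscall(__NR_io_destroy, ctx). Per the wait_for_completion()
 * above, the call only returns once in-flight requests for the context
 * have drained.
 */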
1423 | |
1424 | static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec, |
1425 | bool vectored, bool compat, struct iov_iter *iter) |
1426 | { |
1427 | void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; |
1428 | size_t len = iocb->aio_nbytes; |
1429 | |
1430 | if (!vectored) { |
1431 | ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); |
1432 | *iovec = NULL; |
1433 | return ret; |
1434 | } |
1435 | #ifdef CONFIG_COMPAT |
1436 | if (compat) |
1437 | return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec, |
1438 | iter); |
1439 | #endif |
1440 | return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter); |
1441 | } |
1442 | |
1443 | static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret) |
1444 | { |
1445 | switch (ret) { |
1446 | case -EIOCBQUEUED: |
1447 | return ret; |
1448 | case -ERESTARTSYS: |
1449 | case -ERESTARTNOINTR: |
1450 | case -ERESTARTNOHAND: |
1451 | case -ERESTART_RESTARTBLOCK: |
1452 | /* |
1453 | * There's no easy way to restart the syscall since other AIOs |
1454 | * may already be running. Just fail this IO with EINTR. |
1455 | */ |
1456 | ret = -EINTR; |
1457 | /*FALLTHRU*/ |
1458 | default: |
1459 | aio_complete(req, ret, 0); |
1460 | return 0; |
1461 | } |
1462 | } |
1463 | |
1464 | static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, |
1465 | bool compat) |
1466 | { |
1467 | struct file *file = req->ki_filp; |
1468 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
1469 | struct iov_iter iter; |
1470 | ssize_t ret; |
1471 | |
1472 | if (unlikely(!(file->f_mode & FMODE_READ))) |
1473 | return -EBADF; |
1474 | if (unlikely(!file->f_op->read_iter)) |
1475 | return -EINVAL; |
1476 | |
1477 | ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); |
1478 | if (ret) |
1479 | return ret; |
1480 | ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); |
1481 | if (!ret) |
1482 | ret = aio_ret(req, file->f_op->read_iter(req, &iter)); |
1483 | kfree(iovec); |
1484 | return ret; |
1485 | } |
1486 | |
1487 | static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, |
1488 | bool compat) |
1489 | { |
1490 | struct file *file = req->ki_filp; |
1491 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
1492 | struct iov_iter iter; |
1493 | ssize_t ret; |
1494 | |
1495 | if (unlikely(!(file->f_mode & FMODE_WRITE))) |
1496 | return -EBADF; |
1497 | if (unlikely(!file->f_op->write_iter)) |
1498 | return -EINVAL; |
1499 | |
1500 | ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); |
1501 | if (ret) |
1502 | return ret; |
1503 | ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); |
1504 | if (!ret) { |
1505 | req->ki_flags |= IOCB_WRITE; |
1506 | file_start_write(file); |
1507 | ret = aio_ret(req, file->f_op->write_iter(req, &iter)); |
1508 | /* |
1509 | * We release freeze protection in aio_complete(). Fool lockdep |
1510 | * by telling it the lock got released so that it doesn't |
1511 | * complain about held lock when we return to userspace. |
1512 | */ |
1513 | if (S_ISREG(file_inode(file)->i_mode)) |
1514 | __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); |
1515 | } |
1516 | kfree(iovec); |
1517 | return ret; |
1518 | } |
1519 | |
1520 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
1521 | struct iocb *iocb, bool compat) |
1522 | { |
1523 | struct aio_kiocb *req; |
1524 | struct file *file; |
1525 | ssize_t ret; |
1526 | |
1527 | /* enforce forwards compatibility on users */ |
1528 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { |
1529 | pr_debug("EINVAL: reserve field set\n"); |
1530 | return -EINVAL; |
1531 | } |
1532 | |
1533 | /* prevent overflows */ |
1534 | if (unlikely( |
1535 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || |
1536 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || |
1537 | ((ssize_t)iocb->aio_nbytes < 0) |
1538 | )) { |
1539 | pr_debug("EINVAL: overflow check\n"); |
1540 | return -EINVAL; |
1541 | } |
1542 | |
1543 | req = aio_get_req(ctx); |
1544 | if (unlikely(!req)) |
1545 | return -EAGAIN; |
1546 | |
1547 | req->common.ki_filp = file = fget(iocb->aio_fildes); |
1548 | if (unlikely(!req->common.ki_filp)) { |
1549 | ret = -EBADF; |
1550 | goto out_put_req; |
1551 | } |
1552 | req->common.ki_pos = iocb->aio_offset; |
1553 | req->common.ki_complete = aio_complete; |
1554 | req->common.ki_flags = iocb_flags(req->common.ki_filp); |
1555 | |
1556 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
1557 | /* |
1558 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an |
1559 | * instance of the file* now. The file descriptor must be |
1560 | * an eventfd() fd, and will be signaled for each completed |
1561 | * event using the eventfd_signal() function. |
1562 | */ |
1563 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
1564 | if (IS_ERR(req->ki_eventfd)) { |
1565 | ret = PTR_ERR(req->ki_eventfd); |
1566 | req->ki_eventfd = NULL; |
1567 | goto out_put_req; |
1568 | } |
1569 | |
1570 | req->common.ki_flags |= IOCB_EVENTFD; |
1571 | } |
1572 | |
1573 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); |
1574 | if (unlikely(ret)) { |
1575 | pr_debug("EFAULT: aio_key\n"); |
1576 | goto out_put_req; |
1577 | } |
1578 | |
1579 | req->ki_user_iocb = user_iocb; |
1580 | req->ki_user_data = iocb->aio_data; |
1581 | |
1582 | get_file(file); |
1583 | switch (iocb->aio_lio_opcode) { |
1584 | case IOCB_CMD_PREAD: |
1585 | ret = aio_read(&req->common, iocb, false, compat); |
1586 | break; |
1587 | case IOCB_CMD_PWRITE: |
1588 | ret = aio_write(&req->common, iocb, false, compat); |
1589 | break; |
1590 | case IOCB_CMD_PREADV: |
1591 | ret = aio_read(&req->common, iocb, true, compat); |
1592 | break; |
1593 | case IOCB_CMD_PWRITEV: |
1594 | ret = aio_write(&req->common, iocb, true, compat); |
1595 | break; |
1596 | default: |
1597 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); |
1598 | ret = -EINVAL; |
1599 | break; |
1600 | } |
1601 | fput(file); |
1602 | |
1603 | if (ret && ret != -EIOCBQUEUED) |
1604 | goto out_put_req; |
1605 | return 0; |
1606 | out_put_req: |
1607 | put_reqs_available(ctx, 1); |
1608 | percpu_ref_put(&ctx->reqs); |
1609 | kiocb_free(req); |
1610 | return ret; |
1611 | } |
1612 | |
1613 | long do_io_submit(aio_context_t ctx_id, long nr, |
1614 | struct iocb __user *__user *iocbpp, bool compat) |
1615 | { |
1616 | struct kioctx *ctx; |
1617 | long ret = 0; |
1618 | int i = 0; |
1619 | struct blk_plug plug; |
1620 | |
1621 | if (unlikely(nr < 0)) |
1622 | return -EINVAL; |
1623 | |
1624 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) |
1625 | nr = LONG_MAX/sizeof(*iocbpp); |
1626 | |
1627 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) |
1628 | return -EFAULT; |
1629 | |
1630 | ctx = lookup_ioctx(ctx_id); |
1631 | if (unlikely(!ctx)) { |
1632 | pr_debug("EINVAL: invalid context id\n"); |
1633 | return -EINVAL; |
1634 | } |
1635 | |
1636 | blk_start_plug(&plug); |
1637 | |
1638 | /* |
1639 | * AKPM: should this return a partial result if some of the IOs were |
1640 | * successfully submitted? |
1641 | */ |
1642 | for (i = 0; i < nr; i++) { |
1643 | struct iocb __user *user_iocb; |
1644 | struct iocb tmp; |
1645 | |
1646 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { |
1647 | ret = -EFAULT; |
1648 | break; |
1649 | } |
1650 | |
1651 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { |
1652 | ret = -EFAULT; |
1653 | break; |
1654 | } |
1655 | |
1656 | ret = io_submit_one(ctx, user_iocb, &tmp, compat); |
1657 | if (ret) |
1658 | break; |
1659 | } |
1660 | blk_finish_plug(&plug); |
1661 | |
1662 | percpu_ref_put(&ctx->users); |
1663 | return i ? i : ret; |
1664 | } |
1665 | |
1666 | /* sys_io_submit: |
1667 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns |
1668 | * the number of iocbs queued. May return -EINVAL if the aio_context |
1669 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at |
1670 | * *iocbpp[0] is not properly initialized, or if the operation specified |
1671 | * is invalid for the file descriptor in the iocb. May fail with |
1672 | * -EFAULT if any of the data structures point to invalid data. May |
1673 | * fail with -EBADF if the file descriptor specified in the first |
1674 | * iocb is invalid. May fail with -EAGAIN if insufficient resources |
1675 | * are available to queue any iocbs. Will return 0 if nr is 0. Will |
1676 | * fail with -ENOSYS if not implemented. |
1677 | */ |
1678 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, |
1679 | struct iocb __user * __user *, iocbpp) |
1680 | { |
1681 | return do_io_submit(ctx_id, nr, iocbpp, 0); |
1682 | } |
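/*
 * Minimal userspace sketch of a single 4 KiB read at offset 0 ("fd", "buf"
 * and "ctx" are assumed to exist; error handling elided):
 *
 *	struct iocb cb = { 0 };
 *	struct iocb *cbs[1] = { &cb };
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = 4096;
 *	cb.aio_offset     = 0;
 *
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */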
1683 | |
1684 | /* lookup_kiocb |
1685 | * Finds a given iocb for cancellation. |
1686 | */ |
1687 | static struct aio_kiocb * |
1688 | lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key) |
1689 | { |
1690 | struct aio_kiocb *kiocb; |
1691 | |
1692 | assert_spin_locked(&ctx->ctx_lock); |
1693 | |
1694 | if (key != KIOCB_KEY) |
1695 | return NULL; |
1696 | |
1697 | /* TODO: use a hash or array, this sucks. */ |
1698 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { |
1699 | if (kiocb->ki_user_iocb == iocb) |
1700 | return kiocb; |
1701 | } |
1702 | return NULL; |
1703 | } |
1704 | |
1705 | /* sys_io_cancel: |
1706 | * Attempts to cancel an iocb previously passed to io_submit. If |
1707 | * the operation is successfully cancelled, the resulting event is |
1708 | * copied into the memory pointed to by result without being placed |
1709 | * into the completion queue and 0 is returned. May fail with |
1710 | * -EFAULT if any of the data structures pointed to are invalid. |
1711 | * May fail with -EINVAL if aio_context specified by ctx_id is |
1712 | * invalid. May fail with -EAGAIN if the iocb specified was not |
1713 | * cancelled. Will fail with -ENOSYS if not implemented. |
1714 | */ |
1715 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
1716 | struct io_event __user *, result) |
1717 | { |
1718 | struct kioctx *ctx; |
1719 | struct aio_kiocb *kiocb; |
1720 | u32 key; |
1721 | int ret; |
1722 | |
1723 | ret = get_user(key, &iocb->aio_key); |
1724 | if (unlikely(ret)) |
1725 | return -EFAULT; |
1726 | |
1727 | ctx = lookup_ioctx(ctx_id); |
1728 | if (unlikely(!ctx)) |
1729 | return -EINVAL; |
1730 | |
1731 | spin_lock_irq(&ctx->ctx_lock); |
1732 | |
1733 | kiocb = lookup_kiocb(ctx, iocb, key); |
1734 | if (kiocb) |
1735 | ret = kiocb_cancel(kiocb); |
1736 | else |
1737 | ret = -EINVAL; |
1738 | |
1739 | spin_unlock_irq(&ctx->ctx_lock); |
1740 | |
1741 | if (!ret) { |
1742 | /* |
1743 | * The result argument is no longer used - the io_event is |
1744 | * always delivered via the ring buffer. -EINPROGRESS indicates |
1745 | * cancellation is in progress: |
1746 | */ |
1747 | ret = -EINPROGRESS; |
1748 | } |
1749 | |
1750 | percpu_ref_put(&ctx->users); |
1751 | |
1752 | return ret; |
1753 | } |
1754 | |
1755 | /* io_getevents: |
1756 | * Attempts to read at least min_nr events and up to nr events from |
1757 | * the completion queue for the aio_context specified by ctx_id. If |
1758 | * it succeeds, the number of read events is returned. May fail with |
1759 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is |
1760 | * out of range, if timeout is out of range. May fail with -EFAULT |
1761 | * if any of the memory specified is invalid. May return 0 or |
1762 | * < min_nr if the timeout specified by timeout has elapsed |
1763 | * before sufficient events are available, where timeout == NULL |
1764 | * specifies an infinite timeout. Note that the timeout pointed to by |
1765 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
1766 | */ |
1767 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
1768 | long, min_nr, |
1769 | long, nr, |
1770 | struct io_event __user *, events, |
1771 | struct timespec __user *, timeout) |
1772 | { |
1773 | struct kioctx *ioctx = lookup_ioctx(ctx_id); |
1774 | long ret = -EINVAL; |
1775 | |
1776 | if (likely(ioctx)) { |
1777 | if (likely(min_nr <= nr && min_nr >= 0)) |
1778 | ret = read_events(ioctx, min_nr, nr, events, timeout); |
1779 | percpu_ref_put(&ioctx->users); |
1780 | } |
1781 | return ret; |
1782 | } |
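/*
 * Minimal userspace sketch of reaping completions (assumes "ctx" from the
 * io_setup() sketch above; a NULL timeout waits indefinitely; error
 * handling elided):
 *
 *	struct io_event events[8];
 *	long n;
 *
 *	n = syscall(__NR_io_getevents, ctx, 1, 8, events, NULL);
 *	// on success, events[0..n-1].res holds each request's result and
 *	// events[i].data echoes the submitted iocb's aio_data
 */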
1783 |