/*
 * linux/fs/fcntl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/shmem_fs.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

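/*
 * setfl() implements F_SETFL.  For reference, the userspace side is a
 * read-modify-write of the status flags (illustrative sketch, not part
 * of this file):
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags != -1)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */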
static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be toggled if the file is marked as
	 * append-only: clearing it would let writes bypass the
	 * append-only restriction.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

out:
	return error;
}

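/*
 * Install @pid as the new owner.  With @force the existing owner is
 * replaced unconditionally; without it, @pid is only installed when no
 * owner has been set yet.
 */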
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);

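/*
 * f_setown() implements the classic F_SETOWN encoding: a positive
 * argument selects a process, a negative one a process group
 * (illustrative userspace sketch):
 *
 *	fcntl(fd, F_SETOWN, getpid());		// SIGIO to this process
 *	fcntl(fd, F_SETOWN, -getpgrp());	// SIGIO to the whole group
 */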
void f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;

	type = PIDTYPE_PID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return;

		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	__f_setown(filp, pid, type, force);
	rcu_read_unlock();
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;

	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

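/*
 * F_SETOWN_EX avoids the sign overloading of F_SETOWN by passing an
 * explicit owner type (illustrative userspace sketch; a gettid()
 * wrapper is assumed to be available):
 *
 *	struct f_owner_ex owner = { .type = F_OWNER_TID, .pid = gettid() };
 *	fcntl(fd, F_SETOWN_EX, &owner);
 */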
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_MAX;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}

static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock(&filp->f_owner.lock);

	err = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		err = fcntl_getlk(filp, cmd, (struct flock __user *) arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
#endif
	/* Fallthrough */
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		f_setown(filp, arg, 1);
		err = 0;
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg))
			break;
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = shmem_fcntl(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

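/*
 * Descriptors opened with O_PATH (FMODE_PATH) only support the small
 * set of commands below; everything else fails with -EBADF in the
 * syscall entry points.
 */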
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = fcntl_getlk64(f.file, cmd, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = fcntl_setlk64(fd, f.file, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

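/*
 * A task may be sent the SIGIO only if the file owner is privileged or
 * the owner's uid/euid matches the target task's real or saved
 * credentials, and the security module does not veto delivery.
 */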
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid, cred->suid) || uid_eq(fown->uid, cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = ACCESS_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
	default:
		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		si.si_signo = signum;
		si.si_errno = 0;
		si.si_code = reason;
		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace. */
		BUG_ON((reason & __SI_MASK) != __SI_POLL);
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = band_table[reason - POLL_IN];
		si.si_fd = fd;
		if (!do_send_sig_info(signum, &si, p, group))
			break;
	/* fall-through: fall back on the old plain SIGIO signal */
	case 0:
		do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, int group)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		spin_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp,
					  struct fasync_struct **fapp,
					  struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		spin_unlock_irq(&fa->fa_lock);
		goto out;
	}

	spin_lock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (update) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}
EXPORT_SYMBOL(fasync_helper);
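
/*
 * Typical driver usage of fasync_helper() (illustrative sketch;
 * "struct mydev" and its fasync_queue list are hypothetical):
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		struct mydev *dev = filp->private_data;
 *
 *		return fasync_helper(fd, filp, on, &dev->fasync_queue);
 *	}
 */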

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		spin_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		spin_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);
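
/*
 * The producer side is then a one-liner (illustrative, continuing the
 * hypothetical driver above): when new data is ready to be read,
 *
 *	kill_fasync(&dev->fasync_queue, SIGIO, POLL_IN);
 */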

static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fcntl_init)
759 |