/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

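/*
 * Messages are kept in a red-black tree of posix_msg_tree_node entries,
 * one node per priority currently in use; each node carries a FIFO list
 * of the messages posted at that priority.  Finding the highest priority
 * is therefore logarithmic in the number of distinct priorities rather
 * than in the number of queued messages.
 */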
struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate the message lists */
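/*
 * msg_insert() - file @msg into @info's priority tree; called with
 * info->lock held.  The cached spare node is consumed first so that the
 * GFP_ATOMIC allocation below is only a fallback.
 */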
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

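/*
 * msg_get() - detach and return the highest-priority message, or NULL if
 * the queue is empty; called with info->lock held.  A leaf that becomes
 * empty is unlinked from the tree and recycled into node_cache when the
 * cache slot is free.
 */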
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to the
		 * right.  On receive, we want the highest priorities first, so
		 * walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size.  That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
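		/*
		 * Illustrative worked example: with the usual defaults of
		 * mq_maxmsg = 10 and mq_msgsize = 8192, the accounting below
		 * charges 10 * sizeof(struct msg_msg) +
		 * 10 * sizeof(struct posix_msg_tree_node) of metadata plus
		 * 10 * 8192 bytes of payload against RLIMIT_MSGQUEUE, all up
		 * front at queue creation.
		 */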
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	struct ipc_namespace *ns;
	if (flags & MS_KERNMOUNT) {
		ns = data;
		data = NULL;
	} else {
		ns = current->nsproxy->ipc_ns;
	}
	return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine implements read() on a queue file.  Rather than duplicate
 * mq_receive() here, we only expose the queue size and the notification
 * info -- the only values that are interesting from the user's point of
 * view and not accessible through the standard interfaces.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

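/*
 * poll()/select() support: the queue is readable when at least one
 * message is pending and writable while there is room for another.
 */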
static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds the current task to info->e_wait_q[sr] before the first element with a smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
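
/*
 * The list built above keeps the numerically largest static_prio (i.e.
 * the lowest scheduling priority) nearest the head, so the tail always
 * holds the best candidate; wq_get_first_waiter() below therefore picks
 * from the tail: the highest-priority and, among equals, the
 * longest-waiting task.
 */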

/*
 * Puts the current task to sleep.  The caller must hold the queue lock;
 * it is dropped before returning.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns the waiting task that should be serviced first, or NULL if none
 * exists.
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


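/*
 * Stamp the last byte of the NOTIFY_COOKIE_LEN-sized notification cookie
 * with a status code (NOTIFY_WOKENUP or NOTIFY_REMOVED).  The C library's
 * SIGEV_THREAD helper reads the cookie back over its netlink socket to
 * learn whether it should spawn the notification thread.
 */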
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to split up the overly long sys_mq_timedsend().
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process, no process is waiting
	 * synchronously for a message, AND the state of the queue changed
	 * from empty to not empty.  Here we are sure that nobody is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

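/*
 * Validate user-supplied attributes against the per-namespace limits (or
 * the compile-time hard limits for CAP_SYS_RESOURCE holders) and make
 * sure the worst-case accounting arithmetic below cannot overflow.
 */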
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
		    attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = vfs_create2(path->mnt, dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission2(path->mnt, d_inode(path->dentry), acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}

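/*
 * Illustrative user-space counterpart of this syscall (a sketch assuming
 * the usual librt/glibc wrappers; the queue name and attribute values
 * below are examples only):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *	if (q == (mqd_t)-1)
 *		perror("mq_open");
 */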
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	struct filename *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len2(name->name, mnt, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (d_really_is_positive(path.dentry)) {	/* entry already exists */
			audit_inode(name, path.dentry, 0);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			audit_inode_parent_hidden(name, root);
			filp = do_create(ipc_ns, d_inode(root),
						&path, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (d_really_is_negative(path.dentry)) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry, 0);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len2(name->name, mnt, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink2(mnt, d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers.  A sender checks that list before adding the
 * new message to the message tree.  If there is a waiting receiver, then
 * it bypasses the message tree and directly hands the message over to the
 * receiver.  The receiver accepts the message and returns without grabbing
 * the queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY.  Now the receiver can continue.
 * - Wake up the process after the lock is dropped.  Should the process
 *   wake up before this wakeup (due to a timeout or a signal) it will
 *   either see STATE_READY and continue or acquire the lock to check the
 *   state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add so that
	 * updating receiver->state is guaranteed to be the last write
	 * operation: once it is set, the receiver can continue, and if
	 * we did not yet hold a reference from the wake_q at that point,
	 * we could later hit a use-after-free and a bogus wakeup.
	 */
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed to
 * be room, since a message was just removed). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}

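/*
 * For reference, an illustrative user-space sketch of the pair of calls
 * served below (assuming the usual librt/glibc wrappers and a queue
 * opened as in the mq_open() example above):
 *
 *	char buf[128];			// must be >= attr.mq_msgsize
 *	mq_send(q, "hello", 6, 5);	// enqueue at priority 5
 *	ssize_t n = mq_receive(q, buf, sizeof(buf), NULL);
 */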
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	WAKE_Q(wake_q);

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
		    store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

/*
 * Note: if a caller attempts to deregister (by passing a NULL pointer)
 * while not currently owning the notification, the request is silently
 * ignored.  POSIX does not explicitly define this case.
 */
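/*
 * Illustrative registration from user space (a sketch assuming the glibc
 * wrapper; the signal choice is an example only):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(q, &sev);	// SIGUSR1 fires when the queue goes
 *				// from empty to non-empty
 */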
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification.sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}

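/*
 * mq_getsetattr: only the O_NONBLOCK bit of mq_flags is mutable after
 * creation; mq_maxmsg and mq_msgsize are fixed at mq_open() time.  The
 * previous attributes are returned through u_omqstat when requested.
 */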
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&f.file->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fdput(f);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);