/*
 * linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 * Released under GPL v2.
 * Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
        return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

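/* return the first mount on @p's list of slaves */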
static inline struct mount *first_slave(struct mount *p)
{
        return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

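/* return the last mount on @p's list of slaves */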
static inline struct mount *last_slave(struct mount *p)
{
        return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
}

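/* return the mount that follows @p on its master's list of slaves */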
static inline struct mount *next_slave(struct mount *p)
{
        return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

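/*
 * Walk @mnt's peer group and return the first peer that belongs to
 * namespace @ns and whose root is reachable from @root, or NULL if
 * there is no such peer.
 */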
static struct mount *get_peer_under_root(struct mount *mnt,
                                         struct mnt_namespace *ns,
                                         const struct path *root)
{
        struct mount *m = mnt;

        do {
                /* Check the namespace first for optimization */
                if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
                        return m;

                m = next_peer(m);
        } while (m != mnt);

        return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
        struct mount *m;

        for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
                struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
                if (d)
                        return d->mnt_group_id;
        }

        return 0;
}

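/*
 * Detach @mnt from its peer group and turn it into a slave.  A peer
 * with the same root dentry is preferred as the new master; failing
 * that, any other peer; failing that, @mnt's old master.  @mnt's own
 * slaves follow it to the new master, or become masterless if none
 * is left.  Always returns 0.
 */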
static int do_make_slave(struct mount *mnt)
{
        struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
        struct mount *slave_mnt;

        /*
         * slave 'mnt' to a peer mount that has the
         * same root dentry. If none is available then
         * slave it to anything that is available.
         */
        while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
               peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root)
                ;

        if (peer_mnt == mnt) {
                peer_mnt = next_peer(mnt);
                if (peer_mnt == mnt)
                        peer_mnt = NULL;
        }
        if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
            list_empty(&mnt->mnt_share))
                mnt_release_group_id(mnt);

        list_del_init(&mnt->mnt_share);
        mnt->mnt_group_id = 0;

        if (peer_mnt)
                master = peer_mnt;

        if (master) {
                list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
                        slave_mnt->mnt_master = master;
                list_move(&mnt->mnt_slave, &master->mnt_slave_list);
                list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
        } else {
                struct list_head *p = &mnt->mnt_slave_list;
                while (!list_empty(p)) {
                        slave_mnt = list_first_entry(p,
                                        struct mount, mnt_slave);
                        list_del_init(&slave_mnt->mnt_slave);
                        slave_mnt->mnt_master = NULL;
                }
        }
        mnt->mnt_master = master;
        CLEAR_MNT_SHARED(mnt);
        return 0;
}

/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
        if (type == MS_SHARED) {
                set_mnt_shared(mnt);
                return;
        }
        do_make_slave(mnt);
        if (type != MS_SLAVE) {
                list_del_init(&mnt->mnt_slave);
                mnt->mnt_master = NULL;
                if (type == MS_UNBINDABLE)
                        mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
                else
                        mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
        }
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if a
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
                                      struct mount *origin)
{
        /* are there any slaves of this mount? */
        if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
                return first_slave(m);

        while (1) {
                struct mount *master = m->mnt_master;

                if (master == origin->mnt_master) {
                        struct mount *next = next_peer(m);
                        return (next == origin) ? NULL : next;
                } else if (m->mnt_slave.next != &master->mnt_slave_list)
                        return next_slave(m);

                /* back at master */
                m = master;
        }
}

static struct mount *skip_propagation_subtree(struct mount *m,
                                              struct mount *origin)
{
        /*
         * Advance m such that propagation_next will not return
         * the slaves of m.
         */
        if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
                m = last_slave(m);

        return m;
}

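/*
 * Advance from the peer group containing @m to a representative mount
 * of the next peer group in the propagation tree rooted at @origin,
 * returning NULL once every group has been visited.  Used by
 * propagate_mnt() to iterate over peer groups rather than over
 * individual mounts.
 */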
static struct mount *next_group(struct mount *m, struct mount *origin)
{
        while (1) {
                while (1) {
                        struct mount *next;
                        if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
                                return first_slave(m);
                        next = next_peer(m);
                        if (m->mnt_group_id == origin->mnt_group_id) {
                                if (next == origin)
                                        return NULL;
                        } else if (m->mnt_slave.next != &next->mnt_slave)
                                break;
                        m = next;
                }
                /* m is the last peer */
                while (1) {
                        struct mount *master = m->mnt_master;
                        if (m->mnt_slave.next != &master->mnt_slave_list)
                                return next_slave(m);
                        m = next_peer(master);
                        if (master->mnt_group_id == origin->mnt_group_id)
                                break;
                        if (master->mnt_slave.next == &m->mnt_slave)
                                break;
                        m = master;
                }
                if (m == origin)
                        return NULL;
        }
}

/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;

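/* true if @m1 and @m2 belong to the same (non-trivial) peer group */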
static inline bool peers(struct mount *m1, struct mount *m2)
{
        return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}

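/*
 * Create the propagated copy of last_source for the mount @m and
 * attach it at mp.  Peers of last_dest receive a shared copy; slaves
 * receive a slave copy.  last_dest and last_source are updated so the
 * next call can clone from the closest existing copy.
 */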
static int propagate_one(struct mount *m)
{
        struct mount *child;
        int type;
        /* skip ones added by this propagate_mnt() */
        if (IS_MNT_NEW(m))
                return 0;
        /* skip if mountpoint isn't covered by it */
        if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
                return 0;
        if (peers(m, last_dest)) {
                type = CL_MAKE_SHARED;
        } else {
                struct mount *n, *p;
                bool done;
                for (n = m; ; n = p) {
                        p = n->mnt_master;
                        if (p == dest_master || IS_MNT_MARKED(p))
                                break;
                }
                do {
                        struct mount *parent = last_source->mnt_parent;
                        if (last_source == first_source)
                                break;
                        done = parent->mnt_master == p;
                        if (done && peers(n, parent))
                                break;
                        last_source = last_source->mnt_master;
                } while (!done);

                type = CL_SLAVE;
                /* beginning of peer group among the slaves? */
                if (IS_MNT_SHARED(m))
                        type |= CL_MAKE_SHARED;
        }

        /* Notice when we are propagating across user namespaces */
        if (m->mnt_ns->user_ns != user_ns)
                type |= CL_UNPRIVILEGED;
        child = copy_tree(last_source, last_source->mnt.mnt_root, type);
        if (IS_ERR(child))
                return PTR_ERR(child);
        child->mnt.mnt_flags &= ~MNT_LOCKED;
        mnt_set_mountpoint(m, mp, child);
        last_dest = m;
        last_source = child;
        if (m->mnt_master != dest_master) {
                read_seqlock_excl(&mount_lock);
                SET_MNT_MARK(m->mnt_master);
                read_sequnlock_excl(&mount_lock);
        }
        hlist_add_head(&child->mnt_hash, list);
        return count_mounts(m->mnt_ns, child);
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at the
 * mountpoint 'dest_mp', and propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list: list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
                  struct mount *source_mnt, struct hlist_head *tree_list)
{
        struct mount *m, *n;
        int ret = 0;

        /*
         * we don't want to bother passing tons of arguments to
         * propagate_one(); everything is serialized by namespace_sem,
         * so globals will do just fine.
         */
        user_ns = current->nsproxy->mnt_ns->user_ns;
        last_dest = dest_mnt;
        first_source = source_mnt;
        last_source = source_mnt;
        mp = dest_mp;
        list = tree_list;
        dest_master = dest_mnt->mnt_master;

        /* all peers of dest_mnt, except dest_mnt itself */
        for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
                ret = propagate_one(n);
                if (ret)
                        goto out;
        }

        /* all slave groups */
        for (m = next_group(dest_mnt, dest_mnt); m;
                        m = next_group(m, dest_mnt)) {
                /* everything in that slave group */
                n = m;
                do {
                        ret = propagate_one(n);
                        if (ret)
                                goto out;
                        n = next_peer(n);
                } while (n != m);
        }
out:
        read_seqlock_excl(&mount_lock);
        hlist_for_each_entry(n, tree_list, mnt_hash) {
                m = n->mnt_parent;
                if (m->mnt_master != dest_mnt->mnt_master)
                        CLEAR_MNT_MARK(m->mnt_master);
        }
        read_sequnlock_excl(&mount_lock);
        return ret;
}

static struct mount *find_topper(struct mount *mnt)
{
        /* If there is exactly one mount covering mnt completely return it. */
        struct mount *child;

        if (!list_is_singular(&mnt->mnt_mounts))
                return NULL;

        child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
        if (child->mnt_mountpoint != mnt->mnt.mnt_root)
                return NULL;

        return child;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
        return mnt_get_count(mnt) > count;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
        struct mount *m, *child, *topper;
        struct mount *parent = mnt->mnt_parent;

        if (mnt == parent)
                return do_refcount_check(mnt, refcnt);

        /*
         * quickly check if the current mount can be unmounted.
         * If not, we don't have to go checking for all other
         * mounts
         */
        if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
                return 1;

        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
                int count = 1;
                child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
                if (!child)
                        continue;

                /* Is there exactly one mount on the child that covers
                 * it completely whose reference should be ignored?
                 */
                topper = find_topper(child);
                if (topper)
                        count += 1;
                else if (!list_empty(&child->mnt_mounts))
                        continue;

                if (do_refcount_check(child, count))
                        return 1;
        }
        return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *m, *child;

        BUG_ON(parent == mnt);

        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
                child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
                if (child)
                        child->mnt.mnt_flags &= ~MNT_LOCKED;
        }
}

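/*
 * Queue @mnt on @to_umount: mark it MNT_UMOUNT, drop its visitation
 * marker and unhook it from its parent's list of children and from
 * the umount walk lists.
 */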
static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
        CLEAR_MNT_MARK(mnt);
        mnt->mnt.mnt_flags |= MNT_UMOUNT;
        list_del_init(&mnt->mnt_child);
        list_del_init(&mnt->mnt_umounting);
        list_move_tail(&mnt->mnt_list, to_umount);
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static bool __propagate_umount(struct mount *mnt,
                               struct list_head *to_umount,
                               struct list_head *to_restore)
{
        bool progress = false;
        struct mount *child;

        /*
         * The state of the parent won't change if this mount is
         * already unmounted or marked as without children.
         */
        if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
                goto out;

        /* Verify topper is the only grandchild that has not been
         * speculatively unmounted.
         */
        list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
                if (child->mnt_mountpoint == mnt->mnt.mnt_root)
                        continue;
                if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
                        continue;
                /* Found a mounted child */
                goto children;
        }

        /* Mark mounts that can be unmounted if not locked */
        SET_MNT_MARK(mnt);
        progress = true;

        /* If a mount is without children and not locked umount it. */
        if (!IS_MNT_LOCKED(mnt)) {
                umount_one(mnt, to_umount);
        } else {
children:
                list_move_tail(&mnt->mnt_umounting, to_restore);
        }
out:
        return progress;
}

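/*
 * Dispose of the children of every mount already queued on @to_umount:
 * a covering topper is set aside on @to_restore, everything else is
 * unmounted as well.  Mounts appended here are themselves visited
 * later in the walk, so entire subtrees end up on @to_umount.
 */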
static void umount_list(struct list_head *to_umount,
                        struct list_head *to_restore)
{
        struct mount *mnt, *child, *tmp;
        list_for_each_entry(mnt, to_umount, mnt_list) {
                list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
                        /* topper? */
                        if (child->mnt_mountpoint == mnt->mnt.mnt_root)
                                list_move_tail(&child->mnt_umounting, to_restore);
                        else
                                umount_one(child, to_umount);
                }
        }
}

static void restore_mounts(struct list_head *to_restore)
{
        /* Restore mounts to a clean working state */
        while (!list_empty(to_restore)) {
                struct mount *mnt, *parent;
                struct mountpoint *mp;

                mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
                CLEAR_MNT_MARK(mnt);
                list_del_init(&mnt->mnt_umounting);

                /* Should this mount be reparented? */
                mp = mnt->mnt_mp;
                parent = mnt->mnt_parent;
                while (parent->mnt.mnt_flags & MNT_UMOUNT) {
                        mp = parent->mnt_mp;
                        parent = parent->mnt_parent;
                }
                if (parent != mnt->mnt_parent)
                        mnt_change_mountpoint(parent, mp, mnt);
        }
}

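/* drop the visitation markers accumulated during propagate_umount() */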
static void cleanup_umount_visitations(struct list_head *visited)
{
        while (!list_empty(visited)) {
                struct mount *mnt =
                        list_first_entry(visited, struct mount, mnt_umounting);
                list_del_init(&mnt->mnt_umounting);
        }
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
        struct mount *mnt;
        LIST_HEAD(to_restore);
        LIST_HEAD(to_umount);
        LIST_HEAD(visited);

        /* Find candidates for unmounting */
        list_for_each_entry_reverse(mnt, list, mnt_list) {
                struct mount *parent = mnt->mnt_parent;
                struct mount *m;

                /*
                 * If this mount has already been visited, it is known that
                 * its entire peer group and all of their slaves in the
                 * propagation tree for the mountpoint have already been
                 * visited and there is no need to visit them again.
                 */
                if (!list_empty(&mnt->mnt_umounting))
                        continue;

                list_add_tail(&mnt->mnt_umounting, &visited);
                for (m = propagation_next(parent, parent); m;
                                m = propagation_next(m, parent)) {
                        struct mount *child = __lookup_mnt(&m->mnt,
                                                           mnt->mnt_mountpoint);
                        if (!child)
                                continue;

                        if (!list_empty(&child->mnt_umounting)) {
                                /*
                                 * If the child has already been visited, it
                                 * is known that its entire peer group and
                                 * all of their slaves in the propagation
                                 * tree for the mountpoint have already been
                                 * visited and there is no need to visit
                                 * this subtree again.
                                 */
                                m = skip_propagation_subtree(m, parent);
                                continue;
                        } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
                                /*
                                 * We have come across a partially unmounted
                                 * mount in the list that has not been
                                 * visited yet. Remember it has been visited
                                 * and continue about our merry way.
                                 */
                                list_add_tail(&child->mnt_umounting, &visited);
                                continue;
                        }

                        /* Check the child and parents while progress is made */
                        while (__propagate_umount(child,
                                                  &to_umount, &to_restore)) {
                                /* Is the parent a umount candidate? */
                                child = child->mnt_parent;
                                if (list_empty(&child->mnt_umounting))
                                        break;
                        }
                }
        }

        umount_list(&to_umount, &to_restore);
        restore_mounts(&to_restore);
        cleanup_umount_visitations(&visited);
        list_splice_tail(&to_umount, list);

        return 0;
}

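/*
 * Propagate a remount's filesystem-private mount data to every mount
 * that receives propagation from @mnt's parent, using the superblock's
 * ->copy_mnt_data() hook.  A no-op for filesystems that do not provide
 * the hook.
 */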
void propagate_remount(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *p, *m;
        struct super_block *sb = mnt->mnt.mnt_sb;

        if (!sb->s_op->copy_mnt_data)
                return;
        for (p = propagation_next(parent, parent); p;
                        p = propagation_next(p, parent)) {
                m = __lookup_mnt(&p->mnt, mnt->mnt_mountpoint);
                if (m)
                        sb->s_op->copy_mnt_data(m->mnt.data, mnt->mnt.data);
        }
}