summaryrefslogtreecommitdiff
path: root/fs/namei.c (plain)
blob: 88eb41cdd63983af96c4aa8a6996b7274f7dc632
1/*
2 * linux/fs/namei.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * Some corrections by tytso.
9 */
10
11/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
12 * lookup logic.
13 */
14/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
15 */
16
17#include <linux/init.h>
18#include <linux/export.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/fs.h>
22#include <linux/namei.h>
23#include <linux/pagemap.h>
24#include <linux/fsnotify.h>
25#include <linux/personality.h>
26#include <linux/security.h>
27#include <linux/ima.h>
28#include <linux/syscalls.h>
29#include <linux/mount.h>
30#include <linux/audit.h>
31#include <linux/capability.h>
32#include <linux/file.h>
33#include <linux/fcntl.h>
34#include <linux/device_cgroup.h>
35#include <linux/fs_struct.h>
36#include <linux/posix_acl.h>
37#include <linux/hash.h>
38#include <linux/bitops.h>
39#include <linux/init_task.h>
40#include <asm/uaccess.h>
41
42#include "internal.h"
43#include "mount.h"
44
45/* [Feb-1997 T. Schoebel-Theuer]
46 * Fundamental changes in the pathname lookup mechanisms (namei)
47 * were necessary because of omirr. The reason is that omirr needs
48 * to know the _real_ pathname, not the user-supplied one, in case
49 * of symlinks (and also when transname replacements occur).
50 *
51 * The new code replaces the old recursive symlink resolution with
52 * an iterative one (in case of non-nested symlink chains). It does
53 * this with calls to <fs>_follow_link().
54 * As a side effect, dir_namei(), _namei() and follow_link() are now
55 * replaced with a single function lookup_dentry() that can handle all
56 * the special cases of the former code.
57 *
58 * With the new dcache, the pathname is stored at each inode, at least as
59 * long as the refcount of the inode is positive. As a side effect, the
60 * size of the dcache depends on the inode cache and thus is dynamic.
61 *
62 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
63 * resolution to correspond with current state of the code.
64 *
65 * Note that the symlink resolution is not *completely* iterative.
66 * There is still a significant amount of tail- and mid- recursion in
67 * the algorithm. Also, note that <fs>_readlink() is not used in
68 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
69 * may return different results than <fs>_follow_link(). Many virtual
70 * filesystems (including /proc) exhibit this behavior.
71 */
72
73/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
74 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
75 * and the name already exists in form of a symlink, try to create the new
76 * name indicated by the symlink. The old code always complained that the
77 * name already exists, due to not following the symlink even if its target
78 * is nonexistent. The new semantics affects also mknod() and link() when
79 * the name is a symlink pointing to a non-existent name.
80 *
81 * I don't know which semantics is the right one, since I have no access
82 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
83 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
84 * "old" one. Personally, I think the new semantics is much more logical.
85 * Note that "ln old new" where "new" is a symlink pointing to a non-existing
86 * file does succeed in both HP-UX and SunOs, but not in Solaris
87 * and in the old Linux semantics.
88 */
89
90/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
91 * semantics. See the comments in "open_namei" and "do_link" below.
92 *
93 * [10-Sep-98 Alan Modra] Another symlink change.
94 */
95
96/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
97 * inside the path - always follow.
98 * in the last component in creation/removal/renaming - never follow.
99 * if LOOKUP_FOLLOW passed - follow.
100 * if the pathname has trailing slashes - follow.
101 * otherwise - don't follow.
102 * (applied in that order).
103 *
104 * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
105 * restored for 2.4. This is the last surviving part of old 4.2BSD bug.
106 * During the 2.4 we need to fix the userland stuff depending on it -
107 * hopefully we will be able to get rid of that wart in 2.5. So far only
108 * XEmacs seems to be relying on it...
109 */
110/*
111 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
112 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
113 * any extra contention...
114 */
115
116/* In order to reduce some races, while at the same time doing additional
117 * checking and hopefully speeding things up, we copy filenames to the
118 * kernel data space before using them..
119 *
120 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
121 * PATH_MAX includes the nul terminator --RR.
122 */
123
124#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
125
/*
 * getname_flags - copy a user-space pathname into kernel memory
 * @filename: user pointer to the pathname
 * @flags: lookup flags; LOOKUP_EMPTY permits a zero-length name
 * @empty: if non-NULL, set to 1 when the user string was empty
 *
 * Returns a refcounted struct filename, or an ERR_PTR on failure.
 * Pairs with putname().
 */
struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
{
	struct filename *result;
	char *kname;
	int len;

	/* Reuse a name the audit subsystem already copied in, if any. */
	result = audit_reusename(filename);
	if (result)
		return result;

	result = __getname();
	if (unlikely(!result))
		return ERR_PTR(-ENOMEM);

	/*
	 * First, try to embed the struct filename inside the names_cache
	 * allocation
	 */
	kname = (char *)result->iname;
	result->name = kname;

	len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
	if (unlikely(len < 0)) {
		__putname(result);
		return ERR_PTR(len);
	}

	/*
	 * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
	 * separate struct filename so we can dedicate the entire
	 * names_cache allocation for the pathname, and re-do the copy from
	 * userland.
	 */
	if (unlikely(len == EMBEDDED_NAME_MAX)) {
		const size_t size = offsetof(struct filename, iname[1]);
		/* kname now takes over the whole names_cache buffer */
		kname = (char *)result;

		/*
		 * size is chosen that way we to guarantee that
		 * result->iname[0] is within the same object and that
		 * kname can't be equal to result->iname, no matter what.
		 */
		result = kzalloc(size, GFP_KERNEL);
		if (unlikely(!result)) {
			__putname(kname);
			return ERR_PTR(-ENOMEM);
		}
		result->name = kname;
		len = strncpy_from_user(kname, filename, PATH_MAX);
		if (unlikely(len < 0)) {
			__putname(kname);
			kfree(result);
			return ERR_PTR(len);
		}
		if (unlikely(len == PATH_MAX)) {
			__putname(kname);
			kfree(result);
			return ERR_PTR(-ENAMETOOLONG);
		}
	}

	result->refcnt = 1;
	/* The empty path is special. */
	if (unlikely(!len)) {
		if (empty)
			*empty = 1;
		if (!(flags & LOOKUP_EMPTY)) {
			putname(result);
			return ERR_PTR(-ENOENT);
		}
	}

	result->uptr = filename;
	result->aname = NULL;
	audit_getname(result);
	return result;
}
204
205struct filename *
206getname(const char __user * filename)
207{
208 return getname_flags(filename, 0, NULL);
209}
210
/*
 * getname_kernel - wrap an in-kernel pathname in a struct filename
 * @filename: NUL-terminated kernel string
 *
 * Like getname_flags() but the source string is already in kernel space,
 * so there is no user copy and no ->uptr. Returns ERR_PTR on failure.
 */
struct filename *
getname_kernel(const char * filename)
{
	struct filename *result;
	int len = strlen(filename) + 1;

	result = __getname();
	if (unlikely(!result))
		return ERR_PTR(-ENOMEM);

	if (len <= EMBEDDED_NAME_MAX) {
		/* short name: store it in the embedded iname[] buffer */
		result->name = (char *)result->iname;
	} else if (len <= PATH_MAX) {
		/* long name: use the whole names_cache buffer for the string
		 * and a small separate allocation for the struct itself */
		const size_t size = offsetof(struct filename, iname[1]);
		struct filename *tmp;

		tmp = kmalloc(size, GFP_KERNEL);
		if (unlikely(!tmp)) {
			__putname(result);
			return ERR_PTR(-ENOMEM);
		}
		tmp->name = (char *)result;
		result = tmp;
	} else {
		__putname(result);
		return ERR_PTR(-ENAMETOOLONG);
	}
	memcpy((char *)result->name, filename, len);
	result->uptr = NULL;
	result->aname = NULL;
	result->refcnt = 1;
	audit_getname(result);

	return result;
}
246
/*
 * putname - drop one reference to a struct filename, freeing on the last.
 * Counterpart of getname()/getname_kernel().
 */
void putname(struct filename *name)
{
	BUG_ON(name->refcnt <= 0);

	if (--name->refcnt > 0)
		return;

	/* name != iname means the string lives in its own names_cache
	 * buffer and the struct was kmalloc'ed separately */
	if (name->name != name->iname) {
		__putname(name->name);
		kfree(name);
	} else
		__putname(name);
}
260
/*
 * check_acl - evaluate POSIX ACLs for @mask.
 * Returns -EAGAIN when no ACL applies (caller falls back to mode bits),
 * -ECHILD when rcu-walk cannot proceed without blocking.
 */
static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl *acl;

	if (mask & MAY_NOT_BLOCK) {
		/* rcu-walk: only an already-cached ACL may be consulted */
		acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
		if (!acl)
			return -EAGAIN;
		/* no ->get_acl() calls in RCU mode... */
		if (is_uncached_acl(acl))
			return -ECHILD;
		return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
	}

	acl = get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);
		return error;
	}
#endif

	return -EAGAIN;
}
288
289/*
290 * This does the basic permission checking
291 */
static int acl_permission_check(struct inode *inode, int mask)
{
	unsigned int mode = inode->i_mode;

	/* Owner: use the "user" permission bits */
	if (likely(uid_eq(current_fsuid(), inode->i_uid)))
		mode >>= 6;
	else {
		/* ACLs only matter if the group bits could grant anything */
		if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
			int error = check_acl(inode, mask);
			if (error != -EAGAIN)
				return error;
		}

		/* Group member: use the "group" permission bits */
		if (in_group_p(inode->i_gid))
			mode >>= 3;
	}

	/*
	 * If the DACs are ok we don't need any capability check.
	 */
	if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
		return 0;
	return -EACCES;
}
316
317/**
318 * generic_permission - check for access rights on a Posix-like filesystem
319 * @inode: inode to check access rights for
320 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
321 *
322 * Used to check for read/write/execute permissions on a file.
323 * We use "fsuid" for this, letting us set arbitrary permissions
324 * for filesystem access without changing the "normal" uids which
325 * are used for other things.
326 *
327 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
328 * request cannot be satisfied (eg. requires blocking or too much complexity).
329 * It would then be called again in ref-walk mode.
330 */
int generic_permission(struct inode *inode, int mask)
{
	int ret;

	/*
	 * Do the basic permission checks.
	 */
	ret = acl_permission_check(inode, mask);
	if (ret != -EACCES)
		return ret;

	if (S_ISDIR(inode->i_mode)) {
		/* DACs are overridable for directories */
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
			return 0;
		/* read/search (but not write) also allowed with
		 * CAP_DAC_READ_SEARCH */
		if (!(mask & MAY_WRITE))
			if (capable_wrt_inode_uidgid(inode,
						     CAP_DAC_READ_SEARCH))
				return 0;
		return -EACCES;
	}
	/*
	 * Read/write DACs are always overridable.
	 * Executable DACs are overridable when there is
	 * at least one exec bit set.
	 */
	if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
			return 0;

	/*
	 * Searching includes executable on directories, else just read.
	 */
	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (mask == MAY_READ)
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
			return 0;

	return -EACCES;
}
371EXPORT_SYMBOL(generic_permission);
372
373/*
374 * We _really_ want to just do "generic_permission()" without
375 * even looking at the inode->i_op values. So we keep a cache
376 * flag in inode->i_opflags, that says "this has not special
377 * permission function, use the fast case".
378 */
static inline int do_inode_permission(struct vfsmount *mnt, struct inode *inode, int mask)
{
	if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
		/* mount-aware ->permission2() takes precedence when a mnt
		 * was supplied */
		if (likely(mnt && inode->i_op->permission2))
			return inode->i_op->permission2(mnt, inode, mask);
		if (likely(inode->i_op->permission))
			return inode->i_op->permission(inode, mask);

		/* This gets set once for the inode lifetime */
		spin_lock(&inode->i_lock);
		inode->i_opflags |= IOP_FASTPERM;
		spin_unlock(&inode->i_lock);
	}
	/* fast path: no per-fs permission hook */
	return generic_permission(inode, mask);
}
394
395/**
 * __inode_permission2 - Check for access rights to a given inode
 * @mnt: Mount the inode was found on (may be NULL)
 * @inode: Inode to check permission on
398 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
399 *
400 * Check for read/write/execute permissions on an inode.
401 *
402 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
403 *
404 * This does not check for a read-only file system. You probably want
405 * inode_permission().
406 */
int __inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
{
	int retval;

	if (unlikely(mask & MAY_WRITE)) {
		/*
		 * Nobody gets write access to an immutable file.
		 */
		if (IS_IMMUTABLE(inode))
			return -EPERM;

		/*
		 * Updating mtime will likely cause i_uid and i_gid to be
		 * written back improperly if their true value is unknown
		 * to the vfs.
		 */
		if (HAS_UNMAPPED_ID(inode))
			return -EACCES;
	}

	/* DAC / per-fs permission check */
	retval = do_inode_permission(mnt, inode, mask);
	if (retval)
		return retval;

	/* device cgroup restrictions (device nodes) */
	retval = devcgroup_inode_permission(inode, mask);
	if (retval)
		return retval;

	/* finally, the LSM hook */
	retval = security_inode_permission(inode, mask);
	return retval;
}
438EXPORT_SYMBOL(__inode_permission2);
439
440int __inode_permission(struct inode *inode, int mask)
441{
442 return __inode_permission2(NULL, inode, mask);
443}
444EXPORT_SYMBOL(__inode_permission);
445
446/**
447 * sb_permission - Check superblock-level permissions
448 * @sb: Superblock of inode to check permission on
449 * @inode: Inode to check permission on
450 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
451 *
452 * Separate out file-system wide checks from inode-specific permission checks.
453 */
454static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
455{
456 if (unlikely(mask & MAY_WRITE)) {
457 umode_t mode = inode->i_mode;
458
459 /* Nobody gets write access to a read-only fs. */
460 if ((sb->s_flags & MS_RDONLY) &&
461 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
462 return -EROFS;
463 }
464 return 0;
465}
466
467/**
 * inode_permission2 - Check for access rights to a given inode
 * @mnt: Mount the inode was found on (may be NULL)
 * @inode: Inode to check permission on
470 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
471 *
472 * Check for read/write/execute permissions on an inode. We use fs[ug]id for
473 * this, letting us set arbitrary permissions for filesystem access without
474 * changing the "normal" UIDs which are used for other things.
475 *
476 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
477 */
478int inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
479{
480 int retval;
481
482 retval = sb_permission(inode->i_sb, inode, mask);
483 if (retval)
484 return retval;
485 return __inode_permission2(mnt, inode, mask);
486}
487EXPORT_SYMBOL(inode_permission2);
488
489int inode_permission(struct inode *inode, int mask)
490{
491 return inode_permission2(NULL, inode, mask);
492}
493EXPORT_SYMBOL(inode_permission);
494
495/**
496 * path_get - get a reference to a path
497 * @path: path to get the reference to
498 *
499 * Given a path increment the reference count to the dentry and the vfsmount.
500 */
void path_get(const struct path *path)
{
	/* mount ref first; path_put() drops in the opposite order */
	mntget(path->mnt);
	dget(path->dentry);
}
506EXPORT_SYMBOL(path_get);
507
508/**
509 * path_put - put a reference to a path
510 * @path: path to put the reference to
511 *
512 * Given a path decrement the reference count to the dentry and the vfsmount.
513 */
void path_put(const struct path *path)
{
	/* dentry ref first; reverse of the order taken in path_get() */
	dput(path->dentry);
	mntput(path->mnt);
}
519EXPORT_SYMBOL(path_put);
520
/* Links kept on the embedded stack before falling back to kmalloc. */
#define EMBEDDED_LEVELS 2
/*
 * Per-walk state for pathname resolution. One of these lives on the
 * caller's stack for each nested walk; current->nameidata points at the
 * innermost one (see set_nameidata()/restore_nameidata()).
 */
struct nameidata {
	struct path	path;		/* current position in the walk */
	struct qstr	last;		/* last component parsed */
	struct path	root;		/* root used for absolute symlinks */
	struct inode	*inode; /* path.dentry.d_inode */
	unsigned int	flags;		/* LOOKUP_* flags */
	unsigned	seq, m_seq;	/* seqcounts for rcu-walk validation */
	int		last_type;	/* LAST_NORM/LAST_BIND/... of "last" */
	unsigned	depth;		/* number of symlinks on the stack */
	int		total_link_count; /* shared budget vs MAXSYMLINKS */
	struct saved {
		struct path link;	/* the symlink being traversed */
		struct delayed_call done; /* cleanup from ->get_link() */
		const char *name;	/* remaining body of the link */
		unsigned seq;		/* link's d_seq at pickup time */
	} *stack, internal[EMBEDDED_LEVELS];
	struct filename	*name;		/* the name being looked up */
	struct nameidata *saved;	/* outer walk, if nested */
	struct inode	*link_inode;	/* inode of the current symlink */
	unsigned	root_seq;	/* root.dentry->d_seq for rcu-walk */
	int		dfd;		/* starting dirfd (AT_FDCWD or fd) */
};
544
/* Install @p as the task's active nameidata, chaining to any outer walk. */
static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
{
	struct nameidata *old = current->nameidata;
	p->stack = p->internal;		/* start on the embedded link stack */
	p->dfd = dfd;
	p->name = name;
	/* nested walks share a single symlink budget */
	p->total_link_count = old ? old->total_link_count : 0;
	p->saved = old;
	current->nameidata = p;
}
555
/* Pop the innermost nameidata, propagating the link count to the outer one. */
static void restore_nameidata(void)
{
	struct nameidata *now = current->nameidata, *old = now->saved;

	current->nameidata = old;
	if (old)
		old->total_link_count = now->total_link_count;
	/* free a stack that __nd_alloc_stack() switched us to */
	if (now->stack != now->internal)
		kfree(now->stack);
}
566
567static int __nd_alloc_stack(struct nameidata *nd)
568{
569 struct saved *p;
570
571 if (nd->flags & LOOKUP_RCU) {
572 p= kmalloc(MAXSYMLINKS * sizeof(struct saved),
573 GFP_ATOMIC);
574 if (unlikely(!p))
575 return -ECHILD;
576 } else {
577 p= kmalloc(MAXSYMLINKS * sizeof(struct saved),
578 GFP_KERNEL);
579 if (unlikely(!p))
580 return -ENOMEM;
581 }
582 memcpy(p, nd->internal, sizeof(nd->internal));
583 nd->stack = p;
584 return 0;
585}
586
587/**
588 * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
 * @path: path to verify
590 *
591 * Rename can sometimes move a file or directory outside of a bind
592 * mount, path_connected allows those cases to be detected.
593 */
static bool path_connected(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct super_block *sb = mnt->mnt_sb;

	/* Bind mounts and multi-root filesystems can have disconnected paths */
	if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
		return true;

	/* otherwise walk ancestors to prove dentry is under mnt_root */
	return is_subdir(path->dentry, mnt->mnt_root);
}
605
/*
 * Ensure nd->stack has room for one more link. Only allocates once the
 * embedded levels are exhausted and we are still on the internal stack.
 */
static inline int nd_alloc_stack(struct nameidata *nd)
{
	if (likely(nd->depth != EMBEDDED_LEVELS))
		return 0;
	if (likely(nd->stack != nd->internal))
		return 0;
	return __nd_alloc_stack(nd);
}
614
615static void drop_links(struct nameidata *nd)
616{
617 int i = nd->depth;
618 while (i--) {
619 struct saved *last = nd->stack + i;
620 do_delayed_call(&last->done);
621 clear_delayed_call(&last->done);
622 }
623}
624
/* Abandon a walk: release everything nd holds, in either walk mode. */
static void terminate_walk(struct nameidata *nd)
{
	drop_links(nd);
	if (!(nd->flags & LOOKUP_RCU)) {
		/* ref-walk: real references were taken and must be dropped */
		int i;
		path_put(&nd->path);
		for (i = 0; i < nd->depth; i++)
			path_put(&nd->stack[i].link);
		if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
			path_put(&nd->root);
			nd->root.mnt = NULL;
		}
	} else {
		/* rcu-walk: nothing is pinned, just leave the RCU section */
		nd->flags &= ~LOOKUP_RCU;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		rcu_read_unlock();
	}
	nd->depth = 0;
}
645
646/* path_put is needed afterwards regardless of success or failure */
/* path_put is needed afterwards regardless of success or failure */
static bool legitimize_path(struct nameidata *nd,
			    struct path *path, unsigned seq)
{
	int res = __legitimize_mnt(path->mnt, nd->m_seq);
	if (unlikely(res)) {
		/* res > 0: mnt ref was not taken, so NULL it out for the
		 * later path_put(); dentry is always cleared on failure */
		if (res > 0)
			path->mnt = NULL;
		path->dentry = NULL;
		return false;
	}
	if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) {
		path->dentry = NULL;
		return false;
	}
	/* refs are held now; validity depends on the seqcount still matching */
	return !read_seqcount_retry(&path->dentry->d_seq, seq);
}
663
/* Grab real references on every saved link; false if any of them died. */
static bool legitimize_links(struct nameidata *nd)
{
	int i;
	for (i = 0; i < nd->depth; i++) {
		struct saved *last = nd->stack + i;
		if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
			drop_links(nd);
			/* keep entries 0..i so the caller's unwind can
			 * path_put() what was (partially) pinned */
			nd->depth = i + 1;
			return false;
		}
	}
	return true;
}
677
678/*
679 * Path walking has 2 modes, rcu-walk and ref-walk (see
680 * Documentation/filesystems/path-lookup.txt). In situations when we can't
681 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
682 * normal reference counts on dentries and vfsmounts to transition to ref-walk
683 * mode. Refcounts are grabbed at the last known good point before rcu-walk
684 * got stuck, so ref-walk may continue from there. If this is not successful
685 * (eg. a seqcount has changed), then failure is returned and it's up to caller
686 * to restart the path walk from the beginning in ref-walk mode.
687 */
688
689/**
690 * unlazy_walk - try to switch to ref-walk mode.
691 * @nd: nameidata pathwalk data
692 * @dentry: child of nd->path.dentry or NULL
693 * @seq: seq number to check dentry against
694 * Returns: 0 on success, -ECHILD on failure
695 *
696 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
697 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
698 * @nd or NULL. Must be called from rcu-walk context.
699 * Nothing should touch nameidata between unlazy_walk() failure and
700 * terminate_walk().
701 */
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq)
{
	struct dentry *parent = nd->path.dentry;

	BUG_ON(!(nd->flags & LOOKUP_RCU));

	nd->flags &= ~LOOKUP_RCU;
	/* pin the saved links first; on partial failure the labels below
	 * leave nd in a state terminate_walk() can clean up */
	if (unlikely(!legitimize_links(nd)))
		goto out2;
	if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
		goto out2;
	if (unlikely(!lockref_get_not_dead(&parent->d_lockref)))
		goto out1;

	/*
	 * For a negative lookup, the lookup sequence point is the parents
	 * sequence point, and it only needs to revalidate the parent dentry.
	 *
	 * For a positive lookup, we need to move both the parent and the
	 * dentry from the RCU domain to be properly refcounted. And the
	 * sequence number in the dentry validates *both* dentry counters,
	 * since we checked the sequence number of the parent after we got
	 * the child sequence number. So we know the parent must still
	 * be valid if the child sequence number is still valid.
	 */
	if (!dentry) {
		if (read_seqcount_retry(&parent->d_seq, nd->seq))
			goto out;
		BUG_ON(nd->inode != parent->d_inode);
	} else {
		if (!lockref_get_not_dead(&dentry->d_lockref))
			goto out;
		if (read_seqcount_retry(&dentry->d_seq, seq))
			goto drop_dentry;
	}

	/*
	 * Sequence counts matched. Now make sure that the root is
	 * still valid and get it if required.
	 */
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) {
			rcu_read_unlock();
			dput(dentry);
			return -ECHILD;
		}
	}

	rcu_read_unlock();
	return 0;

drop_dentry:
	rcu_read_unlock();
	dput(dentry);
	goto drop_root_mnt;
out2:
	nd->path.mnt = NULL;
out1:
	nd->path.dentry = NULL;
out:
	rcu_read_unlock();
drop_root_mnt:
	if (!(nd->flags & LOOKUP_ROOT))
		nd->root.mnt = NULL;
	return -ECHILD;
}
768
769static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq)
770{
771 if (unlikely(!legitimize_path(nd, link, seq))) {
772 drop_links(nd);
773 nd->depth = 0;
774 nd->flags &= ~LOOKUP_RCU;
775 nd->path.mnt = NULL;
776 nd->path.dentry = NULL;
777 if (!(nd->flags & LOOKUP_ROOT))
778 nd->root.mnt = NULL;
779 rcu_read_unlock();
780 } else if (likely(unlazy_walk(nd, NULL, 0)) == 0) {
781 return 0;
782 }
783 path_put(link);
784 return -ECHILD;
785}
786
/* Ask the filesystem whether a cached dentry is still valid. */
static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
{
	return dentry->d_op->d_revalidate(dentry, flags);
}
791
792/**
793 * complete_walk - successful completion of path walk
794 * @nd: pointer nameidata
795 *
796 * If we had been in RCU mode, drop out of it and legitimize nd->path.
797 * Revalidate the final result, unless we'd already done that during
798 * the path walk or the filesystem doesn't ask for it. Return 0 on
799 * success, -error on failure. In case of failure caller does not
800 * need to drop nd->path.
801 */
static int complete_walk(struct nameidata *nd)
{
	struct dentry *dentry = nd->path.dentry;
	int status;

	if (nd->flags & LOOKUP_RCU) {
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		/* grab real references before anyone looks at nd->path */
		if (unlikely(unlazy_walk(nd, NULL, 0)))
			return -ECHILD;
	}

	/* only jumped-to paths (mountpoints, /proc links...) need the
	 * weak revalidate below */
	if (likely(!(nd->flags & LOOKUP_JUMPED)))
		return 0;

	if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
		return 0;

	status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
	if (status > 0)
		return 0;

	if (!status)
		status = -ESTALE;

	return status;
}
829
/* Sample the task's fs root into nd->root (seq-based snapshot in rcu-walk). */
static void set_root(struct nameidata *nd)
{
	struct fs_struct *fs = current->fs;

	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;

		/* retry until fs->root was stable across the read */
		do {
			seq = read_seqcount_begin(&fs->seq);
			nd->root = fs->root;
			nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
		} while (read_seqcount_retry(&fs->seq, seq));
	} else {
		get_fs_root(fs, &nd->root);
	}
}
846
847static void path_put_conditional(struct path *path, struct nameidata *nd)
848{
849 dput(path->dentry);
850 if (path->mnt != nd->path.mnt)
851 mntput(path->mnt);
852}
853
/* Advance nd->path to @path, releasing the old refs in ref-walk mode only. */
static inline void path_to_nameidata(const struct path *path,
					struct nameidata *nd)
{
	if (!(nd->flags & LOOKUP_RCU)) {
		dput(nd->path.dentry);
		if (nd->path.mnt != path->mnt)
			mntput(nd->path.mnt);
	}
	/* in rcu-walk nothing was pinned, so just overwrite */
	nd->path.mnt = path->mnt;
	nd->path.dentry = path->dentry;
}
865
/* Restart the walk from nd->root (for absolute paths / absolute symlinks). */
static int nd_jump_root(struct nameidata *nd)
{
	if (nd->flags & LOOKUP_RCU) {
		struct dentry *d;
		nd->path = nd->root;
		d = nd->path.dentry;
		nd->inode = d->d_inode;
		nd->seq = nd->root_seq;
		/* root may have been invalidated since set_root() sampled it */
		if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
			return -ECHILD;
	} else {
		path_put(&nd->path);
		nd->path = nd->root;
		path_get(&nd->path);
		nd->inode = nd->path.dentry->d_inode;
	}
	nd->flags |= LOOKUP_JUMPED;
	return 0;
}
885
886/*
887 * Helper to directly jump to a known parsed path from ->get_link,
888 * caller must have taken a reference to path beforehand.
889 */
void nd_jump_link(struct path *path)
{
	struct nameidata *nd = current->nameidata;
	path_put(&nd->path);

	/* caller's reference on *path is transferred to nd->path */
	nd->path = *path;
	nd->inode = nd->path.dentry->d_inode;
	nd->flags |= LOOKUP_JUMPED;
}
899
/* Pop the topmost symlink off nd->stack, running its delayed cleanup. */
static inline void put_link(struct nameidata *nd)
{
	struct saved *last = nd->stack + --nd->depth;
	do_delayed_call(&last->done);
	/* in rcu-walk no reference was taken on the link */
	if (!(nd->flags & LOOKUP_RCU))
		path_put(&last->link);
}
907
/* Sysctl knobs (fs.protected_*) gating the may_follow_link(),
 * may_linkat() and may_create_in_sticky() hardening checks below. */
int sysctl_protected_symlinks __read_mostly = 0;
int sysctl_protected_hardlinks __read_mostly = 0;
int sysctl_protected_fifos __read_mostly;
int sysctl_protected_regular __read_mostly;
912
913/**
914 * may_follow_link - Check symlink following for unsafe situations
915 * @nd: nameidata pathwalk data
916 *
917 * In the case of the sysctl_protected_symlinks sysctl being enabled,
918 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
919 * in a sticky world-writable directory. This is to protect privileged
920 * processes from failing races against path names that may change out
921 * from under them by way of other users creating malicious symlinks.
922 * It will permit symlinks to be followed only when outside a sticky
923 * world-writable directory, or when the uid of the symlink and follower
924 * match, or when the directory owner matches the symlink's owner.
925 *
926 * Returns 0 if following the symlink is allowed, -ve on error.
927 */
static inline int may_follow_link(struct nameidata *nd)
{
	const struct inode *inode;
	const struct inode *parent;
	kuid_t puid;

	if (!sysctl_protected_symlinks)
		return 0;

	/* Allowed if owner and follower match. */
	inode = nd->link_inode;
	if (uid_eq(current_cred()->fsuid, inode->i_uid))
		return 0;

	/* Allowed if parent directory not sticky and world-writable. */
	parent = nd->inode;
	if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
		return 0;

	/* Allowed if parent directory and link owner match. */
	puid = parent->i_uid;
	if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
		return 0;

	/* denial path may block (audit); retry in ref-walk mode */
	if (nd->flags & LOOKUP_RCU)
		return -ECHILD;

	audit_log_link_denied("follow_link", &nd->stack[0].link);
	return -EACCES;
}
958
959/**
960 * safe_hardlink_source - Check for safe hardlink conditions
961 * @inode: the source inode to hardlink from
962 *
963 * Return false if at least one of the following conditions:
964 * - inode is not a regular file
965 * - inode is setuid
966 * - inode is setgid and group-exec
967 * - access failure for read and write
968 *
969 * Otherwise returns true.
970 */
971static bool safe_hardlink_source(struct inode *inode)
972{
973 umode_t mode = inode->i_mode;
974
975 /* Special files should not get pinned to the filesystem. */
976 if (!S_ISREG(mode))
977 return false;
978
979 /* Setuid files should not get pinned to the filesystem. */
980 if (mode & S_ISUID)
981 return false;
982
983 /* Executable setgid files should not get pinned to the filesystem. */
984 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
985 return false;
986
987 /* Hardlinking to unreadable or unwritable sources is dangerous. */
988 if (inode_permission(inode, MAY_READ | MAY_WRITE))
989 return false;
990
991 return true;
992}
993
994/**
995 * may_linkat - Check permissions for creating a hardlink
996 * @link: the source to hardlink from
997 *
998 * Block hardlink when all of:
999 * - sysctl_protected_hardlinks enabled
1000 * - fsuid does not match inode
1001 * - hardlink source is unsafe (see safe_hardlink_source() above)
1002 * - not CAP_FOWNER in a namespace with the inode owner uid mapped
1003 *
1004 * Returns 0 if successful, -ve on error.
1005 */
static int may_linkat(struct path *link)
{
	struct inode *inode;

	if (!sysctl_protected_hardlinks)
		return 0;

	inode = link->dentry->d_inode;

	/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
	 * otherwise, it must be a safe source.
	 */
	if (inode_owner_or_capable(inode) || safe_hardlink_source(inode))
		return 0;

	audit_log_link_denied("linkat", link);
	return -EPERM;
}
1024
1025/**
1026 * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
1027 * should be allowed, or not, on files that already
1028 * exist.
1029 * @dir: the sticky parent directory
1030 * @inode: the inode of the file to open
1031 *
1032 * Block an O_CREAT open of a FIFO (or a regular file) when:
1033 * - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
1034 * - the file already exists
1035 * - we are in a sticky directory
1036 * - we don't own the file
1037 * - the owner of the directory doesn't own the file
1038 * - the directory is world writable
1039 * If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
1040 * the directory doesn't have to be world writable: being group writable will
1041 * be enough.
1042 *
1043 * Returns 0 if the open is allowed, -ve on error.
1044 */
static int may_create_in_sticky(struct dentry * const dir,
				struct inode * const inode)
{
	/* fast-path exits: protection off for this file type, directory not
	 * sticky, or the opener/directory owner already owns the file */
	if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
	    (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
	    likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
	    uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
	    uid_eq(current_fsuid(), inode->i_uid))
		return 0;

	/* deny for world-writable dirs, or group-writable dirs at level 2 */
	if (likely(dir->d_inode->i_mode & 0002) ||
	    (dir->d_inode->i_mode & 0020 &&
	     ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
	      (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
		return -EACCES;
	}
	return 0;
}
1063
/*
 * Pick up the body of the topmost symlink on nd->stack. Returns the
 * string to walk, NULL for an empty remainder, or an ERR_PTR.
 */
static __always_inline
const char *get_link(struct nameidata *nd)
{
	struct saved *last = nd->stack + nd->depth - 1;
	struct dentry *dentry = last->link.dentry;
	struct inode *inode = nd->link_inode;
	int error;
	const char *res;

	if (!(nd->flags & LOOKUP_RCU)) {
		touch_atime(&last->link);
		cond_resched();
	} else if (atime_needs_update_rcu(&last->link, inode)) {
		/* atime update may block: drop to ref-walk first */
		if (unlikely(unlazy_walk(nd, NULL, 0)))
			return ERR_PTR(-ECHILD);
		touch_atime(&last->link);
	}

	error = security_inode_follow_link(dentry, inode,
					   nd->flags & LOOKUP_RCU);
	if (unlikely(error))
		return ERR_PTR(error);

	nd->last_type = LAST_BIND;
	/* fast case: body cached in ->i_link, no fs callback needed */
	res = inode->i_link;
	if (!res) {
		const char * (*get)(struct dentry *, struct inode *,
				struct delayed_call *);
		get = inode->i_op->get_link;
		if (nd->flags & LOOKUP_RCU) {
			res = get(NULL, inode, &last->done);
			/* -ECHILD from the fs: retry in ref-walk mode */
			if (res == ERR_PTR(-ECHILD)) {
				if (unlikely(unlazy_walk(nd, NULL, 0)))
					return ERR_PTR(-ECHILD);
				res = get(dentry, inode, &last->done);
			}
		} else {
			res = get(dentry, inode, &last->done);
		}
		if (IS_ERR_OR_NULL(res))
			return res;
	}
	if (*res == '/') {
		/* absolute link: restart from root, skip the slashes */
		if (!nd->root.mnt)
			set_root(nd);
		if (unlikely(nd_jump_root(nd)))
			return ERR_PTR(-ECHILD);
		while (unlikely(*++res == '/'))
			;
	}
	if (!*res)
		res = NULL;
	return res;
}
1118
1119/*
1120 * follow_up - Find the mountpoint of path's vfsmount
1121 *
1122 * Given a path, find the mountpoint of its source file system.
1123 * Replace @path with the path of the mountpoint in the parent mount.
1124 * Up is towards /.
1125 *
1126 * Return 1 if we went up a level and 0 if we were already at the
1127 * root.
1128 */
1129int follow_up(struct path *path)
1130{
1131 struct mount *mnt = real_mount(path->mnt);
1132 struct mount *parent;
1133 struct dentry *mountpoint;
1134
1135 read_seqlock_excl(&mount_lock);
1136 parent = mnt->mnt_parent;
1137 if (parent == mnt) {
1138 read_sequnlock_excl(&mount_lock);
1139 return 0;
1140 }
1141 mntget(&parent->mnt);
1142 mountpoint = dget(mnt->mnt_mountpoint);
1143 read_sequnlock_excl(&mount_lock);
1144 dput(path->dentry);
1145 path->dentry = mountpoint;
1146 mntput(path->mnt);
1147 path->mnt = &parent->mnt;
1148 return 1;
1149}
1150EXPORT_SYMBOL(follow_up);
1151
1152/*
1153 * Perform an automount
1154 * - return -EISDIR to tell follow_managed() to stop and return the path we
1155 * were called with.
1156 */
1157static int follow_automount(struct path *path, struct nameidata *nd,
1158 bool *need_mntput)
1159{
1160 struct vfsmount *mnt;
1161 int err;
1162
1163 if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
1164 return -EREMOTE;
1165
1166 /* We don't want to mount if someone's just doing a stat -
1167 * unless they're stat'ing a directory and appended a '/' to
1168 * the name.
1169 *
1170 * We do, however, want to mount if someone wants to open or
1171 * create a file of any type under the mountpoint, wants to
1172 * traverse through the mountpoint or wants to open the
1173 * mounted directory. Also, autofs may mark negative dentries
1174 * as being automount points. These will need the attentions
1175 * of the daemon to instantiate them before they can be used.
1176 */
1177 if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
1178 LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
1179 path->dentry->d_inode)
1180 return -EISDIR;
1181
1182 nd->total_link_count++;
1183 if (nd->total_link_count >= 40)
1184 return -ELOOP;
1185
1186 mnt = path->dentry->d_op->d_automount(path);
1187 if (IS_ERR(mnt)) {
1188 /*
1189 * The filesystem is allowed to return -EISDIR here to indicate
1190 * it doesn't want to automount. For instance, autofs would do
1191 * this so that its userspace daemon can mount on this dentry.
1192 *
1193 * However, we can only permit this if it's a terminal point in
1194 * the path being looked up; if it wasn't then the remainder of
1195 * the path is inaccessible and we should say so.
1196 */
1197 if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
1198 return -EREMOTE;
1199 return PTR_ERR(mnt);
1200 }
1201
1202 if (!mnt) /* mount collision */
1203 return 0;
1204
1205 if (!*need_mntput) {
1206 /* lock_mount() may release path->mnt on error */
1207 mntget(path->mnt);
1208 *need_mntput = true;
1209 }
1210 err = finish_automount(mnt, path);
1211
1212 switch (err) {
1213 case -EBUSY:
1214 /* Someone else made a mount here whilst we were busy */
1215 return 0;
1216 case 0:
1217 path_put(path);
1218 path->mnt = mnt;
1219 path->dentry = dget(mnt->mnt_root);
1220 return 0;
1221 default:
1222 return err;
1223 }
1224
1225}
1226
1227/*
1228 * Handle a dentry that is managed in some way.
1229 * - Flagged for transit management (autofs)
1230 * - Flagged as mountpoint
1231 * - Flagged as automount point
1232 *
1233 * This may only be called in refwalk mode.
1234 *
1235 * Serialization is taken care of in namespace.c
1236 */
1237static int follow_managed(struct path *path, struct nameidata *nd)
1238{
1239 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
1240 unsigned managed;
1241 bool need_mntput = false;
1242 int ret = 0;
1243
1244 /* Given that we're not holding a lock here, we retain the value in a
1245 * local variable for each dentry as we look at it so that we don't see
1246 * the components of that value change under us */
1247 while (managed = ACCESS_ONCE(path->dentry->d_flags),
1248 managed &= DCACHE_MANAGED_DENTRY,
1249 unlikely(managed != 0)) {
1250 /* Allow the filesystem to manage the transit without i_mutex
1251 * being held. */
1252 if (managed & DCACHE_MANAGE_TRANSIT) {
1253 BUG_ON(!path->dentry->d_op);
1254 BUG_ON(!path->dentry->d_op->d_manage);
1255 ret = path->dentry->d_op->d_manage(path->dentry, false);
1256 if (ret < 0)
1257 break;
1258 }
1259
1260 /* Transit to a mounted filesystem. */
1261 if (managed & DCACHE_MOUNTED) {
1262 struct vfsmount *mounted = lookup_mnt(path);
1263 if (mounted) {
1264 dput(path->dentry);
1265 if (need_mntput)
1266 mntput(path->mnt);
1267 path->mnt = mounted;
1268 path->dentry = dget(mounted->mnt_root);
1269 need_mntput = true;
1270 continue;
1271 }
1272
1273 /* Something is mounted on this dentry in another
1274 * namespace and/or whatever was mounted there in this
1275 * namespace got unmounted before lookup_mnt() could
1276 * get it */
1277 }
1278
1279 /* Handle an automount point */
1280 if (managed & DCACHE_NEED_AUTOMOUNT) {
1281 ret = follow_automount(path, nd, &need_mntput);
1282 if (ret < 0)
1283 break;
1284 continue;
1285 }
1286
1287 /* We didn't change the current path point */
1288 break;
1289 }
1290
1291 if (need_mntput && path->mnt == mnt)
1292 mntput(path->mnt);
1293 if (ret == -EISDIR || !ret)
1294 ret = 1;
1295 if (need_mntput)
1296 nd->flags |= LOOKUP_JUMPED;
1297 if (unlikely(ret < 0))
1298 path_put_conditional(path, nd);
1299 return ret;
1300}
1301
1302int follow_down_one(struct path *path)
1303{
1304 struct vfsmount *mounted;
1305
1306 mounted = lookup_mnt(path);
1307 if (mounted) {
1308 dput(path->dentry);
1309 mntput(path->mnt);
1310 path->mnt = mounted;
1311 path->dentry = dget(mounted->mnt_root);
1312 return 1;
1313 }
1314 return 0;
1315}
1316EXPORT_SYMBOL(follow_down_one);
1317
1318static inline int managed_dentry_rcu(struct dentry *dentry)
1319{
1320 return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
1321 dentry->d_op->d_manage(dentry, true) : 0;
1322}
1323
1324/*
1325 * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
1326 * we meet a managed dentry that would need blocking.
1327 */
1328static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
1329 struct inode **inode, unsigned *seqp)
1330{
1331 for (;;) {
1332 struct mount *mounted;
1333 /*
1334 * Don't forget we might have a non-mountpoint managed dentry
1335 * that wants to block transit.
1336 */
1337 switch (managed_dentry_rcu(path->dentry)) {
1338 case -ECHILD:
1339 default:
1340 return false;
1341 case -EISDIR:
1342 return true;
1343 case 0:
1344 break;
1345 }
1346
1347 if (!d_mountpoint(path->dentry))
1348 return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
1349
1350 mounted = __lookup_mnt(path->mnt, path->dentry);
1351 if (!mounted)
1352 break;
1353 path->mnt = &mounted->mnt;
1354 path->dentry = mounted->mnt.mnt_root;
1355 nd->flags |= LOOKUP_JUMPED;
1356 *seqp = read_seqcount_begin(&path->dentry->d_seq);
1357 /*
1358 * Update the inode too. We don't need to re-check the
1359 * dentry sequence number here after this d_inode read,
1360 * because a mount-point is always pinned.
1361 */
1362 *inode = path->dentry->d_inode;
1363 }
1364 return !read_seqretry(&mount_lock, nd->m_seq) &&
1365 !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
1366}
1367
/*
 * Handle ".." in rcu-walk mode: step to the parent dentry, or — when we
 * are sitting on a mount root — climb to the parent mount's mountpoint,
 * then descend through any mounts stacked on the result.  Returns 0 on
 * success, -ECHILD if a seqcount check failed (caller falls back to
 * ref-walk), or -ENOENT if the path turned out to be disconnected.
 */
static int follow_dotdot_rcu(struct nameidata *nd)
{
	struct inode *inode = nd->inode;

	while (1) {
		/* ".." at the walk's root stays at the root */
		if (path_equal(&nd->path, &nd->root))
			break;
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			struct dentry *old = nd->path.dentry;
			struct dentry *parent = old->d_parent;
			unsigned seq;

			/* sample parent first, then validate the child */
			inode = parent->d_inode;
			seq = read_seqcount_begin(&parent->d_seq);
			if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
				return -ECHILD;
			nd->path.dentry = parent;
			nd->seq = seq;
			if (unlikely(!path_connected(&nd->path)))
				return -ENOENT;
			break;
		} else {
			/* at a mount root: hop to where this mount hangs */
			struct mount *mnt = real_mount(nd->path.mnt);
			struct mount *mparent = mnt->mnt_parent;
			struct dentry *mountpoint = mnt->mnt_mountpoint;
			struct inode *inode2 = mountpoint->d_inode;
			unsigned seq = read_seqcount_begin(&mountpoint->d_seq);
			if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
				return -ECHILD;
			if (&mparent->mnt == nd->path.mnt)
				break;
			/* we know that mountpoint was pinned */
			nd->path.dentry = mountpoint;
			nd->path.mnt = &mparent->mnt;
			inode = inode2;
			nd->seq = seq;
		}
	}
	/* re-descend through anything mounted on the dentry we landed on */
	while (unlikely(d_mountpoint(nd->path.dentry))) {
		struct mount *mounted;
		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
		if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
			return -ECHILD;
		if (!mounted)
			break;
		nd->path.mnt = &mounted->mnt;
		nd->path.dentry = mounted->mnt.mnt_root;
		inode = nd->path.dentry->d_inode;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
	nd->inode = inode;
	return 0;
}
1421
1422/*
1423 * Follow down to the covering mount currently visible to userspace. At each
1424 * point, the filesystem owning that dentry may be queried as to whether the
1425 * caller is permitted to proceed or not.
1426 */
1427int follow_down(struct path *path)
1428{
1429 unsigned managed;
1430 int ret;
1431
1432 while (managed = ACCESS_ONCE(path->dentry->d_flags),
1433 unlikely(managed & DCACHE_MANAGED_DENTRY)) {
1434 /* Allow the filesystem to manage the transit without i_mutex
1435 * being held.
1436 *
1437 * We indicate to the filesystem if someone is trying to mount
1438 * something here. This gives autofs the chance to deny anyone
1439 * other than its daemon the right to mount on its
1440 * superstructure.
1441 *
1442 * The filesystem may sleep at this point.
1443 */
1444 if (managed & DCACHE_MANAGE_TRANSIT) {
1445 BUG_ON(!path->dentry->d_op);
1446 BUG_ON(!path->dentry->d_op->d_manage);
1447 ret = path->dentry->d_op->d_manage(
1448 path->dentry, false);
1449 if (ret < 0)
1450 return ret == -EISDIR ? 0 : ret;
1451 }
1452
1453 /* Transit to a mounted filesystem. */
1454 if (managed & DCACHE_MOUNTED) {
1455 struct vfsmount *mounted = lookup_mnt(path);
1456 if (!mounted)
1457 break;
1458 dput(path->dentry);
1459 mntput(path->mnt);
1460 path->mnt = mounted;
1461 path->dentry = dget(mounted->mnt_root);
1462 continue;
1463 }
1464
1465 /* Don't handle automount points here */
1466 break;
1467 }
1468 return 0;
1469}
1470EXPORT_SYMBOL(follow_down);
1471
1472/*
1473 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
1474 */
1475static void follow_mount(struct path *path)
1476{
1477 while (d_mountpoint(path->dentry)) {
1478 struct vfsmount *mounted = lookup_mnt(path);
1479 if (!mounted)
1480 break;
1481 dput(path->dentry);
1482 mntput(path->mnt);
1483 path->mnt = mounted;
1484 path->dentry = dget(mounted->mnt_root);
1485 }
1486}
1487
1488static int path_parent_directory(struct path *path)
1489{
1490 struct dentry *old = path->dentry;
1491 /* rare case of legitimate dget_parent()... */
1492 path->dentry = dget_parent(path->dentry);
1493 dput(old);
1494 if (unlikely(!path_connected(path)))
1495 return -ENOENT;
1496 return 0;
1497}
1498
1499static int follow_dotdot(struct nameidata *nd)
1500{
1501 while(1) {
1502 if (nd->path.dentry == nd->root.dentry &&
1503 nd->path.mnt == nd->root.mnt) {
1504 break;
1505 }
1506 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1507 int ret = path_parent_directory(&nd->path);
1508 if (ret)
1509 return ret;
1510 break;
1511 }
1512 if (!follow_up(&nd->path))
1513 break;
1514 }
1515 follow_mount(&nd->path);
1516 nd->inode = nd->path.dentry->d_inode;
1517 return 0;
1518}
1519
1520/*
1521 * This looks up the name in dcache and possibly revalidates the found dentry.
1522 * NULL is returned if the dentry does not exist in the cache.
1523 */
1524static struct dentry *lookup_dcache(const struct qstr *name,
1525 struct dentry *dir,
1526 unsigned int flags)
1527{
1528 struct dentry *dentry;
1529 int error;
1530
1531 dentry = d_lookup(dir, name);
1532 if (dentry) {
1533 if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
1534 error = d_revalidate(dentry, flags);
1535 if (unlikely(error <= 0)) {
1536 if (!error)
1537 d_invalidate(dentry);
1538 dput(dentry);
1539 return ERR_PTR(error);
1540 }
1541 }
1542 }
1543 return dentry;
1544}
1545
1546/*
1547 * Call i_op->lookup on the dentry. The dentry must be negative and
1548 * unhashed.
1549 *
1550 * dir->d_inode->i_mutex must be held
1551 */
1552static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
1553 unsigned int flags)
1554{
1555 struct dentry *old;
1556
1557 /* Don't create child dentry for a dead directory. */
1558 if (unlikely(IS_DEADDIR(dir))) {
1559 dput(dentry);
1560 return ERR_PTR(-ENOENT);
1561 }
1562
1563 old = dir->i_op->lookup(dir, dentry, flags);
1564 if (unlikely(old)) {
1565 dput(dentry);
1566 dentry = old;
1567 }
1568 return dentry;
1569}
1570
1571static struct dentry *__lookup_hash(const struct qstr *name,
1572 struct dentry *base, unsigned int flags)
1573{
1574 struct dentry *dentry = lookup_dcache(name, base, flags);
1575
1576 if (dentry)
1577 return dentry;
1578
1579 dentry = d_alloc(base, name);
1580 if (unlikely(!dentry))
1581 return ERR_PTR(-ENOMEM);
1582
1583 return lookup_real(base->d_inode, dentry, flags);
1584}
1585
/*
 * Look the current component up in the dcache without blocking.
 * Returns 1 on success (*path, *inode and *seqp are set), 0 if the
 * caller must fall back to lookup_slow(), or a -ve error (-ECHILD
 * means the RCU walk must be abandoned entirely).
 */
static int lookup_fast(struct nameidata *nd,
		       struct path *path, struct inode **inode,
		       unsigned *seqp)
{
	struct vfsmount *mnt = nd->path.mnt;
	struct dentry *dentry, *parent = nd->path.dentry;
	int status = 1;
	int err;

	/*
	 * Rename seqlock is not required here because in the off chance
	 * of a false negative due to a concurrent rename, the caller is
	 * going to fall back to non-racy lookup.
	 */
	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;
		bool negative;
		dentry = __d_lookup_rcu(parent, &nd->last, &seq);
		if (unlikely(!dentry)) {
			/* not cached: drop to ref-walk and let caller go slow */
			if (unlazy_walk(nd, NULL, 0))
				return -ECHILD;
			return 0;
		}

		/*
		 * This sequence count validates that the inode matches
		 * the dentry name information from lookup.
		 */
		*inode = d_backing_inode(dentry);
		negative = d_is_negative(dentry);
		if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
			return -ECHILD;

		/*
		 * This sequence count validates that the parent had no
		 * changes while we did the lookup of the dentry above.
		 *
		 * The memory barrier in read_seqcount_begin of child is
		 *  enough, we can use __read_seqcount_retry here.
		 */
		if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
			return -ECHILD;

		*seqp = seq;
		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
			status = d_revalidate(dentry, nd->flags);
		if (unlikely(status <= 0)) {
			/* need a blocking revalidate: leave RCU mode */
			if (unlazy_walk(nd, dentry, seq))
				return -ECHILD;
			if (status == -ECHILD)
				status = d_revalidate(dentry, nd->flags);
		} else {
			/*
			 * Note: do negative dentry check after revalidation in
			 * case that drops it.
			 */
			if (unlikely(negative))
				return -ENOENT;
			path->mnt = mnt;
			path->dentry = dentry;
			if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
				return 1;
			/* mount traversal needs to block: unlazy and retry below */
			if (unlazy_walk(nd, dentry, seq))
				return -ECHILD;
		}
	} else {
		dentry = __d_lookup(parent, &nd->last);
		if (unlikely(!dentry))
			return 0;
		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
			status = d_revalidate(dentry, nd->flags);
	}
	/* ref-walk tail: shared by the non-RCU path and RCU fallbacks above */
	if (unlikely(status <= 0)) {
		if (!status)
			d_invalidate(dentry);
		dput(dentry);
		return status;
	}
	if (unlikely(d_is_negative(dentry))) {
		dput(dentry);
		return -ENOENT;
	}

	path->mnt = mnt;
	path->dentry = dentry;
	err = follow_managed(path, nd);
	if (likely(err > 0))
		*inode = d_backing_inode(path->dentry);
	return err;
}
1676
/* Fast lookup failed, do it the slow way */
static struct dentry *lookup_slow(const struct qstr *name,
				  struct dentry *dir,
				  unsigned int flags)
{
	struct dentry *dentry = ERR_PTR(-ENOENT), *old;
	struct inode *inode = dir->d_inode;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

	/* shared lock suffices: ->lookup() only instantiates, never modifies */
	inode_lock_shared(inode);
	/* Don't go there if it's already dead */
	if (unlikely(IS_DEADDIR(inode)))
		goto out;
again:
	/*
	 * d_alloc_parallel() either allocates a new in-lookup dentry, or
	 * (after waiting out a concurrent lookup of the same name) returns
	 * the dentry someone else instantiated.
	 */
	dentry = d_alloc_parallel(dir, name, &wq);
	if (IS_ERR(dentry))
		goto out;
	if (unlikely(!d_in_lookup(dentry))) {
		/* somebody else beat us to it: just revalidate their result */
		if ((dentry->d_flags & DCACHE_OP_REVALIDATE) &&
		    !(flags & LOOKUP_NO_REVAL)) {
			int error = d_revalidate(dentry, flags);
			if (unlikely(error <= 0)) {
				if (!error) {
					/* stale: invalidate and retry from scratch */
					d_invalidate(dentry);
					dput(dentry);
					goto again;
				}
				dput(dentry);
				dentry = ERR_PTR(error);
			}
		}
	} else {
		/* we own the in-lookup dentry: ask the filesystem */
		old = inode->i_op->lookup(inode, dentry, flags);
		d_lookup_done(dentry);
		if (unlikely(old)) {
			dput(dentry);
			dentry = old;
		}
	}
out:
	inode_unlock_shared(inode);
	return dentry;
}
1720
1721static inline int may_lookup(struct nameidata *nd)
1722{
1723 if (nd->flags & LOOKUP_RCU) {
1724 int err = inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
1725 if (err != -ECHILD)
1726 return err;
1727 if (unlazy_walk(nd, NULL, 0))
1728 return -ECHILD;
1729 }
1730 return inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC);
1731}
1732
1733static inline int handle_dots(struct nameidata *nd, int type)
1734{
1735 if (type == LAST_DOTDOT) {
1736 if (!nd->root.mnt)
1737 set_root(nd);
1738 if (nd->flags & LOOKUP_RCU) {
1739 return follow_dotdot_rcu(nd);
1740 } else
1741 return follow_dotdot(nd);
1742 }
1743 return 0;
1744}
1745
/*
 * Push a symlink onto nd->stack so the walk can continue through its
 * target.  Consumes @link (on error it is dropped).  Returns 1 on
 * success (caller then uses get_link()), -ELOOP when MAXSYMLINKS is
 * exceeded, or another -ve error.
 */
static int pick_link(struct nameidata *nd, struct path *link,
		     struct inode *inode, unsigned seq)
{
	int error;
	struct saved *last;
	if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
		path_to_nameidata(link, nd);
		return -ELOOP;
	}
	if (!(nd->flags & LOOKUP_RCU)) {
		/* the stack entry needs its own mnt reference when it would
		 * otherwise share the one held for nd->path */
		if (link->mnt == nd->path.mnt)
			mntget(link->mnt);
	}
	error = nd_alloc_stack(nd);
	if (unlikely(error)) {
		if (error == -ECHILD) {
			/* allocation would block: legitimize refs, then retry */
			if (unlikely(unlazy_link(nd, link, seq)))
				return -ECHILD;
			error = nd_alloc_stack(nd);
		}
		if (error) {
			path_put(link);
			return error;
		}
	}

	last = nd->stack + nd->depth++;
	last->link = *link;
	clear_delayed_call(&last->done);
	nd->link_inode = inode;
	last->seq = seq;
	return 1;
}
1779
1780/*
1781 * Do we need to follow links? We _really_ want to be able
1782 * to do this check without having to look at inode->i_op,
1783 * so we keep a cache of "no, this doesn't need follow_link"
1784 * for the common case.
1785 */
1786static inline int should_follow_link(struct nameidata *nd, struct path *link,
1787 int follow,
1788 struct inode *inode, unsigned seq)
1789{
1790 if (likely(!d_is_symlink(link->dentry)))
1791 return 0;
1792 if (!follow)
1793 return 0;
1794 /* make sure that d_is_symlink above matches inode */
1795 if (nd->flags & LOOKUP_RCU) {
1796 if (read_seqcount_retry(&link->dentry->d_seq, seq))
1797 return -ECHILD;
1798 }
1799 return pick_link(nd, link, inode, seq);
1800}
1801
/* walk_component() flags: WALK_GET = follow a symlink component (via
 * should_follow_link/pick_link); WALK_PUT = drop the just-finished
 * symlink from nd->stack first. */
enum {WALK_GET = 1, WALK_PUT = 2};
1803
/*
 * Walk one component (nd->last) of the path: resolve it, cross any
 * mounts, and decide whether a symlink needs following.  Returns 0 on
 * a plain component (nd->path/inode/seq updated), 1 when a symlink was
 * pushed onto nd->stack (WALK_GET), or a -ve error.
 */
static int walk_component(struct nameidata *nd, int flags)
{
	struct path path;
	struct inode *inode;
	unsigned seq;
	int err;
	/*
	 * "." and ".." are special - ".." especially so because it has
	 * to be able to know about the current root directory and
	 * parent relationships.
	 */
	if (unlikely(nd->last_type != LAST_NORM)) {
		err = handle_dots(nd, nd->last_type);
		if (flags & WALK_PUT)
			put_link(nd);
		return err;
	}
	err = lookup_fast(nd, &path, &inode, &seq);
	if (unlikely(err <= 0)) {
		if (err < 0)
			return err;
		/* dcache miss: do the real filesystem lookup */
		path.dentry = lookup_slow(&nd->last, nd->path.dentry,
					  nd->flags);
		if (IS_ERR(path.dentry))
			return PTR_ERR(path.dentry);

		path.mnt = nd->path.mnt;
		err = follow_managed(&path, nd);
		if (unlikely(err < 0))
			return err;

		if (unlikely(d_is_negative(path.dentry))) {
			path_to_nameidata(&path, nd);
			return -ENOENT;
		}

		seq = 0; /* we are already out of RCU mode */
		inode = d_backing_inode(path.dentry);
	}

	if (flags & WALK_PUT)
		put_link(nd);
	err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
	if (unlikely(err))
		return err;
	path_to_nameidata(&path, nd);
	nd->inode = inode;
	nd->seq = seq;
	return 0;
}
1854
1855/*
1856 * We can do the critical dentry name comparison and hashing
1857 * operations one word at a time, but we are limited to:
1858 *
1859 * - Architectures with fast unaligned word accesses. We could
1860 * do a "get_unaligned()" if this helps and is sufficiently
1861 * fast.
1862 *
 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
 *   do not trap on the (extremely unlikely) case of a page
 *   crossing operation).
 *
 * - Furthermore, we need an efficient 64-bit compile for the
 *   64-bit case in order to generate the "number of bytes in
 *   the final mask". Again, that could be replaced with an
 *   efficient population count instruction or similar.
1871 */
1872#ifdef CONFIG_DCACHE_WORD_ACCESS
1873
1874#include <asm/word-at-a-time.h>
1875
1876#ifdef HASH_MIX
1877
1878/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
1879
1880#elif defined(CONFIG_64BIT)
1881/*
1882 * Register pressure in the mixing function is an issue, particularly
1883 * on 32-bit x86, but almost any function requires one state value and
1884 * one temporary. Instead, use a function designed for two state values
1885 * and no temporaries.
1886 *
1887 * This function cannot create a collision in only two iterations, so
1888 * we have two iterations to achieve avalanche. In those two iterations,
1889 * we have six layers of mixing, which is enough to spread one bit's
1890 * influence out to 2^6 = 64 state bits.
1891 *
1892 * Rotate constants are scored by considering either 64 one-bit input
1893 * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
1894 * probability of that delta causing a change to each of the 128 output
1895 * bits, using a sample of random initial states.
1896 *
1897 * The Shannon entropy of the computed probabilities is then summed
1898 * to produce a score. Ideally, any input change has a 50% chance of
1899 * toggling any given output bit.
1900 *
1901 * Mixing scores (in bits) for (12,45):
1902 * Input delta: 1-bit 2-bit
1903 * 1 round: 713.3 42542.6
1904 * 2 rounds: 2753.7 140389.8
1905 * 3 rounds: 5954.1 233458.2
1906 * 4 rounds: 7862.6 256672.2
1907 * Perfect: 8192 258048
1908 * (64*128) (64*63/2 * 128)
1909 */
/* One mixing round: fold the next input word 'a' into the (x, y) state */
#define HASH_MIX(x, y, a)	\
	(	x ^= (a),	\
	y ^= x,	x = rol64(x,12),\
	x += y,	y = rol64(y,45),\
	y *= 9			)

/*
 * Fold two longs into one 32-bit hash value.  This must be fast, but
 * latency isn't quite as critical, as there is a fair bit of additional
 * work done before the hash value is used.
 */
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
	/* two 64-bit golden-ratio multiplies; keep the top 32 bits */
	y ^= x * GOLDEN_RATIO_64;
	y *= GOLDEN_RATIO_64;
	return y >> 32;
}
1927
1928#else /* 32-bit case */
1929
1930/*
1931 * Mixing scores (in bits) for (7,20):
1932 * Input delta: 1-bit 2-bit
1933 * 1 round: 330.3 9201.6
1934 * 2 rounds: 1246.4 25475.4
1935 * 3 rounds: 1907.1 31295.1
1936 * 4 rounds: 2042.3 31718.6
1937 * Perfect: 2048 31744
1938 * (32*64) (32*31/2 * 64)
1939 */
/* One mixing round: fold the next input word 'a' into the (x, y) state */
#define HASH_MIX(x, y, a)	\
	(	x ^= (a),	\
	y ^= x,	x = rol32(x, 7),\
	x += y,	y = rol32(y,20),\
	y *= 9			)

/* 32-bit counterpart of fold_hash() for the CONFIG_64BIT branch above */
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
	/* Use arch-optimized multiply if one exists */
	return __hash_32(y ^ __hash_32(x));
}
1951
1952#endif
1953
1954/*
1955 * Return the hash of a string of known length. This is carfully
1956 * designed to match hash_name(), which is the more critical function.
1957 * In particular, we must end by hashing a final word containing 0..7
1958 * payload bytes, to match the way that hash_name() iterates until it
1959 * finds the delimiter after the name.
1960 */
1961unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
1962{
1963 unsigned long a, x = 0, y = (unsigned long)salt;
1964
1965 for (;;) {
1966 if (!len)
1967 goto done;
1968 a = load_unaligned_zeropad(name);
1969 if (len < sizeof(unsigned long))
1970 break;
1971 HASH_MIX(x, y, a);
1972 name += sizeof(unsigned long);
1973 len -= sizeof(unsigned long);
1974 }
1975 x ^= a & bytemask_from_count(len);
1976done:
1977 return fold_hash(x, y);
1978}
1979EXPORT_SYMBOL(full_name_hash);
1980
/* Return the "hash_len" (hash and length) of a null-terminated string */
u64 hashlen_string(const void *salt, const char *name)
{
	unsigned long a = 0, x = 0, y = (unsigned long)salt;
	unsigned long adata, mask, len;
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;

	/* enter mid-loop so the word is loaded before the first mix */
	len = 0;
	goto inside;

	do {
		HASH_MIX(x, y, a);
		len += sizeof(unsigned long);
inside:
		a = load_unaligned_zeropad(name+len);
	} while (!has_zero(a, &adata, &constants));

	/* mask off the bytes at and beyond the terminating NUL */
	adata = prep_zero_mask(a, adata, &constants);
	mask = create_zero_mask(adata);
	x ^= a & zero_bytemask(mask);

	return hashlen_create(fold_hash(x, y), len + find_zero(mask));
}
EXPORT_SYMBOL(hashlen_string);
2005
2006/*
2007 * Calculate the length and hash of the path component, and
2008 * return the "hash_len" as the result.
2009 */
2010static inline u64 hash_name(const void *salt, const char *name)
2011{
2012 unsigned long a = 0, b, x = 0, y = (unsigned long)salt;
2013 unsigned long adata, bdata, mask, len;
2014 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
2015
2016 len = 0;
2017 goto inside;
2018
2019 do {
2020 HASH_MIX(x, y, a);
2021 len += sizeof(unsigned long);
2022inside:
2023 a = load_unaligned_zeropad(name+len);
2024 b = a ^ REPEAT_BYTE('/');
2025 } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
2026
2027 adata = prep_zero_mask(a, adata, &constants);
2028 bdata = prep_zero_mask(b, bdata, &constants);
2029 mask = create_zero_mask(adata | bdata);
2030 x ^= a & zero_bytemask(mask);
2031
2032 return hashlen_create(fold_hash(x, y), len + find_zero(mask));
2033}
2034
2035#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
2036
/* Return the hash of a string of known length */
unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
{
	unsigned long hash = init_name_hash(salt);
	unsigned int i;

	for (i = 0; i < len; i++)
		hash = partial_name_hash((unsigned char)name[i], hash);
	return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
2046
2047/* Return the "hash_len" (hash and length) of a null-terminated string */
2048u64 hashlen_string(const void *salt, const char *name)
2049{
2050 unsigned long hash = init_name_hash(salt);
2051 unsigned long len = 0, c;
2052
2053 c = (unsigned char)*name;
2054 while (c) {
2055 len++;
2056 hash = partial_name_hash(c, hash);
2057 c = (unsigned char)name[len];
2058 }
2059 return hashlen_create(end_name_hash(hash), len);
2060}
2061EXPORT_SYMBOL(hashlen_string);
2062
2063/*
2064 * We know there's a real path component here of at least
2065 * one character.
2066 */
2067static inline u64 hash_name(const void *salt, const char *name)
2068{
2069 unsigned long hash = init_name_hash(salt);
2070 unsigned long len = 0, c;
2071
2072 c = (unsigned char)*name;
2073 do {
2074 len++;
2075 hash = partial_name_hash(c, hash);
2076 c = (unsigned char)name[len];
2077 } while (c && c != '/');
2078 return hashlen_create(end_name_hash(hash), len);
2079}
2080
2081#endif
2082
2083/*
2084 * Name resolution.
2085 * This is the basic name resolution function, turning a pathname into
2086 * the final dentry. We expect 'base' to be positive and a directory.
2087 *
2088 * Returns 0 and nd will have valid dentry and mnt on success.
2089 * Returns error and drops reference to input namei data on failure.
2090 */
2091static int link_path_walk(const char *name, struct nameidata *nd)
2092{
2093 int err;
2094
2095 while (*name=='/')
2096 name++;
2097 if (!*name)
2098 return 0;
2099
2100 /* At this point we know we have a real path component. */
2101 for(;;) {
2102 u64 hash_len;
2103 int type;
2104
2105 err = may_lookup(nd);
2106 if (err)
2107 return err;
2108
2109 hash_len = hash_name(nd->path.dentry, name);
2110
2111 type = LAST_NORM;
2112 if (name[0] == '.') switch (hashlen_len(hash_len)) {
2113 case 2:
2114 if (name[1] == '.') {
2115 type = LAST_DOTDOT;
2116 nd->flags |= LOOKUP_JUMPED;
2117 }
2118 break;
2119 case 1:
2120 type = LAST_DOT;
2121 }
2122 if (likely(type == LAST_NORM)) {
2123 struct dentry *parent = nd->path.dentry;
2124 nd->flags &= ~LOOKUP_JUMPED;
2125 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
2126 struct qstr this = { { .hash_len = hash_len }, .name = name };
2127 err = parent->d_op->d_hash(parent, &this);
2128 if (err < 0)
2129 return err;
2130 hash_len = this.hash_len;
2131 name = this.name;
2132 }
2133 }
2134
2135 nd->last.hash_len = hash_len;
2136 nd->last.name = name;
2137 nd->last_type = type;
2138
2139 name += hashlen_len(hash_len);
2140 if (!*name)
2141 goto OK;
2142 /*
2143 * If it wasn't NUL, we know it was '/'. Skip that
2144 * slash, and continue until no more slashes.
2145 */
2146 do {
2147 name++;
2148 } while (unlikely(*name == '/'));
2149 if (unlikely(!*name)) {
2150OK:
2151 /* pathname body, done */
2152 if (!nd->depth)
2153 return 0;
2154 name = nd->stack[nd->depth - 1].name;
2155 /* trailing symlink, done */
2156 if (!name)
2157 return 0;
2158 /* last component of nested symlink */
2159 err = walk_component(nd, WALK_GET | WALK_PUT);
2160 } else {
2161 err = walk_component(nd, WALK_GET);
2162 }
2163 if (err < 0)
2164 return err;
2165
2166 if (err) {
2167 const char *s = get_link(nd);
2168
2169 if (IS_ERR(s))
2170 return PTR_ERR(s);
2171 err = 0;
2172 if (unlikely(!s)) {
2173 /* jumped */
2174 put_link(nd);
2175 } else {
2176 nd->stack[nd->depth - 1].name = name;
2177 name = s;
2178 continue;
2179 }
2180 }
2181 if (unlikely(!d_can_lookup(nd->path.dentry))) {
2182 if (nd->flags & LOOKUP_RCU) {
2183 if (unlazy_walk(nd, NULL, 0))
2184 return -ECHILD;
2185 }
2186 return -ENOTDIR;
2187 }
2188 }
2189}
2190
/*
 * Initialise nameidata state for a path walk and return the name to walk.
 *
 * Chooses the starting point for the walk:
 *   - nd->root when the caller passed LOOKUP_ROOT,
 *   - the process root for an absolute name,
 *   - the cwd for a relative name with dfd == AT_FDCWD,
 *   - otherwise the directory referenced by nd->dfd.
 *
 * In RCU-walk mode (LOOKUP_RCU) this takes rcu_read_lock() and snapshots
 * the relevant seqcounts instead of taking references.
 *
 * Returns the pathname string to feed to link_path_walk(), or an ERR_PTR.
 */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
	int retval = 0;
	const char *s = nd->name->name;

	/* Empty name: nothing to look up lazily, so don't bother with RCU. */
	if (!*s)
		flags &= ~LOOKUP_RCU;

	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
	nd->depth = 0;
	if (flags & LOOKUP_ROOT) {
		/* Caller supplied the root to walk under (vfs_path_lookup()). */
		struct dentry *root = nd->root.dentry;
		struct vfsmount *mnt = nd->root.mnt;
		struct inode *inode = root->d_inode;
		if (*s) {
			/* A non-empty path must start from a searchable dir. */
			if (!d_can_lookup(root))
				return ERR_PTR(-ENOTDIR);
			retval = inode_permission2(mnt, inode, MAY_EXEC);
			if (retval)
				return ERR_PTR(retval);
		}
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			/* RCU-walk: record seqcounts instead of grabbing refs. */
			rcu_read_lock();
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			nd->root_seq = nd->seq;
			nd->m_seq = read_seqbegin(&mount_lock);
		} else {
			path_get(&nd->path);
		}
		return s;
	}

	nd->root.mnt = NULL;
	nd->path.mnt = NULL;
	nd->path.dentry = NULL;

	nd->m_seq = read_seqbegin(&mount_lock);
	if (*s == '/') {
		/* Absolute path: jump to the process root. */
		if (flags & LOOKUP_RCU)
			rcu_read_lock();
		set_root(nd);
		if (likely(!nd_jump_root(nd)))
			return s;
		nd->root.mnt = NULL;
		rcu_read_unlock();
		return ERR_PTR(-ECHILD);
	} else if (nd->dfd == AT_FDCWD) {
		/* Relative path from the current working directory. */
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			rcu_read_lock();

			/* Retry until we get a consistent snapshot of pwd. */
			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->inode = nd->path.dentry->d_inode;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while (read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
			nd->inode = nd->path.dentry->d_inode;
		}
		return s;
	} else {
		/* Caller must check execute permissions on the starting path component */
		struct fd f = fdget_raw(nd->dfd);
		struct dentry *dentry;

		if (!f.file)
			return ERR_PTR(-EBADF);

		dentry = f.file->f_path.dentry;

		/* A non-empty path can only start from a directory. */
		if (*s) {
			if (!d_can_lookup(dentry)) {
				fdput(f);
				return ERR_PTR(-ENOTDIR);
			}
		}

		nd->path = f.file->f_path;
		if (flags & LOOKUP_RCU) {
			rcu_read_lock();
			nd->inode = nd->path.dentry->d_inode;
			nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
			nd->inode = nd->path.dentry->d_inode;
		}
		fdput(f);
		return s;
	}
}
2288
2289static const char *trailing_symlink(struct nameidata *nd)
2290{
2291 const char *s;
2292 int error = may_follow_link(nd);
2293 if (unlikely(error))
2294 return ERR_PTR(error);
2295 nd->flags |= LOOKUP_PARENT;
2296 nd->stack[0].name = NULL;
2297 s = get_link(nd);
2298 return s ? s : "";
2299}
2300
2301static inline int lookup_last(struct nameidata *nd)
2302{
2303 if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
2304 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
2305
2306 nd->flags &= ~LOOKUP_PARENT;
2307 return walk_component(nd,
2308 nd->flags & LOOKUP_FOLLOW
2309 ? nd->depth
2310 ? WALK_PUT | WALK_GET
2311 : WALK_GET
2312 : 0);
2313}
2314
/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
	const char *s = path_init(nd, flags);
	int err;

	if (IS_ERR(s))
		return PTR_ERR(s);
	/*
	 * Walk everything but the last component, then resolve the last one;
	 * a positive return from lookup_last() means we hit a trailing
	 * symlink whose body must be walked next.
	 */
	while (!(err = link_path_walk(s, nd))
		&& ((err = lookup_last(nd)) > 0)) {
		s = trailing_symlink(nd);
		if (IS_ERR(s)) {
			err = PTR_ERR(s);
			break;
		}
	}
	if (!err)
		err = complete_walk(nd);	/* leave RCU mode, final validation */

	/* LOOKUP_DIRECTORY demands something we can look things up in. */
	if (!err && nd->flags & LOOKUP_DIRECTORY)
		if (!d_can_lookup(nd->path.dentry))
			err = -ENOTDIR;
	if (!err) {
		/* Transfer the reference held in nd->path to *path. */
		*path = nd->path;
		nd->path.mnt = NULL;
		nd->path.dentry = NULL;
	}
	terminate_walk(nd);
	return err;
}
2345
/*
 * Resolve @name relative to @dfd into @path.  Tries RCU-walk first,
 * retries in ref-walk mode on -ECHILD and with forced revalidation on
 * -ESTALE.  An optional @root pins the walk under a caller-supplied root
 * (LOOKUP_ROOT).  Consumes @name in all cases.
 */
static int filename_lookup(int dfd, struct filename *name, unsigned flags,
			   struct path *path, struct path *root)
{
	int retval;
	struct nameidata nd;
	if (IS_ERR(name))
		return PTR_ERR(name);
	if (unlikely(root)) {
		nd.root = *root;
		flags |= LOOKUP_ROOT;
	}
	set_nameidata(&nd, dfd, name);
	retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
	if (unlikely(retval == -ECHILD))
		retval = path_lookupat(&nd, flags, path);
	if (unlikely(retval == -ESTALE))
		retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);

	if (likely(!retval))
		audit_inode(name, path->dentry, flags & LOOKUP_PARENT);
	restore_nameidata();
	putname(name);
	return retval;
}
2370
/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
2372static int path_parentat(struct nameidata *nd, unsigned flags,
2373 struct path *parent)
2374{
2375 const char *s = path_init(nd, flags);
2376 int err;
2377 if (IS_ERR(s))
2378 return PTR_ERR(s);
2379 err = link_path_walk(s, nd);
2380 if (!err)
2381 err = complete_walk(nd);
2382 if (!err) {
2383 *parent = nd->path;
2384 nd->path.mnt = NULL;
2385 nd->path.dentry = NULL;
2386 }
2387 terminate_walk(nd);
2388 return err;
2389}
2390
/*
 * Like filename_lookup(), but resolve only the parent directory of the
 * final pathname component.  On success *parent holds a reference to
 * that directory, *last / *type describe the final component, and the
 * returned struct filename (which *last points into) must be released
 * by the caller.  On failure @name is consumed and an ERR_PTR returned.
 */
static struct filename *filename_parentat(int dfd, struct filename *name,
				unsigned int flags, struct path *parent,
				struct qstr *last, int *type)
{
	int retval;
	struct nameidata nd;

	if (IS_ERR(name))
		return name;
	set_nameidata(&nd, dfd, name);
	/* RCU-walk first, then ref-walk, then forced revalidation. */
	retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
	if (unlikely(retval == -ECHILD))
		retval = path_parentat(&nd, flags, parent);
	if (unlikely(retval == -ESTALE))
		retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
	if (likely(!retval)) {
		*last = nd.last;
		*type = nd.last_type;
		audit_inode(name, parent->dentry, LOOKUP_PARENT);
	} else {
		putname(name);
		name = ERR_PTR(retval);
	}
	restore_nameidata();
	return name;
}
2417
/*
 * kern_path_locked - look up a kernel pathname, returning the last
 * component's dentry with its parent directory locked.
 *
 * On success *path references the parent (whose i_mutex is held) and the
 * returned dentry is the (possibly negative) last component; the caller
 * must unlock and path_put() when done.  Only LAST_NORM final components
 * are accepted - "." and ".." yield -EINVAL.
 */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
	struct filename *filename;
	struct dentry *d;
	struct qstr last;
	int type;

	filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
				     &last, &type);
	if (IS_ERR(filename))
		return ERR_CAST(filename);
	/* "." and ".." make no sense as a lockable child of *path. */
	if (unlikely(type != LAST_NORM)) {
		path_put(path);
		putname(filename);
		return ERR_PTR(-EINVAL);
	}
	/* Lock the parent and look the child up under that lock. */
	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
	d = __lookup_hash(&last, path->dentry, 0);
	if (IS_ERR(d)) {
		/* Lookup failed: drop the lock and the parent reference. */
		inode_unlock(path->dentry->d_inode);
		path_put(path);
	}
	putname(filename);
	return d;
}
2444
/**
 * kern_path - look up a path from a kernel-space pathname
 * @name: pathname (kernel address space)
 * @flags: lookup flags
 * @path: pointer to struct path to fill
 *
 * Returns 0 on success; *path then holds a reference which the caller
 * must drop with path_put().
 */
int kern_path(const char *name, unsigned int flags, struct path *path)
{
	return filename_lookup(AT_FDCWD, getname_kernel(name),
			       flags, path, NULL);
}
EXPORT_SYMBOL(kern_path);
2451
2452/**
2453 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
2454 * @dentry: pointer to dentry of the base directory
2455 * @mnt: pointer to vfs mount of the base directory
2456 * @name: pointer to file name
2457 * @flags: lookup flags
2458 * @path: pointer to struct path to fill
2459 */
2460int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
2461 const char *name, unsigned int flags,
2462 struct path *path)
2463{
2464 struct path root = {.mnt = mnt, .dentry = dentry};
2465 /* the first argument of filename_lookup() is ignored with root */
2466 return filename_lookup(AT_FDCWD, getname_kernel(name),
2467 flags , path, &root);
2468}
2469EXPORT_SYMBOL(vfs_path_lookup);
2470
2471/**
2472 * lookup_one_len - filesystem helper to lookup single pathname component
2473 * @name: pathname component to lookup
2474 * @mnt: mount we are looking up on
2475 * @base: base directory to lookup from
2476 * @len: maximum length @len should be interpreted to
2477 *
2478 * Note that this routine is purely a helper for filesystem usage and should
2479 * not be called by generic code.
2480 *
2481 * The caller must hold base->i_mutex.
2482 */
2483struct dentry *lookup_one_len2(const char *name, struct vfsmount *mnt, struct dentry *base, int len)
2484{
2485 struct qstr this;
2486 unsigned int c;
2487 int err;
2488
2489 WARN_ON_ONCE(!inode_is_locked(base->d_inode));
2490
2491 this.name = name;
2492 this.len = len;
2493 this.hash = full_name_hash(base, name, len);
2494 if (!len)
2495 return ERR_PTR(-EACCES);
2496
2497 if (unlikely(name[0] == '.')) {
2498 if (len < 2 || (len == 2 && name[1] == '.'))
2499 return ERR_PTR(-EACCES);
2500 }
2501
2502 while (len--) {
2503 c = *(const unsigned char *)name++;
2504 if (c == '/' || c == '\0')
2505 return ERR_PTR(-EACCES);
2506 }
2507 /*
2508 * See if the low-level filesystem might want
2509 * to use its own hash..
2510 */
2511 if (base->d_flags & DCACHE_OP_HASH) {
2512 int err = base->d_op->d_hash(base, &this);
2513 if (err < 0)
2514 return ERR_PTR(err);
2515 }
2516
2517 err = inode_permission2(mnt, base->d_inode, MAY_EXEC);
2518 if (err)
2519 return ERR_PTR(err);
2520
2521 return __lookup_hash(&this, base, 0);
2522}
2523EXPORT_SYMBOL(lookup_one_len2);
2524
/*
 * Mount-less wrapper around lookup_one_len2(): passes a NULL vfsmount.
 * Same contract otherwise - caller must hold base->i_mutex.
 */
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
	return lookup_one_len2(name, NULL, base, len);
}
EXPORT_SYMBOL(lookup_one_len);
2530
2531/**
2532 * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
2533 * @name: pathname component to lookup
2534 * @base: base directory to lookup from
2535 * @len: maximum length @len should be interpreted to
2536 *
2537 * Note that this routine is purely a helper for filesystem usage and should
2538 * not be called by generic code.
2539 *
2540 * Unlike lookup_one_len, it should be called without the parent
2541 * i_mutex held, and will take the i_mutex itself if necessary.
2542 */
2543struct dentry *lookup_one_len_unlocked(const char *name,
2544 struct dentry *base, int len)
2545{
2546 struct qstr this;
2547 unsigned int c;
2548 int err;
2549 struct dentry *ret;
2550
2551 this.name = name;
2552 this.len = len;
2553 this.hash = full_name_hash(base, name, len);
2554 if (!len)
2555 return ERR_PTR(-EACCES);
2556
2557 if (unlikely(name[0] == '.')) {
2558 if (len < 2 || (len == 2 && name[1] == '.'))
2559 return ERR_PTR(-EACCES);
2560 }
2561
2562 while (len--) {
2563 c = *(const unsigned char *)name++;
2564 if (c == '/' || c == '\0')
2565 return ERR_PTR(-EACCES);
2566 }
2567 /*
2568 * See if the low-level filesystem might want
2569 * to use its own hash..
2570 */
2571 if (base->d_flags & DCACHE_OP_HASH) {
2572 int err = base->d_op->d_hash(base, &this);
2573 if (err < 0)
2574 return ERR_PTR(err);
2575 }
2576
2577 err = inode_permission(base->d_inode, MAY_EXEC);
2578 if (err)
2579 return ERR_PTR(err);
2580
2581 ret = lookup_dcache(&this, base, 0);
2582 if (!ret)
2583 ret = lookup_slow(&this, base, 0);
2584 return ret;
2585}
2586EXPORT_SYMBOL(lookup_one_len_unlocked);
2587
#ifdef CONFIG_UNIX98_PTYS
/*
 * path_pts - replace *path with whatever is mounted on "pts" in the same
 * directory as the input path.
 *
 * On success *path is updated in place (a reference to the "pts" child
 * replaces the reference to its parent) and 0 is returned; a negative
 * errno is returned on failure, with *path still owning its reference.
 */
int path_pts(struct path *path)
{
	/* Find something mounted on "pts" in the same directory as
	 * the input path.
	 */
	struct dentry *child, *parent;
	struct qstr this;
	int ret;

	ret = path_parent_directory(path);
	if (ret)
		return ret;

	parent = path->dentry;
	this.name = "pts";
	this.len = 3;
	/*
	 * d_hash_and_lookup() can return an ERR_PTR (e.g. when the fs's
	 * ->d_hash() fails), not just NULL; checking only for NULL would
	 * let an error pointer escape as a "valid" dentry below.
	 */
	child = d_hash_and_lookup(parent, &this);
	if (IS_ERR_OR_NULL(child))
		return IS_ERR(child) ? PTR_ERR(child) : -ENOENT;

	path->dentry = child;
	dput(parent);
	follow_mount(path);
	return 0;
}
#endif
2615
/*
 * Resolve a userspace pathname relative to @dfd into @path.
 * @empty (may be NULL) is passed straight through to getname_flags().
 */
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
		 struct path *path, int *empty)
{
	return filename_lookup(dfd, getname_flags(name, flags, empty),
			       flags, path, NULL);
}
EXPORT_SYMBOL(user_path_at_empty);
2623
2624/*
2625 * NB: most callers don't do anything directly with the reference to the
2626 * to struct filename, but the nd->last pointer points into the name string
2627 * allocated by getname. So we must hold the reference to it until all
2628 * path-walking is complete.
2629 */
/*
 * Copy a pathname in from userspace and resolve its parent directory;
 * thin wrapper around filename_parentat().  Same return contract as
 * filename_parentat().
 */
static inline struct filename *
user_path_parent(int dfd, const char __user *path,
		 struct path *parent,
		 struct qstr *last,
		 int *type,
		 unsigned int flags)
{
	/* only LOOKUP_REVAL is allowed in extra flags */
	return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
				 parent, last, type);
}
2641
2642/**
2643 * mountpoint_last - look up last component for umount
2644 * @nd: pathwalk nameidata - currently pointing at parent directory of "last"
2645 * @path: pointer to container for result
2646 *
2647 * This is a special lookup_last function just for umount. In this case, we
2648 * need to resolve the path without doing any revalidation.
2649 *
2650 * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
2651 * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
2652 * in almost all cases, this lookup will be served out of the dcache. The only
2653 * cases where it won't are if nd->last refers to a symlink or the path is
2654 * bogus and it doesn't exist.
2655 *
2656 * Returns:
2657 * -error: if there was an error during lookup. This includes -ENOENT if the
2658 * lookup found a negative dentry. The nd->path reference will also be
2659 * put in this case.
2660 *
2661 * 0: if we successfully resolved nd->path and found it to not to be a
2662 * symlink that needs to be followed. "path" will also be populated.
2663 * The nd->path reference will also be put.
2664 *
2665 * 1: if we successfully resolved nd->last and found it to be a symlink
2666 * that needs to be followed. "path" will be populated with the path
2667 * to the link, and nd->path will *not* be put.
2668 */
2669static int
2670mountpoint_last(struct nameidata *nd, struct path *path)
2671{
2672 int error = 0;
2673 struct dentry *dentry;
2674 struct dentry *dir = nd->path.dentry;
2675
2676 /* If we're in rcuwalk, drop out of it to handle last component */
2677 if (nd->flags & LOOKUP_RCU) {
2678 if (unlazy_walk(nd, NULL, 0))
2679 return -ECHILD;
2680 }
2681
2682 nd->flags &= ~LOOKUP_PARENT;
2683
2684 if (unlikely(nd->last_type != LAST_NORM)) {
2685 error = handle_dots(nd, nd->last_type);
2686 if (error)
2687 return error;
2688 dentry = dget(nd->path.dentry);
2689 } else {
2690 dentry = d_lookup(dir, &nd->last);
2691 if (!dentry) {
2692 /*
2693 * No cached dentry. Mounted dentries are pinned in the
2694 * cache, so that means that this dentry is probably
2695 * a symlink or the path doesn't actually point
2696 * to a mounted dentry.
2697 */
2698 dentry = lookup_slow(&nd->last, dir,
2699 nd->flags | LOOKUP_NO_REVAL);
2700 if (IS_ERR(dentry))
2701 return PTR_ERR(dentry);
2702 }
2703 }
2704 if (d_is_negative(dentry)) {
2705 dput(dentry);
2706 return -ENOENT;
2707 }
2708 if (nd->depth)
2709 put_link(nd);
2710 path->dentry = dentry;
2711 path->mnt = nd->path.mnt;
2712 error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW,
2713 d_backing_inode(dentry), 0);
2714 if (unlikely(error))
2715 return error;
2716 mntget(path->mnt);
2717 follow_mount(path);
2718 return 0;
2719}
2720
2721/**
2722 * path_mountpoint - look up a path to be umounted
2723 * @nd: lookup context
2724 * @flags: lookup flags
2725 * @path: pointer to container for result
2726 *
2727 * Look up the given name, but don't attempt to revalidate the last component.
2728 * Returns 0 and "path" will be valid on success; Returns error otherwise.
2729 */
2730static int
2731path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
2732{
2733 const char *s = path_init(nd, flags);
2734 int err;
2735 if (IS_ERR(s))
2736 return PTR_ERR(s);
2737 while (!(err = link_path_walk(s, nd)) &&
2738 (err = mountpoint_last(nd, path)) > 0) {
2739 s = trailing_symlink(nd);
2740 if (IS_ERR(s)) {
2741 err = PTR_ERR(s);
2742 break;
2743 }
2744 }
2745 terminate_walk(nd);
2746 return err;
2747}
2748
/*
 * Resolve @name to the path of a mountpoint: RCU-walk first, retrying
 * in ref-walk mode on -ECHILD and with forced revalidation on -ESTALE.
 * Consumes @name.
 */
static int
filename_mountpoint(int dfd, struct filename *name, struct path *path,
			unsigned int flags)
{
	struct nameidata nd;
	int error;
	if (IS_ERR(name))
		return PTR_ERR(name);
	set_nameidata(&nd, dfd, name);
	error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
	if (unlikely(error == -ECHILD))
		error = path_mountpoint(&nd, flags, path);
	if (unlikely(error == -ESTALE))
		error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
	if (likely(!error))
		audit_inode(name, path->dentry, 0);
	restore_nameidata();
	putname(name);
	return error;
}
2769
2770/**
2771 * user_path_mountpoint_at - lookup a path from userland in order to umount it
2772 * @dfd: directory file descriptor
2773 * @name: pathname from userland
2774 * @flags: lookup flags
2775 * @path: pointer to container to hold result
2776 *
2777 * A umount is a special case for path walking. We're not actually interested
2778 * in the inode in this situation, and ESTALE errors can be a problem. We
2779 * simply want track down the dentry and vfsmount attached at the mountpoint
2780 * and avoid revalidating the last component.
2781 *
2782 * Returns 0 and populates "path" on success.
2783 */
2784int
2785user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
2786 struct path *path)
2787{
2788 return filename_mountpoint(dfd, getname(name), path, flags);
2789}
2790
/*
 * Kernel-string counterpart of user_path_mountpoint_at().
 */
int
kern_path_mountpoint(int dfd, const char *name, struct path *path,
			unsigned int flags)
{
	return filename_mountpoint(dfd, getname_kernel(name), path, flags);
}
EXPORT_SYMBOL(kern_path_mountpoint);
2798
2799int __check_sticky(struct inode *dir, struct inode *inode)
2800{
2801 kuid_t fsuid = current_fsuid();
2802
2803 if (uid_eq(inode->i_uid, fsuid))
2804 return 0;
2805 if (uid_eq(dir->i_uid, fsuid))
2806 return 0;
2807 return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
2808}
2809EXPORT_SYMBOL(__check_sticky);
2810
2811/*
2812 * Check whether we can remove a link victim from directory dir, check
2813 * whether the type of victim is right.
2814 * 1. We can't do it if dir is read-only (done in permission())
2815 * 2. We should have write and exec permissions on dir
2816 * 3. We can't remove anything from append-only dir
2817 * 4. We can't do anything with immutable dir (done in permission())
2818 * 5. If the sticky bit on dir is set we should either
2819 * a. be owner of dir, or
2820 * b. be owner of victim, or
2821 * c. have CAP_FOWNER capability
 * 6. If the victim is append-only or immutable we can't do anything with
2823 * links pointing to it.
2824 * 7. If the victim has an unknown uid or gid we can't change the inode.
2825 * 8. If we were asked to remove a directory and victim isn't one - ENOTDIR.
2826 * 9. If we were asked to remove a non-directory and victim isn't one - EISDIR.
2827 * 10. We can't remove a root or mountpoint.
2828 * 11. We don't allow removal of NFS sillyrenamed files; it's handled by
2829 * nfs_async_unlink().
2830 */
static int may_delete(struct vfsmount *mnt, struct inode *dir, struct dentry *victim, bool isdir)
{
	struct inode *inode = d_backing_inode(victim);
	int error;

	if (d_is_negative(victim))
		return -ENOENT;
	BUG_ON(!inode);

	/* The caller must pass the victim's actual parent inode. */
	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	/* Rules 1-4 above: write+exec on the parent, not append-only. */
	error = inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;

	/* Rules 5-7: sticky dir, protected victim, unmappable ids. */
	if (check_sticky(dir, inode) || IS_APPEND(inode) ||
	    IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) || HAS_UNMAPPED_ID(inode))
		return -EPERM;
	if (isdir) {
		/* Rules 8 and 10: rmdir-style removal. */
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;	/* rule 9 */
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* Rule 11: NFS sillyrenamed files are off-limits. */
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
2865
/* Check whether we can create an object with dentry child in directory
 * dir.
 * 1. We can't do it if child already exists (open has special treatment for
 *    this case, but since we are inlined it's OK)
 * 2. We can't do it if dir is read-only (done in permission())
 * 3. We can't do it if the fs can't represent the fsuid or fsgid.
 * 4. We should have write and exec permissions on dir
 * 5. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct vfsmount *mnt, struct inode *dir, struct dentry *child)
{
	struct user_namespace *s_user_ns;
	audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* The fs's user namespace must be able to represent fsuid/fsgid. */
	s_user_ns = dir->i_sb->s_user_ns;
	if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
	    !kgid_has_mapping(s_user_ns, current_fsgid()))
		return -EOVERFLOW;
	return inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
}
2889
2890/*
2891 * p1 and p2 should be directories on the same fs.
2892 */
2893struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
2894{
2895 struct dentry *p;
2896
2897 if (p1 == p2) {
2898 inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
2899 return NULL;
2900 }
2901
2902 mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
2903
2904 p = d_ancestor(p2, p1);
2905 if (p) {
2906 inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
2907 inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
2908 return p;
2909 }
2910
2911 p = d_ancestor(p1, p2);
2912 if (p) {
2913 inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
2914 inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
2915 return p;
2916 }
2917
2918 inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
2919 inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
2920 return NULL;
2921}
2922EXPORT_SYMBOL(lock_rename);
2923
2924void unlock_rename(struct dentry *p1, struct dentry *p2)
2925{
2926 inode_unlock(p1->d_inode);
2927 if (p1 != p2) {
2928 inode_unlock(p2->d_inode);
2929 mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
2930 }
2931}
2932EXPORT_SYMBOL(unlock_rename);
2933
/*
 * vfs_create2 - create a regular file under @dir (vfsmount-aware variant).
 * Performs may_create() and security checks, then calls the filesystem's
 * ->create().  Returns 0 on success, negative errno otherwise; emits an
 * fsnotify create event on success.
 */
int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
		umode_t mode, bool want_excl)
{
	int error = may_create(mnt, dir, dentry);
	if (error)
		return error;

	if (!dir->i_op->create)
		return -EACCES;	/* shouldn't it be ENOSYS? */
	/* Only permission bits are honoured; force a regular-file mode. */
	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = dir->i_op->create(dir, dentry, mode, want_excl);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_create2);
2954
/*
 * vfs_create - create a regular file; wrapper around vfs_create2() that
 * passes a NULL vfsmount.
 */
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool want_excl)
{
	return vfs_create2(NULL, dir, dentry, mode, want_excl);
}
EXPORT_SYMBOL(vfs_create);
2961
2962bool may_open_dev(const struct path *path)
2963{
2964 return !(path->mnt->mnt_flags & MNT_NODEV) &&
2965 !(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
2966}
2967
/*
 * Final checks before opening the object at *path: reject symlinks
 * (-ELOOP) and writes to directories (-EISDIR), honour nodev mounts for
 * device nodes, quietly drop O_TRUNC for FIFOs/sockets/devices, run the
 * permission check, and enforce append-only and O_NOATIME restrictions.
 */
static int may_open(struct path *path, int acc_mode, int flag)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = dentry->d_inode;
	int error;

	if (!inode)
		return -ENOENT;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		if (acc_mode & MAY_WRITE)
			return -EISDIR;
		break;
	case S_IFBLK:
	case S_IFCHR:
		if (!may_open_dev(path))
			return -EACCES;
		/*FALLTHRU*/
	case S_IFIFO:
	case S_IFSOCK:
		/* Truncation is meaningless for these object types. */
		flag &= ~O_TRUNC;
		break;
	}

	error = inode_permission2(mnt, inode, MAY_OPEN | acc_mode);
	if (error)
		return error;

	/*
	 * An append-only file must be opened in append mode for writing.
	 */
	if (IS_APPEND(inode)) {
		if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
			return -EPERM;
		if (flag & O_TRUNC)
			return -EPERM;
	}

	/* O_NOATIME can only be set by the owner or superuser */
	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
		return -EPERM;

	return 0;
}
3016
/*
 * Perform the O_TRUNC step of open(): truncate the already-opened file
 * to length 0.  Takes and drops write access on the inode around the
 * lock/security checks and the actual do_truncate2().
 */
static int handle_truncate(struct file *filp)
{
	struct path *path = &filp->f_path;
	struct inode *inode = path->dentry->d_inode;
	int error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Refuse to truncate files with mandatory locks held on them.
	 */
	error = locks_verify_locked(filp);
	if (!error)
		error = security_path_truncate(path);
	if (!error) {
		error = do_truncate2(path->mnt, path->dentry, 0,
				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
				    filp);
	}
	put_write_access(inode);
	return error;
}
3038
/*
 * Convert an open(2) flag word to the form used internally by namei:
 * the special access-mode value 3 (O_ACCMODE bits both set) is mapped
 * down to 2; all other values pass through unchanged.
 */
static inline int open_to_namei_flags(int flag)
{
	const int acc = flag & O_ACCMODE;

	return (acc == 3) ? flag - 1 : flag;
}
3045
/*
 * Permission checks for creating a file on the open() path: path-mknod
 * security hook, userns representability of fsuid/fsgid, write+exec on
 * the parent directory, then the inode_create security hook.
 */
static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
{
	struct user_namespace *s_user_ns;
	int error = security_path_mknod(dir, dentry, mode, 0);
	if (error)
		return error;

	/* The fs's user namespace must be able to represent fsuid/fsgid. */
	s_user_ns = dir->dentry->d_sb->s_user_ns;
	if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
	    !kgid_has_mapping(s_user_ns, current_fsgid()))
		return -EOVERFLOW;

	error = inode_permission2(dir->mnt, dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	return security_inode_create(dir->dentry->d_inode, dentry, mode);
}
3064
3065/*
3066 * Attempt to atomically look up, create and open a file from a negative
3067 * dentry.
3068 *
3069 * Returns 0 if successful. The file will have been created and attached to
3070 * @file by the filesystem calling finish_open().
3071 *
3072 * Returns 1 if the file was looked up only or didn't need creating. The
3073 * caller will need to perform the open themselves. @path will have been
3074 * updated to point to the new dentry. This may be negative.
3075 *
3076 * Returns an error code otherwise.
3077 */
3078static int atomic_open(struct nameidata *nd, struct dentry *dentry,
3079 struct path *path, struct file *file,
3080 const struct open_flags *op,
3081 int open_flag, umode_t mode,
3082 int *opened)
3083{
3084 struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
3085 struct inode *dir = nd->path.dentry->d_inode;
3086 int error;
3087
3088 if (!(~open_flag & (O_EXCL | O_CREAT))) /* both O_EXCL and O_CREAT */
3089 open_flag &= ~O_TRUNC;
3090
3091 if (nd->flags & LOOKUP_DIRECTORY)
3092 open_flag |= O_DIRECTORY;
3093
3094 file->f_path.dentry = DENTRY_NOT_SET;
3095 file->f_path.mnt = nd->path.mnt;
3096 error = dir->i_op->atomic_open(dir, dentry, file,
3097 open_to_namei_flags(open_flag),
3098 mode, opened);
3099 d_lookup_done(dentry);
3100 if (!error) {
3101 /*
3102 * We didn't have the inode before the open, so check open
3103 * permission here.
3104 */
3105 int acc_mode = op->acc_mode;
3106 if (*opened & FILE_CREATED) {
3107 WARN_ON(!(open_flag & O_CREAT));
3108 fsnotify_create(dir, dentry);
3109 acc_mode = 0;
3110 }
3111 error = may_open(&file->f_path, acc_mode, open_flag);
3112 if (WARN_ON(error > 0))
3113 error = -EINVAL;
3114 } else if (error > 0) {
3115 if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
3116 error = -EIO;
3117 } else {
3118 if (file->f_path.dentry) {
3119 dput(dentry);
3120 dentry = file->f_path.dentry;
3121 }
3122 if (*opened & FILE_CREATED)
3123 fsnotify_create(dir, dentry);
3124 if (unlikely(d_is_negative(dentry))) {
3125 error = -ENOENT;
3126 } else {
3127 path->dentry = dentry;
3128 path->mnt = nd->path.mnt;
3129 return 1;
3130 }
3131 }
3132 }
3133 dput(dentry);
3134 return error;
3135}
3136
3137/*
3138 * Look up and maybe create and open the last component.
3139 *
3140 * Must be called with i_mutex held on parent.
3141 *
3142 * Returns 0 if the file was successfully atomically created (if necessary) and
3143 * opened. In this case the file will be returned attached to @file.
3144 *
3145 * Returns 1 if the file was not completely opened at this time, though lookups
3146 * and creations will have been performed and the dentry returned in @path will
3147 * be positive upon return if O_CREAT was specified. If O_CREAT wasn't
3148 * specified then a negative dentry may be returned.
3149 *
3150 * An error code is returned otherwise.
3151 *
3152 * FILE_CREATE will be set in @*opened if the dentry was created and will be
3153 * cleared otherwise prior to returning.
3154 */
3155static int lookup_open(struct nameidata *nd, struct path *path,
3156 struct file *file,
3157 const struct open_flags *op,
3158 bool got_write, int *opened)
3159{
3160 struct dentry *dir = nd->path.dentry;
3161 struct inode *dir_inode = dir->d_inode;
3162 int open_flag = op->open_flag;
3163 struct dentry *dentry;
3164 int error, create_error = 0;
3165 umode_t mode = op->mode;
3166 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
3167
3168 if (unlikely(IS_DEADDIR(dir_inode)))
3169 return -ENOENT;
3170
3171 *opened &= ~FILE_CREATED;
3172 dentry = d_lookup(dir, &nd->last);
3173 for (;;) {
3174 if (!dentry) {
3175 dentry = d_alloc_parallel(dir, &nd->last, &wq);
3176 if (IS_ERR(dentry))
3177 return PTR_ERR(dentry);
3178 }
3179 if (d_in_lookup(dentry))
3180 break;
3181
3182 if (!(dentry->d_flags & DCACHE_OP_REVALIDATE))
3183 break;
3184
3185 error = d_revalidate(dentry, nd->flags);
3186 if (likely(error > 0))
3187 break;
3188 if (error)
3189 goto out_dput;
3190 d_invalidate(dentry);
3191 dput(dentry);
3192 dentry = NULL;
3193 }
3194 if (dentry->d_inode) {
3195 /* Cached positive dentry: will open in f_op->open */
3196 goto out_no_open;
3197 }
3198
3199 /*
3200 * Checking write permission is tricky, bacuse we don't know if we are
3201 * going to actually need it: O_CREAT opens should work as long as the
3202 * file exists. But checking existence breaks atomicity. The trick is
3203 * to check access and if not granted clear O_CREAT from the flags.
3204 *
3205 * Another problem is returing the "right" error value (e.g. for an
3206 * O_EXCL open we want to return EEXIST not EROFS).
3207 */
3208 if (open_flag & O_CREAT) {
3209 if (!IS_POSIXACL(dir->d_inode))
3210 mode &= ~current_umask();
3211 if (unlikely(!got_write)) {
3212 create_error = -EROFS;
3213 open_flag &= ~O_CREAT;
3214 if (open_flag & (O_EXCL | O_TRUNC))
3215 goto no_open;
3216 /* No side effects, safe to clear O_CREAT */
3217 } else {
3218 create_error = may_o_create(&nd->path, dentry, mode);
3219 if (create_error) {
3220 open_flag &= ~O_CREAT;
3221 if (open_flag & O_EXCL)
3222 goto no_open;
3223 }
3224 }
3225 } else if ((open_flag & (O_TRUNC|O_WRONLY|O_RDWR)) &&
3226 unlikely(!got_write)) {
3227 /*
3228 * No O_CREATE -> atomicity not a requirement -> fall
3229 * back to lookup + open
3230 */
3231 goto no_open;
3232 }
3233
3234 if (dir_inode->i_op->atomic_open) {
3235 error = atomic_open(nd, dentry, path, file, op, open_flag,
3236 mode, opened);
3237 if (unlikely(error == -ENOENT) && create_error)
3238 error = create_error;
3239 return error;
3240 }
3241
3242no_open:
3243 if (d_in_lookup(dentry)) {
3244 struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry,
3245 nd->flags);
3246 d_lookup_done(dentry);
3247 if (unlikely(res)) {
3248 if (IS_ERR(res)) {
3249 error = PTR_ERR(res);
3250 goto out_dput;
3251 }
3252 dput(dentry);
3253 dentry = res;
3254 }
3255 }
3256
3257 /* Negative dentry, just create the file */
3258 if (!dentry->d_inode && (open_flag & O_CREAT)) {
3259 *opened |= FILE_CREATED;
3260 audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
3261 if (!dir_inode->i_op->create) {
3262 error = -EACCES;
3263 goto out_dput;
3264 }
3265 error = dir_inode->i_op->create(dir_inode, dentry, mode,
3266 open_flag & O_EXCL);
3267 if (error)
3268 goto out_dput;
3269 fsnotify_create(dir_inode, dentry);
3270 }
3271 if (unlikely(create_error) && !dentry->d_inode) {
3272 error = create_error;
3273 goto out_dput;
3274 }
3275out_no_open:
3276 path->dentry = dentry;
3277 path->mnt = nd->path.mnt;
3278 return 1;
3279
3280out_dput:
3281 dput(dentry);
3282 return error;
3283}
3284
3285/*
3286 * Handle the last step of open()
3287 */
3288static int do_last(struct nameidata *nd,
3289 struct file *file, const struct open_flags *op,
3290 int *opened)
3291{
3292 struct dentry *dir = nd->path.dentry;
3293 int open_flag = op->open_flag;
3294 bool will_truncate = (open_flag & O_TRUNC) != 0;
3295 bool got_write = false;
3296 int acc_mode = op->acc_mode;
3297 unsigned seq;
3298 struct inode *inode;
3299 struct path path;
3300 int error;
3301
3302 nd->flags &= ~LOOKUP_PARENT;
3303 nd->flags |= op->intent;
3304
3305 if (nd->last_type != LAST_NORM) {
3306 error = handle_dots(nd, nd->last_type);
3307 if (unlikely(error))
3308 return error;
3309 goto finish_open;
3310 }
3311
3312 if (!(open_flag & O_CREAT)) {
3313 if (nd->last.name[nd->last.len])
3314 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
3315 /* we _can_ be in RCU mode here */
3316 error = lookup_fast(nd, &path, &inode, &seq);
3317 if (likely(error > 0))
3318 goto finish_lookup;
3319
3320 if (error < 0)
3321 return error;
3322
3323 BUG_ON(nd->inode != dir->d_inode);
3324 BUG_ON(nd->flags & LOOKUP_RCU);
3325 } else {
3326 /* create side of things */
3327 /*
3328 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
3329 * has been cleared when we got to the last component we are
3330 * about to look up
3331 */
3332 error = complete_walk(nd);
3333 if (error)
3334 return error;
3335
3336 audit_inode(nd->name, dir, LOOKUP_PARENT);
3337 /* trailing slashes? */
3338 if (unlikely(nd->last.name[nd->last.len]))
3339 return -EISDIR;
3340 }
3341
3342 if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
3343 error = mnt_want_write(nd->path.mnt);
3344 if (!error)
3345 got_write = true;
3346 /*
3347 * do _not_ fail yet - we might not need that or fail with
3348 * a different error; let lookup_open() decide; we'll be
3349 * dropping this one anyway.
3350 */
3351 }
3352 if (open_flag & O_CREAT)
3353 inode_lock(dir->d_inode);
3354 else
3355 inode_lock_shared(dir->d_inode);
3356 error = lookup_open(nd, &path, file, op, got_write, opened);
3357 if (open_flag & O_CREAT)
3358 inode_unlock(dir->d_inode);
3359 else
3360 inode_unlock_shared(dir->d_inode);
3361
3362 if (error <= 0) {
3363 if (error)
3364 goto out;
3365
3366 if ((*opened & FILE_CREATED) ||
3367 !S_ISREG(file_inode(file)->i_mode))
3368 will_truncate = false;
3369
3370 audit_inode(nd->name, file->f_path.dentry, 0);
3371 goto opened;
3372 }
3373
3374 if (*opened & FILE_CREATED) {
3375 /* Don't check for write permission, don't truncate */
3376 open_flag &= ~O_TRUNC;
3377 will_truncate = false;
3378 acc_mode = 0;
3379 path_to_nameidata(&path, nd);
3380 goto finish_open_created;
3381 }
3382
3383 /*
3384 * If atomic_open() acquired write access it is dropped now due to
3385 * possible mount and symlink following (this might be optimized away if
3386 * necessary...)
3387 */
3388 if (got_write) {
3389 mnt_drop_write(nd->path.mnt);
3390 got_write = false;
3391 }
3392
3393 error = follow_managed(&path, nd);
3394 if (unlikely(error < 0))
3395 return error;
3396
3397 if (unlikely(d_is_negative(path.dentry))) {
3398 path_to_nameidata(&path, nd);
3399 return -ENOENT;
3400 }
3401
3402 /*
3403 * create/update audit record if it already exists.
3404 */
3405 audit_inode(nd->name, path.dentry, 0);
3406
3407 if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
3408 path_to_nameidata(&path, nd);
3409 return -EEXIST;
3410 }
3411
3412 seq = 0; /* out of RCU mode, so the value doesn't matter */
3413 inode = d_backing_inode(path.dentry);
3414finish_lookup:
3415 if (nd->depth)
3416 put_link(nd);
3417 error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
3418 inode, seq);
3419 if (unlikely(error))
3420 return error;
3421
3422 path_to_nameidata(&path, nd);
3423 nd->inode = inode;
3424 nd->seq = seq;
3425 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
3426finish_open:
3427 error = complete_walk(nd);
3428 if (error)
3429 return error;
3430 audit_inode(nd->name, nd->path.dentry, 0);
3431 if (open_flag & O_CREAT) {
3432 error = -EISDIR;
3433 if (d_is_dir(nd->path.dentry))
3434 goto out;
3435 error = may_create_in_sticky(dir,
3436 d_backing_inode(nd->path.dentry));
3437 if (unlikely(error))
3438 goto out;
3439 }
3440 error = -ENOTDIR;
3441 if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
3442 goto out;
3443 if (!d_is_reg(nd->path.dentry))
3444 will_truncate = false;
3445
3446 if (will_truncate) {
3447 error = mnt_want_write(nd->path.mnt);
3448 if (error)
3449 goto out;
3450 got_write = true;
3451 }
3452finish_open_created:
3453 error = may_open(&nd->path, acc_mode, open_flag);
3454 if (error)
3455 goto out;
3456 BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
3457 error = vfs_open(&nd->path, file, current_cred());
3458 if (error)
3459 goto out;
3460 *opened |= FILE_OPENED;
3461opened:
3462 error = open_check_o_direct(file);
3463 if (!error)
3464 error = ima_file_check(file, op->acc_mode, *opened);
3465 if (!error && will_truncate)
3466 error = handle_truncate(file);
3467out:
3468 if (unlikely(error) && (*opened & FILE_OPENED))
3469 fput(file);
3470 if (unlikely(error > 0)) {
3471 WARN_ON(1);
3472 error = -EINVAL;
3473 }
3474 if (got_write)
3475 mnt_drop_write(nd->path.mnt);
3476 return error;
3477}
3478
/*
 * Handle O_TMPFILE: look up the parent directory, ask the filesystem to
 * create an unnamed (unlinked) inode via ->tmpfile(), and open it.
 * Unless O_EXCL was given, the inode is marked I_LINKABLE so it can
 * later be given a name with linkat(AT_EMPTY_PATH).
 */
static int do_tmpfile(struct nameidata *nd, unsigned flags,
		const struct open_flags *op,
		struct file *file, int *opened)
{
	static const struct qstr name = QSTR_INIT("/", 1);
	struct dentry *child;
	struct inode *dir;
	struct path path;
	int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
	if (unlikely(error))
		return error;
	error = mnt_want_write(path.mnt);
	if (unlikely(error))
		goto out;
	dir = path.dentry->d_inode;
	/* we want directory to be writable */
	/*
	 * NOTE(review): permission is checked against nd->path.mnt while
	 * dir comes from the freshly looked-up path - confirm the two
	 * mounts are guaranteed identical here.
	 */
	error = inode_permission2(nd->path.mnt, dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out2;
	if (!dir->i_op->tmpfile) {
		error = -EOPNOTSUPP;
		goto out2;
	}
	/* Allocate an anonymous child dentry and swap it into 'path'. */
	child = d_alloc(path.dentry, &name);
	if (unlikely(!child)) {
		error = -ENOMEM;
		goto out2;
	}
	dput(path.dentry);
	path.dentry = child;
	error = dir->i_op->tmpfile(dir, child, op->mode);
	if (error)
		goto out2;
	audit_inode(nd->name, child, 0);
	/* Don't check for other permissions, the inode was just created */
	error = may_open(&path, 0, op->open_flag);
	if (error)
		goto out2;
	file->f_path.mnt = path.mnt;
	error = finish_open(file, child, NULL, opened);
	if (error)
		goto out2;
	error = open_check_o_direct(file);
	if (error) {
		fput(file);
	} else if (!(op->open_flag & O_EXCL)) {
		/* Allow the temp file to gain a name via linkat() later. */
		struct inode *inode = file_inode(file);
		spin_lock(&inode->i_lock);
		inode->i_state |= I_LINKABLE;
		spin_unlock(&inode->i_lock);
	}
out2:
	mnt_drop_write(path.mnt);
out:
	path_put(&path);
	return error;
}
3536
3537static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
3538{
3539 struct path path;
3540 int error = path_lookupat(nd, flags, &path);
3541 if (!error) {
3542 audit_inode(nd->name, path.dentry, 0);
3543 error = vfs_open(&path, file, current_cred());
3544 path_put(&path);
3545 }
3546 return error;
3547}
3548
/*
 * Core of open(): allocate a struct file and drive the pathname walk,
 * dispatching to the O_TMPFILE / O_PATH special cases, otherwise
 * iterating link_path_walk() + do_last() until all trailing symlinks
 * are resolved.  Returns the opened file or an ERR_PTR().
 */
static struct file *path_openat(struct nameidata *nd,
			const struct open_flags *op, unsigned flags)
{
	const char *s;
	struct file *file;
	int opened = 0;
	int error;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_flags = op->open_flag;

	if (unlikely(file->f_flags & __O_TMPFILE)) {
		error = do_tmpfile(nd, flags, op, file, &opened);
		goto out2;
	}

	if (unlikely(file->f_flags & O_PATH)) {
		error = do_o_path(nd, flags, file);
		if (!error)
			opened |= FILE_OPENED;
		goto out2;
	}

	s = path_init(nd, flags);
	if (IS_ERR(s)) {
		put_filp(file);
		return ERR_CAST(s);
	}
	/* do_last() > 0 means a trailing symlink still needs following. */
	while (!(error = link_path_walk(s, nd)) &&
		(error = do_last(nd, file, op, &opened)) > 0) {
		nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
		s = trailing_symlink(nd);
		if (IS_ERR(s)) {
			error = PTR_ERR(s);
			break;
		}
	}
	terminate_walk(nd);
out2:
	if (!(opened & FILE_OPENED)) {
		BUG_ON(!error);
		put_filp(file);
	}
	if (unlikely(error)) {
		/* Map internal -EOPENSTALE to a retryable errno. */
		if (error == -EOPENSTALE) {
			if (flags & LOOKUP_RCU)
				error = -ECHILD;
			else
				error = -ESTALE;
		}
		file = ERR_PTR(error);
	}
	return file;
}
3606
/*
 * Open @pathname relative to @dfd.  Tries a lockless RCU walk first,
 * falls back to ref-walk on -ECHILD, and retries once more with
 * LOOKUP_REVAL on -ESTALE.
 */
struct file *do_filp_open(int dfd, struct filename *pathname,
		const struct open_flags *op)
{
	struct nameidata nd;
	int flags = op->lookup_flags;
	struct file *filp;

	set_nameidata(&nd, dfd, pathname);
	filp = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		filp = path_openat(&nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	return filp;
}
3623
/*
 * Like do_filp_open(), but resolve @name relative to an explicit
 * root (@dentry/@mnt) rather than the task's fs root/cwd.  Refuses
 * to open a symlink root for LOOKUP_OPEN intent.
 */
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op)
{
	struct nameidata nd;
	struct file *file;
	struct filename *filename;
	int flags = op->lookup_flags | LOOKUP_ROOT;

	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);

	filename = getname_kernel(name);
	if (IS_ERR(filename))
		return ERR_CAST(filename);

	/* Same RCU -> ref-walk -> REVAL retry ladder as do_filp_open(). */
	set_nameidata(&nd, -1, filename);
	file = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(&nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	putname(filename);
	return file;
}
3652
/*
 * Common helper for the *_create() family: resolve the parent of the
 * last component, take mnt write access and the parent's i_mutex, and
 * return a negative (not-yet-existing) dentry for the last component.
 *
 * On success the caller owns the dentry reference, the parent lock and
 * the mnt write count, and must release all three with
 * done_path_create().  On failure everything is unwound here and an
 * ERR_PTR() is returned.
 */
static struct dentry *filename_create(int dfd, struct filename *name,
				struct path *path, unsigned int lookup_flags)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);
	struct qstr last;
	int type;
	int err2;
	int error;
	bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);

	/*
	 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
	 * other flags passed in are ignored!
	 */
	lookup_flags &= LOOKUP_REVAL;

	name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
	if (IS_ERR(name))
		return ERR_CAST(name);

	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (unlikely(type != LAST_NORM))
		goto out;

	/* don't fail immediately if it's r/o, at least try to report other errors */
	err2 = mnt_want_write(path->mnt);
	/*
	 * Do the final lookup.
	 */
	lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path->dentry, lookup_flags);
	if (IS_ERR(dentry))
		goto unlock;

	error = -EEXIST;
	if (d_is_positive(dentry))
		goto fail;

	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && last.name[last.len])) {
		error = -ENOENT;
		goto fail;
	}
	/* Now report the deferred r/o filesystem error, if any. */
	if (unlikely(err2)) {
		error = err2;
		goto fail;
	}
	putname(name);
	return dentry;
fail:
	dput(dentry);
	dentry = ERR_PTR(error);
unlock:
	inode_unlock(path->dentry->d_inode);
	if (!err2)
		mnt_drop_write(path->mnt);
out:
	path_put(path);
	putname(name);
	return dentry;
}
3723
/*
 * filename_create() wrapper for kernel-space pathnames; pair with
 * done_path_create() on success.
 */
struct dentry *kern_path_create(int dfd, const char *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname_kernel(pathname),
				path, lookup_flags);
}
EXPORT_SYMBOL(kern_path_create);
3731
/*
 * Undo kern_path_create()/user_path_create(): drop the child dentry,
 * unlock the parent, release write access and the path reference -
 * strictly in that order.
 */
void done_path_create(struct path *path, struct dentry *dentry)
{
	dput(dentry);
	inode_unlock(path->dentry->d_inode);
	mnt_drop_write(path->mnt);
	path_put(path);
}
EXPORT_SYMBOL(done_path_create);
3740
/*
 * filename_create() wrapper for user-space pathnames; pair with
 * done_path_create() on success.
 */
inline struct dentry *user_path_create(int dfd, const char __user *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname(pathname), path, lookup_flags);
}
EXPORT_SYMBOL(user_path_create);
3747
/*
 * Create a filesystem node (device, fifo, socket or regular file) in
 * @dir.  Device nodes additionally require CAP_MKNOD and must pass the
 * device cgroup check.  Caller must hold @dir's i_mutex.
 */
int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	int error = may_create(mnt, dir, dentry);

	if (error)
		return error;

	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
		return -EPERM;

	if (!dir->i_op->mknod)
		return -EPERM;

	error = devcgroup_inode_mknod(mode, dev);
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mknod2);
3775
/* vfs_mknod2() without a vfsmount (no mount-based permission checks). */
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	return vfs_mknod2(NULL, dir, dentry, mode, dev);
}
EXPORT_SYMBOL(vfs_mknod);
3781
3782static int may_mknod(umode_t mode)
3783{
3784 switch (mode & S_IFMT) {
3785 case S_IFREG:
3786 case S_IFCHR:
3787 case S_IFBLK:
3788 case S_IFIFO:
3789 case S_IFSOCK:
3790 case 0: /* zero mode translates to S_IFREG */
3791 return 0;
3792 case S_IFDIR:
3793 return -EPERM;
3794 default:
3795 return -EINVAL;
3796 }
3797}
3798
/*
 * mknodat(2): create a node of the requested type at @filename,
 * retrying the whole lookup with LOOKUP_REVAL on -ESTALE.
 */
SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = 0;

	error = may_mknod(mode);
	if (error)
		return error;
retry:
	dentry = user_path_create(dfd, filename, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Without POSIX ACLs the umask is applied here rather than by the fs. */
	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mknod(&path, dentry, mode, dev);
	if (error)
		goto out;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create2(path.mnt, path.dentry->d_inode,dentry,mode,true);
			if (!error)
				ima_post_path_mknod(dentry);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod2(path.mnt, path.dentry->d_inode,dentry,mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
			break;
	}
out:
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
3842
/* mknod(2): mknodat() relative to the current working directory. */
SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}
3847
/*
 * Create a directory in @dir.  Enforces the superblock's s_max_links
 * limit on the parent's link count.  Caller must hold @dir's i_mutex.
 */
int vfs_mkdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error = may_create(mnt, dir, dentry);
	unsigned max_links = dir->i_sb->s_max_links;

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	/* Only permission and sticky bits are meaningful for a directory. */
	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	if (max_links && dir->i_nlink >= max_links)
		return -EMLINK;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mkdir2);
3873
/* vfs_mkdir2() without a vfsmount (no mount-based permission checks). */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	return vfs_mkdir2(NULL, dir, dentry, mode);
}
EXPORT_SYMBOL(vfs_mkdir);
3879
/*
 * mkdirat(2): create a directory at @pathname, retrying with
 * LOOKUP_REVAL on -ESTALE.
 */
SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = LOOKUP_DIRECTORY;

retry:
	dentry = user_path_create(dfd, pathname, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Without POSIX ACLs the umask is applied here rather than by the fs. */
	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mkdir(&path, dentry, mode);
	if (!error)
		error = vfs_mkdir2(path.mnt, path.dentry->d_inode, dentry, mode);
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
3904
/* mkdir(2): mkdirat() relative to the current working directory. */
SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}
3909
/*
 * Remove the directory @dentry from @dir.  Takes the victim's i_mutex,
 * refuses if the directory is a local mountpoint, and on success marks
 * the inode S_DEAD and detaches any mounts under it.  Caller must hold
 * @dir's i_mutex.
 */
int vfs_rmdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(mnt, dir, dentry, 1);

	if (error)
		return error;

	if (!dir->i_op->rmdir)
		return -EPERM;

	dget(dentry);
	inode_lock(dentry->d_inode);

	error = -EBUSY;
	if (is_local_mountpoint(dentry))
		goto out;

	error = security_inode_rmdir(dir, dentry);
	if (error)
		goto out;

	/* Evict cached children before asking the fs to remove the dir. */
	shrink_dcache_parent(dentry);
	error = dir->i_op->rmdir(dir, dentry);
	if (error)
		goto out;

	dentry->d_inode->i_flags |= S_DEAD;
	dont_mount(dentry);
	detach_mounts(dentry);

out:
	inode_unlock(dentry->d_inode);
	dput(dentry);
	/* d_delete() only after dropping the victim's lock. */
	if (!error)
		d_delete(dentry);
	return error;
}
EXPORT_SYMBOL(vfs_rmdir2);
3948
/* vfs_rmdir2() without a vfsmount (no mount-based permission checks). */
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	return vfs_rmdir2(NULL, dir, dentry);
}
EXPORT_SYMBOL(vfs_rmdir);
3954
/*
 * Implementation of rmdir(2)/unlinkat(AT_REMOVEDIR): resolve the
 * parent, lock it, look up the last component and hand it to
 * vfs_rmdir2().  Retries the lookup with LOOKUP_REVAL on -ESTALE.
 */
static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	struct filename *name;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	unsigned int lookup_flags = 0;
retry:
	name = user_path_parent(dfd, pathname,
				&path, &last, &type, lookup_flags);
	if (IS_ERR(name))
		return PTR_ERR(name);

	/* "..", "." and "/" cannot be removed; pick POSIX errnos for each. */
	switch (type) {
	case LAST_DOTDOT:
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:
		error = -EBUSY;
		goto exit1;
	}

	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;

	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		error = -ENOENT;
		goto exit3;
	}
	error = security_path_rmdir(&path, dentry);
	if (error)
		goto exit3;
	error = vfs_rmdir2(path.mnt, path.dentry->d_inode, dentry);
exit3:
	dput(dentry);
exit2:
	inode_unlock(path.dentry->d_inode);
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
4013
/* rmdir(2): do_rmdir() relative to the current working directory. */
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}
4018
4019/**
4020 * vfs_unlink - unlink a filesystem object
4021 * @dir: parent directory
4022 * @dentry: victim
4023 * @delegated_inode: returns victim inode, if the inode is delegated.
4024 *
4025 * The caller must hold dir->i_mutex.
4026 *
4027 * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
4028 * return a reference to the inode in delegated_inode. The caller
4029 * should then break the delegation on that inode and retry. Because
4030 * breaking a delegation may take a long time, the caller should drop
4031 * dir->i_mutex before doing so.
4032 *
4033 * Alternatively, a caller may pass NULL for delegated_inode. This may
4034 * be appropriate for callers that expect the underlying filesystem not
4035 * to be NFS exported.
4036 */
int vfs_unlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
{
	struct inode *target = dentry->d_inode;
	int error = may_delete(mnt, dir, dentry, 0);

	if (error)
		return error;

	if (!dir->i_op->unlink)
		return -EPERM;

	inode_lock(target);
	if (is_local_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
			/* NFS delegation in the way? Report it to the caller. */
			error = try_break_deleg(target, delegated_inode);
			if (error)
				goto out;
			error = dir->i_op->unlink(dir, dentry);
			if (!error) {
				dont_mount(dentry);
				detach_mounts(dentry);
			}
		}
	}
out:
	inode_unlock(target);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(target);
		d_delete(dentry);
	}

	return error;
}
EXPORT_SYMBOL(vfs_unlink2);
4076
/* vfs_unlink2() without a vfsmount (no mount-based permission checks). */
int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
{
	return vfs_unlink2(NULL, dir, dentry, delegated_inode);
}
EXPORT_SYMBOL(vfs_unlink);
4082
4083/*
4084 * Make sure that the actual truncation of the file will occur outside its
4085 * directory's i_mutex. Truncate can take a long time if there is a lot of
4086 * writeout happening, and we don't want to prevent access to the directory
4087 * while waiting on the I/O.
4088 */
/*
 * Implementation of unlink(2)/unlinkat(2): resolve the parent, lock it,
 * look up and unlink the last component.  An extra inode reference is
 * held across the unlink so the final iput() - and any truncation it
 * triggers - happens after the parent's i_mutex is dropped.  Retries
 * on broken NFS delegations and on -ESTALE.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error;
	struct filename *name;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	struct inode *inode = NULL;
	struct inode *delegated_inode = NULL;
	unsigned int lookup_flags = 0;
retry:
	name = user_path_parent(dfd, pathname,
				&path, &last, &type, lookup_flags);
	if (IS_ERR(name))
		return PTR_ERR(name);

	error = -EISDIR;
	if (type != LAST_NORM)
		goto exit1;

	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;
retry_deleg:
	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want correct error value */
		if (last.name[last.len])
			goto slashes;
		inode = dentry->d_inode;
		if (d_is_negative(dentry))
			goto slashes;
		ihold(inode);
		error = security_path_unlink(&path, dentry);
		if (error)
			goto exit2;
		error = vfs_unlink2(path.mnt, path.dentry->d_inode, dentry, &delegated_inode);
exit2:
		dput(dentry);
	}
	inode_unlock(path.dentry->d_inode);
	if (inode)
		iput(inode);	/* truncate the inode here */
	inode = NULL;
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		inode = NULL;
		goto retry;
	}
	return error;

slashes:
	/* Trailing slash or negative dentry: pick the matching errno. */
	if (d_is_negative(dentry))
		error = -ENOENT;
	else if (d_is_dir(dentry))
		error = -EISDIR;
	else
		error = -ENOTDIR;
	goto exit2;
}
4161
4162SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
4163{
4164 if ((flag & ~AT_REMOVEDIR) != 0)
4165 return -EINVAL;
4166
4167 if (flag & AT_REMOVEDIR)
4168 return do_rmdir(dfd, pathname);
4169
4170 return do_unlinkat(dfd, pathname);
4171}
4172
/* unlink(2): do_unlinkat() relative to the current working directory. */
SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
	return do_unlinkat(AT_FDCWD, pathname);
}
4177
/*
 * Create a symlink named @dentry in @dir pointing at @oldname.
 * Caller must hold @dir's i_mutex.
 */
int vfs_symlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, const char *oldname)
{
	int error = may_create(mnt, dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->symlink)
		return -EPERM;

	error = security_inode_symlink(dir, dentry, oldname);
	if (error)
		return error;

	error = dir->i_op->symlink(dir, dentry, oldname);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_symlink2);
4198
/* vfs_symlink2() without a vfsmount (no mount-based permission checks). */
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
	return vfs_symlink2(NULL, dir, dentry, oldname);
}
EXPORT_SYMBOL(vfs_symlink);
4204
/*
 * symlinkat(2): create a symlink at @newname containing @oldname,
 * retrying with LOOKUP_REVAL on -ESTALE.  @oldname is an opaque
 * string and is not resolved.
 */
SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	int error;
	struct filename *from;
	struct dentry *dentry;
	struct path path;
	unsigned int lookup_flags = 0;

	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);
retry:
	dentry = user_path_create(newdfd, newname, &path, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_putname;

	error = security_path_symlink(&path, dentry, from->name);
	if (!error)
		error = vfs_symlink2(path.mnt, path.dentry->d_inode, dentry, from->name);
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out_putname:
	putname(from);
	return error;
}
4235
/* symlink(2): symlinkat() relative to the current working directory. */
SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
	return sys_symlinkat(oldname, AT_FDCWD, newname);
}
4240
4241/**
4242 * vfs_link - create a new link
4243 * @old_dentry: object to be linked
4244 * @dir: new parent
4245 * @new_dentry: where to create the new link
4246 * @delegated_inode: returns inode needing a delegation break
4247 *
4248 * The caller must hold dir->i_mutex
4249 *
4250 * If vfs_link discovers a delegation on the to-be-linked file in need
4251 * of breaking, it will return -EWOULDBLOCK and return a reference to the
4252 * inode in delegated_inode. The caller should then break the delegation
4253 * and retry. Because breaking a delegation may take a long time, the
4254 * caller should drop the i_mutex before doing so.
4255 *
4256 * Alternatively, a caller may pass NULL for delegated_inode. This may
4257 * be appropriate for callers that expect the underlying filesystem not
4258 * to be NFS exported.
4259 */
int vfs_link2(struct vfsmount *mnt, struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
{
	struct inode *inode = old_dentry->d_inode;
	unsigned max_links = dir->i_sb->s_max_links;
	int error;

	if (!inode)
		return -ENOENT;

	error = may_create(mnt, dir, new_dentry);
	if (error)
		return error;

	/* Hard links cannot cross filesystems. */
	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A link to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;
	/*
	 * Updating the link count will likely cause i_uid and i_gid to
	 * be written back improperly if their true value is unknown to
	 * the vfs.
	 */
	if (HAS_UNMAPPED_ID(inode))
		return -EPERM;
	if (!dir->i_op->link)
		return -EPERM;
	if (S_ISDIR(inode->i_mode))
		return -EPERM;

	error = security_inode_link(old_dentry, dir, new_dentry);
	if (error)
		return error;

	inode_lock(inode);
	/* Make sure we don't allow creating hardlink to an unlinked file */
	if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
		error = -ENOENT;
	else if (max_links && inode->i_nlink >= max_links)
		error = -EMLINK;
	else {
		error = try_break_deleg(inode, delegated_inode);
		if (!error)
			error = dir->i_op->link(old_dentry, dir, new_dentry);
	}

	/* An O_TMPFILE inode gains a name here; clear its linkable state. */
	if (!error && (inode->i_state & I_LINKABLE)) {
		spin_lock(&inode->i_lock);
		inode->i_state &= ~I_LINKABLE;
		spin_unlock(&inode->i_lock);
	}
	inode_unlock(inode);
	if (!error)
		fsnotify_link(dir, inode, new_dentry);
	return error;
}
EXPORT_SYMBOL(vfs_link2);
4320
/* vfs_link2() without a vfsmount (no mount-based permission checks). */
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
{
	return vfs_link2(NULL, old_dentry, dir, new_dentry, delegated_inode);
}
EXPORT_SYMBOL(vfs_link);
4326
4327/*
4328 * Hardlinks are often used in delicate situations. We avoid
4329 * security-related surprises by not following symlinks on the
4330 * newname. --KAB
4331 *
4332 * We don't follow them on the oldname either to be compatible
4333 * with linux 2.0, and to avoid hard-linking to directories
4334 * and other special files. --ADM
4335 */
/*
 * linkat(2): create a hard link to @oldname at @newname.  Retries on
 * broken NFS delegations and with LOOKUP_REVAL on -ESTALE.  Both paths
 * must be on the same mount (-EXDEV otherwise).
 */
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, int, flags)
{
	struct dentry *new_dentry;
	struct path old_path, new_path;
	struct inode *delegated_inode = NULL;
	int how = 0;
	int error;

	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;
	/*
	 * To use null names we require CAP_DAC_READ_SEARCH
	 * This ensures that not everyone will be able to create
	 * a hard link using the passed file descriptor.
	 */
	if (flags & AT_EMPTY_PATH) {
		if (!capable(CAP_DAC_READ_SEARCH))
			return -ENOENT;
		how = LOOKUP_EMPTY;
	}

	if (flags & AT_SYMLINK_FOLLOW)
		how |= LOOKUP_FOLLOW;
retry:
	error = user_path_at(olddfd, oldname, how, &old_path);
	if (error)
		return error;

	new_dentry = user_path_create(newdfd, newname, &new_path,
					(how & LOOKUP_REVAL));
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto out;

	error = -EXDEV;
	if (old_path.mnt != new_path.mnt)
		goto out_dput;
	error = may_linkat(&old_path);
	if (unlikely(error))
		goto out_dput;
	error = security_path_link(old_path.dentry, &new_path, new_dentry);
	if (error)
		goto out_dput;
	error = vfs_link2(old_path.mnt, old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
out_dput:
	done_path_create(&new_path, new_dentry);
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error) {
			path_put(&old_path);
			goto retry;
		}
	}
	if (retry_estale(error, how)) {
		path_put(&old_path);
		how |= LOOKUP_REVAL;
		goto retry;
	}
out:
	path_put(&old_path);

	return error;
}
4400
/* link(2): linkat() with both paths relative to the cwd, no flags. */
SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
	return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
4405
4406/**
4407 * vfs_rename - rename a filesystem object
4408 * @old_dir: parent of source
4409 * @old_dentry: source
4410 * @new_dir: parent of destination
4411 * @new_dentry: destination
4412 * @delegated_inode: returns an inode needing a delegation break
4413 * @flags: rename flags
4414 *
4415 * The caller must hold multiple mutexes--see lock_rename()).
4416 *
4417 * If vfs_rename discovers a delegation in need of breaking at either
4418 * the source or destination, it will return -EWOULDBLOCK and return a
4419 * reference to the inode in delegated_inode. The caller should then
4420 * break the delegation and retry. Because breaking a delegation may
4421 * take a long time, the caller should drop all locks before doing
4422 * so.
4423 *
4424 * Alternatively, a caller may pass NULL for delegated_inode. This may
4425 * be appropriate for callers that expect the underlying filesystem not
4426 * to be NFS exported.
4427 *
4428 * The worst of all namespace operations - renaming directory. "Perverted"
4429 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
4430 * Problems:
4431 * a) we can get into loop creation.
4432 * b) race potential - two innocent renames can create a loop together.
4433 * That's where 4.4 screws up. Current fix: serialization on
4434 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
4435 * story.
4436 * c) we have to lock _four_ objects - parents and victim (if it exists),
4437 * and source (if it is not a directory).
4438 * And that - after we got ->i_mutex on parents (until then we don't know
4439 * whether the target exists). Solution: try to be smart with locking
4440 * order for inodes. We rely on the fact that tree topology may change
4441 * only under ->s_vfs_rename_mutex _and_ that parent of the object we
4442 * move will be locked. Thus we can rank directories by the tree
4443 * (ancestors first) and rank all non-directories after them.
4444 * That works since everybody except rename does "lock parent, lookup,
4445 * lock child" and rename is under ->s_vfs_rename_mutex.
4446 * HOWEVER, it relies on the assumption that any object with ->lookup()
4447 * has no more than 1 dentry. If "hybrid" objects will ever appear,
4448 * we'd better make sure that there's no link(2) for them.
4449 * d) conversion from fhandle to dentry may come in the wrong moment - when
4450 * we are removing the target. Solution: we will have to grab ->i_mutex
4451 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
4452 * ->i_mutex on parents, which works but leads to some truly excessive
4453 * locking].
4454 */
4455int vfs_rename2(struct vfsmount *mnt,
4456 struct inode *old_dir, struct dentry *old_dentry,
4457 struct inode *new_dir, struct dentry *new_dentry,
4458 struct inode **delegated_inode, unsigned int flags)
4459{
4460 int error;
4461 bool is_dir = d_is_dir(old_dentry);
4462 struct inode *source = old_dentry->d_inode;
4463 struct inode *target = new_dentry->d_inode;
4464 bool new_is_dir = false;
4465 unsigned max_links = new_dir->i_sb->s_max_links;
4466 struct name_snapshot old_name;
4467
4468 /*
4469 * Check source == target.
4470 * On overlayfs need to look at underlying inodes.
4471 */
4472 if (d_real_inode(old_dentry) == d_real_inode(new_dentry))
4473 return 0;
4474
4475 error = may_delete(mnt, old_dir, old_dentry, is_dir);
4476 if (error)
4477 return error;
4478
4479 if (!target) {
4480 error = may_create(mnt, new_dir, new_dentry);
4481 } else {
4482 new_is_dir = d_is_dir(new_dentry);
4483
4484 if (!(flags & RENAME_EXCHANGE))
4485 error = may_delete(mnt, new_dir, new_dentry, is_dir);
4486 else
4487 error = may_delete(mnt, new_dir, new_dentry, new_is_dir);
4488 }
4489 if (error)
4490 return error;
4491
4492 if (!old_dir->i_op->rename)
4493 return -EPERM;
4494
4495 /*
4496 * If we are going to change the parent - check write permissions,
4497 * we'll need to flip '..'.
4498 */
4499 if (new_dir != old_dir) {
4500 if (is_dir) {
4501 error = inode_permission2(mnt, source, MAY_WRITE);
4502 if (error)
4503 return error;
4504 }
4505 if ((flags & RENAME_EXCHANGE) && new_is_dir) {
4506 error = inode_permission2(mnt, target, MAY_WRITE);
4507 if (error)
4508 return error;
4509 }
4510 }
4511
4512 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
4513 flags);
4514 if (error)
4515 return error;
4516
4517 take_dentry_name_snapshot(&old_name, old_dentry);
4518 dget(new_dentry);
4519 if (!is_dir || (flags & RENAME_EXCHANGE))
4520 lock_two_nondirectories(source, target);
4521 else if (target)
4522 inode_lock(target);
4523
4524 error = -EBUSY;
4525 if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
4526 goto out;
4527
4528 if (max_links && new_dir != old_dir) {
4529 error = -EMLINK;
4530 if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
4531 goto out;
4532 if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
4533 old_dir->i_nlink >= max_links)
4534 goto out;
4535 }
4536 if (is_dir && !(flags & RENAME_EXCHANGE) && target)
4537 shrink_dcache_parent(new_dentry);
4538 if (!is_dir) {
4539 error = try_break_deleg(source, delegated_inode);
4540 if (error)
4541 goto out;
4542 }
4543 if (target && !new_is_dir) {
4544 error = try_break_deleg(target, delegated_inode);
4545 if (error)
4546 goto out;
4547 }
4548 error = old_dir->i_op->rename(old_dir, old_dentry,
4549 new_dir, new_dentry, flags);
4550 if (error)
4551 goto out;
4552
4553 if (!(flags & RENAME_EXCHANGE) && target) {
4554 if (is_dir)
4555 target->i_flags |= S_DEAD;
4556 dont_mount(new_dentry);
4557 detach_mounts(new_dentry);
4558 }
4559 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
4560 if (!(flags & RENAME_EXCHANGE))
4561 d_move(old_dentry, new_dentry);
4562 else
4563 d_exchange(old_dentry, new_dentry);
4564 }
4565out:
4566 if (!is_dir || (flags & RENAME_EXCHANGE))
4567 unlock_two_nondirectories(source, target);
4568 else if (target)
4569 inode_unlock(target);
4570 dput(new_dentry);
4571 if (!error) {
4572 fsnotify_move(old_dir, new_dir, old_name.name, is_dir,
4573 !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
4574 if (flags & RENAME_EXCHANGE) {
4575 fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
4576 new_is_dir, NULL, new_dentry);
4577 }
4578 }
4579 release_dentry_name_snapshot(&old_name);
4580
4581 return error;
4582}
4583EXPORT_SYMBOL(vfs_rename2);
4584
/*
 * vfs_rename - compatibility wrapper around vfs_rename2() that passes a
 * NULL vfsmount to the mount-aware permission helpers.
 */
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	       struct inode *new_dir, struct dentry *new_dentry,
	       struct inode **delegated_inode, unsigned int flags)
{
	return vfs_rename2(NULL, old_dir, old_dentry, new_dir, new_dentry, delegated_inode, flags);
}
EXPORT_SYMBOL(vfs_rename);
4592
/*
 * renameat2(2): rename oldname (relative to olddfd) to newname (relative to
 * newdfd).  Validates the RENAME_* flags, looks up both parents and children,
 * performs the loop/trailing-slash sanity checks, and calls vfs_rename2().
 * Handles delegation-break retries and ESTALE revalidation retries.
 */
SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, unsigned int, flags)
{
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_path, new_path;
	struct qstr old_last, new_last;
	int old_type, new_type;
	struct inode *delegated_inode = NULL;
	struct filename *from;
	struct filename *to;
	unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
	bool should_retry = false;
	int error;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	/* RENAME_EXCHANGE is mutually exclusive with NOREPLACE/WHITEOUT. */
	if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
	    (flags & RENAME_EXCHANGE))
		return -EINVAL;

	/* A whiteout is a device node, so creating one requires CAP_MKNOD. */
	if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD))
		return -EPERM;

	/* For an exchange the target may legitimately exist already. */
	if (flags & RENAME_EXCHANGE)
		target_flags = 0;

retry:
	from = user_path_parent(olddfd, oldname,
				&old_path, &old_last, &old_type, lookup_flags);
	if (IS_ERR(from)) {
		error = PTR_ERR(from);
		goto exit;
	}

	to = user_path_parent(newdfd, newname,
				&new_path, &new_last, &new_type, lookup_flags);
	if (IS_ERR(to)) {
		error = PTR_ERR(to);
		goto exit1;
	}

	/* Renames never cross mounts. */
	error = -EXDEV;
	if (old_path.mnt != new_path.mnt)
		goto exit2;

	/* Last component must be a normal name: "." and ".." are -EBUSY. */
	error = -EBUSY;
	if (old_type != LAST_NORM)
		goto exit2;

	/* Same for the target; with NOREPLACE the error becomes -EEXIST. */
	if (flags & RENAME_NOREPLACE)
		error = -EEXIST;
	if (new_type != LAST_NORM)
		goto exit2;

	error = mnt_want_write(old_path.mnt);
	if (error)
		goto exit2;

retry_deleg:
	/* Lock both parents; trap is their common ancestor (if any), used
	 * below to reject renames that would create a directory loop. */
	trap = lock_rename(new_path.dentry, old_path.dentry);

	old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags);
	error = PTR_ERR(old_dentry);
	if (IS_ERR(old_dentry))
		goto exit3;
	/* source must exist */
	error = -ENOENT;
	if (d_is_negative(old_dentry))
		goto exit4;
	new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto exit4;
	error = -EEXIST;
	if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
		goto exit5;
	if (flags & RENAME_EXCHANGE) {
		/* Both ends of an exchange must exist. */
		error = -ENOENT;
		if (d_is_negative(new_dentry))
			goto exit5;

		/* Trailing slash on a non-directory target is -ENOTDIR. */
		if (!d_is_dir(new_dentry)) {
			error = -ENOTDIR;
			if (new_last.name[new_last.len])
				goto exit5;
		}
	}
	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!d_is_dir(old_dentry)) {
		error = -ENOTDIR;
		if (old_last.name[old_last.len])
			goto exit5;
		if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
			goto exit5;
	}
	/* source should not be ancestor of target */
	error = -EINVAL;
	if (old_dentry == trap)
		goto exit5;
	/* target should not be an ancestor of source */
	if (!(flags & RENAME_EXCHANGE))
		error = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit5;

	error = security_path_rename(&old_path, old_dentry,
				     &new_path, new_dentry, flags);
	if (error)
		goto exit5;
	error = vfs_rename2(old_path.mnt, old_path.dentry->d_inode, old_dentry,
			   new_path.dentry->d_inode, new_dentry,
			   &delegated_inode, flags);
exit5:
	dput(new_dentry);
exit4:
	dput(old_dentry);
exit3:
	unlock_rename(new_path.dentry, old_path.dentry);
	/* A pending delegation was found: break it and redo the rename. */
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	mnt_drop_write(old_path.mnt);
exit2:
	/* Stale fhandle: retry the whole lookup with LOOKUP_REVAL. */
	if (retry_estale(error, lookup_flags))
		should_retry = true;
	path_put(&new_path);
	putname(to);
exit1:
	path_put(&old_path);
	putname(from);
	if (should_retry) {
		should_retry = false;
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
exit:
	return error;
}
4735
/* renameat(2): renameat2() with no flags. */
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	return sys_renameat2(olddfd, oldname, newdfd, newname, 0);
}
4741
/* rename(2): both paths relative to the current working directory, no flags. */
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
	return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
4746
4747int vfs_whiteout(struct inode *dir, struct dentry *dentry)
4748{
4749 int error = may_create(NULL, dir, dentry);
4750 if (error)
4751 return error;
4752
4753 if (!dir->i_op->mknod)
4754 return -EPERM;
4755
4756 return dir->i_op->mknod(dir, dentry,
4757 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
4758}
4759EXPORT_SYMBOL(vfs_whiteout);
4760
4761int readlink_copy(char __user *buffer, int buflen, const char *link)
4762{
4763 int len = PTR_ERR(link);
4764 if (IS_ERR(link))
4765 goto out;
4766
4767 len = strlen(link);
4768 if (len > (unsigned) buflen)
4769 len = buflen;
4770 if (copy_to_user(buffer, link, len))
4771 len = -EFAULT;
4772out:
4773 return len;
4774}
4775
4776/*
4777 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
4778 * have ->get_link() not calling nd_jump_link(). Using (or not using) it
4779 * for any given inode is up to filesystem.
4780 */
4781int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4782{
4783 DEFINE_DELAYED_CALL(done);
4784 struct inode *inode = d_inode(dentry);
4785 const char *link = inode->i_link;
4786 int res;
4787
4788 if (!link) {
4789 link = inode->i_op->get_link(dentry, inode, &done);
4790 if (IS_ERR(link))
4791 return PTR_ERR(link);
4792 }
4793 res = readlink_copy(buffer, buflen, link);
4794 do_delayed_call(&done);
4795 return res;
4796}
4797EXPORT_SYMBOL(generic_readlink);
4798
4799/**
4800 * vfs_get_link - get symlink body
4801 * @dentry: dentry on which to get symbolic link
4802 * @done: caller needs to free returned data with this
4803 *
4804 * Calls security hook and i_op->get_link() on the supplied inode.
4805 *
4806 * It does not touch atime. That's up to the caller if necessary.
4807 *
4808 * Does not work on "special" symlinks like /proc/$$/fd/N
4809 */
4810const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
4811{
4812 const char *res = ERR_PTR(-EINVAL);
4813 struct inode *inode = d_inode(dentry);
4814
4815 if (d_is_symlink(dentry)) {
4816 res = ERR_PTR(security_inode_readlink(dentry));
4817 if (!res)
4818 res = inode->i_op->get_link(dentry, inode, done);
4819 }
4820 return res;
4821}
4822EXPORT_SYMBOL(vfs_get_link);
4823
4824/* get the link contents into pagecache */
4825const char *page_get_link(struct dentry *dentry, struct inode *inode,
4826 struct delayed_call *callback)
4827{
4828 char *kaddr;
4829 struct page *page;
4830 struct address_space *mapping = inode->i_mapping;
4831
4832 if (!dentry) {
4833 page = find_get_page(mapping, 0);
4834 if (!page)
4835 return ERR_PTR(-ECHILD);
4836 if (!PageUptodate(page)) {
4837 put_page(page);
4838 return ERR_PTR(-ECHILD);
4839 }
4840 } else {
4841 page = read_mapping_page(mapping, 0, NULL);
4842 if (IS_ERR(page))
4843 return (char*)page;
4844 }
4845 set_delayed_call(callback, page_put_link, page);
4846 BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
4847 kaddr = page_address(page);
4848 nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
4849 return kaddr;
4850}
4851
4852EXPORT_SYMBOL(page_get_link);
4853
/* Delayed-call callback: drop the page reference taken by page_get_link(). */
void page_put_link(void *arg)
{
	struct page *page = arg;

	put_page(page);
}
EXPORT_SYMBOL(page_put_link);
4859
4860int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4861{
4862 DEFINE_DELAYED_CALL(done);
4863 int res = readlink_copy(buffer, buflen,
4864 page_get_link(dentry, d_inode(dentry),
4865 &done));
4866 do_delayed_call(&done);
4867 return res;
4868}
4869EXPORT_SYMBOL(page_readlink);
4870
4871/*
4872 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
4873 */
4874int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
4875{
4876 struct address_space *mapping = inode->i_mapping;
4877 struct page *page;
4878 void *fsdata;
4879 int err;
4880 unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
4881 if (nofs)
4882 flags |= AOP_FLAG_NOFS;
4883
4884retry:
4885 err = pagecache_write_begin(NULL, mapping, 0, len-1,
4886 flags, &page, &fsdata);
4887 if (err)
4888 goto fail;
4889
4890 memcpy(page_address(page), symname, len-1);
4891
4892 err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
4893 page, fsdata);
4894 if (err < 0)
4895 goto fail;
4896 if (err < len-1)
4897 goto retry;
4898
4899 mark_inode_dirty(inode);
4900 return 0;
4901fail:
4902 return err;
4903}
4904EXPORT_SYMBOL(__page_symlink);
4905
4906int page_symlink(struct inode *inode, const char *symname, int len)
4907{
4908 return __page_symlink(inode, symname, len,
4909 !mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
4910}
4911EXPORT_SYMBOL(page_symlink);
4912
/* Default inode operations for symlinks whose body lives in the page cache. */
const struct inode_operations page_symlink_inode_operations = {
	.readlink = generic_readlink,
	.get_link = page_get_link,
};
EXPORT_SYMBOL(page_symlink_inode_operations);
4918