From 700920eb5ba4de5417b446c9a8bb008df2b973e0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 18 Jan 2012 15:31:45 +0000 Subject: KEYS: Allow special keyrings to be cleared The kernel contains some special internal keyrings, for instance the DNS resolver keyring : 2a93faf1 I----- 1 perm 1f030000 0 0 keyring .dns_resolver: empty It would occasionally be useful to allow the contents of such keyrings to be flushed by root (cache invalidation). Allow a flag to be set on a keyring to mark that someone possessing the sysadmin capability can clear the keyring, even without normal write access to the keyring. Set this flag on the special keyrings created by the DNS resolver, the NFS identity mapper and the CIFS identity mapper. Signed-off-by: David Howells Acked-by: Jeff Layton Acked-by: Steve Dickson Signed-off-by: James Morris --- fs/cifs/cifsacl.c | 1 + fs/nfs/idmap.c | 1 + 2 files changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 72ddf23ef6f7..854749d21bb1 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -556,6 +556,7 @@ init_cifs_idmap(void) /* instruct request_key() to use this special keyring as a cache for * the results it looks up */ + set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; root_cred = cred; diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 2c05f1991e1e..a1bbf7780dfc 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -198,6 +198,7 @@ int nfs_idmap_init(void) if (ret < 0) goto failed_put_key; + set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; id_resolver_cache = cred; -- cgit From 3167760f83899ccda312b9ad9306ec9e5dda06d4 Mon Sep 17 00:00:00 2001 From: Dan Magenheimer Date: Wed, 21 Sep 2011 11:56:28 -0400 Subject: mm: cleancache: s/flush/invalidate/ Per akpm suggestions alter the use of the term flush to be invalidate. The next patch will do this across all MM. This change is completely cosmetic. [v9: akpm@linux-foundation.org: change "flush" to "invalidate", part 3] Signed-off-by: Dan Magenheimer Cc: Kamezawa Hiroyuki Cc: Jan Beulich Reviewed-by: Seth Jennings Cc: Jeremy Fitzhardinge Cc: Hugh Dickins Cc: Johannes Weiner Cc: Nitin Gupta Cc: Matthew Wilcox Cc: Chris Mason Cc: Rik Riel Cc: Andrew Morton [v10: Fixed fs: move code out of buffer.c conflict change] Signed-off-by: Konrad Rzeszutek Wilk --- fs/block_dev.c | 2 +- fs/super.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 69a5b6fbee2b..d6d5f29463cd 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -110,7 +110,7 @@ void invalidate_bdev(struct block_device *bdev) /* 99% of the time, we don't need to flush the cleancache on the bdev. * But, for the strange corners, lets be cautious */ - cleancache_flush_inode(mapping); + cleancache_invalidate_inode(mapping); } EXPORT_SYMBOL(invalidate_bdev); diff --git a/fs/super.c b/fs/super.c index de41e1e46f09..e5d9765ff5f4 100644 --- a/fs/super.c +++ b/fs/super.c @@ -250,7 +250,7 @@ void deactivate_locked_super(struct super_block *s) { struct file_system_type *fs = s->s_type; if (atomic_dec_and_test(&s->s_active)) { - cleancache_flush_fs(s); + cleancache_invalidate_fs(s); fs->kill_sb(s); /* caches are now gone, we can safely kill the shrinker now */ -- cgit From 4e4d6d860b9393c5395ba5920edb5b4c5d43a3a3 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Sun, 18 Dec 2011 20:05:43 -0800 Subject: sysfs: Add s_hash to sysfs_dirent and order directory entries by hash Compute a 31 bit hash of directory entries (that can fit in a signed 32bit off_t) and index the sysfs directory entries by that hash, replacing the per directory indexes by name and by inode. Because we now only use a single rbtree this reduces the size of sysfs_dirent by 2 pointers. Because we have fewer cases to deal with the code is now simpler. For now I use the simple hash that the dcache uses as that is easy to use and seems simple enough. In addition to makeing the code simpler using a hash for the file position in readdir brings sysfs in line with other filesystems that have non-trivial directory structures. Signed-off-by: Eric W. Biederman Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/dir.c | 219 +++++++++++++++++++++++++++++-------------------------- fs/sysfs/sysfs.h | 9 +-- 2 files changed, 120 insertions(+), 108 deletions(-) (limited to 'fs') diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 7fdf6a7b7436..0daf255b7bf9 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -22,76 +22,103 @@ #include #include #include +#include #include "sysfs.h" DEFINE_MUTEX(sysfs_mutex); DEFINE_SPINLOCK(sysfs_assoc_lock); +#define to_sysfs_dirent(X) rb_entry((X), struct sysfs_dirent, s_rb); + static DEFINE_SPINLOCK(sysfs_ino_lock); static DEFINE_IDA(sysfs_ino_ida); /** - * sysfs_link_sibling - link sysfs_dirent into sibling list + * sysfs_name_hash + * @ns: Namespace tag to hash + * @name: Null terminated string to hash + * + * Returns 31 bit hash of ns + name (so it fits in an off_t ) + */ +static unsigned int sysfs_name_hash(const void *ns, const char *name) +{ + unsigned long hash = init_name_hash(); + unsigned int len = strlen(name); + while (len--) + hash = partial_name_hash(*name++, hash); + hash = ( end_name_hash(hash) ^ hash_ptr( (void *)ns, 31 ) ); + hash &= 0x7fffffffU; + /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */ + if (hash < 1) + hash += 2; + if (hash >= INT_MAX) + hash = INT_MAX - 1; + return hash; +} + +static int sysfs_name_compare(unsigned int hash, const void *ns, + const char *name, const struct sysfs_dirent *sd) +{ + if (hash != sd->s_hash) + return hash - sd->s_hash; + if (ns != sd->s_ns) + return ns - sd->s_ns; + return strcmp(name, sd->s_name); +} + +static int sysfs_sd_compare(const struct sysfs_dirent *left, + const struct sysfs_dirent *right) +{ + return sysfs_name_compare(left->s_hash, left->s_ns, left->s_name, + right); +} + +/** + * sysfs_link_subling - link sysfs_dirent into sibling rbtree * @sd: sysfs_dirent of interest * - * Link @sd into its sibling list which starts from + * Link @sd into its sibling rbtree which starts from * sd->s_parent->s_dir.children. * * Locking: * mutex_lock(sysfs_mutex) + * + * RETURNS: + * 0 on susccess -EEXIST on failure. 
*/ -static void sysfs_link_sibling(struct sysfs_dirent *sd) +static int sysfs_link_sibling(struct sysfs_dirent *sd) { - struct sysfs_dirent *parent_sd = sd->s_parent; - - struct rb_node **p; - struct rb_node *parent; + struct rb_node **node = &sd->s_parent->s_dir.children.rb_node; + struct rb_node *parent = NULL; if (sysfs_type(sd) == SYSFS_DIR) - parent_sd->s_dir.subdirs++; - - p = &parent_sd->s_dir.inode_tree.rb_node; - parent = NULL; - while (*p) { - parent = *p; -#define node rb_entry(parent, struct sysfs_dirent, inode_node) - if (sd->s_ino < node->s_ino) { - p = &node->inode_node.rb_left; - } else if (sd->s_ino > node->s_ino) { - p = &node->inode_node.rb_right; - } else { - printk(KERN_CRIT "sysfs: inserting duplicate inode '%lx'\n", - (unsigned long) sd->s_ino); - BUG(); - } -#undef node - } - rb_link_node(&sd->inode_node, parent, p); - rb_insert_color(&sd->inode_node, &parent_sd->s_dir.inode_tree); - - p = &parent_sd->s_dir.name_tree.rb_node; - parent = NULL; - while (*p) { - int c; - parent = *p; -#define node rb_entry(parent, struct sysfs_dirent, name_node) - c = strcmp(sd->s_name, node->s_name); - if (c < 0) { - p = &node->name_node.rb_left; - } else { - p = &node->name_node.rb_right; - } -#undef node + sd->s_parent->s_dir.subdirs++; + + while (*node) { + struct sysfs_dirent *pos; + int result; + + pos = to_sysfs_dirent(*node); + parent = *node; + result = sysfs_sd_compare(sd, pos); + if (result < 0) + node = &pos->s_rb.rb_left; + else if (result > 0) + node = &pos->s_rb.rb_right; + else + return -EEXIST; } - rb_link_node(&sd->name_node, parent, p); - rb_insert_color(&sd->name_node, &parent_sd->s_dir.name_tree); + /* add new node and rebalance the tree */ + rb_link_node(&sd->s_rb, parent, node); + rb_insert_color(&sd->s_rb, &sd->s_parent->s_dir.children); + return 0; } /** - * sysfs_unlink_sibling - unlink sysfs_dirent from sibling list + * sysfs_unlink_sibling - unlink sysfs_dirent from sibling rbtree * @sd: sysfs_dirent of interest * - * Unlink @sd from its sibling list which starts from + * Unlink @sd from its sibling rbtree which starts from * sd->s_parent->s_dir.children. 
* * Locking: @@ -102,8 +129,7 @@ static void sysfs_unlink_sibling(struct sysfs_dirent *sd) if (sysfs_type(sd) == SYSFS_DIR) sd->s_parent->s_dir.subdirs--; - rb_erase(&sd->inode_node, &sd->s_parent->s_dir.inode_tree); - rb_erase(&sd->name_node, &sd->s_parent->s_dir.name_tree); + rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children); } /** @@ -402,6 +428,7 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt, int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) { struct sysfs_inode_attrs *ps_iattr; + int ret; if (!!sysfs_ns_type(acxt->parent_sd) != !!sd->s_ns) { WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n", @@ -410,12 +437,12 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) return -EINVAL; } - if (sysfs_find_dirent(acxt->parent_sd, sd->s_ns, sd->s_name)) - return -EEXIST; - + sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name); sd->s_parent = sysfs_get(acxt->parent_sd); - sysfs_link_sibling(sd); + ret = sysfs_link_sibling(sd); + if (ret) + return ret; /* Update timestamps on the parent */ ps_iattr = acxt->parent_sd->s_iattr; @@ -565,8 +592,8 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd, const void *ns, const unsigned char *name) { - struct rb_node *p = parent_sd->s_dir.name_tree.rb_node; - struct sysfs_dirent *found = NULL; + struct rb_node *node = parent_sd->s_dir.children.rb_node; + unsigned int hash; if (!!sysfs_ns_type(parent_sd) != !!ns) { WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n", @@ -575,33 +602,21 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd, return NULL; } - while (p) { - int c; -#define node rb_entry(p, struct sysfs_dirent, name_node) - c = strcmp(name, node->s_name); - if (c < 0) { - p = node->name_node.rb_left; - } else if (c > 0) { - p = node->name_node.rb_right; - } else { - found = node; - p = node->name_node.rb_left; - } -#undef node - } - - if (found) { - while (found->s_ns != ns) { - p = rb_next(&found->name_node); - if (!p) - return NULL; - found = rb_entry(p, struct sysfs_dirent, name_node); - if (strcmp(name, found->s_name)) - return NULL; - } + hash = sysfs_name_hash(ns, name); + while (node) { + struct sysfs_dirent *sd; + int result; + + sd = to_sysfs_dirent(node); + result = sysfs_name_compare(hash, ns, name, sd); + if (result < 0) + node = node->rb_left; + else if (result > 0) + node = node->rb_right; + else + return sd; } - - return found; + return NULL; } /** @@ -804,9 +819,9 @@ static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd) pr_debug("sysfs %s: removing dir\n", dir_sd->s_name); sysfs_addrm_start(&acxt, dir_sd); - pos = rb_first(&dir_sd->s_dir.inode_tree); + pos = rb_first(&dir_sd->s_dir.children); while (pos) { - struct sysfs_dirent *sd = rb_entry(pos, struct sysfs_dirent, inode_node); + struct sysfs_dirent *sd = to_sysfs_dirent(pos); pos = rb_next(pos); if (sysfs_type(sd) != SYSFS_DIR) sysfs_remove_one(&acxt, sd); @@ -919,38 +934,36 @@ static int sysfs_dir_release(struct inode *inode, struct file *filp) } static struct sysfs_dirent *sysfs_dir_pos(const void *ns, - struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos) + struct sysfs_dirent *parent_sd, loff_t hash, struct sysfs_dirent *pos) { if (pos) { int valid = !(pos->s_flags & SYSFS_FLAG_REMOVED) && pos->s_parent == parent_sd && - ino == pos->s_ino; + hash == pos->s_hash; sysfs_put(pos); if (!valid) pos = NULL; } - if (!pos && (ino > 1) && (ino < INT_MAX)) { - struct rb_node *p = parent_sd->s_dir.inode_tree.rb_node; - while (p) { -#define node rb_entry(p, 
struct sysfs_dirent, inode_node) - if (ino < node->s_ino) { - pos = node; - p = node->inode_node.rb_left; - } else if (ino > node->s_ino) { - p = node->inode_node.rb_right; - } else { - pos = node; + if (!pos && (hash > 1) && (hash < INT_MAX)) { + struct rb_node *node = parent_sd->s_dir.children.rb_node; + while (node) { + pos = to_sysfs_dirent(node); + + if (hash < pos->s_hash) + node = node->rb_left; + else if (hash > pos->s_hash) + node = node->rb_right; + else break; - } -#undef node } } + /* Skip over entries in the wrong namespace */ while (pos && pos->s_ns != ns) { - struct rb_node *p = rb_next(&pos->inode_node); - if (!p) + struct rb_node *node = rb_next(&pos->s_rb); + if (!node) pos = NULL; else - pos = rb_entry(p, struct sysfs_dirent, inode_node); + pos = to_sysfs_dirent(node); } return pos; } @@ -960,11 +973,11 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns, { pos = sysfs_dir_pos(ns, parent_sd, ino, pos); if (pos) do { - struct rb_node *p = rb_next(&pos->inode_node); - if (!p) + struct rb_node *node = rb_next(&pos->s_rb); + if (!node) pos = NULL; else - pos = rb_entry(p, struct sysfs_dirent, inode_node); + pos = to_sysfs_dirent(node); } while (pos && pos->s_ns != ns); return pos; } @@ -1006,7 +1019,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) len = strlen(name); ino = pos->s_ino; type = dt_type(pos); - filp->f_pos = ino; + filp->f_pos = pos->s_hash; filp->private_data = sysfs_get(pos); mutex_unlock(&sysfs_mutex); diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 7484a36ee678..2b5c923b4b90 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -20,9 +20,8 @@ struct sysfs_elem_dir { struct kobject *kobj; unsigned long subdirs; - - struct rb_root inode_tree; - struct rb_root name_tree; + /* children rbtree starts here and goes through sd->s_rb */ + struct rb_root children; }; struct sysfs_elem_symlink { @@ -62,8 +61,7 @@ struct sysfs_dirent { struct sysfs_dirent *s_parent; const char *s_name; - struct rb_node inode_node; - struct rb_node name_node; + struct rb_node s_rb; union { struct completion *completion; @@ -71,6 +69,7 @@ struct sysfs_dirent { } u; const void *s_ns; /* namespace tag */ + unsigned int s_hash; /* ns + name hash */ union { struct sysfs_elem_dir s_dir; struct sysfs_elem_symlink s_symlink; -- cgit From 15a3382451e51925facfe430deeca63d90137f5d Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 18 Dec 2011 20:07:23 -0800 Subject: sysfs: Reduce s_flags to an unsinged short so it packs well with s_mode On 32bit this reduces sizeof(struct sysfs_dirent) by 2 bytes. Signed-off-by: Eric W. 
Biederman Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/sysfs.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 2b5c923b4b90..19994948ac5c 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -77,7 +77,7 @@ struct sysfs_dirent { struct sysfs_elem_bin_attr s_bin_attr; }; - unsigned int s_flags; + unsigned short s_flags; umode_t s_mode; ino_t s_ino; struct sysfs_inode_attrs *s_iattr; }; @@ -94,11 +94,11 @@ struct sysfs_dirent { #define SYSFS_ACTIVE_REF (SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR) /* identify any namespace tag on sysfs_dirents */ -#define SYSFS_NS_TYPE_MASK 0xff00 +#define SYSFS_NS_TYPE_MASK 0xf00 #define SYSFS_NS_TYPE_SHIFT 8 #define SYSFS_FLAG_MASK ~(SYSFS_NS_TYPE_MASK|SYSFS_TYPE_MASK) -#define SYSFS_FLAG_REMOVED 0x020000 +#define SYSFS_FLAG_REMOVED 0x02000 static inline unsigned int sysfs_type(struct sysfs_dirent *sd) { -- cgit From cafa6b5dd7ce4f0e0a30be301be4efed587a7808 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 18 Dec 2011 20:08:16 -0800 Subject: sysfs: Store the sysfs inode in an unsigned int. Store the sysfs inode number in an unsigned int because the ida inode allocator can return at most a 31 bit number, reducing the size of struct sysfs_dirent by 8 bytes on 64bit platforms. Signed-off-by: Eric W. Biederman Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/dir.c | 4 ++-- fs/sysfs/sysfs.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 0daf255b7bf9..0589c9a694bf 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -224,7 +224,7 @@ static void sysfs_deactivate(struct sysfs_dirent *sd) rwsem_release(&sd->dep_map, 1, _RET_IP_); } -static int sysfs_alloc_ino(ino_t *pino) +static int sysfs_alloc_ino(unsigned int *pino) { int ino, rc; @@ -243,7 +243,7 @@ static int sysfs_alloc_ino(ino_t *pino) return rc; } -static void sysfs_free_ino(ino_t ino) +static void sysfs_free_ino(unsigned int ino) { spin_lock(&sysfs_ino_lock); ida_remove(&sysfs_ino_ida, ino); diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 19994948ac5c..661a9639570b 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -79,7 +79,7 @@ struct sysfs_dirent { unsigned short s_flags; umode_t s_mode; - ino_t s_ino; + unsigned int s_ino; struct sysfs_inode_attrs *s_iattr; }; -- cgit From 524b6c5b39b931311dfe5a2f5abae2f5c9731676 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 18 Dec 2011 20:09:31 -0800 Subject: sysfs: Kill nlink counting. Tracking the number of subdirectories requires an extra field that increases the size of sysfs_dirent. nlinks are not particularly interesting for sysfs, and the nlink counts are wrong when network namespaces are involved, so stop counting them and always return nlink == 1. Userspace already knows that directories with nlink == 1 have an nlink count they can't use to count subdirectories. This reduces the size of sysfs_dirent by 8 bytes on 64bit platforms. Signed-off-by: Eric W. 
Biederman Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/dir.c | 6 ------ fs/sysfs/inode.c | 3 --- fs/sysfs/sysfs.h | 1 - 3 files changed, 10 deletions(-) (limited to 'fs') diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 0589c9a694bf..ea64d01400ac 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -91,9 +91,6 @@ static int sysfs_link_sibling(struct sysfs_dirent *sd) struct rb_node **node = &sd->s_parent->s_dir.children.rb_node; struct rb_node *parent = NULL; - if (sysfs_type(sd) == SYSFS_DIR) - sd->s_parent->s_dir.subdirs++; - while (*node) { struct sysfs_dirent *pos; int result; @@ -126,9 +123,6 @@ static int sysfs_link_sibling(struct sysfs_dirent *sd) */ static void sysfs_unlink_sibling(struct sysfs_dirent *sd) { - if (sysfs_type(sd) == SYSFS_DIR) - sd->s_parent->s_dir.subdirs--; - rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children); } diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 4a802b4a9056..0ac3e1c1a7d8 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -216,9 +216,6 @@ static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode) iattrs->ia_secdata, iattrs->ia_secdata_len); } - - if (sysfs_type(sd) == SYSFS_DIR) - set_nlink(inode, sd->s_dir.subdirs + 2); } int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 661a9639570b..6289a00287db 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -19,7 +19,6 @@ struct sysfs_open_dirent; struct sysfs_elem_dir { struct kobject *kobj; - unsigned long subdirs; /* children rbtree starts here and goes through sd->s_rb */ struct rb_root children; }; -- cgit From a4834c102f4a46808630cad1a545cb0706b3b0a2 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 5 Jan 2012 13:06:02 +0400 Subject: tty: move pty count limiting into devpts Let's move this stuff to the better place, where we can account pty right in tty-indexes managing code. Signed-off-by: Konstantin Khlebnikov Signed-off-by: Greg Kroah-Hartman --- fs/devpts/inode.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index c4e2a58a2e82..c2c7317d5687 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -36,7 +36,52 @@ #define DEVPTS_DEFAULT_PTMX_MODE 0000 #define PTMX_MINOR 2 -extern int pty_limit; /* Config limit on Unix98 ptys */ +/* + * sysctl support for setting limits on the number of Unix98 ptys allocated. + * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly. 
+ */ +static int pty_limit = NR_UNIX98_PTY_DEFAULT; +static int pty_limit_min; +static int pty_limit_max = NR_UNIX98_PTY_MAX; +static int pty_count; + +static struct ctl_table pty_table[] = { + { + .procname = "max", + .maxlen = sizeof(int), + .mode = 0644, + .data = &pty_limit, + .proc_handler = proc_dointvec_minmax, + .extra1 = &pty_limit_min, + .extra2 = &pty_limit_max, + }, { + .procname = "nr", + .maxlen = sizeof(int), + .mode = 0444, + .data = &pty_count, + .proc_handler = proc_dointvec, + }, + {} +}; + +static struct ctl_table pty_kern_table[] = { + { + .procname = "pty", + .mode = 0555, + .child = pty_table, + }, + {} +}; + +static struct ctl_table pty_root_table[] = { + { + .procname = "kernel", + .mode = 0555, + .child = pty_kern_table, + }, + {} +}; + static DEFINE_MUTEX(allocated_ptys_lock); static struct vfsmount *devpts_mnt; @@ -451,6 +496,7 @@ retry: mutex_unlock(&allocated_ptys_lock); return -EIO; } + pty_count++; mutex_unlock(&allocated_ptys_lock); return index; } @@ -462,6 +508,7 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx) mutex_lock(&allocated_ptys_lock); ida_remove(&fsi->allocated_ptys, idx); + pty_count--; mutex_unlock(&allocated_ptys_lock); } @@ -558,11 +605,15 @@ void devpts_pty_kill(struct tty_struct *tty) static int __init init_devpts_fs(void) { int err = register_filesystem(&devpts_fs_type); + struct ctl_table_header *table; + if (!err) { + table = register_sysctl_table(pty_root_table); devpts_mnt = kern_mount(&devpts_fs_type); if (IS_ERR(devpts_mnt)) { err = PTR_ERR(devpts_mnt); unregister_filesystem(&devpts_fs_type); + unregister_sysctl_table(table); } } return err; -- cgit From e9aba5158a80098447ff207a452a3418ae7ee386 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 5 Jan 2012 13:06:11 +0400 Subject: tty: rework pty count limiting After adding devpts multiple-instances support, sysctl kernel.pty.max limits the pty count for each devpts instance independently, while kernel.pty.nr shows the total pty count. This patch restores sysctl kernel.pty.max as a global limit (4096 by default), adds a pty reserve for the main devpts (mounted without the "newinstance" argument), and a new sysctl to tune it: kernel.pty.reserve (1024 by default). Also it adds a devpts mount option "max=%d" to limit the pty count for each devpts instance independently (by default NR_UNIX98_PTY_MAX == 2^20). Thus devpts instances in containers cannot eat up all available ptys even if no limits are set, while the "max" argument allows the limits to be adjusted more precisely. Plus, open("/dev/ptmx") now returns -ENOSPC when pty indexes run out, which is more informative than -EIO. Signed-off-by: Konstantin Khlebnikov Signed-off-by: Greg Kroah-Hartman --- fs/devpts/inode.c | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index c2c7317d5687..1c6f908e38ca 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -41,8 +41,9 @@ * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly. 
*/ static int pty_limit = NR_UNIX98_PTY_DEFAULT; +static int pty_reserve = NR_UNIX98_PTY_RESERVE; static int pty_limit_min; -static int pty_limit_max = NR_UNIX98_PTY_MAX; +static int pty_limit_max = INT_MAX; static int pty_count; static struct ctl_table pty_table[] = { @@ -54,6 +55,14 @@ static struct ctl_table pty_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = &pty_limit_min, .extra2 = &pty_limit_max, + }, { + .procname = "reserve", + .maxlen = sizeof(int), + .mode = 0644, + .data = &pty_reserve, + .proc_handler = proc_dointvec_minmax, + .extra1 = &pty_limit_min, + .extra2 = &pty_limit_max, }, { .procname = "nr", .maxlen = sizeof(int), @@ -94,10 +103,11 @@ struct pts_mount_opts { umode_t mode; umode_t ptmxmode; int newinstance; + int max; }; enum { - Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance, + Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance, Opt_max, Opt_err }; @@ -108,6 +118,7 @@ static const match_table_t tokens = { #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES {Opt_ptmxmode, "ptmxmode=%o"}, {Opt_newinstance, "newinstance"}, + {Opt_max, "max=%d"}, #endif {Opt_err, NULL} }; @@ -154,6 +165,7 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) opts->gid = 0; opts->mode = DEVPTS_DEFAULT_MODE; opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; + opts->max = NR_UNIX98_PTY_MAX; /* newinstance makes sense only on initial mount */ if (op == PARSE_MOUNT) @@ -197,6 +209,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) if (op == PARSE_MOUNT) opts->newinstance = 1; break; + case Opt_max: + if (match_int(&args[0], &option) || + option < 0 || option > NR_UNIX98_PTY_MAX) + return -EINVAL; + opts->max = option; + break; #endif default: printk(KERN_ERR "devpts: called with bogus options\n"); @@ -303,6 +321,8 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root) seq_printf(seq, ",mode=%03o", opts->mode); #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); + if (opts->max < NR_UNIX98_PTY_MAX) + seq_printf(seq, ",max=%d", opts->max); #endif return 0; @@ -483,6 +503,12 @@ retry: return -ENOMEM; mutex_lock(&allocated_ptys_lock); + if (pty_count >= pty_limit - + (fsi->mount_opts.newinstance ? pty_reserve : 0)) { + mutex_unlock(&allocated_ptys_lock); + return -ENOSPC; + } + ida_ret = ida_get_new(&fsi->allocated_ptys, &index); if (ida_ret < 0) { mutex_unlock(&allocated_ptys_lock); @@ -491,10 +517,10 @@ retry: return -EIO; } - if (index >= pty_limit) { + if (index >= fsi->mount_opts.max) { ida_remove(&fsi->allocated_ptys, index); mutex_unlock(&allocated_ptys_lock); - return -EIO; + return -ENOSPC; } pty_count++; mutex_unlock(&allocated_ptys_lock); -- cgit From c56d8a7362665d165ba992b6b7a8d6c13a26eafc Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 17 Jan 2012 12:17:22 +0000 Subject: sysfs: change permissions for /sys from 0755 to 0555 There is a misleading difference between /proc and /sys permissions, /proc is 0555 and /sys is 0755. But as it is impossible to create or unlink something in /sys it would be nice to have same permissions. 
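A quick way to observe the difference described above is a small user-space check. The following program is an illustrative sketch only and is not part of the patch:

/* Illustrative sketch (not from the patch): print the permission bits
 * of /proc and /sys so the 0755-vs-0555 difference can be seen on a
 * running kernel. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	const char *paths[] = { "/proc", "/sys" };
	struct stat st;
	int i;

	for (i = 0; i < 2; i++) {
		if (stat(paths[i], &st) == 0)
			printf("%-5s mode: %04o\n", paths[i],
			       (unsigned int)(st.st_mode & 07777));
		else
			perror(paths[i]);
	}
	return 0;
}

On a kernel carrying this change, both mount points should report 0555.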
Signed-off-by: Vitaly Kuznetsov Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/mount.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index e34f0d99ea4e..140f26a34288 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -36,7 +36,7 @@ struct sysfs_dirent sysfs_root = { .s_name = "", .s_count = ATOMIC_INIT(1), .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT), - .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, + .s_mode = S_IFDIR | S_IRUGO | S_IXUGO, .s_ino = 1, }; -- cgit From 36885d7b1121c779e4060d45472fe53a5b21e09f Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 10 Jun 2011 02:36:05 -0300 Subject: sysctl: remove impossible condition check Remove checks for conditions that will never happen. If procname is NULL the loop would already had bailed out, so there's no need to check it again. At the same time this also compacts the function find_in_table() by refactoring it to be easier to read. Signed-off-by: Lucas De Marchi Reviewed-by: Jesper Juhl Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index a6b62173d4c3..d82f4a8b4b80 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -59,17 +59,11 @@ out: static struct ctl_table *find_in_table(struct ctl_table *p, struct qstr *name) { - int len; for ( ; p->procname; p++) { - - if (!p->procname) - continue; - - len = strlen(p->procname); - if (len != name->len) + if (strlen(p->procname) != name->len) continue; - if (memcmp(p->procname, name->name, len) != 0) + if (memcmp(p->procname, name->name, name->len) != 0) continue; /* I have a match */ @@ -266,10 +260,6 @@ static int scan(struct ctl_table_header *head, ctl_table *table, for (; table->procname; table++, (*pos)++) { int res; - /* Can't do anything without a proc name */ - if (!table->procname) - continue; - if (*pos < file->f_pos) continue; -- cgit From de4e83bd6b5e16d491ec068cd22801d5d063b07a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 6 Jan 2012 03:34:20 -0800 Subject: sysctl: Register the base sysctl table like any other sysctl table. Simplify the code by treating the base sysctl table like any other sysctl table and register it with register_sysctl_table. To ensure this table is registered early enough to avoid problems call sysctl_init from proc_sys_init. Rename sysctl_net.c:sysctl_init() to net_sysctl_init() to avoid name conflicts now that kernel/sysctl.c:sysctl_init() is no longer static. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index d82f4a8b4b80..9d29d28af577 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -468,5 +468,6 @@ int __init proc_sys_init(void) proc_sys_root->proc_iops = &proc_sys_dir_operations; proc_sys_root->proc_fops = &proc_sys_dir_file_operations; proc_sys_root->nlink = 0; - return 0; + + return sysctl_init(); } -- cgit From 1f87f0b52b1d6581168cb80f86746bc4df918d01 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 6 Jan 2012 04:07:15 -0800 Subject: sysctl: Move the implementation into fs/proc/proc_sysctl.c Move the core sysctl code from kernel/sysctl.c and kernel/sysctl_check.c into fs/proc/proc_sysctl.c. 
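For orientation, the caller-visible registration interface that this move leaves unchanged is used roughly as in the sketch below; it is illustrative only and not taken from the patch (the "example" directory, the "value" entry and the example_value variable are invented here):

/*
 * Illustrative sketch, not part of the patch: a minimal module that
 * registers /proc/sys/example/value through register_sysctl_paths().
 */
#include <linux/module.h>
#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static const struct ctl_path example_path[] = {
	{ .procname = "example" },
	{ }
};

static struct ctl_table_header *example_header;

static int __init example_init(void)
{
	example_header = register_sysctl_paths(example_path, example_table);
	return example_header ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(example_header);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Loading such a module would create /proc/sys/example/value, which could then be read and written through procfs or sysctl(8).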
Currently sysctl maintenance is hampered by the sysctl implementation being split across 3 files with artificial layering between them. Consolidate the entire sysctl implementation into 1 file so that it is easier to see what is going on and hopefully allowing for simpler maintenance. For functions that are now only used in fs/proc/proc_sysctl.c remove their declarations from sysctl.h and make them static in fs/proc/proc_sysctl.c Signed-off-by: Eric W. Biederman --- fs/proc/internal.h | 3 + fs/proc/proc_sysctl.c | 622 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 625 insertions(+) (limited to 'fs') diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 292577531ad1..3b5ecd960d6a 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -10,12 +10,15 @@ */ #include +struct ctl_table_header; extern struct proc_dir_entry proc_root; #ifdef CONFIG_PROC_SYSCTL extern int proc_sys_init(void); +extern void sysctl_head_put(struct ctl_table_header *head); #else static inline void proc_sys_init(void) { } +static inline void sysctl_head_put(struct ctl_table_header *head) { } #endif #ifdef CONFIG_NET extern int proc_net_init(void); diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 9d29d28af577..06e6f10ee8ec 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "internal.h" static const struct dentry_operations proc_sys_dentry_operations; @@ -24,6 +25,209 @@ void proc_sys_poll_notify(struct ctl_table_poll *poll) wake_up_interruptible(&poll->wait); } +static struct ctl_table root_table[1]; +static struct ctl_table_root sysctl_table_root; +static struct ctl_table_header root_table_header = { + {{.count = 1, + .ctl_table = root_table, + .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, + .root = &sysctl_table_root, + .set = &sysctl_table_root.default_set, +}; +static struct ctl_table_root sysctl_table_root = { + .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), + .default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry), +}; + +static DEFINE_SPINLOCK(sysctl_lock); + +/* called under sysctl_lock */ +static int use_table(struct ctl_table_header *p) +{ + if (unlikely(p->unregistering)) + return 0; + p->used++; + return 1; +} + +/* called under sysctl_lock */ +static void unuse_table(struct ctl_table_header *p) +{ + if (!--p->used) + if (unlikely(p->unregistering)) + complete(p->unregistering); +} + +/* called under sysctl_lock, will reacquire if has to wait */ +static void start_unregistering(struct ctl_table_header *p) +{ + /* + * if p->used is 0, nobody will ever touch that entry again; + * we'll eliminate all paths to it before dropping sysctl_lock + */ + if (unlikely(p->used)) { + struct completion wait; + init_completion(&wait); + p->unregistering = &wait; + spin_unlock(&sysctl_lock); + wait_for_completion(&wait); + spin_lock(&sysctl_lock); + } else { + /* anything non-NULL; we'll never dereference it */ + p->unregistering = ERR_PTR(-EINVAL); + } + /* + * do not remove from the list until nobody holds it; walking the + * list in do_sysctl() relies on that. 
+ */ + list_del_init(&p->ctl_entry); +} + +static void sysctl_head_get(struct ctl_table_header *head) +{ + spin_lock(&sysctl_lock); + head->count++; + spin_unlock(&sysctl_lock); +} + +void sysctl_head_put(struct ctl_table_header *head) +{ + spin_lock(&sysctl_lock); + if (!--head->count) + kfree_rcu(head, rcu); + spin_unlock(&sysctl_lock); +} + +static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) +{ + if (!head) + BUG(); + spin_lock(&sysctl_lock); + if (!use_table(head)) + head = ERR_PTR(-ENOENT); + spin_unlock(&sysctl_lock); + return head; +} + +static void sysctl_head_finish(struct ctl_table_header *head) +{ + if (!head) + return; + spin_lock(&sysctl_lock); + unuse_table(head); + spin_unlock(&sysctl_lock); +} + +static struct ctl_table_set * +lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces) +{ + struct ctl_table_set *set = &root->default_set; + if (root->lookup) + set = root->lookup(root, namespaces); + return set; +} + +static struct list_head * +lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) +{ + struct ctl_table_set *set = lookup_header_set(root, namespaces); + return &set->list; +} + +static struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, + struct ctl_table_header *prev) +{ + struct ctl_table_root *root; + struct list_head *header_list; + struct ctl_table_header *head; + struct list_head *tmp; + + spin_lock(&sysctl_lock); + if (prev) { + head = prev; + tmp = &prev->ctl_entry; + unuse_table(prev); + goto next; + } + tmp = &root_table_header.ctl_entry; + for (;;) { + head = list_entry(tmp, struct ctl_table_header, ctl_entry); + + if (!use_table(head)) + goto next; + spin_unlock(&sysctl_lock); + return head; + next: + root = head->root; + tmp = tmp->next; + header_list = lookup_header_list(root, namespaces); + if (tmp != header_list) + continue; + + do { + root = list_entry(root->root_list.next, + struct ctl_table_root, root_list); + if (root == &sysctl_table_root) + goto out; + header_list = lookup_header_list(root, namespaces); + } while (list_empty(header_list)); + tmp = header_list->next; + } +out: + spin_unlock(&sysctl_lock); + return NULL; +} + +static struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev) +{ + return __sysctl_head_next(current->nsproxy, prev); +} + +void register_sysctl_root(struct ctl_table_root *root) +{ + spin_lock(&sysctl_lock); + list_add_tail(&root->root_list, &sysctl_table_root.root_list); + spin_unlock(&sysctl_lock); +} + +/* + * sysctl_perm does NOT grant the superuser all rights automatically, because + * some sysctl variables are readonly even to root. 
+ */ + +static int test_perm(int mode, int op) +{ + if (!current_euid()) + mode >>= 6; + else if (in_egroup_p(0)) + mode >>= 3; + if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0) + return 0; + return -EACCES; +} + +static int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op) +{ + int mode; + + if (root->permissions) + mode = root->permissions(root, current->nsproxy, table); + else + mode = table->mode; + + return test_perm(mode, op); +} + +static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table) +{ + for (; table->procname; table++) { + table->parent = parent; + if (table->child) + sysctl_set_parent(table, table->child); + } +} + + static struct inode *proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) { @@ -435,6 +639,21 @@ static int proc_sys_delete(const struct dentry *dentry) return !!PROC_I(dentry->d_inode)->sysctl->unregistering; } +static int sysctl_is_seen(struct ctl_table_header *p) +{ + struct ctl_table_set *set = p->set; + int res; + spin_lock(&sysctl_lock); + if (p->unregistering) + res = 0; + else if (!set->is_seen) + res = 1; + else + res = set->is_seen(set); + spin_unlock(&sysctl_lock); + return res; +} + static int proc_sys_compare(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, @@ -460,6 +679,409 @@ static const struct dentry_operations proc_sys_dentry_operations = { .d_compare = proc_sys_compare, }; +static struct ctl_table *is_branch_in(struct ctl_table *branch, + struct ctl_table *table) +{ + struct ctl_table *p; + const char *s = branch->procname; + + /* branch should have named subdirectory as its first element */ + if (!s || !branch->child) + return NULL; + + /* ... 
and nothing else */ + if (branch[1].procname) + return NULL; + + /* table should contain subdirectory with the same name */ + for (p = table; p->procname; p++) { + if (!p->child) + continue; + if (p->procname && strcmp(p->procname, s) == 0) + return p; + } + return NULL; +} + +/* see if attaching q to p would be an improvement */ +static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q) +{ + struct ctl_table *to = p->ctl_table, *by = q->ctl_table; + struct ctl_table *next; + int is_better = 0; + int not_in_parent = !p->attached_by; + + while ((next = is_branch_in(by, to)) != NULL) { + if (by == q->attached_by) + is_better = 1; + if (to == p->attached_by) + not_in_parent = 1; + by = by->child; + to = next->child; + } + + if (is_better && not_in_parent) { + q->attached_by = by; + q->attached_to = to; + q->parent = p; + } +} + +#ifdef CONFIG_SYSCTL_SYSCALL_CHECK +static int sysctl_depth(struct ctl_table *table) +{ + struct ctl_table *tmp; + int depth; + + depth = 0; + for (tmp = table; tmp->parent; tmp = tmp->parent) + depth++; + + return depth; +} + +static struct ctl_table *sysctl_parent(struct ctl_table *table, int n) +{ + int i; + + for (i = 0; table && i < n; i++) + table = table->parent; + + return table; +} + + +static void sysctl_print_path(struct ctl_table *table) +{ + struct ctl_table *tmp; + int depth, i; + depth = sysctl_depth(table); + if (table->procname) { + for (i = depth; i >= 0; i--) { + tmp = sysctl_parent(table, i); + printk("/%s", tmp->procname?tmp->procname:""); + } + } + printk(" "); +} + +static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces, + struct ctl_table *table) +{ + struct ctl_table_header *head; + struct ctl_table *ref, *test; + int depth, cur_depth; + + depth = sysctl_depth(table); + + for (head = __sysctl_head_next(namespaces, NULL); head; + head = __sysctl_head_next(namespaces, head)) { + cur_depth = depth; + ref = head->ctl_table; +repeat: + test = sysctl_parent(table, cur_depth); + for (; ref->procname; ref++) { + int match = 0; + if (cur_depth && !ref->child) + continue; + + if (test->procname && ref->procname && + (strcmp(test->procname, ref->procname) == 0)) + match++; + + if (match) { + if (cur_depth != 0) { + cur_depth--; + ref = ref->child; + goto repeat; + } + goto out; + } + } + } + ref = NULL; +out: + sysctl_head_finish(head); + return ref; +} + +static void set_fail(const char **fail, struct ctl_table *table, const char *str) +{ + if (*fail) { + printk(KERN_ERR "sysctl table check failed: "); + sysctl_print_path(table); + printk(" %s\n", *fail); + dump_stack(); + } + *fail = str; +} + +static void sysctl_check_leaf(struct nsproxy *namespaces, + struct ctl_table *table, const char **fail) +{ + struct ctl_table *ref; + + ref = sysctl_check_lookup(namespaces, table); + if (ref && (ref != table)) + set_fail(fail, table, "Sysctl already exists"); +} + +static int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) +{ + int error = 0; + for (; table->procname; table++) { + const char *fail = NULL; + + if (table->parent) { + if (!table->parent->procname) + set_fail(&fail, table, "Parent without procname"); + } + if (table->child) { + if (table->data) + set_fail(&fail, table, "Directory with data?"); + if (table->maxlen) + set_fail(&fail, table, "Directory with maxlen?"); + if ((table->mode & (S_IRUGO|S_IXUGO)) != table->mode) + set_fail(&fail, table, "Writable sysctl directory"); + if (table->proc_handler) + set_fail(&fail, table, "Directory with proc_handler"); + if (table->extra1) + 
set_fail(&fail, table, "Directory with extra1"); + if (table->extra2) + set_fail(&fail, table, "Directory with extra2"); + } else { + if ((table->proc_handler == proc_dostring) || + (table->proc_handler == proc_dointvec) || + (table->proc_handler == proc_dointvec_minmax) || + (table->proc_handler == proc_dointvec_jiffies) || + (table->proc_handler == proc_dointvec_userhz_jiffies) || + (table->proc_handler == proc_dointvec_ms_jiffies) || + (table->proc_handler == proc_doulongvec_minmax) || + (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { + if (!table->data) + set_fail(&fail, table, "No data"); + if (!table->maxlen) + set_fail(&fail, table, "No maxlen"); + } +#ifdef CONFIG_PROC_SYSCTL + if (!table->proc_handler) + set_fail(&fail, table, "No proc_handler"); +#endif + sysctl_check_leaf(namespaces, table, &fail); + } + if (table->mode > 0777) + set_fail(&fail, table, "bogus .mode"); + if (fail) { + set_fail(&fail, table, NULL); + error = -EINVAL; + } + if (table->child) + error |= sysctl_check_table(namespaces, table->child); + } + return error; +} +#endif /* CONFIG_SYSCTL_SYSCALL_CHECK */ + +/** + * __register_sysctl_paths - register a sysctl hierarchy + * @root: List of sysctl headers to register on + * @namespaces: Data to compute which lists of sysctl entries are visible + * @path: The path to the directory the sysctl table is in. + * @table: the top-level table structure + * + * Register a sysctl table hierarchy. @table should be a filled in ctl_table + * array. A completely 0 filled entry terminates the table. + * + * The members of the &struct ctl_table structure are used as follows: + * + * procname - the name of the sysctl file under /proc/sys. Set to %NULL to not + * enter a sysctl file + * + * data - a pointer to data for use by proc_handler + * + * maxlen - the maximum size in bytes of the data + * + * mode - the file permissions for the /proc/sys file, and for sysctl(2) + * + * child - a pointer to the child sysctl table if this entry is a directory, or + * %NULL. + * + * proc_handler - the text handler routine (described below) + * + * de - for internal use by the sysctl routines + * + * extra1, extra2 - extra pointers usable by the proc handler routines + * + * Leaf nodes in the sysctl tree will be represented by a single file + * under /proc; non-leaf nodes will be represented by directories. + * + * sysctl(2) can automatically manage read and write requests through + * the sysctl table. The data and maxlen fields of the ctl_table + * struct enable minimal validation of the values being written to be + * performed, and the mode field allows minimal authentication. + * + * There must be a proc_handler routine for any terminal nodes + * mirrored under /proc/sys (non-terminals are handled by a built-in + * directory handler). Several default handlers are available to + * cover common cases - + * + * proc_dostring(), proc_dointvec(), proc_dointvec_jiffies(), + * proc_dointvec_userhz_jiffies(), proc_dointvec_minmax(), + * proc_doulongvec_ms_jiffies_minmax(), proc_doulongvec_minmax() + * + * It is the handler's job to read the input buffer from user memory + * and process it. The handler should return 0 on success. + * + * This routine returns %NULL on a failure to register, and a pointer + * to the table header on success. 
+ */ +struct ctl_table_header *__register_sysctl_paths( + struct ctl_table_root *root, + struct nsproxy *namespaces, + const struct ctl_path *path, struct ctl_table *table) +{ + struct ctl_table_header *header; + struct ctl_table *new, **prevp; + unsigned int n, npath; + struct ctl_table_set *set; + + /* Count the path components */ + for (npath = 0; path[npath].procname; ++npath) + ; + + /* + * For each path component, allocate a 2-element ctl_table array. + * The first array element will be filled with the sysctl entry + * for this, the second will be the sentinel (procname == 0). + * + * We allocate everything in one go so that we don't have to + * worry about freeing additional memory in unregister_sysctl_table. + */ + header = kzalloc(sizeof(struct ctl_table_header) + + (2 * npath * sizeof(struct ctl_table)), GFP_KERNEL); + if (!header) + return NULL; + + new = (struct ctl_table *) (header + 1); + + /* Now connect the dots */ + prevp = &header->ctl_table; + for (n = 0; n < npath; ++n, ++path) { + /* Copy the procname */ + new->procname = path->procname; + new->mode = 0555; + + *prevp = new; + prevp = &new->child; + + new += 2; + } + *prevp = table; + header->ctl_table_arg = table; + + INIT_LIST_HEAD(&header->ctl_entry); + header->used = 0; + header->unregistering = NULL; + header->root = root; + sysctl_set_parent(NULL, header->ctl_table); + header->count = 1; +#ifdef CONFIG_SYSCTL_SYSCALL_CHECK + if (sysctl_check_table(namespaces, header->ctl_table)) { + kfree(header); + return NULL; + } +#endif + spin_lock(&sysctl_lock); + header->set = lookup_header_set(root, namespaces); + header->attached_by = header->ctl_table; + header->attached_to = root_table; + header->parent = &root_table_header; + for (set = header->set; set; set = set->parent) { + struct ctl_table_header *p; + list_for_each_entry(p, &set->list, ctl_entry) { + if (p->unregistering) + continue; + try_attach(p, header); + } + } + header->parent->count++; + list_add_tail(&header->ctl_entry, &header->set->list); + spin_unlock(&sysctl_lock); + + return header; +} + +/** + * register_sysctl_table_path - register a sysctl table hierarchy + * @path: The path to the directory the sysctl table is in. + * @table: the top-level table structure + * + * Register a sysctl table hierarchy. @table should be a filled in ctl_table + * array. A completely 0 filled entry terminates the table. + * + * See __register_sysctl_paths for more details. + */ +struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, + struct ctl_table *table) +{ + return __register_sysctl_paths(&sysctl_table_root, current->nsproxy, + path, table); +} +EXPORT_SYMBOL(register_sysctl_paths); + +/** + * register_sysctl_table - register a sysctl table hierarchy + * @table: the top-level table structure + * + * Register a sysctl table hierarchy. @table should be a filled in ctl_table + * array. A completely 0 filled entry terminates the table. + * + * See register_sysctl_paths for more details. + */ +struct ctl_table_header *register_sysctl_table(struct ctl_table *table) +{ + static const struct ctl_path null_path[] = { {} }; + + return register_sysctl_paths(null_path, table); +} +EXPORT_SYMBOL(register_sysctl_table); + +/** + * unregister_sysctl_table - unregister a sysctl table hierarchy + * @header: the header returned from register_sysctl_table + * + * Unregisters the sysctl table and all children. proc entries may not + * actually be removed until they are no longer used by anyone. 
+ */ +void unregister_sysctl_table(struct ctl_table_header * header) +{ + might_sleep(); + + if (header == NULL) + return; + + spin_lock(&sysctl_lock); + start_unregistering(header); + if (!--header->parent->count) { + WARN_ON(1); + kfree_rcu(header->parent, rcu); + } + if (!--header->count) + kfree_rcu(header, rcu); + spin_unlock(&sysctl_lock); +} +EXPORT_SYMBOL(unregister_sysctl_table); + +void setup_sysctl_set(struct ctl_table_set *p, + struct ctl_table_set *parent, + int (*is_seen)(struct ctl_table_set *)) +{ + INIT_LIST_HEAD(&p->list); + p->parent = parent ? parent : &sysctl_table_root.default_set; + p->is_seen = is_seen; +} + + int __init proc_sys_init(void) { struct proc_dir_entry *proc_sys_root; -- cgit From a15e20982e2fbb06e85da584a0f150784042c17d Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 8 Jan 2012 00:16:29 -0800 Subject: sysctl: Make the directories have nlink == 1 I goofed when I made sysctl directories have nlink == 0. nlink == 0 means the directory has been deleted. nlink == 1 meands a directory does not count subdirectories. Use the default nlink == 1 for sysctl directories. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 06e6f10ee8ec..f6aa75111b41 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -253,7 +253,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, inode->i_fop = &proc_sys_file_operations; } else { inode->i_mode |= S_IFDIR; - clear_nlink(inode); inode->i_op = &proc_sys_dir_operations; inode->i_fop = &proc_sys_dir_file_operations; } -- cgit From 97324cd804b7b9fb6044e114329335db79810425 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 9 Jan 2012 22:19:13 -0800 Subject: sysctl: Implement retire_sysctl_set This adds a small helper retire_sysctl_set to remove the intimate knowledge about the how a sysctl_set is implemented from net/sysct_net.c Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index f6aa75111b41..9d8223cd3655 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1080,6 +1080,10 @@ void setup_sysctl_set(struct ctl_table_set *p, p->is_seen = is_seen; } +void retire_sysctl_set(struct ctl_table_set *set) +{ + WARN_ON(!list_empty(&set->list)); +} int __init proc_sys_init(void) { -- cgit From bd295b56cfae85f2dd6c2b03951480c91e6d08f3 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 22 Jan 2012 21:10:21 -0800 Subject: sysctl: Remove the unnecessary sysctl_set parent concept. In sysctl_net register the two networking roots in the proper order. In register_sysctl walk the sysctl sets in the reverse order of the sysctl roots. Remove parent from ctl_table_set and setup_sysctl_set as it is no longer needed. Signed-off-by: Eric W. 
Biederman --- fs/proc/proc_sysctl.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 9d8223cd3655..86d32a318e2c 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -995,13 +995,20 @@ struct ctl_table_header *__register_sysctl_paths( header->attached_by = header->ctl_table; header->attached_to = root_table; header->parent = &root_table_header; - for (set = header->set; set; set = set->parent) { + set = header->set; + root = header->root; + for (;;) { struct ctl_table_header *p; list_for_each_entry(p, &set->list, ctl_entry) { if (p->unregistering) continue; try_attach(p, header); } + if (root == &sysctl_table_root) + break; + root = list_entry(root->root_list.prev, + struct ctl_table_root, root_list); + set = lookup_header_set(root, namespaces); } header->parent->count++; list_add_tail(&header->ctl_entry, &header->set->list); @@ -1072,11 +1079,9 @@ void unregister_sysctl_table(struct ctl_table_header * header) EXPORT_SYMBOL(unregister_sysctl_table); void setup_sysctl_set(struct ctl_table_set *p, - struct ctl_table_set *parent, int (*is_seen)(struct ctl_table_set *)) { INIT_LIST_HEAD(&p->list); - p->parent = parent ? parent : &sysctl_table_root.default_set; p->is_seen = is_seen; } -- cgit From f05e53a7fbb28c951c0c8cf3963fa8019ae1d4d3 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 21 Jan 2012 10:03:13 -0800 Subject: sysctl: Create local copies of directory names used in paths Creating local copies of directory names is a good idea for two reasons. - The dynamic names used by callers must be copied into new strings by the callers today to ensure the strings do not change between register and unregister of the sysctl table. - Sysctl directories have a potentially different lifetime than the time between register and unregister of any particular sysctl table. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 86d32a318e2c..bcf60fb8dce5 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -943,10 +943,12 @@ struct ctl_table_header *__register_sysctl_paths( struct ctl_table *new, **prevp; unsigned int n, npath; struct ctl_table_set *set; + size_t path_bytes = 0; + char *new_name; /* Count the path components */ for (npath = 0; path[npath].procname; ++npath) - ; + path_bytes += strlen(path[npath].procname) + 1; /* * For each path component, allocate a 2-element ctl_table array. @@ -956,24 +958,27 @@ struct ctl_table_header *__register_sysctl_paths( * We allocate everything in one go so that we don't have to * worry about freeing additional memory in unregister_sysctl_table. */ - header = kzalloc(sizeof(struct ctl_table_header) + + header = kzalloc(sizeof(struct ctl_table_header) + path_bytes + (2 * npath * sizeof(struct ctl_table)), GFP_KERNEL); if (!header) return NULL; new = (struct ctl_table *) (header + 1); + new_name = (char *)(new + (2 * npath)); /* Now connect the dots */ prevp = &header->ctl_table; for (n = 0; n < npath; ++n, ++path) { /* Copy the procname */ - new->procname = path->procname; + strcpy(new_name, path->procname); + new->procname = new_name; new->mode = 0555; *prevp = new; prevp = &new->child; new += 2; + new_name += strlen(new_name) + 1; } *prevp = table; header->ctl_table_arg = table; -- cgit From 6e9d5164153ad6539edd31e7afb02a3e79124cad Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Sat, 21 Jan 2012 10:26:26 -0800 Subject: sysctl: Add support for register sysctl tables with a normal cstring path. Make __register_sysctl_table the core sysctl registration operation and make it take a char * string as path. Now that binary paths have been banished into the real of backwards compatibility in kernel/binary_sysctl.c where they can be safely ignored there is no longer a need to use struct ctl_path to represent path names when registering ctl_tables. Start the transition to using normal char * strings to represent pathnames when registering sysctl tables. Normal strings are easier to deal with both in the internal sysctl implementation and for programmers registering sysctl tables. __register_sysctl_paths is turned into a backwards compatibility wrapper that converts a ctl_path array into a normal char * string. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 94 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 84 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index bcf60fb8dce5..5704ff0e889f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -882,7 +882,7 @@ static int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *tabl #endif /* CONFIG_SYSCTL_SYSCALL_CHECK */ /** - * __register_sysctl_paths - register a sysctl hierarchy + * __register_sysctl_table - register a sysctl table * @root: List of sysctl headers to register on * @namespaces: Data to compute which lists of sysctl entries are visible * @path: The path to the directory the sysctl table is in. @@ -934,21 +934,34 @@ static int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *tabl * This routine returns %NULL on a failure to register, and a pointer * to the table header on success. */ -struct ctl_table_header *__register_sysctl_paths( +struct ctl_table_header *__register_sysctl_table( struct ctl_table_root *root, struct nsproxy *namespaces, - const struct ctl_path *path, struct ctl_table *table) + const char *path, struct ctl_table *table) { struct ctl_table_header *header; struct ctl_table *new, **prevp; - unsigned int n, npath; + const char *name, *nextname; + unsigned int npath = 0; struct ctl_table_set *set; size_t path_bytes = 0; char *new_name; /* Count the path components */ - for (npath = 0; path[npath].procname; ++npath) - path_bytes += strlen(path[npath].procname) + 1; + for (name = path; name; name = nextname) { + int namelen; + nextname = strchr(name, '/'); + if (nextname) { + namelen = nextname - name; + nextname++; + } else { + namelen = strlen(name); + } + if (namelen == 0) + continue; + path_bytes += namelen + 1; + npath++; + } /* * For each path component, allocate a 2-element ctl_table array. 
@@ -968,9 +981,20 @@ struct ctl_table_header *__register_sysctl_paths( /* Now connect the dots */ prevp = &header->ctl_table; - for (n = 0; n < npath; ++n, ++path) { - /* Copy the procname */ - strcpy(new_name, path->procname); + for (name = path; name; name = nextname) { + int namelen; + nextname = strchr(name, '/'); + if (nextname) { + namelen = nextname - name; + nextname++; + } else { + namelen = strlen(name); + } + if (namelen == 0) + continue; + memcpy(new_name, name, namelen); + new_name[namelen] = '\0'; + new->procname = new_name; new->mode = 0555; @@ -978,7 +1002,7 @@ struct ctl_table_header *__register_sysctl_paths( prevp = &new->child; new += 2; - new_name += strlen(new_name) + 1; + new_name += namelen + 1; } *prevp = table; header->ctl_table_arg = table; @@ -1022,6 +1046,56 @@ struct ctl_table_header *__register_sysctl_paths( return header; } +static char *append_path(const char *path, char *pos, const char *name) +{ + int namelen; + namelen = strlen(name); + if (((pos - path) + namelen + 2) >= PATH_MAX) + return NULL; + memcpy(pos, name, namelen); + pos[namelen] = '/'; + pos[namelen + 1] = '\0'; + pos += namelen + 1; + return pos; +} + +/** + * __register_sysctl_paths - register a sysctl table hierarchy + * @root: List of sysctl headers to register on + * @namespaces: Data to compute which lists of sysctl entries are visible + * @path: The path to the directory the sysctl table is in. + * @table: the top-level table structure + * + * Register a sysctl table hierarchy. @table should be a filled in ctl_table + * array. A completely 0 filled entry terminates the table. + * + * See __register_sysctl_table for more details. + */ +struct ctl_table_header *__register_sysctl_paths( + struct ctl_table_root *root, + struct nsproxy *namespaces, + const struct ctl_path *path, struct ctl_table *table) +{ + struct ctl_table_header *header = NULL; + const struct ctl_path *component; + char *new_path, *pos; + + pos = new_path = kmalloc(PATH_MAX, GFP_KERNEL); + if (!new_path) + return NULL; + + pos[0] = '\0'; + for (component = path; component->procname; component++) { + pos = append_path(new_path, pos, component->procname); + if (!pos) + goto out; + } + header = __register_sysctl_table(root, namespaces, new_path, table); +out: + kfree(new_path); + return header; +} + /** * register_sysctl_table_path - register a sysctl table hierarchy * @path: The path to the directory the sysctl table is in. -- cgit From ec6a52668d0bbc6d648e978c327150254bf1ce7f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 21 Jan 2012 12:35:23 -0800 Subject: sysctl: Add ctl_table chains into cstring paths For any component of table passed to __register_sysctl_paths that actually serves as a path, add that to the cstring path that is passed to __register_sysctl_table. The result is that for most calls to __register_sysctl_paths we only pass a table to __register_sysctl_table that contains no child directories. Signed-off-by: Eric W. 
Biederman --- fs/proc/proc_sysctl.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 5704ff0e889f..9b91deeeb56c 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1076,6 +1076,7 @@ struct ctl_table_header *__register_sysctl_paths( struct nsproxy *namespaces, const struct ctl_path *path, struct ctl_table *table) { + struct ctl_table *ctl_table_arg = table; struct ctl_table_header *header = NULL; const struct ctl_path *component; char *new_path, *pos; @@ -1090,7 +1091,15 @@ struct ctl_table_header *__register_sysctl_paths( if (!pos) goto out; } + while (table->procname && table->child && !table[1].procname) { + pos = append_path(new_path, pos, table->procname); + if (!pos) + goto out; + table = table->child; + } header = __register_sysctl_table(root, namespaces, new_path, table); + if (header) + header->ctl_table_arg = ctl_table_arg; out: kfree(new_path); return header; -- cgit From f728019bb72e655680c02ad1829323054a8e875f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 22 Jan 2012 18:22:05 -0800 Subject: sysctl: register only tables of sysctl files Split the registration of a complex ctl_table array which may have arbitrary numbers of directories (->child != NULL) and tables of files into a series of simpler registrations that only register tables of files. Graphically: register('dir', { + file-a + file-b + subdir1 + file-c + subdir2 + file-d + file-e }) is transformed into: wrapper->subheaders[0] = register('dir', {file1-a, file1-b}) wrapper->subheaders[1] = register('dir/subdir1', {file-c}) wrapper->subheaders[2] = register('dir/subdir2', {file-d, file-e}) return wrapper This guarantees that __register_sysctl_table will only see a simple ctl_table array with all entries having (->child == NULL). Care was taken to pass the original simple ctl_table arrays to __register_sysctl_table whenever possible. This change is derived from a similar patch written by Lucrian Grijincu. Inspired-by: Lucian Adrian Grijincu Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 165 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 147 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 9b91deeeb56c..6bab2ae9e395 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -882,7 +882,7 @@ static int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *tabl #endif /* CONFIG_SYSCTL_SYSCALL_CHECK */ /** - * __register_sysctl_table - register a sysctl table + * __register_sysctl_table - register a leaf sysctl table * @root: List of sysctl headers to register on * @namespaces: Data to compute which lists of sysctl entries are visible * @path: The path to the directory the sysctl table is in. @@ -900,29 +900,19 @@ static int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *tabl * * maxlen - the maximum size in bytes of the data * - * mode - the file permissions for the /proc/sys file, and for sysctl(2) + * mode - the file permissions for the /proc/sys file * - * child - a pointer to the child sysctl table if this entry is a directory, or - * %NULL. + * child - must be %NULL. 
* * proc_handler - the text handler routine (described below) * - * de - for internal use by the sysctl routines - * * extra1, extra2 - extra pointers usable by the proc handler routines * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes will be represented by directories. * - * sysctl(2) can automatically manage read and write requests through - * the sysctl table. The data and maxlen fields of the ctl_table - * struct enable minimal validation of the values being written to be - * performed, and the mode field allows minimal authentication. - * - * There must be a proc_handler routine for any terminal nodes - * mirrored under /proc/sys (non-terminals are handled by a built-in - * directory handler). Several default handlers are available to - * cover common cases - + * There must be a proc_handler routine for any terminal nodes. + * Several default handlers are available to cover common cases - * * proc_dostring(), proc_dointvec(), proc_dointvec_jiffies(), * proc_dointvec_userhz_jiffies(), proc_dointvec_minmax(), @@ -1059,6 +1049,100 @@ static char *append_path(const char *path, char *pos, const char *name) return pos; } +static int count_subheaders(struct ctl_table *table) +{ + int has_files = 0; + int nr_subheaders = 0; + struct ctl_table *entry; + + /* special case: no directory and empty directory */ + if (!table || !table->procname) + return 1; + + for (entry = table; entry->procname; entry++) { + if (entry->child) + nr_subheaders += count_subheaders(entry->child); + else + has_files = 1; + } + return nr_subheaders + has_files; +} + +static int register_leaf_sysctl_tables(const char *path, char *pos, + struct ctl_table_header ***subheader, + struct ctl_table_root *root, struct nsproxy *namespaces, + struct ctl_table *table) +{ + struct ctl_table *ctl_table_arg = NULL; + struct ctl_table *entry, *files; + int nr_files = 0; + int nr_dirs = 0; + int err = -ENOMEM; + + for (entry = table; entry->procname; entry++) { + if (entry->child) + nr_dirs++; + else + nr_files++; + } + + files = table; + /* If there are mixed files and directories we need a new table */ + if (nr_dirs && nr_files) { + struct ctl_table *new; + files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1), + GFP_KERNEL); + if (!files) + goto out; + + ctl_table_arg = files; + for (new = files, entry = table; entry->procname; entry++) { + if (entry->child) + continue; + *new = *entry; + new++; + } + } + + /* Register everything except a directory full of subdirectories */ + if (nr_files || !nr_dirs) { + struct ctl_table_header *header; + header = __register_sysctl_table(root, namespaces, path, files); + if (!header) { + kfree(ctl_table_arg); + goto out; + } + + /* Remember if we need to free the file table */ + header->ctl_table_arg = ctl_table_arg; + **subheader = header; + (*subheader)++; + } + + /* Recurse into the subdirectories. 
*/ + for (entry = table; entry->procname; entry++) { + char *child_pos; + + if (!entry->child) + continue; + + err = -ENAMETOOLONG; + child_pos = append_path(path, pos, entry->procname); + if (!child_pos) + goto out; + + err = register_leaf_sysctl_tables(path, child_pos, subheader, + root, namespaces, entry->child); + pos[0] = '\0'; + if (err) + goto out; + } + err = 0; +out: + /* On failure our caller will unregister all registered subheaders */ + return err; +} + /** * __register_sysctl_paths - register a sysctl table hierarchy * @root: List of sysctl headers to register on @@ -1077,7 +1161,8 @@ struct ctl_table_header *__register_sysctl_paths( const struct ctl_path *path, struct ctl_table *table) { struct ctl_table *ctl_table_arg = table; - struct ctl_table_header *header = NULL; + int nr_subheaders = count_subheaders(table); + struct ctl_table_header *header = NULL, **subheaders, **subheader; const struct ctl_path *component; char *new_path, *pos; @@ -1097,12 +1182,39 @@ struct ctl_table_header *__register_sysctl_paths( goto out; table = table->child; } - header = __register_sysctl_table(root, namespaces, new_path, table); - if (header) + if (nr_subheaders == 1) { + header = __register_sysctl_table(root, namespaces, new_path, table); + if (header) + header->ctl_table_arg = ctl_table_arg; + } else { + header = kzalloc(sizeof(*header) + + sizeof(*subheaders)*nr_subheaders, GFP_KERNEL); + if (!header) + goto out; + + subheaders = (struct ctl_table_header **) (header + 1); + subheader = subheaders; header->ctl_table_arg = ctl_table_arg; + + if (register_leaf_sysctl_tables(new_path, pos, &subheader, + root, namespaces, table)) + goto err_register_leaves; + } + out: kfree(new_path); return header; + +err_register_leaves: + while (subheader > subheaders) { + struct ctl_table_header *subh = *(--subheader); + struct ctl_table *table = subh->ctl_table_arg; + unregister_sysctl_table(subh); + kfree(table); + } + kfree(header); + header = NULL; + goto out; } /** @@ -1149,11 +1261,28 @@ EXPORT_SYMBOL(register_sysctl_table); */ void unregister_sysctl_table(struct ctl_table_header * header) { + int nr_subheaders; might_sleep(); if (header == NULL) return; + nr_subheaders = count_subheaders(header->ctl_table_arg); + if (unlikely(nr_subheaders > 1)) { + struct ctl_table_header **subheaders; + int i; + + subheaders = (struct ctl_table_header **)(header + 1); + for (i = nr_subheaders -1; i >= 0; i--) { + struct ctl_table_header *subh = subheaders[i]; + struct ctl_table *table = subh->ctl_table_arg; + unregister_sysctl_table(subh); + kfree(table); + } + kfree(header); + return; + } + spin_lock(&sysctl_lock); start_unregistering(header); if (!--header->parent->count) { -- cgit From 7c60c48f58a78195acc1f71c9a9d01958c02ab89 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 21 Jan 2012 13:34:05 -0800 Subject: sysctl: Improve the sysctl sanity checks - Stop validating subdirectories now that we only register leaf tables - Cleanup and improve the duplicate filename check. * Run the duplicate filename check under the sysctl_lock to guarantee we never add duplicate names. * Reduce the duplicate filename check to nearly O(M*N) where M is the number of entries in tthe table we are registering and N is the number of entries in the directory before we got there. - Move the duplicate filename check into it's own function and call it directtly from __register_sysctl_table - Kill the config option as the sanity checks are now cheap enough the config option is unnecessary. 
The original reason for the config option was because we had a huge table used to verify the proc filename to binary sysctl mapping. That table has now evolved into the binary_sysctl translation layer and is no longer part of the sysctl_check code. - Tighten up the permission checks. Guarnateeing that files only have read or write permissions. - Removed redudant check for parents having a procname as now everything has a procname. - Generalize the backtrace logic so that we print a backtrace from any failure of __register_sysctl_table that was not caused by a memmory allocation failure. The backtrace allows us to track down who erroneously registered a sysctl table. Bechmark before (CONFIG_SYSCTL_CHECK=y): make-dummies 0 999 -> 12s rmmod dummy -> 0.08s Bechmark before (CONFIG_SYSCTL_CHECK=n): make-dummies 0 999 -> 0.7s rmmod dummy -> 0.06s make-dummies 0 99999 -> 1m13s rmmod dummy -> 0.38s Benchmark after: make-dummies 0 999 -> 0.65s rmmod dummy -> 0.055s make-dummies 0 9999 -> 1m10s rmmod dummy -> 0.39s The sysctl sanity checks now impose no measurable cost. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 222 +++++++++++++++++++------------------------------- 1 file changed, 86 insertions(+), 136 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 6bab2ae9e395..a492ff60e071 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -726,160 +726,106 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q) } } -#ifdef CONFIG_SYSCTL_SYSCALL_CHECK -static int sysctl_depth(struct ctl_table *table) +static int sysctl_check_table_dups(const char *path, struct ctl_table *old, + struct ctl_table *table) { - struct ctl_table *tmp; - int depth; - - depth = 0; - for (tmp = table; tmp->parent; tmp = tmp->parent) - depth++; + struct ctl_table *entry, *test; + int error = 0; - return depth; + for (entry = old; entry->procname; entry++) { + for (test = table; test->procname; test++) { + if (strcmp(entry->procname, test->procname) == 0) { + printk(KERN_ERR "sysctl duplicate entry: %s/%s\n", + path, test->procname); + error = -EEXIST; + } + } + } + return error; } -static struct ctl_table *sysctl_parent(struct ctl_table *table, int n) +static int sysctl_check_dups(struct nsproxy *namespaces, + struct ctl_table_header *header, + const char *path, struct ctl_table *table) { - int i; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_table_header *dir_head, *head; + struct ctl_table *dir_table; + int error = 0; - for (i = 0; table && i < n; i++) - table = table->parent; + /* No dups if we are the only member of our directory */ + if (header->attached_by != table) + return 0; - return table; -} + dir_head = header->parent; + dir_table = header->attached_to; + error = sysctl_check_table_dups(path, dir_table, table); -static void sysctl_print_path(struct ctl_table *table) -{ - struct ctl_table *tmp; - int depth, i; - depth = sysctl_depth(table); - if (table->procname) { - for (i = depth; i >= 0; i--) { - tmp = sysctl_parent(table, i); - printk("/%s", tmp->procname?tmp->procname:""); - } - } - printk(" "); -} + root = &sysctl_table_root; + do { + set = lookup_header_set(root, namespaces); -static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces, - struct ctl_table *table) -{ - struct ctl_table_header *head; - struct ctl_table *ref, *test; - int depth, cur_depth; - - depth = sysctl_depth(table); - - for (head = __sysctl_head_next(namespaces, NULL); head; - head = 
__sysctl_head_next(namespaces, head)) { - cur_depth = depth; - ref = head->ctl_table; -repeat: - test = sysctl_parent(table, cur_depth); - for (; ref->procname; ref++) { - int match = 0; - if (cur_depth && !ref->child) + list_for_each_entry(head, &set->list, ctl_entry) { + if (head->unregistering) continue; - - if (test->procname && ref->procname && - (strcmp(test->procname, ref->procname) == 0)) - match++; - - if (match) { - if (cur_depth != 0) { - cur_depth--; - ref = ref->child; - goto repeat; - } - goto out; - } + if (head->attached_to != dir_table) + continue; + error = sysctl_check_table_dups(path, head->attached_by, + table); } - } - ref = NULL; -out: - sysctl_head_finish(head); - return ref; + root = list_entry(root->root_list.next, + struct ctl_table_root, root_list); + } while (root != &sysctl_table_root); + return error; } -static void set_fail(const char **fail, struct ctl_table *table, const char *str) +static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...) { - if (*fail) { - printk(KERN_ERR "sysctl table check failed: "); - sysctl_print_path(table); - printk(" %s\n", *fail); - dump_stack(); - } - *fail = str; -} + struct va_format vaf; + va_list args; -static void sysctl_check_leaf(struct nsproxy *namespaces, - struct ctl_table *table, const char **fail) -{ - struct ctl_table *ref; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_ERR "sysctl table check failed: %s/%s %pV\n", + path, table->procname, &vaf); - ref = sysctl_check_lookup(namespaces, table); - if (ref && (ref != table)) - set_fail(fail, table, "Sysctl already exists"); + va_end(args); + return -EINVAL; } -static int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) +static int sysctl_check_table(const char *path, struct ctl_table *table) { - int error = 0; + int err = 0; for (; table->procname; table++) { - const char *fail = NULL; - - if (table->parent) { - if (!table->parent->procname) - set_fail(&fail, table, "Parent without procname"); - } - if (table->child) { - if (table->data) - set_fail(&fail, table, "Directory with data?"); - if (table->maxlen) - set_fail(&fail, table, "Directory with maxlen?"); - if ((table->mode & (S_IRUGO|S_IXUGO)) != table->mode) - set_fail(&fail, table, "Writable sysctl directory"); - if (table->proc_handler) - set_fail(&fail, table, "Directory with proc_handler"); - if (table->extra1) - set_fail(&fail, table, "Directory with extra1"); - if (table->extra2) - set_fail(&fail, table, "Directory with extra2"); - } else { - if ((table->proc_handler == proc_dostring) || - (table->proc_handler == proc_dointvec) || - (table->proc_handler == proc_dointvec_minmax) || - (table->proc_handler == proc_dointvec_jiffies) || - (table->proc_handler == proc_dointvec_userhz_jiffies) || - (table->proc_handler == proc_dointvec_ms_jiffies) || - (table->proc_handler == proc_doulongvec_minmax) || - (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { - if (!table->data) - set_fail(&fail, table, "No data"); - if (!table->maxlen) - set_fail(&fail, table, "No maxlen"); - } -#ifdef CONFIG_PROC_SYSCTL - if (!table->proc_handler) - set_fail(&fail, table, "No proc_handler"); -#endif - sysctl_check_leaf(namespaces, table, &fail); - } - if (table->mode > 0777) - set_fail(&fail, table, "bogus .mode"); - if (fail) { - set_fail(&fail, table, NULL); - error = -EINVAL; - } if (table->child) - error |= sysctl_check_table(namespaces, table->child); + err = sysctl_err(path, table, "Not a file"); + + if ((table->proc_handler == proc_dostring) || 
+ (table->proc_handler == proc_dointvec) || + (table->proc_handler == proc_dointvec_minmax) || + (table->proc_handler == proc_dointvec_jiffies) || + (table->proc_handler == proc_dointvec_userhz_jiffies) || + (table->proc_handler == proc_dointvec_ms_jiffies) || + (table->proc_handler == proc_doulongvec_minmax) || + (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { + if (!table->data) + err = sysctl_err(path, table, "No data"); + if (!table->maxlen) + err = sysctl_err(path, table, "No maxlen"); + } + if (!table->proc_handler) + err = sysctl_err(path, table, "No proc_handler"); + + if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode) + err = sysctl_err(path, table, "bogus .mode 0%o", + table->mode); } - return error; + return err; } -#endif /* CONFIG_SYSCTL_SYSCALL_CHECK */ /** * __register_sysctl_table - register a leaf sysctl table @@ -1003,12 +949,8 @@ struct ctl_table_header *__register_sysctl_table( header->root = root; sysctl_set_parent(NULL, header->ctl_table); header->count = 1; -#ifdef CONFIG_SYSCTL_SYSCALL_CHECK - if (sysctl_check_table(namespaces, header->ctl_table)) { - kfree(header); - return NULL; - } -#endif + if (sysctl_check_table(path, table)) + goto fail; spin_lock(&sysctl_lock); header->set = lookup_header_set(root, namespaces); header->attached_by = header->ctl_table; @@ -1029,11 +971,19 @@ struct ctl_table_header *__register_sysctl_table( struct ctl_table_root, root_list); set = lookup_header_set(root, namespaces); } + if (sysctl_check_dups(namespaces, header, path, table)) + goto fail_locked; header->parent->count++; list_add_tail(&header->ctl_entry, &header->set->list); spin_unlock(&sysctl_lock); return header; +fail_locked: + spin_unlock(&sysctl_lock); +fail: + kfree(header); + dump_stack(); + return NULL; } static char *append_path(const char *path, char *pos, const char *name) -- cgit From 8d6ecfcc014332fd2fe933f64194160f0e3a6696 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 6 Jan 2012 11:55:30 -0800 Subject: sysctl: Remove the now unused ctl_table parent field. While useful at one time for selinux and the sysctl sanity checks those users no longer use the parent field and we can safely remove it. Inspired-by: Lucian Adrian Grijincu Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index a492ff60e071..e573f9b4f22e 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -218,16 +218,6 @@ static int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int return test_perm(mode, op); } -static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table) -{ - for (; table->procname; table++) { - table->parent = parent; - if (table->child) - sysctl_set_parent(table, table->child); - } -} - - static struct inode *proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) { @@ -947,10 +937,10 @@ struct ctl_table_header *__register_sysctl_table( header->used = 0; header->unregistering = NULL; header->root = root; - sysctl_set_parent(NULL, header->ctl_table); header->count = 1; if (sysctl_check_table(path, table)) goto fail; + spin_lock(&sysctl_lock); header->set = lookup_header_set(root, namespaces); header->attached_by = header->ctl_table; -- cgit From 3cc3e04636d603778d921854b84ae7bd34a349a2 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Sat, 7 Jan 2012 06:57:47 -0800 Subject: sysctl: A more obvious version of grab_header. Instead of relying on sysct_head_next(NULL) to magically return the right header for the root directory instead explicitly transform NULL into the root directories header. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index e573f9b4f22e..15444850b3e8 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -267,10 +267,10 @@ static struct ctl_table *find_in_table(struct ctl_table *p, struct qstr *name) static struct ctl_table_header *grab_header(struct inode *inode) { - if (PROC_I(inode)->sysctl) - return sysctl_head_grab(PROC_I(inode)->sysctl); - else - return sysctl_head_next(NULL); + struct ctl_table_header *head = PROC_I(inode)->sysctl; + if (!head) + head = &root_table_header; + return sysctl_head_grab(head); } static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, -- cgit From 938aaa4f9249aa1519fd0db07fc72125de2df338 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 9 Jan 2012 17:24:30 -0800 Subject: sysctl: Initial support for auto-unregistering sysctl tables. Add nreg to ctl_table_header. When nreg drops to 0 the ctl_table_header will be unregistered. Factor out drop_sysctl_table from unregister_sysctl_table, and add the logic for decrementing nreg. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 15444850b3e8..13faa48c467e 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -29,8 +29,9 @@ static struct ctl_table root_table[1]; static struct ctl_table_root sysctl_table_root; static struct ctl_table_header root_table_header = { {{.count = 1, - .ctl_table = root_table, - .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, + .nreg = 1, + .ctl_table = root_table, + .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, }; @@ -938,6 +939,7 @@ struct ctl_table_header *__register_sysctl_table( header->unregistering = NULL; header->root = root; header->count = 1; + header->nreg = 1; if (sysctl_check_table(path, table)) goto fail; @@ -1192,6 +1194,20 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table *table) } EXPORT_SYMBOL(register_sysctl_table); +static void drop_sysctl_table(struct ctl_table_header *header) +{ + if (--header->nreg) + return; + + start_unregistering(header); + if (!--header->parent->count) { + WARN_ON(1); + kfree_rcu(header->parent, rcu); + } + if (!--header->count) + kfree_rcu(header, rcu); +} + /** * unregister_sysctl_table - unregister a sysctl table hierarchy * @header: the header returned from register_sysctl_table @@ -1224,13 +1240,7 @@ void unregister_sysctl_table(struct ctl_table_header * header) } spin_lock(&sysctl_lock); - start_unregistering(header); - if (!--header->parent->count) { - WARN_ON(1); - kfree_rcu(header->parent, rcu); - } - if (!--header->count) - kfree_rcu(header, rcu); + drop_sysctl_table(header); spin_unlock(&sysctl_lock); } EXPORT_SYMBOL(unregister_sysctl_table); -- cgit From e0d045290a8454ecd7f63c78c10d412f35d6ef94 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Mon, 9 Jan 2012 22:36:41 -0800 Subject: sysctl: Factor out init_header from __register_sysctl_paths Factor out a routing to initialize the sysctl_table_header. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 13faa48c467e..49799259b0f3 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -42,6 +42,21 @@ static struct ctl_table_root sysctl_table_root = { static DEFINE_SPINLOCK(sysctl_lock); +static void init_header(struct ctl_table_header *head, + struct ctl_table_root *root, struct ctl_table_set *set, + struct ctl_table *table) +{ + head->ctl_table_arg = table; + INIT_LIST_HEAD(&head->ctl_entry); + head->used = 0; + head->count = 1; + head->nreg = 1; + head->unregistering = NULL; + head->root = root; + head->set = set; + head->parent = NULL; +} + /* called under sysctl_lock */ static int use_table(struct ctl_table_header *p) { @@ -932,14 +947,8 @@ struct ctl_table_header *__register_sysctl_table( new_name += namelen + 1; } *prevp = table; - header->ctl_table_arg = table; - - INIT_LIST_HEAD(&header->ctl_entry); - header->used = 0; - header->unregistering = NULL; - header->root = root; - header->count = 1; - header->nreg = 1; + + init_header(header, root, NULL, table); if (sysctl_check_table(path, table)) goto fail; -- cgit From 8425d6aaf0704b98480131ed339c208ffce12e44 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 9 Jan 2012 17:35:01 -0800 Subject: sysctl: Factor out insert_header and erase_header Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 49799259b0f3..7e96a2681b60 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -57,6 +57,17 @@ static void init_header(struct ctl_table_header *head, head->parent = NULL; } +static void erase_header(struct ctl_table_header *head) +{ + list_del_init(&head->ctl_entry); +} + +static void insert_header(struct ctl_table_header *header) +{ + header->parent->count++; + list_add_tail(&header->ctl_entry, &header->set->list); +} + /* called under sysctl_lock */ static int use_table(struct ctl_table_header *p) { @@ -96,7 +107,7 @@ static void start_unregistering(struct ctl_table_header *p) * do not remove from the list until nobody holds it; walking the * list in do_sysctl() relies on that. */ - list_del_init(&p->ctl_entry); + erase_header(p); } static void sysctl_head_get(struct ctl_table_header *head) @@ -974,8 +985,7 @@ struct ctl_table_header *__register_sysctl_table( } if (sysctl_check_dups(namespaces, header, path, table)) goto fail_locked; - header->parent->count++; - list_add_tail(&header->ctl_entry, &header->set->list); + insert_header(header); spin_unlock(&sysctl_lock); return header; -- cgit From a194558e8698621a9ce7f2c6a720123e644af131 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 21 Jan 2012 17:51:48 -0800 Subject: sysctl: Normalize the root_table data structure. Every other directory has a .child member and we look at the .child for our entries. Do the same for the root_table. Signed-off-by: Eric W. 
Biederman --- fs/proc/proc_sysctl.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 7e96a2681b60..88d1b06cc5c0 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -25,7 +25,14 @@ void proc_sys_poll_notify(struct ctl_table_poll *poll) wake_up_interruptible(&poll->wait); } -static struct ctl_table root_table[1]; +static struct ctl_table root_table[] = { + { + .procname = "", + .mode = S_IRUGO|S_IXUGO, + .child = &root_table[1], + }, + { } +}; static struct ctl_table_root sysctl_table_root; static struct ctl_table_header root_table_header = { {{.count = 1, @@ -319,7 +326,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, goto out; } - table = table ? table->child : head->ctl_table; + table = table ? table->child : &head->ctl_table[1]; p = find_in_table(table, name); if (!p) { @@ -510,7 +517,7 @@ static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) goto out; } - table = table ? table->child : head->ctl_table; + table = table ? table->child : &head->ctl_table[1]; ret = 0; /* Avoid a switch here: arm builds fail with missing __cmpdi2 */ @@ -966,7 +973,7 @@ struct ctl_table_header *__register_sysctl_table( spin_lock(&sysctl_lock); header->set = lookup_header_set(root, namespaces); header->attached_by = header->ctl_table; - header->attached_to = root_table; + header->attached_to = &root_table[1]; header->parent = &root_table_header; set = header->set; root = header->root; -- cgit From 076c3eed2c31773200b082568957fd8852ae93d7 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 9 Jan 2012 21:42:02 -0800 Subject: sysctl: Rewrite proc_sys_lookup introducing find_entry and lookup_entry. Replace the helpers that proc_sys_lookup uses with helpers that work in terms of an entire sysctl directory. This is worse for sysctl_lock hold times but it is much better for code clarity and the code cleanups to come. find_in_table is no longer needed so it is removed. find_entry a general helper to find entries in a directory is added. lookup_entry is a simple wrapper around find_entry that takes the sysctl_lock increases the use count if an entry is found and drops the sysctl_lock. Signed-off-by: Eric W. 
Biederman --- fs/proc/proc_sysctl.c | 102 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 76 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 88d1b06cc5c0..3b63f298ce28 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -49,6 +49,55 @@ static struct ctl_table_root sysctl_table_root = { static DEFINE_SPINLOCK(sysctl_lock); +static int namecmp(const char *name1, int len1, const char *name2, int len2) +{ + int minlen; + int cmp; + + minlen = len1; + if (minlen > len2) + minlen = len2; + + cmp = memcmp(name1, name2, minlen); + if (cmp == 0) + cmp = len1 - len2; + return cmp; +} + +static struct ctl_table *find_entry(struct ctl_table_header **phead, + struct ctl_table_set *set, + struct ctl_table_header *dir_head, struct ctl_table *dir, + const char *name, int namelen) +{ + struct ctl_table_header *head; + struct ctl_table *entry; + + if (dir_head->set == set) { + for (entry = dir; entry->procname; entry++) { + const char *procname = entry->procname; + if (namecmp(procname, strlen(procname), name, namelen) == 0) { + *phead = dir_head; + return entry; + } + } + } + + list_for_each_entry(head, &set->list, ctl_entry) { + if (head->unregistering) + continue; + if (head->attached_to != dir) + continue; + for (entry = head->attached_by; entry->procname; entry++) { + const char *procname = entry->procname; + if (namecmp(procname, strlen(procname), name, namelen) == 0) { + *phead = head; + return entry; + } + } + } + return NULL; +} + static void init_header(struct ctl_table_header *head, struct ctl_table_root *root, struct ctl_table_set *set, struct ctl_table *table) @@ -168,6 +217,32 @@ lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) return &set->list; } +static struct ctl_table *lookup_entry(struct ctl_table_header **phead, + struct ctl_table_header *dir_head, + struct ctl_table *dir, + const char *name, int namelen) +{ + struct ctl_table_header *head; + struct ctl_table *entry; + struct ctl_table_root *root; + struct ctl_table_set *set; + + spin_lock(&sysctl_lock); + root = &sysctl_table_root; + do { + set = lookup_header_set(root, current->nsproxy); + entry = find_entry(&head, set, dir_head, dir, name, namelen); + if (entry && use_table(head)) + *phead = head; + else + entry = NULL; + root = list_entry(root->root_list.next, + struct ctl_table_root, root_list); + } while (!entry && root != &sysctl_table_root); + spin_unlock(&sysctl_lock); + return entry; +} + static struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, struct ctl_table_header *prev) { @@ -284,21 +359,6 @@ out: return inode; } -static struct ctl_table *find_in_table(struct ctl_table *p, struct qstr *name) -{ - for ( ; p->procname; p++) { - if (strlen(p->procname) != name->len) - continue; - - if (memcmp(p->procname, name->name, name->len) != 0) - continue; - - /* I have a match */ - return p; - } - return NULL; -} - static struct ctl_table_header *grab_header(struct inode *inode) { struct ctl_table_header *head = PROC_I(inode)->sysctl; @@ -328,17 +388,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, table = table ? 
table->child : &head->ctl_table[1]; - p = find_in_table(table, name); - if (!p) { - for (h = sysctl_head_next(NULL); h; h = sysctl_head_next(h)) { - if (h->attached_to != table) - continue; - p = find_in_table(h->attached_by, name); - if (p) - break; - } - } - + p = lookup_entry(&h, head, table, name->name, name->len); if (!p) goto out; -- cgit From 6a75ce167c53b41f15088d3c2c7e51c89dc8798a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 18 Jan 2012 03:15:51 -0800 Subject: sysctl: Rewrite proc_sys_readdir in terms of first_entry and next_entry Replace sysctl_head_next with first_entry and next_entry. These new iterators operate at the level of sysctl table entries and filter out any sysctl tables that should not be shown. Utilizing two specialized functions instead of a single function removes conditionals for handling awkward special cases that only come up at the beginning of iteration, making the iterators easier to read and understand. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 98 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 62 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 3b63f298ce28..d9c3ae6afe4c 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -243,31 +243,25 @@ static struct ctl_table *lookup_entry(struct ctl_table_header **phead, return entry; } -static struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, - struct ctl_table_header *prev) +static struct ctl_table_header *next_usable_entry(struct ctl_table *dir, + struct ctl_table_root *root, struct list_head *tmp) { - struct ctl_table_root *root; + struct nsproxy *namespaces = current->nsproxy; struct list_head *header_list; struct ctl_table_header *head; - struct list_head *tmp; - spin_lock(&sysctl_lock); - if (prev) { - head = prev; - tmp = &prev->ctl_entry; - unuse_table(prev); - goto next; - } - tmp = &root_table_header.ctl_entry; + goto next; for (;;) { head = list_entry(tmp, struct ctl_table_header, ctl_entry); + root = head->root; - if (!use_table(head)) + if (head->attached_to != dir || + !head->attached_by->procname || + !use_table(head)) goto next; - spin_unlock(&sysctl_lock); + return head; next: - root = head->root; tmp = tmp->next; header_list = lookup_header_list(root, namespaces); if (tmp != header_list) @@ -283,13 +277,53 @@ static struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, tmp = header_list->next; } out: - spin_unlock(&sysctl_lock); return NULL; } -static struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev) +static void first_entry( + struct ctl_table_header *dir_head, struct ctl_table *dir, + struct ctl_table_header **phead, struct ctl_table **pentry) { - return __sysctl_head_next(current->nsproxy, prev); + struct ctl_table_header *head = dir_head; + struct ctl_table *entry = dir; + + spin_lock(&sysctl_lock); + if (entry->procname) { + use_table(head); + } else { + head = next_usable_entry(dir, &sysctl_table_root, + &sysctl_table_root.default_set.list); + if (head) + entry = head->attached_by; + } + spin_unlock(&sysctl_lock); + *phead = head; + *pentry = entry; +} + +static void next_entry(struct ctl_table *dir, + struct ctl_table_header **phead, struct ctl_table **pentry) +{ + struct ctl_table_header *head = *phead; + struct ctl_table *entry = *pentry; + + entry++; + if (!entry->procname) { + struct ctl_table_root *root = head->root; + struct list_head *tmp = &head->ctl_entry; + if (head->attached_to != dir) 
{ + root = &sysctl_table_root; + tmp = &sysctl_table_root.default_set.list; + } + spin_lock(&sysctl_lock); + unuse_table(head); + head = next_usable_entry(dir, root, tmp); + spin_unlock(&sysctl_lock); + if (head) + entry = head->attached_by; + } + *phead = head; + *pentry = entry; } void register_sysctl_root(struct ctl_table_root *root) @@ -533,20 +567,17 @@ static int scan(struct ctl_table_header *head, ctl_table *table, unsigned long *pos, struct file *file, void *dirent, filldir_t filldir) { + int res; - for (; table->procname; table++, (*pos)++) { - int res; + if ((*pos)++ < file->f_pos) + return 0; - if (*pos < file->f_pos) - continue; + res = proc_sys_fill_cache(file, dirent, filldir, head, table); - res = proc_sys_fill_cache(file, dirent, filldir, head, table); - if (res) - return res; + if (res == 0) + file->f_pos = *pos; - file->f_pos = *pos + 1; - } - return 0; + return res; } static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) @@ -556,6 +587,7 @@ static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; struct ctl_table_header *h = NULL; + struct ctl_table *entry; unsigned long pos; int ret = -EINVAL; @@ -585,14 +617,8 @@ static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) } pos = 2; - ret = scan(head, table, &pos, filp, dirent, filldir); - if (ret) - goto out; - - for (h = sysctl_head_next(NULL); h; h = sysctl_head_next(h)) { - if (h->attached_to != table) - continue; - ret = scan(h, h->attached_by, &pos, filp, dirent, filldir); + for (first_entry(head, table, &h, &entry); h; next_entry(table, &h, &entry)) { + ret = scan(h, entry, &pos, filp, dirent, filldir); if (ret) { sysctl_head_finish(h); break; -- cgit From 9eb47c26f09e27506d343ef52e634b2a50ee21ef Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 22 Jan 2012 21:26:00 -0800 Subject: sysctl: Add a root pointer to ctl_table_set Add a ctl_table_root pointer to ctl_table set so it is easy to go from a ctl_table_set to a ctl_table_root. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index d9c3ae6afe4c..65c13dddceae 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -45,6 +45,7 @@ static struct ctl_table_header root_table_header = { static struct ctl_table_root sysctl_table_root = { .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), .default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry), + .default_set.root = &sysctl_table_root, }; static DEFINE_SPINLOCK(sysctl_lock); @@ -1348,9 +1349,11 @@ void unregister_sysctl_table(struct ctl_table_header * header) EXPORT_SYMBOL(unregister_sysctl_table); void setup_sysctl_set(struct ctl_table_set *p, + struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)) { INIT_LIST_HEAD(&p->list); + p->root = root; p->is_seen = is_seen; } -- cgit From 7ec66d06362da7684a4948c4c2bf1f8546425df4 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 29 Dec 2011 08:24:29 -0800 Subject: sysctl: Stop requiring explicit management of sysctl directories Simplify the code and the sysctl semantics by autogenerating sysctl directories when a sysctl table is registered that needs the directories and autodeleting the directories when there are no more sysctl tables registered that need them. 
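The rule behind these autogenerated directories is, roughly, a reference count: every table (and every child directory) registered inside a directory pins it via the nreg count, dropping the last reference tears the directory down and releases its parent in turn, and the root directory stays alive thanks to a static initial reference. A much simplified sketch of that rule, with invented names and the locking, RCU freeing and lookup races left out:

#include <stdlib.h>

/*
 * Simplified sketch (invented names, no locking or RCU) of the
 * directory lifetime rule introduced below: each registered table or
 * subdirectory pins its parent directory, and the last unpin removes
 * the directory and releases the directory's own parent in turn.
 */
struct dir_sketch {
	struct dir_sketch *parent;
	int nreg;			/* tables + subdirectories inside us */
};

static void pin_dir(struct dir_sketch *dir)
{
	dir->nreg++;			/* something new was registered in here */
}

static void unpin_dir(struct dir_sketch *dir)
{
	while (dir && --dir->nreg == 0) {
		struct dir_sketch *parent = dir->parent;

		free(dir);		/* autogenerated directory disappears */
		dir = parent;		/* ...and stops pinning its own parent */
	}
}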
Autogenerating directories keeps sysctl tables from depending on each other, removing all of the arcane register/unregister ordering constraints and makes it impossible to get the order wrong when reigsering and unregistering sysctl tables. Autogenerating directories yields one unique entity that dentries can point to, retaining the current effective use of the dcache. Add struct ctl_dir as the type of these new autogenerated directories. The attached_by and attached_to fields in ctl_table_header are removed as they are no longer needed. The child field in ctl_table is no longer needed by the core of the sysctl code. ctl_table.child can be removed once all of the existing users have been updated. Benchmark before: make-dummies 0 999 -> 0.7s rmmod dummy -> 0.07s make-dummies 0 9999 -> 1m10s rmmod dummy -> 0.4s Benchmark after: make-dummies 0 999 -> 0.44s rmmod dummy -> 0.065s make-dummies 0 9999 -> 1m36s rmmod dummy -> 0.4s Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 342 +++++++++++++++++++++----------------------------- 1 file changed, 143 insertions(+), 199 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 65c13dddceae..3c0767d5a55f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -28,28 +28,31 @@ void proc_sys_poll_notify(struct ctl_table_poll *poll) static struct ctl_table root_table[] = { { .procname = "", - .mode = S_IRUGO|S_IXUGO, - .child = &root_table[1], + .mode = S_IFDIR|S_IRUGO|S_IXUGO, }, { } }; static struct ctl_table_root sysctl_table_root; -static struct ctl_table_header root_table_header = { - {{.count = 1, - .nreg = 1, - .ctl_table = root_table, - .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, - .root = &sysctl_table_root, - .set = &sysctl_table_root.default_set, +static struct ctl_dir sysctl_root_dir = { + .header = { + {{.count = 1, + .nreg = 1, + .ctl_table = root_table, + .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, + .root = &sysctl_table_root, + .set = &sysctl_table_root.default_set, + }, }; static struct ctl_table_root sysctl_table_root = { .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), - .default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry), + .default_set.list = LIST_HEAD_INIT(sysctl_root_dir.header.ctl_entry), .default_set.root = &sysctl_table_root, }; static DEFINE_SPINLOCK(sysctl_lock); +static void drop_sysctl_table(struct ctl_table_header *header); + static int namecmp(const char *name1, int len1, const char *name2, int len2) { int minlen; @@ -66,29 +69,18 @@ static int namecmp(const char *name1, int len1, const char *name2, int len2) } static struct ctl_table *find_entry(struct ctl_table_header **phead, - struct ctl_table_set *set, - struct ctl_table_header *dir_head, struct ctl_table *dir, + struct ctl_table_set *set, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; - if (dir_head->set == set) { - for (entry = dir; entry->procname; entry++) { - const char *procname = entry->procname; - if (namecmp(procname, strlen(procname), name, namelen) == 0) { - *phead = dir_head; - return entry; - } - } - } - list_for_each_entry(head, &set->list, ctl_entry) { if (head->unregistering) continue; - if (head->attached_to != dir) + if (head->parent != dir) continue; - for (entry = head->attached_by; entry->procname; entry++) { + for (entry = head->ctl_table; entry->procname; entry++) { const char *procname = entry->procname; if (namecmp(procname, strlen(procname), name, 
namelen) == 0) { *phead = head; @@ -103,6 +95,7 @@ static void init_header(struct ctl_table_header *head, struct ctl_table_root *root, struct ctl_table_set *set, struct ctl_table *table) { + head->ctl_table = table; head->ctl_table_arg = table; INIT_LIST_HEAD(&head->ctl_entry); head->used = 0; @@ -119,9 +112,10 @@ static void erase_header(struct ctl_table_header *head) list_del_init(&head->ctl_entry); } -static void insert_header(struct ctl_table_header *header) +static void insert_header(struct ctl_dir *dir, struct ctl_table_header *header) { - header->parent->count++; + header->parent = dir; + header->parent->header.nreg++; list_add_tail(&header->ctl_entry, &header->set->list); } @@ -219,8 +213,7 @@ lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) } static struct ctl_table *lookup_entry(struct ctl_table_header **phead, - struct ctl_table_header *dir_head, - struct ctl_table *dir, + struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; @@ -232,7 +225,7 @@ static struct ctl_table *lookup_entry(struct ctl_table_header **phead, root = &sysctl_table_root; do { set = lookup_header_set(root, current->nsproxy); - entry = find_entry(&head, set, dir_head, dir, name, namelen); + entry = find_entry(&head, set, dir, name, namelen); if (entry && use_table(head)) *phead = head; else @@ -244,7 +237,7 @@ static struct ctl_table *lookup_entry(struct ctl_table_header **phead, return entry; } -static struct ctl_table_header *next_usable_entry(struct ctl_table *dir, +static struct ctl_table_header *next_usable_entry(struct ctl_dir *dir, struct ctl_table_root *root, struct list_head *tmp) { struct nsproxy *namespaces = current->nsproxy; @@ -256,8 +249,8 @@ static struct ctl_table_header *next_usable_entry(struct ctl_table *dir, head = list_entry(tmp, struct ctl_table_header, ctl_entry); root = head->root; - if (head->attached_to != dir || - !head->attached_by->procname || + if (head->parent != dir || + !head->ctl_table->procname || !use_table(head)) goto next; @@ -281,47 +274,35 @@ out: return NULL; } -static void first_entry( - struct ctl_table_header *dir_head, struct ctl_table *dir, +static void first_entry(struct ctl_dir *dir, struct ctl_table_header **phead, struct ctl_table **pentry) { - struct ctl_table_header *head = dir_head; - struct ctl_table *entry = dir; + struct ctl_table_header *head; + struct ctl_table *entry = NULL; spin_lock(&sysctl_lock); - if (entry->procname) { - use_table(head); - } else { - head = next_usable_entry(dir, &sysctl_table_root, - &sysctl_table_root.default_set.list); - if (head) - entry = head->attached_by; - } + head = next_usable_entry(dir, &sysctl_table_root, + &sysctl_table_root.default_set.list); spin_unlock(&sysctl_lock); + if (head) + entry = head->ctl_table; *phead = head; *pentry = entry; } -static void next_entry(struct ctl_table *dir, - struct ctl_table_header **phead, struct ctl_table **pentry) +static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head = *phead; struct ctl_table *entry = *pentry; entry++; if (!entry->procname) { - struct ctl_table_root *root = head->root; - struct list_head *tmp = &head->ctl_entry; - if (head->attached_to != dir) { - root = &sysctl_table_root; - tmp = &sysctl_table_root.default_set.list; - } spin_lock(&sysctl_lock); unuse_table(head); - head = next_usable_entry(dir, root, tmp); + head = next_usable_entry(head->parent, head->root, &head->ctl_entry); spin_unlock(&sysctl_lock); if (head) - entry = head->attached_by; + 
entry = head->ctl_table; } *phead = head; *pentry = entry; @@ -381,7 +362,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_mode = table->mode; - if (!table->child) { + if (!S_ISDIR(table->mode)) { inode->i_mode |= S_IFREG; inode->i_op = &proc_sys_inode_operations; inode->i_fop = &proc_sys_file_operations; @@ -398,7 +379,7 @@ static struct ctl_table_header *grab_header(struct inode *inode) { struct ctl_table_header *head = PROC_I(inode)->sysctl; if (!head) - head = &root_table_header; + head = &sysctl_root_dir.header; return sysctl_head_grab(head); } @@ -406,24 +387,19 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct ctl_table_header *head = grab_header(dir); - struct ctl_table *table = PROC_I(dir)->sysctl_entry; struct ctl_table_header *h = NULL; struct qstr *name = &dentry->d_name; struct ctl_table *p; struct inode *inode; struct dentry *err = ERR_PTR(-ENOENT); + struct ctl_dir *ctl_dir; if (IS_ERR(head)) return ERR_CAST(head); - if (table && !table->child) { - WARN_ON(1); - goto out; - } + ctl_dir = container_of(head, struct ctl_dir, header); - table = table ? table->child : &head->ctl_table[1]; - - p = lookup_entry(&h, head, table, name->name, name->len); + p = lookup_entry(&h, ctl_dir, name->name, name->len); if (!p) goto out; @@ -586,21 +562,16 @@ static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) struct dentry *dentry = filp->f_path.dentry; struct inode *inode = dentry->d_inode; struct ctl_table_header *head = grab_header(inode); - struct ctl_table *table = PROC_I(inode)->sysctl_entry; struct ctl_table_header *h = NULL; struct ctl_table *entry; + struct ctl_dir *ctl_dir; unsigned long pos; int ret = -EINVAL; if (IS_ERR(head)) return PTR_ERR(head); - if (table && !table->child) { - WARN_ON(1); - goto out; - } - - table = table ? table->child : &head->ctl_table[1]; + ctl_dir = container_of(head, struct ctl_dir, header); ret = 0; /* Avoid a switch here: arm builds fail with missing __cmpdi2 */ @@ -618,7 +589,7 @@ static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) } pos = 2; - for (first_entry(head, table, &h, &entry); h; next_entry(table, &h, &entry)) { + for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { ret = scan(h, entry, &pos, filp, dirent, filldir); if (ret) { sysctl_head_finish(h); @@ -779,52 +750,86 @@ static const struct dentry_operations proc_sys_dentry_operations = { .d_compare = proc_sys_compare, }; -static struct ctl_table *is_branch_in(struct ctl_table *branch, - struct ctl_table *table) +static struct ctl_dir *find_subdir(struct ctl_table_set *set, struct ctl_dir *dir, + const char *name, int namelen) { - struct ctl_table *p; - const char *s = branch->procname; + struct ctl_table_header *head; + struct ctl_table *entry; - /* branch should have named subdirectory as its first element */ - if (!s || !branch->child) - return NULL; + entry = find_entry(&head, set, dir, name, namelen); + if (!entry) + return ERR_PTR(-ENOENT); + if (S_ISDIR(entry->mode)) + return container_of(head, struct ctl_dir, header); + return ERR_PTR(-ENOTDIR); +} + +static struct ctl_dir *new_dir(struct ctl_table_set *set, + const char *name, int namelen) +{ + struct ctl_table *table; + struct ctl_dir *new; + char *new_name; - /* ... 
and nothing else */ - if (branch[1].procname) + new = kzalloc(sizeof(*new) + sizeof(struct ctl_table)*2 + + namelen + 1, GFP_KERNEL); + if (!new) return NULL; - /* table should contain subdirectory with the same name */ - for (p = table; p->procname; p++) { - if (!p->child) - continue; - if (p->procname && strcmp(p->procname, s) == 0) - return p; - } - return NULL; + table = (struct ctl_table *)(new + 1); + new_name = (char *)(table + 2); + memcpy(new_name, name, namelen); + new_name[namelen] = '\0'; + table[0].procname = new_name; + table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; + init_header(&new->header, set->root, set, table); + + return new; } -/* see if attaching q to p would be an improvement */ -static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q) +static struct ctl_dir *get_subdir(struct ctl_table_set *set, + struct ctl_dir *dir, const char *name, int namelen) { - struct ctl_table *to = p->ctl_table, *by = q->ctl_table; - struct ctl_table *next; - int is_better = 0; - int not_in_parent = !p->attached_by; - - while ((next = is_branch_in(by, to)) != NULL) { - if (by == q->attached_by) - is_better = 1; - if (to == p->attached_by) - not_in_parent = 1; - by = by->child; - to = next->child; - } + struct ctl_dir *subdir, *new = NULL; - if (is_better && not_in_parent) { - q->attached_by = by; - q->attached_to = to; - q->parent = p; + spin_lock(&sysctl_lock); + subdir = find_subdir(dir->header.set, dir, name, namelen); + if (!IS_ERR(subdir)) + goto found; + if ((PTR_ERR(subdir) == -ENOENT) && set != dir->header.set) + subdir = find_subdir(set, dir, name, namelen); + if (!IS_ERR(subdir)) + goto found; + if (PTR_ERR(subdir) != -ENOENT) + goto failed; + + spin_unlock(&sysctl_lock); + new = new_dir(set, name, namelen); + spin_lock(&sysctl_lock); + subdir = ERR_PTR(-ENOMEM); + if (!new) + goto failed; + + subdir = find_subdir(set, dir, name, namelen); + if (!IS_ERR(subdir)) + goto found; + if (PTR_ERR(subdir) != -ENOENT) + goto failed; + + insert_header(dir, &new->header); + subdir = new; +found: + subdir->header.nreg++; +failed: + if (unlikely(IS_ERR(subdir))) { + printk(KERN_ERR "sysctl could not get directory: %*.*s %ld\n", + namelen, namelen, name, PTR_ERR(subdir)); } + drop_sysctl_table(&dir->header); + if (new) + drop_sysctl_table(&new->header); + spin_unlock(&sysctl_lock); + return subdir; } static int sysctl_check_table_dups(const char *path, struct ctl_table *old, @@ -846,24 +851,14 @@ static int sysctl_check_table_dups(const char *path, struct ctl_table *old, } static int sysctl_check_dups(struct nsproxy *namespaces, - struct ctl_table_header *header, + struct ctl_dir *dir, const char *path, struct ctl_table *table) { struct ctl_table_root *root; struct ctl_table_set *set; - struct ctl_table_header *dir_head, *head; - struct ctl_table *dir_table; + struct ctl_table_header *head; int error = 0; - /* No dups if we are the only member of our directory */ - if (header->attached_by != table) - return 0; - - dir_head = header->parent; - dir_table = header->attached_to; - - error = sysctl_check_table_dups(path, dir_table, table); - root = &sysctl_table_root; do { set = lookup_header_set(root, namespaces); @@ -871,9 +866,9 @@ static int sysctl_check_dups(struct nsproxy *namespaces, list_for_each_entry(head, &set->list, ctl_entry) { if (head->unregistering) continue; - if (head->attached_to != dir_table) + if (head->parent != dir) continue; - error = sysctl_check_table_dups(path, head->attached_by, + error = sysctl_check_table_dups(path, head->ctl_table, table); } root = 
list_entry(root->root_list.next, @@ -977,47 +972,25 @@ struct ctl_table_header *__register_sysctl_table( const char *path, struct ctl_table *table) { struct ctl_table_header *header; - struct ctl_table *new, **prevp; const char *name, *nextname; - unsigned int npath = 0; struct ctl_table_set *set; - size_t path_bytes = 0; - char *new_name; - - /* Count the path components */ - for (name = path; name; name = nextname) { - int namelen; - nextname = strchr(name, '/'); - if (nextname) { - namelen = nextname - name; - nextname++; - } else { - namelen = strlen(name); - } - if (namelen == 0) - continue; - path_bytes += namelen + 1; - npath++; - } + struct ctl_dir *dir; - /* - * For each path component, allocate a 2-element ctl_table array. - * The first array element will be filled with the sysctl entry - * for this, the second will be the sentinel (procname == 0). - * - * We allocate everything in one go so that we don't have to - * worry about freeing additional memory in unregister_sysctl_table. - */ - header = kzalloc(sizeof(struct ctl_table_header) + path_bytes + - (2 * npath * sizeof(struct ctl_table)), GFP_KERNEL); + header = kzalloc(sizeof(struct ctl_table_header), GFP_KERNEL); if (!header) return NULL; - new = (struct ctl_table *) (header + 1); - new_name = (char *)(new + (2 * npath)); + init_header(header, root, NULL, table); + if (sysctl_check_table(path, table)) + goto fail; + + spin_lock(&sysctl_lock); + header->set = set = lookup_header_set(root, namespaces); + dir = &sysctl_root_dir; + dir->header.nreg++; + spin_unlock(&sysctl_lock); - /* Now connect the dots */ - prevp = &header->ctl_table; + /* Find the directory for the ctl_table */ for (name = path; name; name = nextname) { int namelen; nextname = strchr(name, '/'); @@ -1029,51 +1002,21 @@ struct ctl_table_header *__register_sysctl_table( } if (namelen == 0) continue; - memcpy(new_name, name, namelen); - new_name[namelen] = '\0'; - - new->procname = new_name; - new->mode = 0555; - - *prevp = new; - prevp = &new->child; - new += 2; - new_name += namelen + 1; + dir = get_subdir(set, dir, name, namelen); + if (IS_ERR(dir)) + goto fail; } - *prevp = table; - - init_header(header, root, NULL, table); - if (sysctl_check_table(path, table)) - goto fail; - spin_lock(&sysctl_lock); - header->set = lookup_header_set(root, namespaces); - header->attached_by = header->ctl_table; - header->attached_to = &root_table[1]; - header->parent = &root_table_header; - set = header->set; - root = header->root; - for (;;) { - struct ctl_table_header *p; - list_for_each_entry(p, &set->list, ctl_entry) { - if (p->unregistering) - continue; - try_attach(p, header); - } - if (root == &sysctl_table_root) - break; - root = list_entry(root->root_list.prev, - struct ctl_table_root, root_list); - set = lookup_header_set(root, namespaces); - } - if (sysctl_check_dups(namespaces, header, path, table)) - goto fail_locked; - insert_header(header); + if (sysctl_check_dups(namespaces, dir, path, table)) + goto fail_put_dir_locked; + insert_header(dir, header); + drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); return header; -fail_locked: +fail_put_dir_locked: + drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); fail: kfree(header); @@ -1299,16 +1242,17 @@ EXPORT_SYMBOL(register_sysctl_table); static void drop_sysctl_table(struct ctl_table_header *header) { + struct ctl_dir *parent = header->parent; + if (--header->nreg) return; start_unregistering(header); - if (!--header->parent->count) { - WARN_ON(1); - kfree_rcu(header->parent, rcu); - } if 
(!--header->count) kfree_rcu(header, rcu); + + if (parent) + drop_sysctl_table(&parent->header); } /** -- cgit From 6980128fe1b834c92a85e556ca8198030f0d8d01 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 21 Jan 2012 20:09:45 -0800 Subject: sysctl: Add sysctl_print_dir and use it in get_subdir When there are errors it is very nice to know the full sysctl path. Add a simple function that computes the sysctl path and prints it out. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 3c0767d5a55f..a78556514a87 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -53,6 +53,13 @@ static DEFINE_SPINLOCK(sysctl_lock); static void drop_sysctl_table(struct ctl_table_header *header); +static void sysctl_print_dir(struct ctl_dir *dir) +{ + if (dir->header.parent) + sysctl_print_dir(dir->header.parent); + printk(KERN_CONT "%s/", dir->header.ctl_table[0].procname); +} + static int namecmp(const char *name1, int len1, const char *name2, int len2) { int minlen; @@ -822,7 +829,9 @@ found: subdir->header.nreg++; failed: if (unlikely(IS_ERR(subdir))) { - printk(KERN_ERR "sysctl could not get directory: %*.*s %ld\n", + printk(KERN_ERR "sysctl could not get directory: "); + sysctl_print_dir(dir); + printk(KERN_CONT "/%*.*s %ld\n", namelen, namelen, name, PTR_ERR(subdir)); } drop_sysctl_table(&dir->header); -- cgit From 0e47c99d7fe25e0f3907d9f3401079169d904891 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 7 Jan 2012 23:24:30 -0800 Subject: sysctl: Replace root_list with links between sysctl_table_sets. Piecing together directories by looking first in one directory tree, than in another directory tree and finally in a third directory tree makes it hard to verify that some directory entries are not multiply defined and makes it hard to create efficient implementations the sysctl filesystem. Replace the sysctl wide list of roots with autogenerated links from the core sysctl directory tree to the other sysctl directory trees. This simplifies sysctl directory reading and lookups as now only entries in a single sysctl directory tree need to be considered. Benchmark before: make-dummies 0 999 -> 0.44s rmmod dummy -> 0.065s make-dummies 0 9999 -> 1m36s rmmod dummy -> 0.4s Benchmark after: make-dummies 0 999 -> 0.63s rmmod dummy -> 0.12s make-dummies 0 9999 -> 2m35s rmmod dummy -> 18s The slowdown is caused by the lookups used in insert_headers and put_links to see if we need to add links or remove links. Signed-off-by: Eric W. 
Biederman --- fs/proc/proc_sysctl.c | 397 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 295 insertions(+), 102 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index a78556514a87..ec54a57c4690 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -32,26 +32,26 @@ static struct ctl_table root_table[] = { }, { } }; -static struct ctl_table_root sysctl_table_root; -static struct ctl_dir sysctl_root_dir = { - .header = { +static struct ctl_table_root sysctl_table_root = { + .default_set.list = LIST_HEAD_INIT(sysctl_table_root.default_set.dir.header.ctl_entry), + .default_set.dir.header = { {{.count = 1, .nreg = 1, .ctl_table = root_table, .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, + .ctl_table_arg = root_table, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, }, }; -static struct ctl_table_root sysctl_table_root = { - .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), - .default_set.list = LIST_HEAD_INIT(sysctl_root_dir.header.ctl_entry), - .default_set.root = &sysctl_table_root, -}; static DEFINE_SPINLOCK(sysctl_lock); static void drop_sysctl_table(struct ctl_table_header *header); +static int sysctl_follow_link(struct ctl_table_header **phead, + struct ctl_table **pentry, struct nsproxy *namespaces); +static int insert_links(struct ctl_table_header *head); +static void put_links(struct ctl_table_header *header); static void sysctl_print_dir(struct ctl_dir *dir) { @@ -76,9 +76,9 @@ static int namecmp(const char *name1, int len1, const char *name2, int len2) } static struct ctl_table *find_entry(struct ctl_table_header **phead, - struct ctl_table_set *set, struct ctl_dir *dir, - const char *name, int namelen) + struct ctl_dir *dir, const char *name, int namelen) { + struct ctl_table_set *set = dir->header.set; struct ctl_table_header *head; struct ctl_table *entry; @@ -119,11 +119,21 @@ static void erase_header(struct ctl_table_header *head) list_del_init(&head->ctl_entry); } -static void insert_header(struct ctl_dir *dir, struct ctl_table_header *header) +static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header) { + int err; + + dir->header.nreg++; header->parent = dir; - header->parent->header.nreg++; + err = insert_links(header); + if (err) + goto fail_links; list_add_tail(&header->ctl_entry, &header->set->list); + return 0; +fail_links: + header->parent = NULL; + drop_sysctl_table(&dir->header); + return err; } /* called under sysctl_lock */ @@ -212,72 +222,39 @@ lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces) return set; } -static struct list_head * -lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) -{ - struct ctl_table_set *set = lookup_header_set(root, namespaces); - return &set->list; -} - static struct ctl_table *lookup_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; - struct ctl_table_root *root; - struct ctl_table_set *set; spin_lock(&sysctl_lock); - root = &sysctl_table_root; - do { - set = lookup_header_set(root, current->nsproxy); - entry = find_entry(&head, set, dir, name, namelen); - if (entry && use_table(head)) - *phead = head; - else - entry = NULL; - root = list_entry(root->root_list.next, - struct ctl_table_root, root_list); - } while (!entry && root != &sysctl_table_root); + entry = find_entry(&head, dir, name, namelen); + if (entry && use_table(head)) + *phead = head; + else + 
entry = NULL; spin_unlock(&sysctl_lock); return entry; } static struct ctl_table_header *next_usable_entry(struct ctl_dir *dir, - struct ctl_table_root *root, struct list_head *tmp) + struct list_head *tmp) { - struct nsproxy *namespaces = current->nsproxy; - struct list_head *header_list; + struct ctl_table_set *set = dir->header.set; struct ctl_table_header *head; - goto next; - for (;;) { + for (tmp = tmp->next; tmp != &set->list; tmp = tmp->next) { head = list_entry(tmp, struct ctl_table_header, ctl_entry); - root = head->root; if (head->parent != dir || !head->ctl_table->procname || !use_table(head)) - goto next; - - return head; - next: - tmp = tmp->next; - header_list = lookup_header_list(root, namespaces); - if (tmp != header_list) continue; - do { - root = list_entry(root->root_list.next, - struct ctl_table_root, root_list); - if (root == &sysctl_table_root) - goto out; - header_list = lookup_header_list(root, namespaces); - } while (list_empty(header_list)); - tmp = header_list->next; + return head; } -out: return NULL; } @@ -288,8 +265,7 @@ static void first_entry(struct ctl_dir *dir, struct ctl_table *entry = NULL; spin_lock(&sysctl_lock); - head = next_usable_entry(dir, &sysctl_table_root, - &sysctl_table_root.default_set.list); + head = next_usable_entry(dir, &dir->header.set->list); spin_unlock(&sysctl_lock); if (head) entry = head->ctl_table; @@ -306,7 +282,7 @@ static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentr if (!entry->procname) { spin_lock(&sysctl_lock); unuse_table(head); - head = next_usable_entry(head->parent, head->root, &head->ctl_entry); + head = next_usable_entry(head->parent, &head->ctl_entry); spin_unlock(&sysctl_lock); if (head) entry = head->ctl_table; @@ -317,9 +293,6 @@ static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentr void register_sysctl_root(struct ctl_table_root *root) { - spin_lock(&sysctl_lock); - list_add_tail(&root->root_list, &sysctl_table_root.root_list); - spin_unlock(&sysctl_lock); } /* @@ -386,7 +359,7 @@ static struct ctl_table_header *grab_header(struct inode *inode) { struct ctl_table_header *head = PROC_I(inode)->sysctl; if (!head) - head = &sysctl_root_dir.header; + head = &sysctl_table_root.default_set.dir.header; return sysctl_head_grab(head); } @@ -400,6 +373,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, struct inode *inode; struct dentry *err = ERR_PTR(-ENOENT); struct ctl_dir *ctl_dir; + int ret; if (IS_ERR(head)) return ERR_CAST(head); @@ -410,6 +384,11 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, if (!p) goto out; + ret = sysctl_follow_link(&h, &p, current->nsproxy); + err = ERR_PTR(ret); + if (ret) + goto out; + err = ERR_PTR(-ENOMEM); inode = proc_sys_make_inode(dir->i_sb, h ? 
h : head, p); if (h) @@ -547,6 +526,25 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent, return !!filldir(dirent, qname.name, qname.len, filp->f_pos, ino, type); } +static int proc_sys_link_fill_cache(struct file *filp, void *dirent, + filldir_t filldir, + struct ctl_table_header *head, + struct ctl_table *table) +{ + int err, ret = 0; + head = sysctl_head_grab(head); + + /* It is not an error if we can not follow the link ignore it */ + err = sysctl_follow_link(&head, &table, current->nsproxy); + if (err) + goto out; + + ret = proc_sys_fill_cache(filp, dirent, filldir, head, table); +out: + sysctl_head_finish(head); + return ret; +} + static int scan(struct ctl_table_header *head, ctl_table *table, unsigned long *pos, struct file *file, void *dirent, filldir_t filldir) @@ -556,7 +554,10 @@ static int scan(struct ctl_table_header *head, ctl_table *table, if ((*pos)++ < file->f_pos) return 0; - res = proc_sys_fill_cache(file, dirent, filldir, head, table); + if (unlikely(S_ISLNK(table->mode))) + res = proc_sys_link_fill_cache(file, dirent, filldir, head, table); + else + res = proc_sys_fill_cache(file, dirent, filldir, head, table); if (res == 0) file->f_pos = *pos; @@ -757,13 +758,13 @@ static const struct dentry_operations proc_sys_dentry_operations = { .d_compare = proc_sys_compare, }; -static struct ctl_dir *find_subdir(struct ctl_table_set *set, struct ctl_dir *dir, - const char *name, int namelen) +static struct ctl_dir *find_subdir(struct ctl_dir *dir, + const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; - entry = find_entry(&head, set, dir, name, namelen); + entry = find_entry(&head, dir, name, namelen); if (!entry) return ERR_PTR(-ENOENT); if (S_ISDIR(entry->mode)) @@ -772,7 +773,7 @@ static struct ctl_dir *find_subdir(struct ctl_table_set *set, struct ctl_dir *di } static struct ctl_dir *new_dir(struct ctl_table_set *set, - const char *name, int namelen) + const char *name, int namelen) { struct ctl_table *table; struct ctl_dir *new; @@ -789,22 +790,19 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set, new_name[namelen] = '\0'; table[0].procname = new_name; table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; - init_header(&new->header, set->root, set, table); + init_header(&new->header, set->dir.header.root, set, table); return new; } -static struct ctl_dir *get_subdir(struct ctl_table_set *set, - struct ctl_dir *dir, const char *name, int namelen) +static struct ctl_dir *get_subdir(struct ctl_dir *dir, + const char *name, int namelen) { + struct ctl_table_set *set = dir->header.set; struct ctl_dir *subdir, *new = NULL; spin_lock(&sysctl_lock); - subdir = find_subdir(dir->header.set, dir, name, namelen); - if (!IS_ERR(subdir)) - goto found; - if ((PTR_ERR(subdir) == -ENOENT) && set != dir->header.set) - subdir = find_subdir(set, dir, name, namelen); + subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) @@ -817,13 +815,14 @@ static struct ctl_dir *get_subdir(struct ctl_table_set *set, if (!new) goto failed; - subdir = find_subdir(set, dir, name, namelen); + subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; - insert_header(dir, &new->header); + if (insert_header(dir, &new->header)) + goto failed; subdir = new; found: subdir->header.nreg++; @@ -841,6 +840,57 @@ failed: return subdir; } +static struct ctl_dir *xlate_dir(struct ctl_table_set *set, struct ctl_dir *dir) +{ + struct ctl_dir *parent; + const char 
*procname; + if (!dir->header.parent) + return &set->dir; + parent = xlate_dir(set, dir->header.parent); + if (IS_ERR(parent)) + return parent; + procname = dir->header.ctl_table[0].procname; + return find_subdir(parent, procname, strlen(procname)); +} + +static int sysctl_follow_link(struct ctl_table_header **phead, + struct ctl_table **pentry, struct nsproxy *namespaces) +{ + struct ctl_table_header *head; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_table *entry; + struct ctl_dir *dir; + int ret; + + /* Get out quickly if not a link */ + if (!S_ISLNK((*pentry)->mode)) + return 0; + + ret = 0; + spin_lock(&sysctl_lock); + root = (*pentry)->data; + set = lookup_header_set(root, namespaces); + dir = xlate_dir(set, (*phead)->parent); + if (IS_ERR(dir)) + ret = PTR_ERR(dir); + else { + const char *procname = (*pentry)->procname; + head = NULL; + entry = find_entry(&head, dir, procname, strlen(procname)); + ret = -ENOENT; + if (entry && use_table(head)) { + unuse_table(*phead); + *phead = head; + *pentry = entry; + ret = 0; + } + } + + spin_unlock(&sysctl_lock); + return ret; +} + static int sysctl_check_table_dups(const char *path, struct ctl_table *old, struct ctl_table *table) { @@ -859,30 +909,21 @@ static int sysctl_check_table_dups(const char *path, struct ctl_table *old, return error; } -static int sysctl_check_dups(struct nsproxy *namespaces, - struct ctl_dir *dir, +static int sysctl_check_dups(struct ctl_dir *dir, const char *path, struct ctl_table *table) { - struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_table_header *head; int error = 0; - root = &sysctl_table_root; - do { - set = lookup_header_set(root, namespaces); - - list_for_each_entry(head, &set->list, ctl_entry) { - if (head->unregistering) - continue; - if (head->parent != dir) - continue; - error = sysctl_check_table_dups(path, head->ctl_table, - table); - } - root = list_entry(root->root_list.next, - struct ctl_table_root, root_list); - } while (root != &sysctl_table_root); + set = dir->header.set; + list_for_each_entry(head, &set->list, ctl_entry) { + if (head->unregistering) + continue; + if (head->parent != dir) + continue; + error = sysctl_check_table_dups(path, head->ctl_table, table); + } return error; } @@ -932,6 +973,115 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) return err; } +static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table, + struct ctl_table_root *link_root) +{ + struct ctl_table *link_table, *entry, *link; + struct ctl_table_header *links; + char *link_name; + int nr_entries, name_bytes; + + name_bytes = 0; + nr_entries = 0; + for (entry = table; entry->procname; entry++) { + nr_entries++; + name_bytes += strlen(entry->procname) + 1; + } + + links = kzalloc(sizeof(struct ctl_table_header) + + sizeof(struct ctl_table)*(nr_entries + 1) + + name_bytes, + GFP_KERNEL); + + if (!links) + return NULL; + + link_table = (struct ctl_table *)(links + 1); + link_name = (char *)&link_table[nr_entries + 1]; + + for (link = link_table, entry = table; entry->procname; link++, entry++) { + int len = strlen(entry->procname) + 1; + memcpy(link_name, entry->procname, len); + link->procname = link_name; + link->mode = S_IFLNK|S_IRWXUGO; + link->data = link_root; + link_name += len; + } + init_header(links, dir->header.root, dir->header.set, link_table); + links->nreg = nr_entries; + + return links; +} + +static bool get_links(struct ctl_dir *dir, + struct ctl_table *table, struct ctl_table_root *link_root) +{ + 
struct ctl_table_header *head; + struct ctl_table *entry, *link; + + /* Are there links available for every entry in table? */ + for (entry = table; entry->procname; entry++) { + const char *procname = entry->procname; + link = find_entry(&head, dir, procname, strlen(procname)); + if (!link) + return false; + if (S_ISDIR(link->mode) && S_ISDIR(entry->mode)) + continue; + if (S_ISLNK(link->mode) && (link->data == link_root)) + continue; + return false; + } + + /* The checks passed. Increase the registration count on the links */ + for (entry = table; entry->procname; entry++) { + const char *procname = entry->procname; + link = find_entry(&head, dir, procname, strlen(procname)); + head->nreg++; + } + return true; +} + +static int insert_links(struct ctl_table_header *head) +{ + struct ctl_table_set *root_set = &sysctl_table_root.default_set; + struct ctl_dir *core_parent = NULL; + struct ctl_table_header *links; + int err; + + if (head->set == root_set) + return 0; + + core_parent = xlate_dir(root_set, head->parent); + if (IS_ERR(core_parent)) + return 0; + + if (get_links(core_parent, head->ctl_table, head->root)) + return 0; + + core_parent->header.nreg++; + spin_unlock(&sysctl_lock); + + links = new_links(core_parent, head->ctl_table, head->root); + + spin_lock(&sysctl_lock); + err = -ENOMEM; + if (!links) + goto out; + + err = 0; + if (get_links(core_parent, head->ctl_table, head->root)) { + kfree(links); + goto out; + } + + err = insert_header(core_parent, links); + if (err) + kfree(links); +out: + drop_sysctl_table(&core_parent->header); + return err; +} + /** * __register_sysctl_table - register a leaf sysctl table * @root: List of sysctl headers to register on @@ -980,6 +1130,7 @@ struct ctl_table_header *__register_sysctl_table( struct nsproxy *namespaces, const char *path, struct ctl_table *table) { + struct ctl_table_header *links = NULL; struct ctl_table_header *header; const char *name, *nextname; struct ctl_table_set *set; @@ -995,7 +1146,7 @@ struct ctl_table_header *__register_sysctl_table( spin_lock(&sysctl_lock); header->set = set = lookup_header_set(root, namespaces); - dir = &sysctl_root_dir; + dir = &set->dir; dir->header.nreg++; spin_unlock(&sysctl_lock); @@ -1012,22 +1163,28 @@ struct ctl_table_header *__register_sysctl_table( if (namelen == 0) continue; - dir = get_subdir(set, dir, name, namelen); + dir = get_subdir(dir, name, namelen); if (IS_ERR(dir)) goto fail; } + spin_lock(&sysctl_lock); - if (sysctl_check_dups(namespaces, dir, path, table)) + if (sysctl_check_dups(dir, path, table)) + goto fail_put_dir_locked; + + if (insert_header(dir, header)) goto fail_put_dir_locked; - insert_header(dir, header); + drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); return header; + fail_put_dir_locked: drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); fail: + kfree(links); kfree(header); dump_stack(); return NULL; @@ -1249,6 +1406,40 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table *table) } EXPORT_SYMBOL(register_sysctl_table); +static void put_links(struct ctl_table_header *header) +{ + struct ctl_table_set *root_set = &sysctl_table_root.default_set; + struct ctl_table_root *root = header->root; + struct ctl_dir *parent = header->parent; + struct ctl_dir *core_parent; + struct ctl_table *entry; + + if (header->set == root_set) + return; + + core_parent = xlate_dir(root_set, parent); + if (IS_ERR(core_parent)) + return; + + for (entry = header->ctl_table; entry->procname; entry++) { + struct ctl_table_header *link_head; + struct 
ctl_table *link; + const char *name = entry->procname; + + link = find_entry(&link_head, core_parent, name, strlen(name)); + if (link && + ((S_ISDIR(link->mode) && S_ISDIR(entry->mode)) || + (S_ISLNK(link->mode) && (link->data == root)))) { + drop_sysctl_table(link_head); + } + else { + printk(KERN_ERR "sysctl link missing during unregister: "); + sysctl_print_dir(parent); + printk(KERN_CONT "/%s\n", name); + } + } +} + static void drop_sysctl_table(struct ctl_table_header *header) { struct ctl_dir *parent = header->parent; @@ -1256,6 +1447,7 @@ static void drop_sysctl_table(struct ctl_table_header *header) if (--header->nreg) return; + put_links(header); start_unregistering(header); if (!--header->count) kfree_rcu(header, rcu); @@ -1301,13 +1493,14 @@ void unregister_sysctl_table(struct ctl_table_header * header) } EXPORT_SYMBOL(unregister_sysctl_table); -void setup_sysctl_set(struct ctl_table_set *p, +void setup_sysctl_set(struct ctl_table_set *set, struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)) { - INIT_LIST_HEAD(&p->list); - p->root = root; - p->is_seen = is_seen; + memset(set, sizeof(*set), 0); + INIT_LIST_HEAD(&set->list); + set->is_seen = is_seen; + init_header(&set->dir.header, root, set, root_table); } void retire_sysctl_set(struct ctl_table_set *set) -- cgit From 60a47a2e823cbe6b609346bffff61a00c0c76470 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 8 Jan 2012 00:02:37 -0800 Subject: sysctl: Modify __register_sysctl_paths to take a set instead of a root and an nsproxy An nsproxy argument here has always been awkard and now the nsproxy argument is completely unnecessary so remove it, replacing it with the set we want the registered tables to show up in. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index ec54a57c4690..e0d3e7e59cbd 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1084,8 +1084,7 @@ out: /** * __register_sysctl_table - register a leaf sysctl table - * @root: List of sysctl headers to register on - * @namespaces: Data to compute which lists of sysctl entries are visible + * @set: Sysctl tree to register on * @path: The path to the directory the sysctl table is in. * @table: the top-level table structure * @@ -1126,26 +1125,24 @@ out: * to the table header on success. 
*/ struct ctl_table_header *__register_sysctl_table( - struct ctl_table_root *root, - struct nsproxy *namespaces, + struct ctl_table_set *set, const char *path, struct ctl_table *table) { + struct ctl_table_root *root = set->dir.header.root; struct ctl_table_header *links = NULL; struct ctl_table_header *header; const char *name, *nextname; - struct ctl_table_set *set; struct ctl_dir *dir; header = kzalloc(sizeof(struct ctl_table_header), GFP_KERNEL); if (!header) return NULL; - init_header(header, root, NULL, table); + init_header(header, root, set, table); if (sysctl_check_table(path, table)) goto fail; spin_lock(&sysctl_lock); - header->set = set = lookup_header_set(root, namespaces); dir = &set->dir; dir->header.nreg++; spin_unlock(&sysctl_lock); @@ -1223,8 +1220,7 @@ static int count_subheaders(struct ctl_table *table) } static int register_leaf_sysctl_tables(const char *path, char *pos, - struct ctl_table_header ***subheader, - struct ctl_table_root *root, struct nsproxy *namespaces, + struct ctl_table_header ***subheader, struct ctl_table_set *set, struct ctl_table *table) { struct ctl_table *ctl_table_arg = NULL; @@ -1261,7 +1257,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos, /* Register everything except a directory full of subdirectories */ if (nr_files || !nr_dirs) { struct ctl_table_header *header; - header = __register_sysctl_table(root, namespaces, path, files); + header = __register_sysctl_table(set, path, files); if (!header) { kfree(ctl_table_arg); goto out; @@ -1286,7 +1282,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos, goto out; err = register_leaf_sysctl_tables(path, child_pos, subheader, - root, namespaces, entry->child); + set, entry->child); pos[0] = '\0'; if (err) goto out; @@ -1299,8 +1295,7 @@ out: /** * __register_sysctl_paths - register a sysctl table hierarchy - * @root: List of sysctl headers to register on - * @namespaces: Data to compute which lists of sysctl entries are visible + * @set: Sysctl tree to register on * @path: The path to the directory the sysctl table is in. * @table: the top-level table structure * @@ -1310,8 +1305,7 @@ out: * See __register_sysctl_table for more details. */ struct ctl_table_header *__register_sysctl_paths( - struct ctl_table_root *root, - struct nsproxy *namespaces, + struct ctl_table_set *set, const struct ctl_path *path, struct ctl_table *table) { struct ctl_table *ctl_table_arg = table; @@ -1337,7 +1331,7 @@ struct ctl_table_header *__register_sysctl_paths( table = table->child; } if (nr_subheaders == 1) { - header = __register_sysctl_table(root, namespaces, new_path, table); + header = __register_sysctl_table(set, new_path, table); if (header) header->ctl_table_arg = ctl_table_arg; } else { @@ -1351,7 +1345,7 @@ struct ctl_table_header *__register_sysctl_paths( header->ctl_table_arg = ctl_table_arg; if (register_leaf_sysctl_tables(new_path, pos, &subheader, - root, namespaces, table)) + set, table)) goto err_register_leaves; } @@ -1384,7 +1378,7 @@ err_register_leaves: struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, struct ctl_table *table) { - return __register_sysctl_paths(&sysctl_table_root, current->nsproxy, + return __register_sysctl_paths(&sysctl_table_root.default_set, path, table); } EXPORT_SYMBOL(register_sysctl_paths); -- cgit From e54012cede6749528899f66a72312522a179d427 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Wed, 18 Jan 2012 22:57:15 -0800 Subject: sysctl: Move sysctl_check_dups into insert_header Simplify the callers of insert_header by removing explicit calls to check for duplicates and instead have insert_header do the work. This makes the code slightly more maintainable by enabling changes to data structures where the insertion of new entries without duplicate suppression is not possible. There is not always a convenient path string where insert_header is called so modify sysctl_check_dups to use sysctl_print_dir when printing the full path when a duplicate is discovered. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index e0d3e7e59cbd..160d5781638e 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -52,6 +52,7 @@ static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry, struct nsproxy *namespaces); static int insert_links(struct ctl_table_header *head); static void put_links(struct ctl_table_header *header); +static int sysctl_check_dups(struct ctl_dir *dir, struct ctl_table *table); static void sysctl_print_dir(struct ctl_dir *dir) { @@ -123,6 +124,10 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header) { int err; + err = sysctl_check_dups(dir, header->ctl_table); + if (err) + return err; + dir->header.nreg++; header->parent = dir; err = insert_links(header); @@ -891,7 +896,7 @@ static int sysctl_follow_link(struct ctl_table_header **phead, return ret; } -static int sysctl_check_table_dups(const char *path, struct ctl_table *old, +static int sysctl_check_table_dups(struct ctl_dir *dir, struct ctl_table *old, struct ctl_table *table) { struct ctl_table *entry, *test; @@ -900,8 +905,9 @@ static int sysctl_check_table_dups(const char *path, struct ctl_table *old, for (entry = old; entry->procname; entry++) { for (test = table; test->procname; test++) { if (strcmp(entry->procname, test->procname) == 0) { - printk(KERN_ERR "sysctl duplicate entry: %s/%s\n", - path, test->procname); + printk(KERN_ERR "sysctl duplicate entry: "); + sysctl_print_dir(dir); + printk(KERN_CONT "/%s\n", test->procname); error = -EEXIST; } } @@ -909,8 +915,7 @@ static int sysctl_check_table_dups(const char *path, struct ctl_table *old, return error; } -static int sysctl_check_dups(struct ctl_dir *dir, - const char *path, struct ctl_table *table) +static int sysctl_check_dups(struct ctl_dir *dir, struct ctl_table *table) { struct ctl_table_set *set; struct ctl_table_header *head; @@ -922,7 +927,7 @@ static int sysctl_check_dups(struct ctl_dir *dir, continue; if (head->parent != dir) continue; - error = sysctl_check_table_dups(path, head->ctl_table, table); + error = sysctl_check_table_dups(dir, head->ctl_table, table); } return error; } @@ -1166,9 +1171,6 @@ struct ctl_table_header *__register_sysctl_table( } spin_lock(&sysctl_lock); - if (sysctl_check_dups(dir, path, table)) - goto fail_put_dir_locked; - if (insert_header(dir, header)) goto fail_put_dir_locked; -- cgit From 9e3d47df35abd6430fed04fb40a76c7358b1e815 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 7 Jan 2012 23:45:12 -0800 Subject: sysctl: Make the header lists per directory. Slightly enhance efficiency and clarity of the code by making the header list per directory instead of per set. 
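In rough terms (a sketch condensed from the find_entry() hunk in the diff below, not a standalone function), a lookup now walks only the headers registered under the directory itself, where it previously had to scan the whole set's list and skip headers whose parent was some other directory:

    /* after: dir->list only holds headers whose parent is dir */
    list_for_each_entry(head, &dir->list, ctl_entry) {
            if (head->unregistering)
                    continue;
            /* scan head->ctl_table for the requested name */
    }
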
Benchmark before: make-dummies 0 999 -> 0.63s rmmod dummy -> 0.12s make-dummies 0 9999 -> 2m35s rmmod dummy -> 18s Benchmark after: make-dummies 0 999 -> 0.32s rmmod dummy -> 0.12s make-dummies 0 9999 -> 1m17s rmmod dummy -> 17s Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 160d5781638e..e971ccccac4a 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -33,12 +33,12 @@ static struct ctl_table root_table[] = { { } }; static struct ctl_table_root sysctl_table_root = { - .default_set.list = LIST_HEAD_INIT(sysctl_table_root.default_set.dir.header.ctl_entry), + .default_set.dir.list = LIST_HEAD_INIT(sysctl_table_root.default_set.dir.list), .default_set.dir.header = { {{.count = 1, .nreg = 1, .ctl_table = root_table, - .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, + .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.dir.header.ctl_entry),}}, .ctl_table_arg = root_table, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, @@ -79,15 +79,12 @@ static int namecmp(const char *name1, int len1, const char *name2, int len2) static struct ctl_table *find_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { - struct ctl_table_set *set = dir->header.set; struct ctl_table_header *head; struct ctl_table *entry; - list_for_each_entry(head, &set->list, ctl_entry) { + list_for_each_entry(head, &dir->list, ctl_entry) { if (head->unregistering) continue; - if (head->parent != dir) - continue; for (entry = head->ctl_table; entry->procname; entry++) { const char *procname = entry->procname; if (namecmp(procname, strlen(procname), name, namelen) == 0) { @@ -133,7 +130,7 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header) err = insert_links(header); if (err) goto fail_links; - list_add_tail(&header->ctl_entry, &header->set->list); + list_add_tail(&header->ctl_entry, &header->parent->list); return 0; fail_links: header->parent = NULL; @@ -247,14 +244,12 @@ static struct ctl_table *lookup_entry(struct ctl_table_header **phead, static struct ctl_table_header *next_usable_entry(struct ctl_dir *dir, struct list_head *tmp) { - struct ctl_table_set *set = dir->header.set; struct ctl_table_header *head; - for (tmp = tmp->next; tmp != &set->list; tmp = tmp->next) { + for (tmp = tmp->next; tmp != &dir->list; tmp = tmp->next) { head = list_entry(tmp, struct ctl_table_header, ctl_entry); - if (head->parent != dir || - !head->ctl_table->procname || + if (!head->ctl_table->procname || !use_table(head)) continue; @@ -270,7 +265,7 @@ static void first_entry(struct ctl_dir *dir, struct ctl_table *entry = NULL; spin_lock(&sysctl_lock); - head = next_usable_entry(dir, &dir->header.set->list); + head = next_usable_entry(dir, &dir->list); spin_unlock(&sysctl_lock); if (head) entry = head->ctl_table; @@ -793,6 +788,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set, new_name = (char *)(table + 2); memcpy(new_name, name, namelen); new_name[namelen] = '\0'; + INIT_LIST_HEAD(&new->list); table[0].procname = new_name; table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; init_header(&new->header, set->dir.header.root, set, table); @@ -917,12 +913,10 @@ static int sysctl_check_table_dups(struct ctl_dir *dir, struct ctl_table *old, static int sysctl_check_dups(struct ctl_dir *dir, struct ctl_table *table) { - struct ctl_table_set *set; struct 
ctl_table_header *head; int error = 0; - set = dir->header.set; - list_for_each_entry(head, &set->list, ctl_entry) { + list_for_each_entry(head, &dir->list, ctl_entry) { if (head->unregistering) continue; if (head->parent != dir) @@ -1494,14 +1488,14 @@ void setup_sysctl_set(struct ctl_table_set *set, int (*is_seen)(struct ctl_table_set *)) { memset(set, sizeof(*set), 0); - INIT_LIST_HEAD(&set->list); set->is_seen = is_seen; + INIT_LIST_HEAD(&set->dir.list); init_header(&set->dir.header, root, set, root_table); } void retire_sysctl_set(struct ctl_table_set *set) { - WARN_ON(!list_empty(&set->list)); + WARN_ON(!list_empty(&set->dir.list)); } int __init proc_sys_init(void) -- cgit From ac13ac6f4c6c0504d2c927862216f4e422a2c0b5 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 9 Jan 2012 17:24:30 -0800 Subject: sysctl: Index sysctl directories with rbtrees. One of the most important jobs of sysctl is to export network stack tunables. Several of those tunables are per network device. In several instances people are running with 1000+ network devices in there network stacks, which makes the simple per directory linked list in sysctl a scaling bottleneck. Replace O(N^2) sysctl insertion and lookup times with O(NlogN) by using an rbtree to index the sysctl directories. Benchmark before: make-dummies 0 999 -> 0.32s rmmod dummy -> 0.12s make-dummies 0 9999 -> 1m17s rmmod dummy -> 17s Benchmark after: make-dummies 0 999 -> 0.074s rmmod dummy -> 0.070s make-dummies 0 9999 -> 3.4s rmmod dummy -> 0.44s Benchmark after (without dev_snmp6): make-dummies 0 9999 -> 0.75s rmmod dummy -> 0.44s make-dummies 0 99999 -> 11s rmmod dummy -> 4.3s At 10,000 dummy devices the bottleneck becomes the time to add and remove the files under /proc/sys/net/dev_snmp6. I have commented out the code that adds and removes files under /proc/sys/net/dev_snmp6 and taken measurments of creating and destroying 100,000 dummies to verify the sysctl continues to scale. Signed-off-by: Eric W. 
Biederman --- fs/proc/proc_sysctl.c | 224 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 134 insertions(+), 90 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index e971ccccac4a..05c393a5c530 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -33,12 +33,10 @@ static struct ctl_table root_table[] = { { } }; static struct ctl_table_root sysctl_table_root = { - .default_set.dir.list = LIST_HEAD_INIT(sysctl_table_root.default_set.dir.list), .default_set.dir.header = { {{.count = 1, .nreg = 1, - .ctl_table = root_table, - .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.dir.header.ctl_entry),}}, + .ctl_table = root_table }}, .ctl_table_arg = root_table, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, @@ -52,7 +50,6 @@ static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry, struct nsproxy *namespaces); static int insert_links(struct ctl_table_header *head); static void put_links(struct ctl_table_header *header); -static int sysctl_check_dups(struct ctl_dir *dir, struct ctl_table *table); static void sysctl_print_dir(struct ctl_dir *dir) { @@ -81,28 +78,83 @@ static struct ctl_table *find_entry(struct ctl_table_header **phead, { struct ctl_table_header *head; struct ctl_table *entry; + struct rb_node *node = dir->root.rb_node; - list_for_each_entry(head, &dir->list, ctl_entry) { - if (head->unregistering) - continue; - for (entry = head->ctl_table; entry->procname; entry++) { - const char *procname = entry->procname; - if (namecmp(procname, strlen(procname), name, namelen) == 0) { - *phead = head; - return entry; - } + while (node) + { + struct ctl_node *ctl_node; + const char *procname; + int cmp; + + ctl_node = rb_entry(node, struct ctl_node, node); + head = ctl_node->header; + entry = &head->ctl_table[ctl_node - head->node]; + procname = entry->procname; + + cmp = namecmp(name, namelen, procname, strlen(procname)); + if (cmp < 0) + node = node->rb_left; + else if (cmp > 0) + node = node->rb_right; + else { + *phead = head; + return entry; } } return NULL; } +static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) +{ + struct rb_node *node = &head->node[entry - head->ctl_table].node; + struct rb_node **p = &head->parent->root.rb_node; + struct rb_node *parent = NULL; + const char *name = entry->procname; + int namelen = strlen(name); + + while (*p) { + struct ctl_table_header *parent_head; + struct ctl_table *parent_entry; + struct ctl_node *parent_node; + const char *parent_name; + int cmp; + + parent = *p; + parent_node = rb_entry(parent, struct ctl_node, node); + parent_head = parent_node->header; + parent_entry = &parent_head->ctl_table[parent_node - parent_head->node]; + parent_name = parent_entry->procname; + + cmp = namecmp(name, namelen, parent_name, strlen(parent_name)); + if (cmp < 0) + p = &(*p)->rb_left; + else if (cmp > 0) + p = &(*p)->rb_right; + else { + printk(KERN_ERR "sysctl duplicate entry: "); + sysctl_print_dir(head->parent); + printk(KERN_CONT "/%s\n", entry->procname); + return -EEXIST; + } + } + + rb_link_node(node, parent, p); + return 0; +} + +static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) +{ + struct rb_node *node = &head->node[entry - head->ctl_table].node; + + rb_erase(node, &head->parent->root); +} + static void init_header(struct ctl_table_header *head, struct ctl_table_root *root, struct ctl_table_set *set, - struct ctl_table *table) + struct ctl_node *node, struct 
ctl_table *table) { head->ctl_table = table; head->ctl_table_arg = table; - INIT_LIST_HEAD(&head->ctl_entry); head->used = 0; head->count = 1; head->nreg = 1; @@ -110,28 +162,42 @@ static void init_header(struct ctl_table_header *head, head->root = root; head->set = set; head->parent = NULL; + head->node = node; + if (node) { + struct ctl_table *entry; + for (entry = table; entry->procname; entry++, node++) { + rb_init_node(&node->node); + node->header = head; + } + } } static void erase_header(struct ctl_table_header *head) { - list_del_init(&head->ctl_entry); + struct ctl_table *entry; + for (entry = head->ctl_table; entry->procname; entry++) + erase_entry(head, entry); } static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header) { + struct ctl_table *entry; int err; - err = sysctl_check_dups(dir, header->ctl_table); - if (err) - return err; - dir->header.nreg++; header->parent = dir; err = insert_links(header); if (err) goto fail_links; - list_add_tail(&header->ctl_entry, &header->parent->list); + for (entry = header->ctl_table; entry->procname; entry++) { + err = insert_entry(header, entry); + if (err) + goto fail; + } return 0; +fail: + erase_header(header); + put_links(header); fail_links: header->parent = NULL; drop_sysctl_table(&dir->header); @@ -241,19 +307,14 @@ static struct ctl_table *lookup_entry(struct ctl_table_header **phead, return entry; } -static struct ctl_table_header *next_usable_entry(struct ctl_dir *dir, - struct list_head *tmp) +static struct ctl_node *first_usable_entry(struct rb_node *node) { - struct ctl_table_header *head; - - for (tmp = tmp->next; tmp != &dir->list; tmp = tmp->next) { - head = list_entry(tmp, struct ctl_table_header, ctl_entry); + struct ctl_node *ctl_node; - if (!head->ctl_table->procname || - !use_table(head)) - continue; - - return head; + for (;node; node = rb_next(node)) { + ctl_node = rb_entry(node, struct ctl_node, node); + if (use_table(ctl_node->header)) + return ctl_node; } return NULL; } @@ -261,14 +322,17 @@ static struct ctl_table_header *next_usable_entry(struct ctl_dir *dir, static void first_entry(struct ctl_dir *dir, struct ctl_table_header **phead, struct ctl_table **pentry) { - struct ctl_table_header *head; + struct ctl_table_header *head = NULL; struct ctl_table *entry = NULL; + struct ctl_node *ctl_node; spin_lock(&sysctl_lock); - head = next_usable_entry(dir, &dir->list); + ctl_node = first_usable_entry(rb_first(&dir->root)); spin_unlock(&sysctl_lock); - if (head) - entry = head->ctl_table; + if (ctl_node) { + head = ctl_node->header; + entry = &head->ctl_table[ctl_node - head->node]; + } *phead = head; *pentry = entry; } @@ -277,15 +341,17 @@ static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentr { struct ctl_table_header *head = *phead; struct ctl_table *entry = *pentry; + struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; - entry++; - if (!entry->procname) { - spin_lock(&sysctl_lock); - unuse_table(head); - head = next_usable_entry(head->parent, &head->ctl_entry); - spin_unlock(&sysctl_lock); - if (head) - entry = head->ctl_table; + spin_lock(&sysctl_lock); + unuse_table(head); + + ctl_node = first_usable_entry(rb_next(&ctl_node->node)); + spin_unlock(&sysctl_lock); + head = NULL; + if (ctl_node) { + head = ctl_node->header; + entry = &head->ctl_table[ctl_node - head->node]; } *phead = head; *pentry = entry; @@ -777,21 +843,23 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set, { struct ctl_table *table; struct ctl_dir *new; + struct ctl_node *node; 
char *new_name; - new = kzalloc(sizeof(*new) + sizeof(struct ctl_table)*2 + - namelen + 1, GFP_KERNEL); + new = kzalloc(sizeof(*new) + sizeof(struct ctl_node) + + sizeof(struct ctl_table)*2 + namelen + 1, + GFP_KERNEL); if (!new) return NULL; - table = (struct ctl_table *)(new + 1); + node = (struct ctl_node *)(new + 1); + table = (struct ctl_table *)(node + 1); new_name = (char *)(table + 2); memcpy(new_name, name, namelen); new_name[namelen] = '\0'; - INIT_LIST_HEAD(&new->list); table[0].procname = new_name; table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; - init_header(&new->header, set->dir.header.root, set, table); + init_header(&new->header, set->dir.header.root, set, node, table); return new; } @@ -892,40 +960,6 @@ static int sysctl_follow_link(struct ctl_table_header **phead, return ret; } -static int sysctl_check_table_dups(struct ctl_dir *dir, struct ctl_table *old, - struct ctl_table *table) -{ - struct ctl_table *entry, *test; - int error = 0; - - for (entry = old; entry->procname; entry++) { - for (test = table; test->procname; test++) { - if (strcmp(entry->procname, test->procname) == 0) { - printk(KERN_ERR "sysctl duplicate entry: "); - sysctl_print_dir(dir); - printk(KERN_CONT "/%s\n", test->procname); - error = -EEXIST; - } - } - } - return error; -} - -static int sysctl_check_dups(struct ctl_dir *dir, struct ctl_table *table) -{ - struct ctl_table_header *head; - int error = 0; - - list_for_each_entry(head, &dir->list, ctl_entry) { - if (head->unregistering) - continue; - if (head->parent != dir) - continue; - error = sysctl_check_table_dups(dir, head->ctl_table, table); - } - return error; -} - static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...) { struct va_format vaf; @@ -977,6 +1011,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table { struct ctl_table *link_table, *entry, *link; struct ctl_table_header *links; + struct ctl_node *node; char *link_name; int nr_entries, name_bytes; @@ -988,6 +1023,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table } links = kzalloc(sizeof(struct ctl_table_header) + + sizeof(struct ctl_node)*nr_entries + sizeof(struct ctl_table)*(nr_entries + 1) + name_bytes, GFP_KERNEL); @@ -995,7 +1031,8 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table if (!links) return NULL; - link_table = (struct ctl_table *)(links + 1); + node = (struct ctl_node *)(links + 1); + link_table = (struct ctl_table *)(node + nr_entries); link_name = (char *)&link_table[nr_entries + 1]; for (link = link_table, entry = table; entry->procname; link++, entry++) { @@ -1006,7 +1043,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table link->data = link_root; link_name += len; } - init_header(links, dir->header.root, dir->header.set, link_table); + init_header(links, dir->header.root, dir->header.set, node, link_table); links->nreg = nr_entries; return links; @@ -1132,12 +1169,20 @@ struct ctl_table_header *__register_sysctl_table( struct ctl_table_header *header; const char *name, *nextname; struct ctl_dir *dir; + struct ctl_table *entry; + struct ctl_node *node; + int nr_entries = 0; + + for (entry = table; entry->procname; entry++) + nr_entries++; - header = kzalloc(sizeof(struct ctl_table_header), GFP_KERNEL); + header = kzalloc(sizeof(struct ctl_table_header) + + sizeof(struct ctl_node)*nr_entries, GFP_KERNEL); if (!header) return NULL; - init_header(header, root, set, table); + node = (struct ctl_node *)(header + 1); + 
init_header(header, root, set, node, table); if (sysctl_check_table(path, table)) goto fail; @@ -1489,13 +1534,12 @@ void setup_sysctl_set(struct ctl_table_set *set, { memset(set, sizeof(*set), 0); set->is_seen = is_seen; - INIT_LIST_HEAD(&set->dir.list); - init_header(&set->dir.header, root, set, root_table); + init_header(&set->dir.header, root, set, NULL, root_table); } void retire_sysctl_set(struct ctl_table_set *set) { - WARN_ON(!list_empty(&set->dir.list)); + WARN_ON(!RB_EMPTY_ROOT(&set->dir.root)); } int __init proc_sys_init(void) -- cgit From fea478d4101a4285aa25c5bafaaf4cec35026fe0 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 20 Jan 2012 21:47:03 -0800 Subject: sysctl: Add register_sysctl for normal sysctl users The plan is to convert all callers of register_sysctl_table and register_sysctl_paths to register_sysctl. The interface to register_sysctl is enough nicer this should make the callers a bit more readable. Additionally after the conversion the 230 lines of backwards compatibility can be removed. Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 05c393a5c530..8dc7f0e46e7e 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1228,6 +1228,23 @@ fail: return NULL; } +/** + * register_sysctl - register a sysctl table + * @path: The path to the directory the sysctl table is in. + * @table: the table structure + * + * Register a sysctl table. @table should be a filled in ctl_table + * array. A completely 0 filled entry terminates the table. + * + * See __register_sysctl_table for more details. + */ +struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table) +{ + return __register_sysctl_table(&sysctl_table_root.default_set, + path, table); +} +EXPORT_SYMBOL(register_sysctl); + static char *append_path(const char *path, char *pos, const char *name) { int namelen; -- cgit From d6e486868cde585842d55ba3b6ec57af090fc343 Mon Sep 17 00:00:00 2001 From: Ludwig Nussel Date: Wed, 25 Jan 2012 11:52:28 +0100 Subject: debugfs: add mode, uid and gid options Cautious admins may want to restrict access to debugfs. Currently a manual chown/chmod e.g. in an init script is needed to achieve that. Distributions that want to make the mount options configurable need to add extra config files. By allowing to set the root inode's uid, gid and mode via mount options no such hacks are needed anymore. Instead configuration becomes straight forward via fstab. 
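For illustration, an /etc/fstab entry using the new options could look like this (the uid=, gid= and mode= option names and the 0755 default come from the patch below; the mount point and particular values are only examples):

    debugfs  /sys/kernel/debug  debugfs  uid=0,gid=0,mode=0700  0  0

or, equivalently, at mount time:

    mount -t debugfs -o mode=0700 none /sys/kernel/debug
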
Signed-off-by: Ludwig Nussel Signed-off-by: Greg Kroah-Hartman --- fs/debugfs/inode.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 148 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 956d5ddddf6e..b80bc846a15a 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -23,9 +23,13 @@ #include #include #include +#include +#include #include #include +#define DEBUGFS_DEFAULT_MODE 0755 + static struct vfsmount *debugfs_mount; static int debugfs_mount_count; static bool debugfs_registered; @@ -125,11 +129,154 @@ static inline int debugfs_positive(struct dentry *dentry) return dentry->d_inode && !d_unhashed(dentry); } +struct debugfs_mount_opts { + uid_t uid; + gid_t gid; + umode_t mode; +}; + +enum { + Opt_uid, + Opt_gid, + Opt_mode, + Opt_err +}; + +static const match_table_t tokens = { + {Opt_uid, "uid=%u"}, + {Opt_gid, "gid=%u"}, + {Opt_mode, "mode=%o"}, + {Opt_err, NULL} +}; + +struct debugfs_fs_info { + struct debugfs_mount_opts mount_opts; +}; + +static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts) +{ + substring_t args[MAX_OPT_ARGS]; + int option; + int token; + char *p; + + opts->mode = DEBUGFS_DEFAULT_MODE; + + while ((p = strsep(&data, ",")) != NULL) { + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_uid: + if (match_int(&args[0], &option)) + return -EINVAL; + opts->uid = option; + break; + case Opt_gid: + if (match_octal(&args[0], &option)) + return -EINVAL; + opts->gid = option; + break; + case Opt_mode: + if (match_octal(&args[0], &option)) + return -EINVAL; + opts->mode = option & S_IALLUGO; + break; + /* + * We might like to report bad mount options here; + * but traditionally debugfs has ignored all mount options + */ + } + } + + return 0; +} + +static int debugfs_apply_options(struct super_block *sb) +{ + struct debugfs_fs_info *fsi = sb->s_fs_info; + struct inode *inode = sb->s_root->d_inode; + struct debugfs_mount_opts *opts = &fsi->mount_opts; + + inode->i_mode &= ~S_IALLUGO; + inode->i_mode |= opts->mode; + + inode->i_uid = opts->uid; + inode->i_gid = opts->gid; + + return 0; +} + +static int debugfs_remount(struct super_block *sb, int *flags, char *data) +{ + int err; + struct debugfs_fs_info *fsi = sb->s_fs_info; + + err = debugfs_parse_options(data, &fsi->mount_opts); + if (err) + goto fail; + + debugfs_apply_options(sb); + +fail: + return err; +} + +static int debugfs_show_options(struct seq_file *m, struct dentry *root) +{ + struct debugfs_fs_info *fsi = root->d_sb->s_fs_info; + struct debugfs_mount_opts *opts = &fsi->mount_opts; + + if (opts->uid != 0) + seq_printf(m, ",uid=%u", opts->uid); + if (opts->gid != 0) + seq_printf(m, ",gid=%u", opts->gid); + if (opts->mode != DEBUGFS_DEFAULT_MODE) + seq_printf(m, ",mode=%o", opts->mode); + + return 0; +} + +static const struct super_operations debugfs_super_operations = { + .statfs = simple_statfs, + .remount_fs = debugfs_remount, + .show_options = debugfs_show_options, +}; + static int debug_fill_super(struct super_block *sb, void *data, int silent) { static struct tree_descr debug_files[] = {{""}}; + struct debugfs_fs_info *fsi; + int err; + + save_mount_options(sb, data); + + fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL); + sb->s_fs_info = fsi; + if (!fsi) { + err = -ENOMEM; + goto fail; + } + + err = debugfs_parse_options(data, &fsi->mount_opts); + if (err) + goto fail; + + err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); + if (err) + goto 
fail; + + sb->s_op = &debugfs_super_operations; + + debugfs_apply_options(sb); + + return 0; - return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); +fail: + kfree(fsi); + sb->s_fs_info = NULL; + return err; } static struct dentry *debug_mount(struct file_system_type *fs_type, -- cgit From 47981787092aecb87dc3cb2d478455dcfb77516a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 30 Jan 2012 16:39:59 +0300 Subject: sysctl: remove an unused variable "links" is never used, so we can remove it. Signed-off-by: Dan Carpenter Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 8dc7f0e46e7e..1b1f5b8f4e0f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1165,7 +1165,6 @@ struct ctl_table_header *__register_sysctl_table( const char *path, struct ctl_table *table) { struct ctl_table_root *root = set->dir.header.root; - struct ctl_table_header *links = NULL; struct ctl_table_header *header; const char *name, *nextname; struct ctl_dir *dir; @@ -1222,7 +1221,6 @@ fail_put_dir_locked: drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); fail: - kfree(links); kfree(header); dump_stack(); return NULL; -- cgit From 1347440db6f76ec5ae0af8d8558387f571a5e1dd Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 30 Jan 2012 16:40:29 +0300 Subject: sysctl: fix memset parameters in setup_sysctl_set() The current code is a nop. Signed-off-by: Dan Carpenter Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 1b1f5b8f4e0f..27e265ba1afe 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1547,7 +1547,7 @@ void setup_sysctl_set(struct ctl_table_set *set, struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)) { - memset(set, sizeof(*set), 0); + memset(set, 0, sizeof(*set)); set->is_seen = is_seen; init_header(&set->dir.header, root, set, NULL, root_table); } -- cgit From d5c38b137ac8a6e3dbed13bc494d60df5b69dfc4 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 31 Jan 2012 06:40:26 -0800 Subject: sysfs: Update the name hash when renaming sysfs entries This fixes a bug introduced with sysfs name hashes where renaming a network device appears to succeed but silently makes the sysfs files for that network device inaccessible. In at least one configuration this bug has stopped networking from coming up during boot. Signed-off-by: Eric W. Biederman Tested-by: Jiri Slaby Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/dir.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index ea64d01400ac..dd3779cf3a3b 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -872,6 +872,7 @@ int sysfs_rename(struct sysfs_dirent *sd, dup_name = sd->s_name; sd->s_name = new_name; + sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name); } /* Move to the appropriate place in the appropriate directories rbtree. */ -- cgit From 021000e59c0db2d3a8113e906bde3183c33fa84b Mon Sep 17 00:00:00 2001 From: Mitsuo Hayasaka Date: Fri, 13 Jan 2012 05:58:39 +0000 Subject: xfs: show uuid when mount fails due to duplicate uuid When a system tries to mount a filesystem (FS) using UUID, the xfs returns -EINVAL and shows a message if a FS with the same UUID has been already mounted. It is useful to output the duplicate UUID with it. 
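The only visible change is the warning text; roughly (message strings taken from the hunk below, the concrete UUID is hypothetical):

    before:  Filesystem has duplicate UUID - can't mount
    after:   Filesystem has duplicate UUID 01234567-89ab-cdef-0123-456789abcdef - can't mount
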
Signed-off-by: Mitsuo Hayasaka Reviewed-by: Christoph Hellwig Cc: Ben Myers Cc: Alex Elder Cc: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_mount.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index d06afbc3540d..e07f8528c5ef 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -158,7 +158,7 @@ xfs_uuid_mount( out_duplicate: mutex_unlock(&xfs_uuid_table_mutex); - xfs_warn(mp, "Filesystem has duplicate UUID - can't mount"); + xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid); return XFS_ERROR(EINVAL); } -- cgit From d706ed1f50d3f7fae61a177183562179abe8e4bb Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:43:49 +0300 Subject: SUNPRC: cleanup RPC PipeFS pipes upcall interface RPC pipe upcall doesn't requires only private pipe data. Thus RPC inode references in this code can be removed. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayoutdev.c | 2 +- fs/nfs/blocklayout/blocklayoutdm.c | 2 +- fs/nfs/idmap.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index d08ba9107fde..81019190e46d 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -146,7 +146,7 @@ nfs4_blk_decode_device(struct nfs_server *server, dprintk("%s CALLING USERSPACE DAEMON\n", __func__); add_wait_queue(&bl_wq, &wq); - rc = rpc_queue_upcall(bl_device_pipe->d_inode, &msg); + rc = rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg); if (rc < 0) { remove_wait_queue(&bl_wq, &wq); rv = ERR_PTR(rc); diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index d055c7558073..3c38244a8724 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -66,7 +66,7 @@ static void dev_remove(dev_t dev) msg.len = sizeof(bl_msg) + bl_msg.totallen; add_wait_queue(&bl_wq, &wq); - if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) { + if (rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg) < 0) { remove_wait_queue(&bl_wq, &wq); goto out; } diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 2c05f1991e1e..3c63c47c793d 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -589,7 +589,7 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h, msg.len = sizeof(*im); add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(idmap->idmap_dentry->d_inode, &msg) < 0) { + if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } @@ -650,7 +650,7 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h, add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(idmap->idmap_dentry->d_inode, &msg) < 0) { + if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } -- cgit From c239d83b9921b8a8005a3bcd23000cfe18acf5c2 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:44:06 +0300 Subject: SUNRPC: split SUNPRC PipeFS dentry and private pipe data creation This patch is a final step towards to removing PipeFS inode references from kernel code other than PipeFS itself. It makes all kernel SUNRPC PipeFS users depends on pipe private data, which state depend on their specific operations, etc. 
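As a rough sketch of the resulting calling pattern (condensed from the blocklayout changes in the diff below, with error handling omitted; upcall_ops, parent and "name" stand in for the caller's own objects), pipe-private data and the PipeFS dentry are now created and torn down as two separate steps:

    pipe = rpc_mkpipe_data(&upcall_ops, 0);                       /* private pipe data  */
    pipe->dentry = rpc_mkpipe_dentry(parent, "name", NULL, pipe); /* PipeFS dentry      */
    ...
    rpc_unlink(pipe->dentry);
    rpc_destroy_pipe_data(pipe);
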
This patch completes SUNRPC PipeFS preparations and allows to create pipe private data and PipeFS dentries independently. Next step will be making SUNPRC PipeFS dentries allocated by SUNRPC PipeFS network namespace aware routines. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 16 ++++++++++++---- fs/nfs/blocklayout/blocklayout.h | 2 +- fs/nfs/blocklayout/blocklayoutdev.c | 2 +- fs/nfs/blocklayout/blocklayoutdm.c | 2 +- fs/nfs/idmap.c | 28 ++++++++++++++++++++-------- 5 files changed, 35 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 48cfac31f64c..848660fd58c4 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -46,7 +46,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andy Adamson "); MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); -struct dentry *bl_device_pipe; +struct rpc_pipe *bl_device_pipe; wait_queue_head_t bl_wq; static void print_page(struct page *page) @@ -1051,16 +1051,23 @@ static int __init nfs4blocklayout_init(void) if (ret) goto out_putrpc; - bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL, - &bl_upcall_ops, 0); + bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); path_put(&path); if (IS_ERR(bl_device_pipe)) { ret = PTR_ERR(bl_device_pipe); goto out_putrpc; } + bl_device_pipe->dentry = rpc_mkpipe_dentry(path.dentry, "blocklayout", + NULL, bl_device_pipe); + if (IS_ERR(bl_device_pipe->dentry)) { + ret = PTR_ERR(bl_device_pipe->dentry); + goto out_destroy_pipe; + } out: return ret; +out_destroy_pipe: + rpc_destroy_pipe_data(bl_device_pipe); out_putrpc: rpc_put_mount(); out_remove: @@ -1074,7 +1081,8 @@ static void __exit nfs4blocklayout_exit(void) __func__); pnfs_unregister_layoutdriver(&blocklayout_type); - rpc_unlink(bl_device_pipe); + rpc_unlink(bl_device_pipe->dentry); + rpc_destroy_pipe_data(bl_device_pipe); rpc_put_mount(); } diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index e31a2df28e70..49c670b18a9e 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -161,7 +161,7 @@ struct bl_msg_hdr { u16 totallen; /* length of entire message, including hdr itself */ }; -extern struct dentry *bl_device_pipe; +extern struct rpc_pipe *bl_device_pipe; extern wait_queue_head_t bl_wq; #define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */ diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index 81019190e46d..949b62478799 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -146,7 +146,7 @@ nfs4_blk_decode_device(struct nfs_server *server, dprintk("%s CALLING USERSPACE DAEMON\n", __func__); add_wait_queue(&bl_wq, &wq); - rc = rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg); + rc = rpc_queue_upcall(bl_device_pipe, &msg); if (rc < 0) { remove_wait_queue(&bl_wq, &wq); rv = ERR_PTR(rc); diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 3c38244a8724..631f254d12ab 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -66,7 +66,7 @@ static void dev_remove(dev_t dev) msg.len = sizeof(bl_msg) + bl_msg.totallen; add_wait_queue(&bl_wq, &wq); - if (rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg) < 0) { + if (rpc_queue_upcall(bl_device_pipe, &msg) < 0) { remove_wait_queue(&bl_wq, &wq); goto out; } diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 
3c63c47c793d..2992cb854e12 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -410,7 +410,7 @@ struct idmap_hashtable { }; struct idmap { - struct dentry *idmap_dentry; + struct rpc_pipe *idmap_pipe; wait_queue_head_t idmap_wq; struct idmap_msg idmap_im; struct mutex idmap_lock; /* Serializes upcalls */ @@ -435,6 +435,7 @@ int nfs_idmap_new(struct nfs_client *clp) { struct idmap *idmap; + struct rpc_pipe *pipe; int error; BUG_ON(clp->cl_idmap != NULL); @@ -443,14 +444,23 @@ nfs_idmap_new(struct nfs_client *clp) if (idmap == NULL) return -ENOMEM; - idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_path.dentry, - "idmap", idmap, &idmap_upcall_ops, 0); - if (IS_ERR(idmap->idmap_dentry)) { - error = PTR_ERR(idmap->idmap_dentry); + pipe = rpc_mkpipe_data(&idmap_upcall_ops, 0); + if (IS_ERR(pipe)) { + error = PTR_ERR(pipe); kfree(idmap); return error; } + if (clp->cl_rpcclient->cl_path.dentry) + pipe->dentry = rpc_mkpipe_dentry(clp->cl_rpcclient->cl_path.dentry, + "idmap", idmap, pipe); + if (IS_ERR(pipe->dentry)) { + error = PTR_ERR(pipe->dentry); + rpc_destroy_pipe_data(pipe); + kfree(idmap); + return error; + } + idmap->idmap_pipe = pipe; mutex_init(&idmap->idmap_lock); mutex_init(&idmap->idmap_im_lock); init_waitqueue_head(&idmap->idmap_wq); @@ -468,7 +478,9 @@ nfs_idmap_delete(struct nfs_client *clp) if (!idmap) return; - rpc_unlink(idmap->idmap_dentry); + if (idmap->idmap_pipe->dentry) + rpc_unlink(idmap->idmap_pipe->dentry); + rpc_destroy_pipe_data(idmap->idmap_pipe); clp->cl_idmap = NULL; kfree(idmap); } @@ -589,7 +601,7 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h, msg.len = sizeof(*im); add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { + if (rpc_queue_upcall(idmap->idmap_pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } @@ -650,7 +662,7 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h, add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { + if (rpc_queue_upcall(idmap->idmap_pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } -- cgit From 0157d021d23a087eecfa830502f81cfe843f0d16 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Wed, 11 Jan 2012 19:18:01 +0400 Subject: SUNRPC: handle RPC client pipefs dentries by network namespace aware routines v2: 1) "Over-put" of PipeFS mount point fixed. Fix is ugly, but allows to bisect the patch set. And it will be removed later in the series. This patch makes RPC clients PipeFs dentries allocations in it's owner network namespace context. RPC client pipefs dentries creation logic has been changed: 1) Pipefs dentries creation by sb was moved to separated function, which will be used for handling PipeFS mount notification. 2) Initial value of RPC client PipeFS dir dentry is set no NULL now. RPC client pipefs dentries cleanup logic has been changed: 1) Cleanup is done now in separated rpc_remove_pipedir() function, which takes care about pipefs superblock locking. Also this patch removes slashes from cb_program.pipe_dir_name and from NFS_PIPE_DIRNAME to make rpc_d_lookup_sb() work. This doesn't affect vfs_path_lookup() results in nfs4blocklayout_init() since this slash is cutted off anyway in link_path_walk(). 
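The slash removal is easy to miss but load-bearing: rpc_d_lookup_sb(), as the later cache and blocklayout hunks use it, appears to take a bare directory name and resolve it as a single component under the PipeFS root, so a leading '/' would simply never match. A hedged sketch of the lookup this enables for the callback program directory:

    /* "nfsd4_cb", not "/nfsd4_cb": the name is one path component */
    struct dentry *dir = rpc_d_lookup_sb(pipefs_sb, "nfsd4_cb");

    if (dir) {
            /* ... create this client's pipes beneath dir ... */
            dput(dir);
    }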
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfsd/nfs4callback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 6f3ebb48b12f..426ccb171650 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -622,7 +622,7 @@ static struct rpc_program cb_program = { .nrvers = ARRAY_SIZE(nfs_cb_version), .version = nfs_cb_version, .stats = &cb_stats, - .pipe_dir_name = "/nfsd4_cb", + .pipe_dir_name = "nfsd4_cb", }; static int max_cb_time(void) -- cgit From 30507f58ce11e7664512059c708347d7a7d75271 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Wed, 11 Jan 2012 19:18:42 +0400 Subject: SUNRPC: remove RPC PipeFS mount point reference from RPC client This is a cleanup patch. We don't need this reference anymore. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 2992cb854e12..588d7da5b17e 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -451,8 +451,8 @@ nfs_idmap_new(struct nfs_client *clp) return error; } - if (clp->cl_rpcclient->cl_path.dentry) - pipe->dentry = rpc_mkpipe_dentry(clp->cl_rpcclient->cl_path.dentry, + if (clp->cl_rpcclient->cl_dentry) + pipe->dentry = rpc_mkpipe_dentry(clp->cl_rpcclient->cl_dentry, "idmap", idmap, pipe); if (IS_ERR(pipe->dentry)) { error = PTR_ERR(pipe->dentry); -- cgit From 820f9442e711a81749e70c40f149fc54c4ce0ca8 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 25 Nov 2011 17:12:40 +0300 Subject: SUNRPC: split cache creation and PipeFS registration This precursor patch splits SUNRPC cache creation and PipeFS registartion. It's required for latter split of NFS DNS resolver cache creation per network namespace context and PipeFS registration/unregistration on MOUNT/UMOUNT events. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/cache_lib.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c index c98b439332fc..d62a8951cb12 100644 --- a/fs/nfs/cache_lib.c +++ b/fs/nfs/cache_lib.c @@ -120,6 +120,7 @@ int nfs_cache_register(struct cache_detail *cd) mnt = rpc_get_mount(); if (IS_ERR(mnt)) return PTR_ERR(mnt); + sunrpc_init_cache_detail(cd); ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &path); if (ret) goto err; @@ -128,6 +129,7 @@ int nfs_cache_register(struct cache_detail *cd) if (!ret) return ret; err: + sunrpc_destroy_cache_detail(cd); rpc_put_mount(); return ret; } @@ -135,6 +137,7 @@ err: void nfs_cache_unregister(struct cache_detail *cd) { sunrpc_cache_unregister_pipefs(cd); + sunrpc_destroy_cache_detail(cd); rpc_put_mount(); } -- cgit From 9222b955065dbb047b8db9eb2431979bff3ce700 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 25 Nov 2011 17:12:48 +0300 Subject: NFS: split cache creation and PipeFS registration This precursor patch splits NFS cache creation and PipeFS registartion. It's required for latter split of NFS DNS resolver cache creation per network namespace context and PipeFS registration/unregistration on MOUNT/UMOUNT events. 
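The practical effect of the two split patches is that a cache_detail's lifetime no longer depends on PipeFS at all: callers set the cache up first and treat the PipeFS directory as a separately managed view of it. The dns_resolve.c hunk below ends up with exactly this shape:

    nfs_cache_init(cd);                     /* the cache itself                 */
    err = nfs_cache_register(cd);           /* PipeFS visibility, a second step */
    if (err) {
            nfs_cache_destroy(cd);          /* unwind only what init set up     */
            return err;
    }
    return 0;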
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/cache_lib.c | 11 +++++++++-- fs/nfs/cache_lib.h | 2 ++ fs/nfs/dns_resolve.c | 11 ++++++++++- 3 files changed, 21 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c index d62a8951cb12..9d79a2eaab27 100644 --- a/fs/nfs/cache_lib.c +++ b/fs/nfs/cache_lib.c @@ -120,7 +120,6 @@ int nfs_cache_register(struct cache_detail *cd) mnt = rpc_get_mount(); if (IS_ERR(mnt)) return PTR_ERR(mnt); - sunrpc_init_cache_detail(cd); ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &path); if (ret) goto err; @@ -129,7 +128,6 @@ int nfs_cache_register(struct cache_detail *cd) if (!ret) return ret; err: - sunrpc_destroy_cache_detail(cd); rpc_put_mount(); return ret; } @@ -141,3 +139,12 @@ void nfs_cache_unregister(struct cache_detail *cd) rpc_put_mount(); } +void nfs_cache_init(struct cache_detail *cd) +{ + sunrpc_init_cache_detail(cd); +} + +void nfs_cache_destroy(struct cache_detail *cd) +{ + sunrpc_destroy_cache_detail(cd); +} diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h index 7cf6cafcc007..815dd6651c9f 100644 --- a/fs/nfs/cache_lib.h +++ b/fs/nfs/cache_lib.h @@ -23,5 +23,7 @@ extern struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void); extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq); extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq); +extern void nfs_cache_init(struct cache_detail *cd); +extern void nfs_cache_destroy(struct cache_detail *cd); extern int nfs_cache_register(struct cache_detail *cd); extern void nfs_cache_unregister(struct cache_detail *cd); diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index a6e711ad130f..619dea6b5ccf 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -361,12 +361,21 @@ ssize_t nfs_dns_resolve_name(char *name, size_t namelen, int nfs_dns_resolver_init(void) { - return nfs_cache_register(&nfs_dns_resolve); + int err; + + nfs_cache_init(&nfs_dns_resolve); + err = nfs_cache_register(&nfs_dns_resolve); + if (err) { + nfs_cache_destroy(&nfs_dns_resolve); + return err; + } + return 0; } void nfs_dns_resolver_destroy(void) { nfs_cache_unregister(&nfs_dns_resolve); + nfs_cache_destroy(&nfs_dns_resolve); } #endif -- cgit From 5c1cacb175185ed925d7dc13ac7e0653e7a633cd Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 25 Nov 2011 17:12:56 +0300 Subject: NFS: handle NFS caches dentries by network namespace aware routines This patch makes NFS caches PipeFS dentries allocated and destroyed in network namespace context by PipeFS network namespace aware routines. 
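The helper pair doing the namespace work is rpc_get_sb_net()/rpc_put_sb_net(): the get side returns the PipeFS superblock currently mounted in that network namespace (or NULL if there is none) and the put side releases it again, with the dentry work done strictly in between. The per-net register helper in the hunk below reduces to that bracket; a condensed sketch, with an illustrative function name:

    static int register_in_net(struct net *net, struct cache_detail *cd)
    {
            struct super_block *pipefs_sb = rpc_get_sb_net(net);
            int ret = -ENOENT;              /* no PipeFS mounted in this netns */

            if (pipefs_sb) {
                    ret = nfs_cache_register_sb(pipefs_sb, cd);
                    rpc_put_sb_net(net);    /* always paired with the get */
            }
            return ret;
    }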
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/cache_lib.c | 44 +++++++++++++++++++++++++++++++++++--------- fs/nfs/cache_lib.h | 4 ++-- fs/nfs/dns_resolve.c | 4 ++-- 3 files changed, 39 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c index 9d79a2eaab27..5dd017bbb7a2 100644 --- a/fs/nfs/cache_lib.c +++ b/fs/nfs/cache_lib.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "cache_lib.h" @@ -111,20 +112,34 @@ int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq) return 0; } -int nfs_cache_register(struct cache_detail *cd) +static int nfs_cache_register_sb(struct super_block *sb, struct cache_detail *cd) +{ + int ret; + struct dentry *dir; + + dir = rpc_d_lookup_sb(sb, "cache"); + BUG_ON(dir == NULL); + ret = sunrpc_cache_register_pipefs(dir, cd->name, 0600, cd); + dput(dir); + return ret; +} + +int nfs_cache_register_net(struct net *net, struct cache_detail *cd) { struct vfsmount *mnt; - struct path path; + struct super_block *pipefs_sb; int ret; mnt = rpc_get_mount(); if (IS_ERR(mnt)) return PTR_ERR(mnt); - ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &path); - if (ret) + pipefs_sb = rpc_get_sb_net(net); + if (!pipefs_sb) { + ret = -ENOENT; goto err; - ret = sunrpc_cache_register_pipefs(path.dentry, cd->name, 0600, cd); - path_put(&path); + } + ret = nfs_cache_register_sb(pipefs_sb, cd); + rpc_put_sb_net(net); if (!ret) return ret; err: @@ -132,10 +147,21 @@ err: return ret; } -void nfs_cache_unregister(struct cache_detail *cd) +static void nfs_cache_unregister_sb(struct super_block *sb, struct cache_detail *cd) { - sunrpc_cache_unregister_pipefs(cd); - sunrpc_destroy_cache_detail(cd); + if (cd->u.pipefs.dir) + sunrpc_cache_unregister_pipefs(cd); +} + +void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd) +{ + struct super_block *pipefs_sb; + + pipefs_sb = rpc_get_sb_net(net); + if (pipefs_sb) { + nfs_cache_unregister_sb(pipefs_sb, cd); + rpc_put_sb_net(net); + } rpc_put_mount(); } diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h index 815dd6651c9f..e0a6cc4b01b9 100644 --- a/fs/nfs/cache_lib.h +++ b/fs/nfs/cache_lib.h @@ -25,5 +25,5 @@ extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq); extern void nfs_cache_init(struct cache_detail *cd); extern void nfs_cache_destroy(struct cache_detail *cd); -extern int nfs_cache_register(struct cache_detail *cd); -extern void nfs_cache_unregister(struct cache_detail *cd); +extern int nfs_cache_register_net(struct net *net, struct cache_detail *cd); +extern void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd); diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index 619dea6b5ccf..3cbf4b88f827 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -364,7 +364,7 @@ int nfs_dns_resolver_init(void) int err; nfs_cache_init(&nfs_dns_resolve); - err = nfs_cache_register(&nfs_dns_resolve); + err = nfs_cache_register_net(&init_net, &nfs_dns_resolve); if (err) { nfs_cache_destroy(&nfs_dns_resolve); return err; @@ -374,7 +374,7 @@ int nfs_dns_resolver_init(void) void nfs_dns_resolver_destroy(void) { - nfs_cache_unregister(&nfs_dns_resolve); + nfs_cache_unregister_net(&init_net, &nfs_dns_resolve); nfs_cache_destroy(&nfs_dns_resolve); } -- cgit From 1b340d0118da1d7c60c664f17d7c8fce2bb1cd9d Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 25 Nov 2011 17:13:04 +0300 Subject: NFS: DNS resolver cache per network namespace context introduced This patch implements DNS 
resolver cache creation and registration for each alive network namespace context. This was done by registering NFS per-net operations, responsible for DNS cache allocation/register and unregister/destructioning instead of initialization and destruction of static "nfs_dns_resolve" cache detail (this one was removed). Pointer to network dns resolver cache is stored in new per-net "nfs_net" structure. This patch also changes nfs_dns_resolve_name() function prototype (and it's calls) by adding network pointer parameter, which is used to get proper DNS resolver cache pointer for do_cache_lookup_wait() call. Note: empty nfs_dns_resolver_init() and nfs_dns_resolver_destroy() functions will be used in next patch in the series. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/dns_resolve.c | 96 +++++++++++++++++++++++++++++++++----------------- fs/nfs/dns_resolve.h | 14 ++++++-- fs/nfs/inode.c | 33 +++++++++++++++-- fs/nfs/netns.h | 13 +++++++ fs/nfs/nfs4namespace.c | 8 +++-- 5 files changed, 123 insertions(+), 41 deletions(-) create mode 100644 fs/nfs/netns.h (limited to 'fs') diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index 3cbf4b88f827..9aea78ab86ac 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -11,7 +11,7 @@ #include #include -ssize_t nfs_dns_resolve_name(char *name, size_t namelen, +ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, struct sockaddr *sa, size_t salen) { ssize_t ret; @@ -43,12 +43,11 @@ ssize_t nfs_dns_resolve_name(char *name, size_t namelen, #include "dns_resolve.h" #include "cache_lib.h" +#include "netns.h" #define NFS_DNS_HASHBITS 4 #define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS) -static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE]; - struct nfs_dns_ent { struct cache_head h; @@ -259,21 +258,6 @@ out: return ret; } -static struct cache_detail nfs_dns_resolve = { - .owner = THIS_MODULE, - .hash_size = NFS_DNS_HASHTBL_SIZE, - .hash_table = nfs_dns_table, - .name = "dns_resolve", - .cache_put = nfs_dns_ent_put, - .cache_upcall = nfs_dns_upcall, - .cache_parse = nfs_dns_parse, - .cache_show = nfs_dns_show, - .match = nfs_dns_match, - .init = nfs_dns_ent_init, - .update = nfs_dns_ent_update, - .alloc = nfs_dns_ent_alloc, -}; - static int do_cache_lookup(struct cache_detail *cd, struct nfs_dns_ent *key, struct nfs_dns_ent **item, @@ -336,8 +320,8 @@ out: return ret; } -ssize_t nfs_dns_resolve_name(char *name, size_t namelen, - struct sockaddr *sa, size_t salen) +ssize_t nfs_dns_resolve_name(struct net *net, char *name, + size_t namelen, struct sockaddr *sa, size_t salen) { struct nfs_dns_ent key = { .hostname = name, @@ -345,37 +329,83 @@ ssize_t nfs_dns_resolve_name(char *name, size_t namelen, }; struct nfs_dns_ent *item = NULL; ssize_t ret; + struct nfs_net *nn = net_generic(net, nfs_net_id); - ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item); + ret = do_cache_lookup_wait(nn->nfs_dns_resolve, &key, &item); if (ret == 0) { if (salen >= item->addrlen) { memcpy(sa, &item->addr, item->addrlen); ret = item->addrlen; } else ret = -EOVERFLOW; - cache_put(&item->h, &nfs_dns_resolve); + cache_put(&item->h, nn->nfs_dns_resolve); } else if (ret == -ENOENT) ret = -ESRCH; return ret; } -int nfs_dns_resolver_init(void) +int nfs_dns_resolver_cache_init(struct net *net) { - int err; + int err = -ENOMEM; + struct nfs_net *nn = net_generic(net, nfs_net_id); + struct cache_detail *cd; + struct cache_head **tbl; + + cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL); + if (cd == NULL) + 
goto err_cd; + + tbl = kzalloc(NFS_DNS_HASHTBL_SIZE * sizeof(struct cache_head *), + GFP_KERNEL); + if (tbl == NULL) + goto err_tbl; + + cd->owner = THIS_MODULE, + cd->hash_size = NFS_DNS_HASHTBL_SIZE, + cd->hash_table = tbl, + cd->name = "dns_resolve", + cd->cache_put = nfs_dns_ent_put, + cd->cache_upcall = nfs_dns_upcall, + cd->cache_parse = nfs_dns_parse, + cd->cache_show = nfs_dns_show, + cd->match = nfs_dns_match, + cd->init = nfs_dns_ent_init, + cd->update = nfs_dns_ent_update, + cd->alloc = nfs_dns_ent_alloc, + + nfs_cache_init(cd); + err = nfs_cache_register_net(net, cd); + if (err) + goto err_reg; + nn->nfs_dns_resolve = cd; + return 0; - nfs_cache_init(&nfs_dns_resolve); - err = nfs_cache_register_net(&init_net, &nfs_dns_resolve); - if (err) { - nfs_cache_destroy(&nfs_dns_resolve); - return err; - } +err_reg: + nfs_cache_destroy(cd); + kfree(cd->hash_table); +err_tbl: + kfree(cd); +err_cd: + return err; +} + +void nfs_dns_resolver_cache_destroy(struct net *net) +{ + struct nfs_net *nn = net_generic(net, nfs_net_id); + struct cache_detail *cd = nn->nfs_dns_resolve; + + nfs_cache_unregister_net(net, cd); + nfs_cache_destroy(cd); + kfree(cd->hash_table); + kfree(cd); +} + +int nfs_dns_resolver_init(void) +{ return 0; } void nfs_dns_resolver_destroy(void) { - nfs_cache_unregister_net(&init_net, &nfs_dns_resolve); - nfs_cache_destroy(&nfs_dns_resolve); } - #endif diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h index 199bb5543a91..2e4f596d2923 100644 --- a/fs/nfs/dns_resolve.h +++ b/fs/nfs/dns_resolve.h @@ -15,12 +15,22 @@ static inline int nfs_dns_resolver_init(void) static inline void nfs_dns_resolver_destroy(void) {} + +static inline int nfs_dns_resolver_cache_init(struct net *net) +{ + return 0; +} + +static inline void nfs_dns_resolver_cache_destroy(struct net *net) +{} #else extern int nfs_dns_resolver_init(void); extern void nfs_dns_resolver_destroy(void); +extern int nfs_dns_resolver_cache_init(struct net *net); +extern void nfs_dns_resolver_cache_destroy(struct net *net); #endif -extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen, - struct sockaddr *sa, size_t salen); +extern ssize_t nfs_dns_resolve_name(struct net *net, char *name, + size_t namelen, struct sockaddr *sa, size_t salen); #endif diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index f649fba8c384..0335f6e4ff7e 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -51,6 +51,7 @@ #include "fscache.h" #include "dns_resolve.h" #include "pnfs.h" +#include "netns.h" #define NFSDBG_FACILITY NFSDBG_VFS @@ -1552,6 +1553,25 @@ static void nfsiod_stop(void) destroy_workqueue(wq); } +int nfs_net_id; + +static int nfs_net_init(struct net *net) +{ + return nfs_dns_resolver_cache_init(net); +} + +static void nfs_net_exit(struct net *net) +{ + nfs_dns_resolver_cache_destroy(net); +} + +static struct pernet_operations nfs_net_ops = { + .init = nfs_net_init, + .exit = nfs_net_exit, + .id = &nfs_net_id, + .size = sizeof(struct nfs_net), +}; + /* * Initialize NFS */ @@ -1561,9 +1581,13 @@ static int __init init_nfs_fs(void) err = nfs_idmap_init(); if (err < 0) - goto out9; + goto out10; err = nfs_dns_resolver_init(); + if (err < 0) + goto out9; + + err = register_pernet_subsys(&nfs_net_ops); if (err < 0) goto out8; @@ -1625,10 +1649,12 @@ out5: out6: nfs_fscache_unregister(); out7: - nfs_dns_resolver_destroy(); + unregister_pernet_subsys(&nfs_net_ops); out8: - nfs_idmap_quit(); + nfs_dns_resolver_destroy(); out9: + nfs_idmap_quit(); +out10: return err; } @@ -1640,6 +1666,7 @@ static void __exit exit_nfs_fs(void) 
nfs_destroy_inodecache(); nfs_destroy_nfspagecache(); nfs_fscache_unregister(); + unregister_pernet_subsys(&nfs_net_ops); nfs_dns_resolver_destroy(); nfs_idmap_quit(); #ifdef CONFIG_PROC_FS diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h new file mode 100644 index 000000000000..8c1f130d6ca2 --- /dev/null +++ b/fs/nfs/netns.h @@ -0,0 +1,13 @@ +#ifndef __NFS_NETNS_H__ +#define __NFS_NETNS_H__ + +#include +#include + +struct nfs_net { + struct cache_detail *nfs_dns_resolve; +}; + +extern int nfs_net_id; + +#endif diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index bb80c49b6533..919a36935924 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -94,13 +94,14 @@ static int nfs4_validate_fspath(struct dentry *dentry, } static size_t nfs_parse_server_name(char *string, size_t len, - struct sockaddr *sa, size_t salen) + struct sockaddr *sa, size_t salen, struct nfs_server *server) { ssize_t ret; ret = rpc_pton(string, len, sa, salen); if (ret == 0) { - ret = nfs_dns_resolve_name(string, len, sa, salen); + ret = nfs_dns_resolve_name(server->client->cl_xprt->xprt_net, + string, len, sa, salen); if (ret < 0) ret = 0; } @@ -137,7 +138,8 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata, continue; mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len, - mountdata->addr, addr_bufsize); + mountdata->addr, addr_bufsize, + NFS_SB(mountdata->sb)); if (mountdata->addrlen == 0) continue; -- cgit From 9df69c81b469780b64f9b26bb87c048613fdeddf Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 25 Nov 2011 17:13:12 +0300 Subject: NFS: DNS resolver PipeFS notifier introduced This patch subscribes DNS resolver caches to RPC pipefs notifications. Notifier is registering on NFS module load. This notifier callback is responsible for creation/destruction of PipeFS DNS resolver cache directory. Note that no locking required in notifier callback because PipeFS superblock pointer is passed as an argument from it's creation or destruction routine and thus we can be sure about it's validity. 
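One non-obvious detail in the callback below is the try_module_get()/module_put() pair around the dentry work. A hedged reading of what it buys, written out as the bare pattern:

    if (!try_module_get(THIS_MODULE))
            return 0;       /* module exit already underway; its own teardown
                             * path handles the pipes, so skip this event */

    /* ... register or unregister the cache directory on this superblock ... */

    module_put(THIS_MODULE);        /* drop the temporary self-reference */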
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/cache_lib.c | 4 ++-- fs/nfs/cache_lib.h | 4 ++++ fs/nfs/dns_resolve.c | 38 +++++++++++++++++++++++++++++++++++++- 3 files changed, 43 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c index 5dd017bbb7a2..5905a31211e5 100644 --- a/fs/nfs/cache_lib.c +++ b/fs/nfs/cache_lib.c @@ -112,7 +112,7 @@ int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq) return 0; } -static int nfs_cache_register_sb(struct super_block *sb, struct cache_detail *cd) +int nfs_cache_register_sb(struct super_block *sb, struct cache_detail *cd) { int ret; struct dentry *dir; @@ -147,7 +147,7 @@ err: return ret; } -static void nfs_cache_unregister_sb(struct super_block *sb, struct cache_detail *cd) +void nfs_cache_unregister_sb(struct super_block *sb, struct cache_detail *cd) { if (cd->u.pipefs.dir) sunrpc_cache_unregister_pipefs(cd); diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h index e0a6cc4b01b9..317db95e37f8 100644 --- a/fs/nfs/cache_lib.h +++ b/fs/nfs/cache_lib.h @@ -27,3 +27,7 @@ extern void nfs_cache_init(struct cache_detail *cd); extern void nfs_cache_destroy(struct cache_detail *cd); extern int nfs_cache_register_net(struct net *net, struct cache_detail *cd); extern void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd); +extern int nfs_cache_register_sb(struct super_block *sb, + struct cache_detail *cd); +extern void nfs_cache_unregister_sb(struct super_block *sb, + struct cache_detail *cd); diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index 9aea78ab86ac..200eb67c95d9 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -40,6 +40,7 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, #include #include #include +#include #include "dns_resolve.h" #include "cache_lib.h" @@ -400,12 +401,47 @@ void nfs_dns_resolver_cache_destroy(struct net *net) kfree(cd); } +static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct super_block *sb = ptr; + struct net *net = sb->s_fs_info; + struct nfs_net *nn = net_generic(net, nfs_net_id); + struct cache_detail *cd = nn->nfs_dns_resolve; + int ret = 0; + + if (cd == NULL) + return 0; + + if (!try_module_get(THIS_MODULE)) + return 0; + + switch (event) { + case RPC_PIPEFS_MOUNT: + ret = nfs_cache_register_sb(sb, cd); + break; + case RPC_PIPEFS_UMOUNT: + nfs_cache_unregister_sb(sb, cd); + break; + default: + ret = -ENOTSUPP; + break; + } + module_put(THIS_MODULE); + return ret; +} + +static struct notifier_block nfs_dns_resolver_block = { + .notifier_call = rpc_pipefs_event, +}; + int nfs_dns_resolver_init(void) { - return 0; + return rpc_pipefs_notifier_register(&nfs_dns_resolver_block); } void nfs_dns_resolver_destroy(void) { + rpc_pipefs_notifier_unregister(&nfs_dns_resolver_block); } #endif -- cgit From 39cb67b9a04300df41e201d9e6392691cdad080f Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 25 Nov 2011 17:13:20 +0300 Subject: NFS: remove RPC PipeFS mount point references from NFS cache routines This is a cleanup patch. We don't need this reference anymore, because DNS resolver cache now creates it's dentries in per-net operations and on PipeFS mount/umount notification. Note that nfs_cache_register_net() now returns 0 instead of -ENOENT in case of PiepFS superblock absence. This is ok, Dns resolver cache will be regestered on PipeFS mount event. 
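Returning 0 when PipeFS is absent is matched on the teardown side: nfs_cache_unregister_sb(), introduced earlier in the series, only undoes the PipeFS registration if a directory was actually created, so a cache registered late by the MOUNT notifier and one that was never made visible go through the same path:

    static void nfs_cache_unregister_sb(struct super_block *sb,
                                        struct cache_detail *cd)
    {
            /* nothing to do unless a pipefs directory exists for this cache */
            if (cd->u.pipefs.dir)
                    sunrpc_cache_unregister_pipefs(cd);
    }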
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/cache_lib.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c index 5905a31211e5..dded26368111 100644 --- a/fs/nfs/cache_lib.c +++ b/fs/nfs/cache_lib.c @@ -126,24 +126,14 @@ int nfs_cache_register_sb(struct super_block *sb, struct cache_detail *cd) int nfs_cache_register_net(struct net *net, struct cache_detail *cd) { - struct vfsmount *mnt; struct super_block *pipefs_sb; - int ret; + int ret = 0; - mnt = rpc_get_mount(); - if (IS_ERR(mnt)) - return PTR_ERR(mnt); pipefs_sb = rpc_get_sb_net(net); - if (!pipefs_sb) { - ret = -ENOENT; - goto err; + if (pipefs_sb) { + ret = nfs_cache_register_sb(pipefs_sb, cd); + rpc_put_sb_net(net); } - ret = nfs_cache_register_sb(pipefs_sb, cd); - rpc_put_sb_net(net); - if (!ret) - return ret; -err: - rpc_put_mount(); return ret; } @@ -162,7 +152,6 @@ void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd) nfs_cache_unregister_sb(pipefs_sb, cd); rpc_put_sb_net(net); } - rpc_put_mount(); } void nfs_cache_init(struct cache_detail *cd) -- cgit From e50a7a1a42335243c94eeea4a8d23413cb02370d Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 16:12:46 +0400 Subject: NFS: make NFS client allocated per network namespace context This patch adds new net variable to nfs_client structure. This variable is set on NFS client creation and cheched during matching NFS client search. Initially current->nsproxy->net_ns is used as network namespace owner for new NFS client to create. This network namespace pointer is set during mount options parsing and thus can be passed from user-spave utils in future if will be necessary. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 16 +++++++++++++--- fs/nfs/internal.h | 1 + fs/nfs/super.c | 3 +++ 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 31778f74357d..ca016fe44602 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -135,6 +135,7 @@ struct nfs_client_initdata { const struct nfs_rpc_ops *rpc_ops; int proto; u32 minorversion; + struct net *net; }; /* @@ -189,6 +190,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_ if (!IS_ERR(cred)) clp->cl_machine_cred = cred; nfs_fscache_get_client_cookie(clp); + clp->net = cl_init->net; return clp; @@ -481,6 +483,9 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat /* Match the full socket address */ if (!nfs_sockaddr_cmp(sap, clap)) continue; + /* Match network namespace */ + if (clp->net != data->net) + continue; atomic_inc(&clp->cl_count); return clp; @@ -831,6 +836,7 @@ static int nfs_init_server(struct nfs_server *server, .addrlen = data->nfs_server.addrlen, .rpc_ops = &nfs_v2_clientops, .proto = data->nfs_server.protocol, + .net = data->net, }; struct rpc_timeout timeparms; struct nfs_client *clp; @@ -1393,7 +1399,7 @@ static int nfs4_set_client(struct nfs_server *server, const char *ip_addr, rpc_authflavor_t authflavour, int proto, const struct rpc_timeout *timeparms, - u32 minorversion) + u32 minorversion, struct net *net) { struct nfs_client_initdata cl_init = { .hostname = hostname, @@ -1402,6 +1408,7 @@ static int nfs4_set_client(struct nfs_server *server, .rpc_ops = &nfs_v4_clientops, .proto = proto, .minorversion = minorversion, + .net = net, }; struct nfs_client *clp; int error; @@ -1453,6 
+1460,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, .rpc_ops = &nfs_v4_clientops, .proto = ds_proto, .minorversion = mds_clp->cl_minorversion, + .net = mds_clp->net, }; struct rpc_timeout ds_timeout = { .to_initval = 15 * HZ, @@ -1580,7 +1588,8 @@ static int nfs4_init_server(struct nfs_server *server, data->auth_flavors[0], data->nfs_server.protocol, &timeparms, - data->minorversion); + data->minorversion, + data->net); if (error < 0) goto error; @@ -1677,7 +1686,8 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, data->authflavor, parent_server->client->cl_xprt->prot, parent_server->client->cl_timeout, - parent_client->cl_mvops->minor_version); + parent_client->cl_mvops->minor_version, + parent_client->net); if (error < 0) goto error; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 8102db9b926c..02fb2001a283 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -123,6 +123,7 @@ struct nfs_parsed_mount_data { } nfs_server; struct security_mnt_opts lsm_opts; + struct net *net; }; /* mount_clnt.c */ diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 3dfa4f112c0a..73aa75649bf8 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -1107,6 +1108,8 @@ static int nfs_parse_mount_options(char *raw, free_secdata(secdata); + mnt->net = current->nsproxy->net_ns; + while ((p = strsep(&raw, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; unsigned long option; -- cgit From 6d59b8d599d594bc314026c6856424fe49df5513 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 16:12:54 +0400 Subject: NFS: pass NFS client owner network namespace to RPC client creation routine This patch replaces static "init_net" with nfs_client->net pointer in RPC client creation calls. 
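Between this patch and the previous one, the namespace travels with the data instead of being hard-coded: the mount path records the caller's namespace, the nfs_client keeps a copy, and every RPC transport created for that client inherits it. Pulled out of the hunks for a condensed view (fragments, not a single function):

    /* fs/nfs/super.c, nfs_parse_mount_options(): remember the mounter's netns */
    mnt->net = current->nsproxy->net_ns;

    /* fs/nfs/client.c, nfs_alloc_client(): the client keeps it */
    clp->net = cl_init->net;

    /* fs/nfs/client.c, nfs_create_rpc_client(): every transport inherits it */
    struct rpc_create_args args = {
            .net      = clp->net,
            .protocol = clp->cl_proto,
            /* ... */
    };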
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 2 +- fs/nfs/internal.h | 1 + fs/nfs/mount_clnt.c | 4 ++-- fs/nfs/super.c | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index ca016fe44602..64815b725409 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -647,7 +647,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp, { struct rpc_clnt *clnt = NULL; struct rpc_create_args args = { - .net = &init_net, + .net = clp->net, .protocol = clp->cl_proto, .address = (struct sockaddr *)&clp->cl_addr, .addrsize = clp->cl_addrlen, diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 02fb2001a283..d602188f889f 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -138,6 +138,7 @@ struct nfs_mount_request { int noresvport; unsigned int *auth_flav_len; rpc_authflavor_t *auth_flavs; + struct net *net; }; extern int nfs_mount(struct nfs_mount_request *info); diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index d4c2d6b7507e..4fbe3a8e5e6b 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -153,7 +153,7 @@ int nfs_mount(struct nfs_mount_request *info) .rpc_resp = &result, }; struct rpc_create_args args = { - .net = &init_net, + .net = info->net, .protocol = info->protocol, .address = info->sap, .addrsize = info->salen, @@ -225,7 +225,7 @@ void nfs_umount(const struct nfs_mount_request *info) .to_retries = 2, }; struct rpc_create_args args = { - .net = &init_net, + .net = info->net, .protocol = IPPROTO_UDP, .address = info->sap, .addrsize = info->salen, diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 73aa75649bf8..e45feb0fee59 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1625,6 +1625,7 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args, .noresvport = args->flags & NFS_MOUNT_NORESVPORT, .auth_flav_len = &server_authlist_len, .auth_flavs = server_authlist, + .net = args->net, }; int status; -- cgit From 7bb782c6ac75898604eb547ed37b05c49b1edf21 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 16:13:03 +0400 Subject: NFS: create callback transports in parent transport network namespace This patch replaces static "init_net" references with parent transport xprt_net reference. Thus callback transports will be created in the same network namespace as respective NFS mount point was created. 
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 516f3375e067..d81040a7efc4 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -102,11 +102,11 @@ nfs4_callback_svc(void *vrqstp) * Prepare to bring up the NFSv4 callback service */ struct svc_rqst * -nfs4_callback_up(struct svc_serv *serv) +nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) { int ret; - ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET, + ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET, nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); if (ret <= 0) goto out_err; @@ -114,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv) dprintk("NFS: Callback listener port = %u (af %u)\n", nfs_callback_tcpport, PF_INET); - ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6, + ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6, nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); if (ret > 0) { nfs_callback_tcpport6 = ret; @@ -183,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) * fore channel connection. * Returns the input port (0) and sets the svc_serv bc_xprt on success */ - ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0, + ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0, SVC_SOCK_ANONYMOUS); if (ret < 0) { rqstp = ERR_PTR(ret); @@ -269,7 +269,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt) serv, xprt, &rqstp, &callback_svc); if (!minorversion_setup) { /* v4.0 callback setup */ - rqstp = nfs4_callback_up(serv); + rqstp = nfs4_callback_up(serv, xprt); callback_svc = nfs4_callback_svc; } -- cgit From 4929d1d33fdbe8385cdd49ccd23563e9ff247ff8 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 16:13:11 +0400 Subject: NFS: handle NFS idmap pipe PipeFS dentries by network namespace aware routines This patch makes NFS idmap pipes dentries allocated and destroyed in network namespace context by PipeFS network namespace aware routines. Network namespace context is obtained from nfs_client structure. 
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 588d7da5b17e..769274ed51c4 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -431,6 +431,56 @@ static const struct rpc_pipe_ops idmap_upcall_ops = { .destroy_msg = idmap_pipe_destroy_msg, }; +static void __nfs_idmap_unregister(struct rpc_pipe *pipe) +{ + if (pipe->dentry) + rpc_unlink(pipe->dentry); +} + +static int __nfs_idmap_register(struct dentry *dir, + struct idmap *idmap, + struct rpc_pipe *pipe) +{ + struct dentry *dentry; + + dentry = rpc_mkpipe_dentry(dir, "idmap", idmap, pipe); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + pipe->dentry = dentry; + return 0; +} + +static void nfs_idmap_unregister(struct nfs_client *clp, + struct rpc_pipe *pipe) +{ + struct net *net = clp->net; + struct super_block *pipefs_sb; + + pipefs_sb = rpc_get_sb_net(net); + if (pipefs_sb) { + __nfs_idmap_unregister(pipe); + rpc_put_sb_net(net); + } +} + +static int nfs_idmap_register(struct nfs_client *clp, + struct idmap *idmap, + struct rpc_pipe *pipe) +{ + struct net *net = clp->net; + struct super_block *pipefs_sb; + int err = 0; + + pipefs_sb = rpc_get_sb_net(net); + if (pipefs_sb) { + if (clp->cl_rpcclient->cl_dentry) + err = __nfs_idmap_register(clp->cl_rpcclient->cl_dentry, + idmap, pipe); + rpc_put_sb_net(net); + } + return err; +} + int nfs_idmap_new(struct nfs_client *clp) { @@ -450,12 +500,8 @@ nfs_idmap_new(struct nfs_client *clp) kfree(idmap); return error; } - - if (clp->cl_rpcclient->cl_dentry) - pipe->dentry = rpc_mkpipe_dentry(clp->cl_rpcclient->cl_dentry, - "idmap", idmap, pipe); - if (IS_ERR(pipe->dentry)) { - error = PTR_ERR(pipe->dentry); + error = nfs_idmap_register(clp, idmap, pipe); + if (error) { rpc_destroy_pipe_data(pipe); kfree(idmap); return error; @@ -478,8 +524,7 @@ nfs_idmap_delete(struct nfs_client *clp) if (!idmap) return; - if (idmap->idmap_pipe->dentry) - rpc_unlink(idmap->idmap_pipe->dentry); + nfs_idmap_unregister(clp, idmap->idmap_pipe); rpc_destroy_pipe_data(idmap->idmap_pipe); clp->cl_idmap = NULL; kfree(idmap); -- cgit From eee17325f1dfbe004f1475743bab9e3d050d00f5 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 16:13:19 +0400 Subject: NFS: idmap PipeFS notifier introduced v2: 1) Added "nfs_idmap_init" and "nfs_idmap_quit" definitions for kernels built without CONFIG_NFS_V4 option set. This patch subscribes NFS clients to RPC pipefs notifications. Idmap notifier is registering on NFS module load. This notifier callback is responsible for creation/destruction of PipeFS idmap pipe dentry for NFS4 clients. Since ipdmap pipe is created in rpc client pipefs directory, we have make sure, that this directory has been created already. IOW RPC client notifier callback has been called already. To achive this, PipeFS notifier priorities has been introduced (RPC clients notifier priority is greater than NFS idmap one). But this approach gives another problem: unlink for RPC client directory will be called before NFS idmap pipe unlink on UMOUNT event and will fail, because directory is not empty. The solution, introduced in this patch, is to try to remove client directory once again after idmap pipe was unlinked. This looks like ugly hack, so probably it should be replaced in some more elegant way. 
Note that no locking required in notifier callback because PipeFS superblock pointer is passed as an argument from it's creation or destruction routine and thus we can be sure about it's validity. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 4 +-- fs/nfs/idmap.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/nfs/internal.h | 4 +++ 3 files changed, 81 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 64815b725409..ca9a4aa38dff 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -52,8 +52,8 @@ #define NFSDBG_FACILITY NFSDBG_CLIENT -static DEFINE_SPINLOCK(nfs_client_lock); -static LIST_HEAD(nfs_client_list); +DEFINE_SPINLOCK(nfs_client_lock); +LIST_HEAD(nfs_client_list); static LIST_HEAD(nfs_volume_list); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); #ifdef CONFIG_NFS_V4 diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 769274ed51c4..ff084d258c41 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -377,6 +377,7 @@ int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, #include #include "nfs4_fs.h" +#include "internal.h" #define IDMAP_HASH_SZ 128 @@ -530,6 +531,80 @@ nfs_idmap_delete(struct nfs_client *clp) kfree(idmap); } +static int __rpc_pipefs_event(struct nfs_client *clp, unsigned long event, + struct super_block *sb) +{ + int err = 0; + + switch (event) { + case RPC_PIPEFS_MOUNT: + BUG_ON(clp->cl_rpcclient->cl_dentry == NULL); + err = __nfs_idmap_register(clp->cl_rpcclient->cl_dentry, + clp->cl_idmap, + clp->cl_idmap->idmap_pipe); + break; + case RPC_PIPEFS_UMOUNT: + if (clp->cl_idmap->idmap_pipe) { + struct dentry *parent; + + parent = clp->cl_idmap->idmap_pipe->dentry->d_parent; + __nfs_idmap_unregister(clp->cl_idmap->idmap_pipe); + /* + * Note: This is a dirty hack. SUNRPC hook has been + * called already but simple_rmdir() call for the + * directory returned with error because of idmap pipe + * inside. Thus now we have to remove this directory + * here. 
+ */ + if (rpc_rmdir(parent)) + printk(KERN_ERR "%s: failed to remove clnt dir!\n", __func__); + } + break; + default: + printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); + return -ENOTSUPP; + } + return err; +} + +static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct super_block *sb = ptr; + struct nfs_client *clp; + int error = 0; + + spin_lock(&nfs_client_lock); + list_for_each_entry(clp, &nfs_client_list, cl_share_link) { + if (clp->net != sb->s_fs_info) + continue; + if (clp->rpc_ops != &nfs_v4_clientops) + continue; + error = __rpc_pipefs_event(clp, event, sb); + if (error) + break; + } + spin_unlock(&nfs_client_lock); + return error; +} + +#define PIPEFS_NFS_PRIO 1 + +static struct notifier_block nfs_idmap_block = { + .notifier_call = rpc_pipefs_event, + .priority = SUNRPC_PIPEFS_NFS_PRIO, +}; + +int nfs_idmap_init(void) +{ + return rpc_pipefs_notifier_register(&nfs_idmap_block); +} + +void nfs_idmap_quit(void) +{ + rpc_pipefs_notifier_unregister(&nfs_idmap_block); +} + /* * Helper routines for manipulating the hashtable */ diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d602188f889f..2b9836fe4434 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -182,6 +182,10 @@ static inline void nfs_fs_proc_exit(void) { } #endif +#ifdef CONFIG_NFS_V4 +extern spinlock_t nfs_client_lock; +extern struct list_head nfs_client_list; +#endif /* nfs4namespace.c */ #ifdef CONFIG_NFS_V4 -- cgit From 332dfab6f4e02d3c5897e9470492bee7d14f29cc Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 17:04:16 +0400 Subject: NFS: handle blocklayout pipe PipeFS dentry by network namespace aware routines This patch makes blocklayout pipe dentry allocated and destroyed in network namespace context by PipeFS network namespace aware routines. Network namespace context is obtained from nfs_client structure. 
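In code, "obtained from nfs_client structure" ends up as a short chain from the server to the per-net NFS state; the follow-up patch's blocklayoutdev.c hunk uses exactly this when queueing an upcall (the per-net bl_device_pipe field also arrives in that patch):

    struct net *net = server->nfs_client->net;          /* set at client creation */
    struct nfs_net *nn = net_generic(net, nfs_net_id);  /* per-netns NFS state    */

    rc = rpc_queue_upcall(nn->bl_device_pipe, &msg);    /* this namespace's pipe  */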
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 61 ++++++++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 848660fd58c4..80b0c4a40485 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -1025,10 +1025,55 @@ static const struct rpc_pipe_ops bl_upcall_ops = { .destroy_msg = bl_pipe_destroy_msg, }; +static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb, + struct rpc_pipe *pipe) +{ + struct dentry *dir, *dentry; + + dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME); + if (dir == NULL) + return ERR_PTR(-ENOENT); + dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe); + dput(dir); + return dentry; +} + +static void nfs4blocklayout_unregister_sb(struct super_block *sb, + struct rpc_pipe *pipe) +{ + if (pipe->dentry) + rpc_unlink(pipe->dentry); +} + +static struct dentry *nfs4blocklayout_register_net(struct net *net, + struct rpc_pipe *pipe) +{ + struct super_block *pipefs_sb; + struct dentry *dentry; + + pipefs_sb = rpc_get_sb_net(net); + if (!pipefs_sb) + return ERR_PTR(-ENOENT); + dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe); + rpc_put_sb_net(net); + return dentry; +} + +static void nfs4blocklayout_unregister_net(struct net *net, + struct rpc_pipe *pipe) +{ + struct super_block *pipefs_sb; + + pipefs_sb = rpc_get_sb_net(net); + if (pipefs_sb) { + nfs4blocklayout_unregister_sb(pipefs_sb, pipe); + rpc_put_sb_net(net); + } +} + static int __init nfs4blocklayout_init(void) { struct vfsmount *mnt; - struct path path; int ret; dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__); @@ -1044,21 +1089,13 @@ static int __init nfs4blocklayout_init(void) ret = PTR_ERR(mnt); goto out_remove; } - - ret = vfs_path_lookup(mnt->mnt_root, - mnt, - NFS_PIPE_DIRNAME, 0, &path); - if (ret) - goto out_putrpc; - bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); - path_put(&path); if (IS_ERR(bl_device_pipe)) { ret = PTR_ERR(bl_device_pipe); goto out_putrpc; } - bl_device_pipe->dentry = rpc_mkpipe_dentry(path.dentry, "blocklayout", - NULL, bl_device_pipe); + bl_device_pipe->dentry = nfs4blocklayout_register_net(&init_net, + bl_device_pipe); if (IS_ERR(bl_device_pipe->dentry)) { ret = PTR_ERR(bl_device_pipe->dentry); goto out_destroy_pipe; @@ -1081,7 +1118,7 @@ static void __exit nfs4blocklayout_exit(void) __func__); pnfs_unregister_layoutdriver(&blocklayout_type); - rpc_unlink(bl_device_pipe->dentry); + nfs4blocklayout_unregister_net(&init_net, bl_device_pipe); rpc_destroy_pipe_data(bl_device_pipe); rpc_put_mount(); } -- cgit From 9e2e74dba6ddce94da187369b50a27536147d5df Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 17:04:24 +0400 Subject: NFS: blocklayout pipe creation per network namespace context introduced This patch implements blocklayout pipe creation and registration per each existent network namespace. This was achived by registering NFS per-net operations, responsible for blocklayout pipe allocation/register and unregister/destruction instead of initialization and destruction of static "bl_device_pipe" pipe (this one was removed). Note, than pointer to network blocklayout pipe is stored in per-net "nfs_net" structure, because allocating of one more per-net structure for blocklayout module looks redundant. 
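Reusing struct nfs_net keeps the cost of going per-net to one extra pointer; the nfs_net_id and size registration done by the earlier DNS resolver patch already allocates the structure for every namespace, so the blocklayout pernet_operations added below only need .init and .exit rather than their own id and size. After this patch the shared structure looks like:

    /* fs/nfs/netns.h */
    struct nfs_net {
            struct cache_detail *nfs_dns_resolve;
            struct rpc_pipe *bl_device_pipe;    /* new: blocklayout upcall pipe */
    };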
This patch also changes dev_remove() function prototype (and all it's callers, where it' requied) by adding network namespace pointer parameter, which is used to discover proper blocklayout pipe for rpc_queue_upcall() call. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 54 ++++++++++++++++++++++++------------- fs/nfs/blocklayout/blocklayout.h | 3 ++- fs/nfs/blocklayout/blocklayoutdev.c | 5 +++- fs/nfs/blocklayout/blocklayoutdm.c | 7 ++--- fs/nfs/inode.c | 1 + fs/nfs/netns.h | 1 + 6 files changed, 47 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 80b0c4a40485..9da72b8a5542 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -46,7 +46,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andy Adamson "); MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); -struct rpc_pipe *bl_device_pipe; wait_queue_head_t bl_wq; static void print_page(struct page *page) @@ -1071,6 +1070,37 @@ static void nfs4blocklayout_unregister_net(struct net *net, } } +static int nfs4blocklayout_net_init(struct net *net) +{ + struct nfs_net *nn = net_generic(net, nfs_net_id); + struct dentry *dentry; + + nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); + if (IS_ERR(nn->bl_device_pipe)) + return PTR_ERR(nn->bl_device_pipe); + dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe); + if (IS_ERR(dentry)) { + rpc_destroy_pipe_data(nn->bl_device_pipe); + return PTR_ERR(dentry); + } + nn->bl_device_pipe->dentry = dentry; + return 0; +} + +static void nfs4blocklayout_net_exit(struct net *net) +{ + struct nfs_net *nn = net_generic(net, nfs_net_id); + + nfs4blocklayout_unregister_net(net, nn->bl_device_pipe); + rpc_destroy_pipe_data(nn->bl_device_pipe); + nn->bl_device_pipe = NULL; +} + +static struct pernet_operations nfs4blocklayout_net_ops = { + .init = nfs4blocklayout_net_init, + .exit = nfs4blocklayout_net_exit, +}; + static int __init nfs4blocklayout_init(void) { struct vfsmount *mnt; @@ -1089,24 +1119,12 @@ static int __init nfs4blocklayout_init(void) ret = PTR_ERR(mnt); goto out_remove; } - bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); - if (IS_ERR(bl_device_pipe)) { - ret = PTR_ERR(bl_device_pipe); - goto out_putrpc; - } - bl_device_pipe->dentry = nfs4blocklayout_register_net(&init_net, - bl_device_pipe); - if (IS_ERR(bl_device_pipe->dentry)) { - ret = PTR_ERR(bl_device_pipe->dentry); - goto out_destroy_pipe; - } + ret = register_pernet_subsys(&nfs4blocklayout_net_ops); + if (ret) + goto out_remove; out: return ret; -out_destroy_pipe: - rpc_destroy_pipe_data(bl_device_pipe); -out_putrpc: - rpc_put_mount(); out_remove: pnfs_unregister_layoutdriver(&blocklayout_type); return ret; @@ -1117,10 +1135,8 @@ static void __exit nfs4blocklayout_exit(void) dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n", __func__); + unregister_pernet_subsys(&nfs4blocklayout_net_ops); pnfs_unregister_layoutdriver(&blocklayout_type); - nfs4blocklayout_unregister_net(&init_net, bl_device_pipe); - rpc_destroy_pipe_data(bl_device_pipe); - rpc_put_mount(); } MODULE_ALIAS("nfs-layouttype4-3"); diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index 49c670b18a9e..0966b39bbcfb 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -37,6 +37,7 @@ #include #include "../pnfs.h" +#include "../netns.h" #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT) #define 
PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) @@ -50,6 +51,7 @@ struct pnfs_block_dev { struct list_head bm_node; struct nfs4_deviceid bm_mdevid; /* associated devid */ struct block_device *bm_mdev; /* meta device itself */ + struct net *net; }; enum exstate4 { @@ -161,7 +163,6 @@ struct bl_msg_hdr { u16 totallen; /* length of entire message, including hdr itself */ }; -extern struct rpc_pipe *bl_device_pipe; extern wait_queue_head_t bl_wq; #define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */ diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index 949b62478799..94ed978860c0 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -120,6 +120,8 @@ nfs4_blk_decode_device(struct nfs_server *server, DECLARE_WAITQUEUE(wq, current); struct bl_dev_msg *reply = &bl_mount_reply; int offset, len, i, rc; + struct net *net = server->nfs_client->net; + struct nfs_net *nn = net_generic(net, nfs_net_id); dprintk("%s CREATING PIPEFS MESSAGE\n", __func__); dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data, @@ -146,7 +148,7 @@ nfs4_blk_decode_device(struct nfs_server *server, dprintk("%s CALLING USERSPACE DAEMON\n", __func__); add_wait_queue(&bl_wq, &wq); - rc = rpc_queue_upcall(bl_device_pipe, &msg); + rc = rpc_queue_upcall(nn->bl_device_pipe, &msg); if (rc < 0) { remove_wait_queue(&bl_wq, &wq); rv = ERR_PTR(rc); @@ -181,6 +183,7 @@ nfs4_blk_decode_device(struct nfs_server *server, rv->bm_mdev = bd; memcpy(&rv->bm_mdevid, &dev->dev_id, sizeof(struct nfs4_deviceid)); + rv->net = net; dprintk("%s Created device %s with bd_block_size %u\n", __func__, bd->bd_disk->disk_name, diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 631f254d12ab..970490f556de 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -38,7 +38,7 @@ #define NFSDBG_FACILITY NFSDBG_PNFS_LD -static void dev_remove(dev_t dev) +static void dev_remove(struct net *net, dev_t dev) { struct rpc_pipe_msg msg; struct bl_dev_msg bl_umount_request; @@ -48,6 +48,7 @@ static void dev_remove(dev_t dev) }; uint8_t *dataptr; DECLARE_WAITQUEUE(wq, current); + struct nfs_net *nn = net_generic(net, nfs_net_id); dprintk("Entering %s\n", __func__); @@ -66,7 +67,7 @@ static void dev_remove(dev_t dev) msg.len = sizeof(bl_msg) + bl_msg.totallen; add_wait_queue(&bl_wq, &wq); - if (rpc_queue_upcall(bl_device_pipe, &msg) < 0) { + if (rpc_queue_upcall(nn->bl_device_pipe, &msg) < 0) { remove_wait_queue(&bl_wq, &wq); goto out; } @@ -93,7 +94,7 @@ static void nfs4_blk_metadev_release(struct pnfs_block_dev *bdev) printk(KERN_ERR "%s nfs4_blkdev_put returns %d\n", __func__, rv); - dev_remove(bdev->bm_mdev->bd_dev); + dev_remove(bdev->net, bdev->bm_mdev->bd_dev); } void bl_free_block_dev(struct pnfs_block_dev *bdev) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0335f6e4ff7e..577ad5a72a24 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1554,6 +1554,7 @@ static void nfsiod_stop(void) } int nfs_net_id; +EXPORT_SYMBOL_GPL(nfs_net_id); static int nfs_net_init(struct net *net) { diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index 8c1f130d6ca2..39ae4cad5b4b 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -6,6 +6,7 @@ struct nfs_net { struct cache_detail *nfs_dns_resolve; + struct rpc_pipe *bl_device_pipe; }; extern int nfs_net_id; -- cgit From 627f30668fac12f5bd555a2cc22af2323762fe8d Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 17:04:32 +0400 Subject: 
NFS: blocklayout PipeFS notifier introduced This patch subscribes blocklayout pipes to RPC pipefs notifications. The notifier is registered on blocklayout module load. The notifier callback is responsible for creation/destruction of the PipeFS blocklayout pipe dentry. Note that no locking is required in the notifier callback, because the PipeFS superblock pointer is passed as an argument from its creation or destruction routine, so we can be sure of its validity. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 50 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 9da72b8a5542..df05b9465146 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -1044,6 +1044,48 @@ static void nfs4blocklayout_unregister_sb(struct super_block *sb, rpc_unlink(pipe->dentry); } +static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct super_block *sb = ptr; + struct net *net = sb->s_fs_info; + struct nfs_net *nn = net_generic(net, nfs_net_id); + struct dentry *dentry; + int ret = 0; + + if (!try_module_get(THIS_MODULE)) + return 0; + + if (nn->bl_device_pipe == NULL) { + module_put(THIS_MODULE); + return 0; + } + + switch (event) { + case RPC_PIPEFS_MOUNT: + dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); + break; + } + nn->bl_device_pipe->dentry = dentry; + break; + case RPC_PIPEFS_UMOUNT: + if (nn->bl_device_pipe->dentry) + nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe); + break; + default: + ret = -ENOTSUPP; + break; + } + module_put(THIS_MODULE); + return ret; +} + +static struct notifier_block nfs4blocklayout_block = { + .notifier_call = rpc_pipefs_event, +}; + static struct dentry *nfs4blocklayout_register_net(struct net *net, struct rpc_pipe *pipe) { @@ -1119,12 +1161,17 @@ static int __init nfs4blocklayout_init(void) ret = PTR_ERR(mnt); goto out_remove; } - ret = register_pernet_subsys(&nfs4blocklayout_net_ops); + ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block); if (ret) goto out_remove; + ret = register_pernet_subsys(&nfs4blocklayout_net_ops); + if (ret) + goto out_notifier; out: return ret; +out_notifier: + rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); out_remove: pnfs_unregister_layoutdriver(&blocklayout_type); return ret; @@ -1135,6 +1182,7 @@ static void __exit nfs4blocklayout_exit(void) dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n", __func__); + rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); unregister_pernet_subsys(&nfs4blocklayout_net_ops); pnfs_unregister_layoutdriver(&blocklayout_type); } -- cgit From 2561d618ffb615f92fe17f0cf6b03f8e5cddb2cb Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 17:04:40 +0400 Subject: NFS: remove RPC PipeFS mount point reference from blocklayout routines This is a cleanup patch. We don't need this reference anymore, because blocklayout pipe dentries are now created and destroyed in per-net operations and on PipeFS mount/umount notification. Note that nfs4blocklayout_register_net() now returns NULL instead of -ENOENT when the PipeFS superblock is absent. This is OK, because the blocklayout pipe dentry will be created on the PipeFS mount event. 
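Why returning NULL rather than an ERR_PTR() is safe can be seen from a condensed caller sketch (illustrative only; the wrapper function below is hypothetical, while nfs4blocklayout_register_net() and the nfs_net fields are the ones used by these patches): IS_ERR() is false for NULL, so the per-net init path treats "rpc_pipefs not mounted yet" as success and simply leaves the pipe dentry unset until the RPC_PIPEFS_MOUNT notifier fires.

static int bl_attach_pipe_dentry(struct net *net, struct nfs_net *nn)
{
	struct dentry *dentry;

	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry))		/* a real registration error */
		return PTR_ERR(dentry);
	/* dentry may legitimately be NULL: rpc_pipefs is not mounted yet */
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}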
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index df05b9465146..783ebd51bd5f 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -1094,7 +1094,7 @@ static struct dentry *nfs4blocklayout_register_net(struct net *net, pipefs_sb = rpc_get_sb_net(net); if (!pipefs_sb) - return ERR_PTR(-ENOENT); + return NULL; dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe); rpc_put_sb_net(net); return dentry; @@ -1145,7 +1145,6 @@ static struct pernet_operations nfs4blocklayout_net_ops = { static int __init nfs4blocklayout_init(void) { - struct vfsmount *mnt; int ret; dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__); @@ -1155,12 +1154,6 @@ static int __init nfs4blocklayout_init(void) goto out; init_waitqueue_head(&bl_wq); - - mnt = rpc_get_mount(); - if (IS_ERR(mnt)) { - ret = PTR_ERR(mnt); - goto out_remove; - } ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block); if (ret) goto out_remove; -- cgit From 1313e6034a73a55d6293dbdc62b8853dd067771a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:24 -0500 Subject: NFS: Remove unnecessary includes from linux/nfs_fs_i.h Also from linux/nfs_xdr.h. Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 5668f7c54c41..77a184e2fe47 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include -- cgit From d1e284d50a1506aab8ad7895f31b5f93b5647fc9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:24 -0500 Subject: NFSv4: Clean up nfs4_get_state_owner Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4proc.c | 3 ++- fs/nfs/nfs4state.c | 24 +++++++++++------------- 3 files changed, 14 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 4d7d0aedc101..654b091644c5 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -319,7 +319,7 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session) } #endif /* CONFIG_NFS_V4_1 */ -extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); +extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t); extern void nfs4_put_state_owner(struct nfs4_state_owner *); extern void nfs4_purge_state_owners(struct nfs_server *); extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f0c849c98fe4..53ef365f4372 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1754,7 +1754,8 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode /* Protect against reboot recovery conflicts */ status = -ENOMEM; - if (!(sp = nfs4_get_state_owner(server, cred))) { + sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); + if (sp == NULL) { dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); goto out_err; } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a53f33b4ac3a..a8a42a677d8f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -444,13 +444,17 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) * */ static struct 
nfs4_state_owner * -nfs4_alloc_state_owner(void) +nfs4_alloc_state_owner(struct nfs_server *server, + struct rpc_cred *cred, + gfp_t gfp_flags) { struct nfs4_state_owner *sp; - sp = kzalloc(sizeof(*sp),GFP_NOFS); + sp = kzalloc(sizeof(*sp), gfp_flags); if (!sp) return NULL; + sp->so_server = server; + sp->so_cred = get_rpccred(cred); spin_lock_init(&sp->so_lock); INIT_LIST_HEAD(&sp->so_states); rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue"); @@ -516,7 +520,8 @@ static void nfs4_gc_state_owners(struct nfs_server *server) * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL. */ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, - struct rpc_cred *cred) + struct rpc_cred *cred, + gfp_t gfp_flags) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *new; @@ -526,20 +531,13 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, spin_unlock(&clp->cl_lock); if (sp != NULL) goto out; - new = nfs4_alloc_state_owner(); + new = nfs4_alloc_state_owner(server, cred, gfp_flags); if (new == NULL) goto out; - new->so_server = server; - new->so_cred = cred; - spin_lock(&clp->cl_lock); sp = nfs4_insert_state_owner_locked(new); spin_unlock(&clp->cl_lock); - if (sp == new) - get_rpccred(cred); - else { - rpc_destroy_wait_queue(&new->so_sequence.wait); - kfree(new); - } + if (sp != new) + nfs4_free_state_owner(new); out: nfs4_gc_state_owners(server); return sp; -- cgit From 9157c31dd610a127bc6f01bc1953cf8b80382040 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:24 -0500 Subject: NFSv4: Replace state_owner->so_owner_id with an ida based allocator We're unlikely to ever need more than 2^31 simultaneous open owners, so let's replace the custom allocator with the generic ida allocator. Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 2 ++ fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4proc.c | 4 ++-- fs/nfs/nfs4state.c | 17 ++++++++++++----- 4 files changed, 17 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index ca9a4aa38dff..8d1739d3424d 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1092,6 +1092,7 @@ static struct nfs_server *nfs_alloc_server(void) return NULL; } + ida_init(&server->openowner_id); pnfs_init_server(server); return server; @@ -1117,6 +1118,7 @@ void nfs_free_server(struct nfs_server *server) nfs_put_client(server->nfs_client); + ida_destroy(&server->openowner_id); nfs_free_iostats(server->io_stats); bdi_destroy(&server->backing_dev_info); kfree(server); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 654b091644c5..091b679747ed 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -92,7 +92,6 @@ struct nfs_unique_id { * semantics by allowing the server to identify replayed requests. 
*/ struct nfs4_state_owner { - struct nfs_unique_id so_owner_id; struct nfs_server *so_server; struct list_head so_lru; unsigned long so_expires; @@ -106,6 +105,7 @@ struct nfs4_state_owner { struct list_head so_states; struct nfs_seqid_counter so_seqid; struct rpc_sequence so_sequence; + int so_owner_id; }; enum { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 53ef365f4372..1dd2e407a901 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -815,7 +815,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, p->o_arg.open_flags = flags; p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); p->o_arg.clientid = server->nfs_client->cl_clientid; - p->o_arg.id = sp->so_owner_id.id; + p->o_arg.id = sp->so_owner_id; p->o_arg.name = &dentry->d_name; p->o_arg.server = server; p->o_arg.bitmask = server->attr_bitmask; @@ -1440,7 +1440,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) rcu_read_unlock(); } /* Update sequence id. */ - data->o_arg.id = sp->so_owner_id.id; + data->o_arg.id = sp->so_owner_id; data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a8a42a677d8f..8472707286f9 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -405,6 +405,7 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new) struct rb_node **p = &server->state_owners.rb_node, *parent = NULL; struct nfs4_state_owner *sp; + int err; while (*p != NULL) { parent = *p; @@ -421,8 +422,9 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new) return sp; } } - nfs_alloc_unique_id_locked(&server->openowner_id, - &new->so_owner_id, 1, 64); + err = ida_get_new(&server->openowner_id, &new->so_owner_id); + if (err) + return ERR_PTR(err); rb_link_node(&new->so_server_node, parent, p); rb_insert_color(&new->so_server_node, &server->state_owners); return new; @@ -435,7 +437,7 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) if (!RB_EMPTY_NODE(&sp->so_server_node)) rb_erase(&sp->so_server_node, &server->state_owners); - nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id); + ida_remove(&server->openowner_id, sp->so_owner_id); } /* @@ -534,8 +536,13 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, new = nfs4_alloc_state_owner(server, cred, gfp_flags); if (new == NULL) goto out; - sp = nfs4_insert_state_owner_locked(new); - spin_unlock(&clp->cl_lock); + do { + if (ida_pre_get(&server->openowner_id, gfp_flags) == 0) + break; + spin_lock(&clp->cl_lock); + sp = nfs4_insert_state_owner_locked(new); + spin_unlock(&clp->cl_lock); + } while (sp == ERR_PTR(-EAGAIN)); if (sp != new) nfs4_free_state_owner(new); out: -- cgit From d2d7ce28a2f8ec6ca2a49145e643d2e3c7d21ba3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:25 -0500 Subject: NFSv4: Replace lock_owner->ld_id with an ida based allocator Again, We're unlikely to ever need more than 2^31 simultaneous lock owners, so let's replace the custom allocator. Now that there are no more users, we can also get rid of the custom allocator code. 
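For readers unfamiliar with it, the generic ida allocator that these conversions adopt follows the pattern sketched below (a minimal illustration of the <linux/idr.h> API, not code from the diffs; the struct and function names are made up). The open-owner conversion above preallocates with ida_pre_get() and then calls ida_get_new() under the client spinlock, while the lock-owner conversion that follows uses the simpler ida_simple_get()/ida_simple_remove() calls shown here.

#include <linux/idr.h>

struct foo_server {
	struct ida owner_ids;			/* illustrative container */
};

static void foo_server_init(struct foo_server *server)
{
	ida_init(&server->owner_ids);
}

static int foo_alloc_owner_id(struct foo_server *server, gfp_t gfp)
{
	/* returns the smallest free id >= 0, or a negative errno */
	return ida_simple_get(&server->owner_ids, 0, 0, gfp);
}

static void foo_free_owner_id(struct foo_server *server, int id)
{
	ida_simple_remove(&server->owner_ids, id);
}

static void foo_server_destroy(struct foo_server *server)
{
	ida_destroy(&server->owner_ids);	/* releases the internal bitmap */
}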
Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 2 ++ fs/nfs/nfs4_fs.h | 7 +----- fs/nfs/nfs4proc.c | 6 ++--- fs/nfs/nfs4state.c | 74 ++++++------------------------------------------------ 4 files changed, 14 insertions(+), 75 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 8d1739d3424d..df60d9971b95 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1093,6 +1093,7 @@ static struct nfs_server *nfs_alloc_server(void) } ida_init(&server->openowner_id); + ida_init(&server->lockowner_id); pnfs_init_server(server); return server; @@ -1118,6 +1119,7 @@ void nfs_free_server(struct nfs_server *server) nfs_put_client(server->nfs_client); + ida_destroy(&server->lockowner_id); ida_destroy(&server->openowner_id); nfs_free_iostats(server->io_stats); bdi_destroy(&server->backing_dev_info); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 091b679747ed..b23cb0cda632 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -81,11 +81,6 @@ static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status seqid->flags |= NFS_SEQID_CONFIRMED; } -struct nfs_unique_id { - struct rb_node rb_node; - __u64 id; -}; - /* * NFS4 state_owners and lock_owners are simply labels for ordered * sequences of RPC calls. Their sole purpose is to provide once-only @@ -145,9 +140,9 @@ struct nfs4_lock_state { struct nfs4_state * ls_state; /* Pointer to open state */ #define NFS_LOCK_INITIALIZED 1 int ls_flags; + int ls_id; struct nfs_seqid_counter ls_seqid; struct rpc_sequence ls_sequence; - struct nfs_unique_id ls_id; nfs4_stateid ls_stateid; atomic_t ls_count; struct nfs4_lock_owner ls_owner; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1dd2e407a901..9c77af900960 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4017,7 +4017,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock if (status != 0) goto out; lsp = request->fl_u.nfs4_fl.owner; - arg.lock_owner.id = lsp->ls_id.id; + arg.lock_owner.id = lsp->ls_id; arg.lock_owner.s_dev = server->s_dev; status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); switch (status) { @@ -4262,7 +4262,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, goto out_free_seqid; p->arg.lock_stateid = &lsp->ls_stateid; p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; - p->arg.lock_owner.id = lsp->ls_id.id; + p->arg.lock_owner.id = lsp->ls_id; p->arg.lock_owner.s_dev = server->s_dev; p->res.lock_seqid = p->arg.lock_seqid; p->lsp = lsp; @@ -4679,7 +4679,7 @@ void nfs4_release_lockowner(const struct nfs4_lock_state *lsp) if (!args) return; args->lock_owner.clientid = server->nfs_client->cl_clientid; - args->lock_owner.id = lsp->ls_id.id; + args->lock_owner.id = lsp->ls_id; args->lock_owner.s_dev = server->s_dev; msg.rpc_argp = args; rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 8472707286f9..5abf23615bc5 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -317,62 +317,6 @@ out: return cred; } -static void nfs_alloc_unique_id_locked(struct rb_root *root, - struct nfs_unique_id *new, - __u64 minval, int maxbits) -{ - struct rb_node **p, *parent; - struct nfs_unique_id *pos; - __u64 mask = ~0ULL; - - if (maxbits < 64) - mask = (1ULL << maxbits) - 1ULL; - - /* Ensure distribution is more or less flat */ - get_random_bytes(&new->id, sizeof(new->id)); - new->id &= mask; - if (new->id < minval) - new->id += minval; -retry: - p = 
&root->rb_node; - parent = NULL; - - while (*p != NULL) { - parent = *p; - pos = rb_entry(parent, struct nfs_unique_id, rb_node); - - if (new->id < pos->id) - p = &(*p)->rb_left; - else if (new->id > pos->id) - p = &(*p)->rb_right; - else - goto id_exists; - } - rb_link_node(&new->rb_node, parent, p); - rb_insert_color(&new->rb_node, root); - return; -id_exists: - for (;;) { - new->id++; - if (new->id < minval || (new->id & mask) != new->id) { - new->id = minval; - break; - } - parent = rb_next(parent); - if (parent == NULL) - break; - pos = rb_entry(parent, struct nfs_unique_id, rb_node); - if (new->id < pos->id) - break; - } - goto retry; -} - -static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id) -{ - rb_erase(&id->rb_node, root); -} - static struct nfs4_state_owner * nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred) { @@ -800,7 +744,6 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f { struct nfs4_lock_state *lsp; struct nfs_server *server = state->owner->so_server; - struct nfs_client *clp = server->nfs_client; lsp = kzalloc(sizeof(*lsp), GFP_NOFS); if (lsp == NULL) @@ -820,24 +763,23 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f lsp->ls_owner.lo_u.posix_owner = fl_owner; break; default: - kfree(lsp); - return NULL; + goto out_free; } - spin_lock(&clp->cl_lock); - nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64); - spin_unlock(&clp->cl_lock); + lsp->ls_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); + if (lsp->ls_id < 0) + goto out_free; INIT_LIST_HEAD(&lsp->ls_locks); return lsp; +out_free: + kfree(lsp); + return NULL; } static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) { struct nfs_server *server = lsp->ls_state->owner->so_server; - struct nfs_client *clp = server->nfs_client; - spin_lock(&clp->cl_lock); - nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id); - spin_unlock(&clp->cl_lock); + ida_simple_remove(&server->lockowner_id, lsp->ls_id); rpc_destroy_wait_queue(&lsp->ls_sequence.wait); kfree(lsp); } -- cgit From 9d12b216aa87f68c96f6dd8eb5d2d0ccc9989b1c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:25 -0500 Subject: NFSv41: Add a new helper nfs4_init_sequence() Clean up Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 6 ++-- fs/nfs/nfs4filelayout.c | 4 +-- fs/nfs/nfs4proc.c | 93 +++++++++++++++++++++++++++++-------------------- fs/nfs/read.c | 2 +- fs/nfs/unlink.c | 4 +-- fs/nfs/write.c | 2 +- 6 files changed, 65 insertions(+), 46 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index b23cb0cda632..0924494e10a2 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -230,10 +230,10 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser extern int nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, - int cache_reply, struct rpc_task *task); + struct rpc_task *task); extern int nfs41_setup_sequence(struct nfs4_session *session, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, - int cache_reply, struct rpc_task *task); + struct rpc_task *task); extern void nfs4_destroy_session(struct nfs4_session *session); extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp); extern int nfs4_proc_create_session(struct nfs_client *); @@ -264,7 +264,7 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser static 
inline int nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, - int cache_reply, struct rpc_task *task) + struct rpc_task *task) { return 0; } diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 71ec08617e23..b4f8f9624afa 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -173,7 +173,7 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data) if (nfs41_setup_sequence(rdata->ds_clp->cl_session, &rdata->args.seq_args, &rdata->res.seq_res, - 0, task)) + task)) return; rpc_call_start(task); @@ -254,7 +254,7 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data) if (nfs41_setup_sequence(wdata->ds_clp->cl_session, &wdata->args.seq_args, &wdata->res.seq_res, - 0, task)) + task)) return; rpc_call_start(task); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9c77af900960..8926d33383ed 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -534,10 +534,20 @@ out: return ret_id; } +static void nfs41_init_sequence(struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, int cache_reply) +{ + args->sa_session = NULL; + args->sa_cache_this = 0; + if (cache_reply) + args->sa_cache_this = 1; + res->sr_session = NULL; + res->sr_slot = NULL; +} + int nfs41_setup_sequence(struct nfs4_session *session, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, - int cache_reply, struct rpc_task *task) { struct nfs4_slot *slot; @@ -582,7 +592,6 @@ int nfs41_setup_sequence(struct nfs4_session *session, slot = tbl->slots + slotid; args->sa_session = session; args->sa_slotid = slotid; - args->sa_cache_this = cache_reply; dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr); @@ -602,24 +611,19 @@ EXPORT_SYMBOL_GPL(nfs41_setup_sequence); int nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, - int cache_reply, struct rpc_task *task) { struct nfs4_session *session = nfs4_get_session(server); int ret = 0; - if (session == NULL) { - args->sa_session = NULL; - res->sr_session = NULL; + if (session == NULL) goto out; - } dprintk("--> %s clp %p session %p sr_slot %td\n", __func__, session->clp, session, res->sr_slot ? 
res->sr_slot - session->fc_slot_table.slots : -1); - ret = nfs41_setup_sequence(session, args, res, cache_reply, - task); + ret = nfs41_setup_sequence(session, args, res, task); out: dprintk("<-- %s status=%d\n", __func__, ret); return ret; @@ -629,7 +633,6 @@ struct nfs41_call_sync_data { const struct nfs_server *seq_server; struct nfs4_sequence_args *seq_args; struct nfs4_sequence_res *seq_res; - int cache_reply; }; static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) @@ -639,7 +642,7 @@ static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); if (nfs4_setup_sequence(data->seq_server, data->seq_args, - data->seq_res, data->cache_reply, task)) + data->seq_res, task)) return; rpc_call_start(task); } @@ -672,7 +675,6 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, struct rpc_message *msg, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, - int cache_reply, int privileged) { int ret; @@ -681,7 +683,6 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, .seq_server = server, .seq_args = args, .seq_res = res, - .cache_reply = cache_reply, }; struct rpc_task_setup task_setup = { .rpc_client = clnt, @@ -690,7 +691,6 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, .callback_data = &data }; - res->sr_slot = NULL; if (privileged) task_setup.callback_ops = &nfs41_call_priv_sync_ops; task = rpc_run_task(&task_setup); @@ -710,10 +710,17 @@ int _nfs4_call_sync_session(struct rpc_clnt *clnt, struct nfs4_sequence_res *res, int cache_reply) { - return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0); + nfs41_init_sequence(args, res, cache_reply); + return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0); } #else +static inline +void nfs41_init_sequence(struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, int cache_reply) +{ +} + static int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { @@ -728,7 +735,7 @@ int _nfs4_call_sync(struct rpc_clnt *clnt, struct nfs4_sequence_res *res, int cache_reply) { - args->sa_session = res->sr_session = NULL; + nfs41_init_sequence(args, res, cache_reply); return rpc_call_sync(clnt, msg, 0); } @@ -1449,7 +1456,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) data->timestamp = jiffies; if (nfs4_setup_sequence(data->o_arg.server, &data->o_arg.seq_args, - &data->o_res.seq_res, 1, task)) + &data->o_res.seq_res, task)) return; rpc_call_start(task); return; @@ -1551,6 +1558,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) }; int status; + nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); kref_get(&data->kref); data->rpc_done = 0; data->rpc_status = 0; @@ -2030,8 +2038,9 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) nfs_fattr_init(calldata->res.fattr); calldata->timestamp = jiffies; if (nfs4_setup_sequence(NFS_SERVER(calldata->inode), - &calldata->arg.seq_args, &calldata->res.seq_res, - 1, task)) + &calldata->arg.seq_args, + &calldata->res.seq_res, + task)) return; rpc_call_start(task); } @@ -2075,6 +2084,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) calldata = kzalloc(sizeof(*calldata), gfp_mask); if (calldata == NULL) goto out; + nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); calldata->inode = state->inode; calldata->state = state; calldata->arg.fh = NFS_FH(state->inode); @@ -2713,8 +2723,8 @@ static void 
nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) args->bitmask = server->cache_consistency_bitmask; res->server = server; - res->seq_res.sr_slot = NULL; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; + nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); } static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) @@ -2739,6 +2749,7 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; arg->bitmask = server->attr_bitmask; res->server = server; + nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); } static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, @@ -3233,6 +3244,7 @@ static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message data->timestamp = jiffies; data->read_done_cb = nfs4_read_done_cb; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; + nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); } /* Reset the the nfs_read_data to send the read to the MDS. */ @@ -3306,6 +3318,7 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag data->timestamp = jiffies; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; + nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); } static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data) @@ -3340,6 +3353,7 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa data->write_done_cb = nfs4_commit_done_cb; data->res.server = server; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; + nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); } struct nfs4_renewdata { @@ -3892,7 +3906,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) if (nfs4_setup_sequence(d_data->res.server, &d_data->args.seq_args, - &d_data->res.seq_res, 1, task)) + &d_data->res.seq_res, task)) return; rpc_call_start(task); } @@ -3926,6 +3940,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co data = kzalloc(sizeof(*data), GFP_NOFS); if (data == NULL) return -ENOMEM; + nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); data->args.fhandle = &data->fh; data->args.stateid = &data->stateid; data->args.bitmask = server->attr_bitmask; @@ -4143,7 +4158,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) calldata->timestamp = jiffies; if (nfs4_setup_sequence(calldata->server, &calldata->arg.seq_args, - &calldata->res.seq_res, 1, task)) + &calldata->res.seq_res, task)) return; rpc_call_start(task); } @@ -4183,6 +4198,7 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, return ERR_PTR(-ENOMEM); } + nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); msg.rpc_argp = &data->arg; msg.rpc_resp = &data->res; task_setup_data.callback_data = data; @@ -4298,7 +4314,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) data->timestamp = jiffies; if (nfs4_setup_sequence(data->server, &data->arg.seq_args, - &data->res.seq_res, 1, task)) + &data->res.seq_res, task)) return; rpc_call_start(task); dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); @@ -4416,6 +4432,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f data->arg.reclaim = NFS_LOCK_RECLAIM; task_setup_data.callback_ops = &nfs4_recover_lock_ops; } + nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); msg.rpc_argp = &data->arg; msg.rpc_resp = &data->res; 
task_setup_data.callback_data = data; @@ -4931,7 +4948,7 @@ static void nfs4_get_lease_time_prepare(struct rpc_task *task, since we're invoked within one */ ret = nfs41_setup_sequence(data->clp->cl_session, &data->args->la_seq_args, - &data->res->lr_seq_res, 0, task); + &data->res->lr_seq_res, task); BUG_ON(ret == -EAGAIN); rpc_call_start(task); @@ -4995,6 +5012,7 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) }; int status; + nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); dprintk("--> %s\n", __func__); task = rpc_run_task(&task_setup); @@ -5468,7 +5486,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data) args = task->tk_msg.rpc_argp; res = task->tk_msg.rpc_resp; - if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task)) + if (nfs41_setup_sequence(clp->cl_session, args, res, task)) return; rpc_call_start(task); } @@ -5500,6 +5518,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_ nfs_put_client(clp); return ERR_PTR(-ENOMEM); } + nfs41_init_sequence(&calldata->args, &calldata->res, 0); msg.rpc_argp = &calldata->args; msg.rpc_resp = &calldata->res; calldata->clp = clp; @@ -5561,7 +5580,7 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); if (nfs41_setup_sequence(calldata->clp->cl_session, &calldata->arg.seq_args, - &calldata->res.seq_res, 0, task)) + &calldata->res.seq_res, task)) return; rpc_call_start(task); @@ -5640,6 +5659,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp) calldata->clp = clp; calldata->arg.one_fs = 0; + nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); msg.rpc_argp = &calldata->arg; msg.rpc_resp = &calldata->res; task_setup_data.callback_data = calldata; @@ -5671,7 +5691,7 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) * to be no way to prevent it completely. 
*/ if (nfs4_setup_sequence(server, &lgp->args.seq_args, - &lgp->res.seq_res, 0, task)) + &lgp->res.seq_res, task)) return; if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, NFS_I(lgp->args.inode)->layout, @@ -5746,6 +5766,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) lgp->res.layoutp = &lgp->args.layout; lgp->res.seq_res.sr_slot = NULL; + nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -5766,7 +5787,7 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) dprintk("--> %s\n", __func__); if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args, - &lrp->res.seq_res, 0, task)) + &lrp->res.seq_res, task)) return; rpc_call_start(task); } @@ -5832,6 +5853,7 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp) int status; dprintk("--> %s\n", __func__); + nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -5932,7 +5954,7 @@ static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) struct nfs_server *server = NFS_SERVER(data->args.inode); if (nfs4_setup_sequence(server, &data->args.seq_args, - &data->res.seq_res, 1, task)) + &data->res.seq_res, task)) return; rpc_call_start(task); } @@ -6019,6 +6041,7 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) data->args.lastbytewritten, data->args.inode->i_ino); + nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -6114,7 +6137,6 @@ out: } static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state) { - int status; struct nfs41_test_stateid_args args = { .stateid = &state->stateid, }; @@ -6124,9 +6146,8 @@ static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *sta .rpc_argp = &args, .rpc_resp = &res, }; - args.seq_args.sa_session = res.seq_res.sr_session = NULL; - status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1); - return status; + nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); + return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); } static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state) @@ -6143,7 +6164,6 @@ static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *stat static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *state) { - int status; struct nfs41_free_stateid_args args = { .stateid = &state->stateid, }; @@ -6154,9 +6174,8 @@ static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *stat .rpc_resp = &res, }; - args.seq_args.sa_session = res.seq_res.sr_session = NULL; - status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1); - return status; + nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); + return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); } static int nfs41_free_stateid(struct nfs_server *server, struct nfs4_state *state) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index cfa175c223dc..3c2540d532c7 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -472,7 +472,7 @@ void nfs_read_prepare(struct rpc_task *task, void *calldata) if (nfs4_setup_sequence(NFS_SERVER(data->inode), &data->args.seq_args, &data->res.seq_res, - 0, task)) + task)) return; 
rpc_call_start(task); } diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 4f9319a2e567..490613b709b6 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -114,7 +114,7 @@ void nfs_unlink_prepare(struct rpc_task *task, void *calldata) struct nfs_server *server = NFS_SERVER(data->dir); if (nfs4_setup_sequence(server, &data->args.seq_args, - &data->res.seq_res, 1, task)) + &data->res.seq_res, task)) return; rpc_call_start(task); } @@ -410,7 +410,7 @@ static void nfs_rename_prepare(struct rpc_task *task, void *calldata) struct nfs_server *server = NFS_SERVER(data->old_dir); if (nfs4_setup_sequence(server, &data->args.seq_args, - &data->res.seq_res, 1, task)) + &data->res.seq_res, task)) return; rpc_call_start(task); } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 834f0fe96f89..0b1831d95849 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1135,7 +1135,7 @@ void nfs_write_prepare(struct rpc_task *task, void *calldata) if (nfs4_setup_sequence(NFS_SERVER(data->inode), &data->args.seq_args, - &data->res.seq_res, 1, task)) + &data->res.seq_res, task)) return; rpc_call_start(task); } -- cgit From 7ba127ab9f5f83991df4142d5bc4fc319cd77a54 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:25 -0500 Subject: NFSv4: Move contents of struct rpc_sequence into struct nfs_seqid_counter Clean up. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 16 +++------------- fs/nfs/nfs4state.c | 36 +++++++++++++++++++++++------------- 2 files changed, 26 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 0924494e10a2..c4025ae1d071 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -53,21 +53,13 @@ struct nfs4_minor_version_ops { const struct nfs4_state_maintenance_ops *state_renewal_ops; }; -/* - * struct rpc_sequence ensures that RPC calls are sent in the exact - * order that they appear on the list. - */ -struct rpc_sequence { - struct rpc_wait_queue wait; /* RPC call delay queue */ - spinlock_t lock; /* Protects the list */ - struct list_head list; /* Defines sequence of RPC calls */ -}; - #define NFS_SEQID_CONFIRMED 1 struct nfs_seqid_counter { - struct rpc_sequence *sequence; int flags; u32 counter; + spinlock_t lock; /* Protects the list */ + struct list_head list; /* Defines sequence of RPC calls */ + struct rpc_wait_queue wait; /* RPC call delay queue */ }; struct nfs_seqid { @@ -99,7 +91,6 @@ struct nfs4_state_owner { unsigned long so_flags; struct list_head so_states; struct nfs_seqid_counter so_seqid; - struct rpc_sequence so_sequence; int so_owner_id; }; @@ -142,7 +133,6 @@ struct nfs4_lock_state { int ls_flags; int ls_id; struct nfs_seqid_counter ls_seqid; - struct rpc_sequence ls_sequence; nfs4_stateid ls_stateid; atomic_t ls_count; struct nfs4_lock_owner ls_owner; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5abf23615bc5..cf7bc39aa0ee 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -384,6 +384,22 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) ida_remove(&server->openowner_id, sp->so_owner_id); } +static void +nfs4_init_seqid_counter(struct nfs_seqid_counter *sc) +{ + sc->flags = 0; + sc->counter = 0; + spin_lock_init(&sc->lock); + INIT_LIST_HEAD(&sc->list); + rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue"); +} + +static void +nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc) +{ + rpc_destroy_wait_queue(&sc->wait); +} + /* * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to * create a new state_owner. 
@@ -403,10 +419,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, sp->so_cred = get_rpccred(cred); spin_lock_init(&sp->so_lock); INIT_LIST_HEAD(&sp->so_states); - rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue"); - sp->so_seqid.sequence = &sp->so_sequence; - spin_lock_init(&sp->so_sequence.lock); - INIT_LIST_HEAD(&sp->so_sequence.list); + nfs4_init_seqid_counter(&sp->so_seqid); atomic_set(&sp->so_count, 1); INIT_LIST_HEAD(&sp->so_lru); return sp; @@ -428,7 +441,7 @@ nfs4_drop_state_owner(struct nfs4_state_owner *sp) static void nfs4_free_state_owner(struct nfs4_state_owner *sp) { - rpc_destroy_wait_queue(&sp->so_sequence.wait); + nfs4_destroy_seqid_counter(&sp->so_seqid); put_rpccred(sp->so_cred); kfree(sp); } @@ -748,10 +761,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f lsp = kzalloc(sizeof(*lsp), GFP_NOFS); if (lsp == NULL) return NULL; - rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue"); - spin_lock_init(&lsp->ls_sequence.lock); - INIT_LIST_HEAD(&lsp->ls_sequence.list); - lsp->ls_seqid.sequence = &lsp->ls_sequence; + nfs4_init_seqid_counter(&lsp->ls_seqid); atomic_set(&lsp->ls_count, 1); lsp->ls_state = state; lsp->ls_owner.lo_type = type; @@ -780,7 +790,7 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) struct nfs_server *server = lsp->ls_state->owner->so_server; ida_simple_remove(&server->lockowner_id, lsp->ls_id); - rpc_destroy_wait_queue(&lsp->ls_sequence.wait); + nfs4_destroy_seqid_counter(&lsp->ls_seqid); kfree(lsp); } @@ -914,7 +924,7 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_m void nfs_release_seqid(struct nfs_seqid *seqid) { if (!list_empty(&seqid->list)) { - struct rpc_sequence *sequence = seqid->sequence->sequence; + struct nfs_seqid_counter *sequence = seqid->sequence; spin_lock(&sequence->lock); list_del_init(&seqid->list); @@ -936,7 +946,7 @@ void nfs_free_seqid(struct nfs_seqid *seqid) */ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) { - BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid); + BUG_ON(list_first_entry(&seqid->sequence->list, struct nfs_seqid, list) != seqid); switch (status) { case 0: break; @@ -987,7 +997,7 @@ void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) { - struct rpc_sequence *sequence = seqid->sequence->sequence; + struct nfs_seqid_counter *sequence = seqid->sequence; int status = 0; spin_lock(&sequence->lock); -- cgit From 48c22eb21071a3524f8b6e587371be35b5e86969 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:25 -0500 Subject: NFS: Move struct nfs_unique_id into struct nfs_seqid_counter Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 8 ++++++-- fs/nfs/nfs4proc.c | 10 +++++----- fs/nfs/nfs4state.c | 10 +++++----- 3 files changed, 16 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index c4025ae1d071..df3d02c3e8cb 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -53,8 +53,14 @@ struct nfs4_minor_version_ops { const struct nfs4_state_maintenance_ops *state_renewal_ops; }; +struct nfs_unique_id { + struct rb_node rb_node; + __u64 id; +}; + #define NFS_SEQID_CONFIRMED 1 struct nfs_seqid_counter { + int owner_id; int flags; u32 counter; spinlock_t lock; /* Protects the list */ @@ -91,7 +97,6 @@ struct nfs4_state_owner { unsigned long so_flags; struct list_head so_states; struct 
nfs_seqid_counter so_seqid; - int so_owner_id; }; enum { @@ -131,7 +136,6 @@ struct nfs4_lock_state { struct nfs4_state * ls_state; /* Pointer to open state */ #define NFS_LOCK_INITIALIZED 1 int ls_flags; - int ls_id; struct nfs_seqid_counter ls_seqid; nfs4_stateid ls_stateid; atomic_t ls_count; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8926d33383ed..88a8b6999b4f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -822,7 +822,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, p->o_arg.open_flags = flags; p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); p->o_arg.clientid = server->nfs_client->cl_clientid; - p->o_arg.id = sp->so_owner_id; + p->o_arg.id = sp->so_seqid.owner_id; p->o_arg.name = &dentry->d_name; p->o_arg.server = server; p->o_arg.bitmask = server->attr_bitmask; @@ -1447,7 +1447,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) rcu_read_unlock(); } /* Update sequence id. */ - data->o_arg.id = sp->so_owner_id; + data->o_arg.id = sp->so_seqid.owner_id; data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; @@ -4032,7 +4032,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock if (status != 0) goto out; lsp = request->fl_u.nfs4_fl.owner; - arg.lock_owner.id = lsp->ls_id; + arg.lock_owner.id = lsp->ls_seqid.owner_id; arg.lock_owner.s_dev = server->s_dev; status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); switch (status) { @@ -4278,7 +4278,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, goto out_free_seqid; p->arg.lock_stateid = &lsp->ls_stateid; p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; - p->arg.lock_owner.id = lsp->ls_id; + p->arg.lock_owner.id = lsp->ls_seqid.owner_id; p->arg.lock_owner.s_dev = server->s_dev; p->res.lock_seqid = p->arg.lock_seqid; p->lsp = lsp; @@ -4696,7 +4696,7 @@ void nfs4_release_lockowner(const struct nfs4_lock_state *lsp) if (!args) return; args->lock_owner.clientid = server->nfs_client->cl_clientid; - args->lock_owner.id = lsp->ls_id; + args->lock_owner.id = lsp->ls_seqid.owner_id; args->lock_owner.s_dev = server->s_dev; msg.rpc_argp = args; rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index cf7bc39aa0ee..a42e60d3ee50 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -366,7 +366,7 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new) return sp; } } - err = ida_get_new(&server->openowner_id, &new->so_owner_id); + err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id); if (err) return ERR_PTR(err); rb_link_node(&new->so_server_node, parent, p); @@ -381,7 +381,7 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) if (!RB_EMPTY_NODE(&sp->so_server_node)) rb_erase(&sp->so_server_node, &server->state_owners); - ida_remove(&server->openowner_id, sp->so_owner_id); + ida_remove(&server->openowner_id, sp->so_seqid.owner_id); } static void @@ -775,8 +775,8 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f default: goto out_free; } - lsp->ls_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); - if (lsp->ls_id < 0) + lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); + if (lsp->ls_seqid.owner_id < 0) goto out_free; INIT_LIST_HEAD(&lsp->ls_locks); return lsp; @@ 
-789,7 +789,7 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) { struct nfs_server *server = lsp->ls_state->owner->so_server; - ida_simple_remove(&server->lockowner_id, lsp->ls_id); + ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id); nfs4_destroy_seqid_counter(&lsp->ls_seqid); kfree(lsp); } -- cgit From 536e43d12b9517bbbf6114cd1a12be27857a4d7a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:26 -0500 Subject: NFS: Optimise away unnecessary setattrs for open(O_TRUNC); Currently, we will correctly optimise away a truncate that doesn't change the file size. However, in the case of open(O_TRUNC), we also want to optimise away the time changes. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 25 +++++++++++++++++++------ fs/nfs/inode.c | 4 ++-- fs/nfs/nfs4proc.c | 10 +++++++--- 3 files changed, 28 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fd9a872fada0..bb132a88f4e8 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1429,6 +1429,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry } open_flags = nd->intent.open.flags; + attr.ia_valid = 0; ctx = create_nfs_open_context(dentry, open_flags); res = ERR_CAST(ctx); @@ -1437,11 +1438,14 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry if (nd->flags & LOOKUP_CREATE) { attr.ia_mode = nd->intent.open.create_mode; - attr.ia_valid = ATTR_MODE; + attr.ia_valid |= ATTR_MODE; attr.ia_mode &= ~current_umask(); - } else { + } else open_flags &= ~(O_EXCL | O_CREAT); - attr.ia_valid = 0; + + if (open_flags & O_TRUNC) { + attr.ia_valid |= ATTR_SIZE; + attr.ia_size = 0; } /* Open the file on the server */ @@ -1495,6 +1499,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) struct inode *inode; struct inode *dir; struct nfs_open_context *ctx; + struct iattr attr; int openflags, ret = 0; if (nd->flags & LOOKUP_RCU) @@ -1523,19 +1528,27 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) /* We cannot do exclusive creation on a positive dentry */ if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) goto no_open_dput; - /* We can't create new files, or truncate existing ones here */ - openflags &= ~(O_CREAT|O_EXCL|O_TRUNC); + /* We can't create new files here */ + openflags &= ~(O_CREAT|O_EXCL); ctx = create_nfs_open_context(dentry, openflags); ret = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; + + attr.ia_valid = 0; + if (openflags & O_TRUNC) { + attr.ia_valid |= ATTR_SIZE; + attr.ia_size = 0; + nfs_wb_all(inode); + } + /* * Note: we're not holding inode->i_mutex and so may be racing with * operations that change the directory. We therefore save the * change attribute *before* we do the RPC call. 
*/ - inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, NULL); + inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr); if (IS_ERR(inode)) { ret = PTR_ERR(inode); switch (ret) { diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 577ad5a72a24..65486e652943 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -402,7 +402,7 @@ out_no_inode: goto out; } -#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE) +#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN) int nfs_setattr(struct dentry *dentry, struct iattr *attr) @@ -424,7 +424,7 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr) /* Optimization: if the end result is no change, don't RPC */ attr->ia_valid &= NFS_VALID_ATTRS; - if ((attr->ia_valid & ~ATTR_FILE) == 0) + if ((attr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) return 0; /* Write all dirty data */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 88a8b6999b4f..360240cc1e9b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -828,7 +828,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, p->o_arg.bitmask = server->attr_bitmask; p->o_arg.dir_bitmask = server->cache_consistency_bitmask; p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; - if (flags & O_CREAT) { + if (attrs != NULL && attrs->ia_valid != 0) { u32 *s; p->o_arg.u.attrs = &p->attrs; @@ -885,7 +885,7 @@ static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode { int ret = 0; - if (open_mode & O_EXCL) + if (open_mode & (O_EXCL|O_TRUNC)) goto out; switch (mode & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: @@ -1033,7 +1033,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) struct nfs4_state *state = opendata->state; struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; - int open_mode = opendata->o_arg.open_flags & O_EXCL; + int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); fmode_t fmode = opendata->o_arg.fmode; nfs4_stateid stateid; int ret = -EAGAIN; @@ -2431,6 +2431,10 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, } } + /* Deal with open(O_TRUNC) */ + if (sattr->ia_valid & ATTR_OPEN) + sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); + status = nfs4_do_setattr(inode, cred, fattr, sattr, state); if (status == 0) nfs_setattr_update_inode(inode, sattr); -- cgit From 2aeb98f498ce37742b743080fdc6c8cf64053599 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:04:26 -0500 Subject: NFS: Ensure that mmapped pages remain stable during writeback Ensure that nfs_vm_page_mkwrite() waits for the page writeback to complete before the application is allowed to modify page contents. The main reason for wanting to do this in NFS is to ensure that the server doesn't get confused if we have to resend the RPC request due to a dropped/missed reply. 
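The two added lines in the diff that follows are easier to read with the surrounding ->page_mkwrite() steps in view. The sketch below is a condensed, illustrative version of that path (not the literal fs/nfs/file.c code; the function name is made up), showing where the new wait sits, between the mapping check and re-dirtying the page.

static int nfs_page_mkwrite_sketch(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	lock_page(page);
	if (page->mapping != mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;	/* page was truncated or re-used */
	}

	/* New step: wait for any in-flight WRITE of this page to finish, so
	 * that a retransmitted RPC never carries data the application has
	 * since modified. */
	wait_on_page_writeback(page);

	/* ... mark the page dirty again and return with it still locked ... */
	return VM_FAULT_LOCKED;
}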
Signed-off-by: Trond Myklebust --- fs/nfs/file.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfs/file.c b/fs/nfs/file.c index c43a452f7da2..4fdaaa63cf1c 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -530,6 +530,8 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (mapping != dentry->d_inode->i_mapping) goto out_unlock; + wait_on_page_writeback(page); + pagelen = nfs_page_length(page); if (pagelen == 0) goto out_unlock; -- cgit From 961a828df64979d2a9faeeeee043391670a193b9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 17 Jan 2012 22:57:37 -0500 Subject: SUNRPC: Fix potential races in xprt_lock_write_next() We have to ensure that the wake up from the waitqueue and the assignment of xprt->snd_task are atomic. We can do this by assigning the snd_task while under the waitqueue spinlock. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4proc.c | 13 ++++++++----- fs/nfs/nfs4state.c | 17 ++++++++--------- 3 files changed, 17 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index df3d02c3e8cb..c45c21a5470f 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -222,6 +222,7 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser return server->nfs_client->cl_session; } +extern bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy); extern int nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 360240cc1e9b..828a76590af9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -385,17 +385,20 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid) free_slotid, tbl->highest_used_slotid); } +bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy) +{ + rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); + return true; +} + /* * Signal state manager thread if session fore channel is drained */ static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) { - struct rpc_task *task; - if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { - task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq); - if (task) - rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); + rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq, + nfs4_set_task_privileged, NULL); return; } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a42e60d3ee50..f0e9881c2aa2 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -190,23 +190,22 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp) static void nfs4_end_drain_session(struct nfs_client *clp) { struct nfs4_session *ses = clp->cl_session; + struct nfs4_slot_table *tbl; int max_slots; if (ses == NULL) return; + tbl = &ses->fc_slot_table; if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { - spin_lock(&ses->fc_slot_table.slot_tbl_lock); - max_slots = ses->fc_slot_table.max_slots; + spin_lock(&tbl->slot_tbl_lock); + max_slots = tbl->max_slots; while (max_slots--) { - struct rpc_task *task; - - task = rpc_wake_up_next(&ses->fc_slot_table. 
- slot_tbl_waitq); - if (!task) + if (rpc_wake_up_first(&tbl->slot_tbl_waitq, + nfs4_set_task_privileged, + NULL) == NULL) break; - rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); } - spin_unlock(&ses->fc_slot_table.slot_tbl_lock); + spin_unlock(&tbl->slot_tbl_lock); } } -- cgit From 90100b1766c914c820baa78b5be6845fae1159b8 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 13 Jan 2012 13:09:19 +0400 Subject: SUNRPC: parametrize rpc_pton() by network context Parametrize rpc_pton() by network context and thus force its callers to pass in a network context instead of using the hard-coded "init_net". Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/dns_resolve.c | 4 ++-- fs/nfs/nfs4filelayoutdev.c | 2 +- fs/nfs/nfs4namespace.c | 2 +- fs/nfs/super.c | 4 ++-- fs/nfsd/nfsctl.c | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index 200eb67c95d9..7edc62a8a64f 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -20,7 +20,7 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL); if (ip_len > 0) - ret = rpc_pton(ip_addr, ip_len, sa, salen); + ret = rpc_pton(&init_net, ip_addr, ip_len, sa, salen); else ret = -ESRCH; kfree(ip_addr); @@ -224,7 +224,7 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen) len = qword_get(&buf, buf1, sizeof(buf1)); if (len <= 0) goto out; - key.addrlen = rpc_pton(buf1, len, + key.addrlen = rpc_pton(&init_net, buf1, len, (struct sockaddr *)&key.addr, sizeof(key.addr)); diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index 8ae91908f5aa..0d8b9523a3cb 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -457,7 +457,7 @@ decode_ds_addr(struct xdr_stream *streamp, gfp_t gfp_flags) INIT_LIST_HEAD(&da->da_node); - if (!rpc_pton(buf, portstr-buf, (struct sockaddr *)&da->da_addr, + if (!rpc_pton(&init_net, buf, portstr-buf, (struct sockaddr *)&da->da_addr, sizeof(da->da_addr))) { dprintk("%s: error parsing address %s\n", __func__, buf); goto out_free_da; diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 919a36935924..48a9acdbaeb6 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -98,7 +98,7 @@ static size_t nfs_parse_server_name(char *string, size_t len, { ssize_t ret; - ret = rpc_pton(string, len, sa, salen); + ret = rpc_pton(&init_net, string, len, sa, salen); if (ret == 0) { ret = nfs_dns_resolve_name(server->client->cl_xprt->xprt_net, string, len, sa, salen); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index e45feb0fee59..b79f2a11c29e 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1408,7 +1408,7 @@ static int nfs_parse_mount_options(char *raw, if (string == NULL) goto out_nomem; mnt->nfs_server.addrlen = - rpc_pton(string, strlen(string), + rpc_pton(&init_net, string, strlen(string), (struct sockaddr *) &mnt->nfs_server.address, sizeof(mnt->nfs_server.address)); @@ -1430,7 +1430,7 @@ static int nfs_parse_mount_options(char *raw, if (string == NULL) goto out_nomem; mnt->mount_server.addrlen = - rpc_pton(string, strlen(string), + rpc_pton(&init_net, string, strlen(string), (struct sockaddr *) &mnt->mount_server.address, sizeof(mnt->mount_server.address)); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 748eda93ce59..330352d379b6 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -223,7 +223,7 @@ static ssize_t write_unlock_ip(struct file *file, 
char *buf, size_t size) if (qword_get(&buf, fo_path, size) < 0) return -EINVAL; - if (rpc_pton(fo_path, size, sap, salen) == 0) + if (rpc_pton(&init_net, fo_path, size, sap, salen) == 0) return -EINVAL; return nlmsvc_unlock_all_by_ip(sap); -- cgit From f2ac4dc911fdbc9b98a6a48b40efc45aa9161775 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 13 Jan 2012 13:09:27 +0400 Subject: SUNRPC: parametrize rpc_uaddr2sockaddr() by network context Parametrize rpc_uaddr2sockaddr() by network context and thus force its callers to pass in the network context instead of using hard-coded "init_net". Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfsd/nfs4state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e8c98f009670..c5cddd659429 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1308,7 +1308,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r else goto out_err; - conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val, + conn->cb_addrlen = rpc_uaddr2sockaddr(&init_net, se->se_callback_addr_val, se->se_callback_addr_len, (struct sockaddr *)&conn->cb_addr, sizeof(conn->cb_addr)); -- cgit From 5ecebb7c7fd737cf387a552994df319c063973db Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 13 Jan 2012 14:03:04 +0400 Subject: SUNRPC: unregister service on creation in current network namespace On service shutdown we can be sure that no users of it are left except the current one. Thus it looks safe to use the current network namespace context in this case. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfsd/nfssvc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index eda7d7e55e05..fce472f5f39e 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -251,13 +251,13 @@ static void nfsd_shutdown(void) nfsd_up = false; } -static void nfsd_last_thread(struct svc_serv *serv) +static void nfsd_last_thread(struct svc_serv *serv, struct net *net) { /* When last nfsd thread exits we need to do some clean-up */ nfsd_serv = NULL; nfsd_shutdown(); - svc_rpcb_cleanup(serv); + svc_rpcb_cleanup(serv, net); printk(KERN_WARNING "nfsd: last server has exited, flushing export " "cache\n"); -- cgit From 599ec129c2f0e4da955bef685880260de1813c85 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Thu, 19 Jan 2012 18:51:04 +0400 Subject: NFS: parse DNS cache in proper network namespace context This patch replaces "init_net" with the cache's owner net in the rpc_pton() call.
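
The SUNRPC patches above all apply one mechanical refactoring: an interface that silently resolved against the global "init_net" grows an explicit struct net * argument, and every caller initially passes &init_net so behaviour does not change. The following is a minimal userspace sketch of that pattern, not kernel code; struct net_ctx, resolve_name(), resolve_name_global() and the dns_suffix field are invented names used only for illustration.

#include <stdio.h>

/* Hypothetical per-namespace context; a stand-in for the kernel's struct net. */
struct net_ctx {
	const char *name;        /* e.g. "init_net" or a container namespace */
	const char *dns_suffix;  /* namespace-local resolver configuration   */
};

static struct net_ctx init_net = { "init_net", ".default.local" };

/* Before: the context is implicit, so every caller silently gets init_net. */
static int resolve_name_global(const char *host, char *out, size_t outlen)
{
	int n = snprintf(out, outlen, "%s%s", host, init_net.dns_suffix);
	return (n < 0 || (size_t)n >= outlen) ? -1 : 0;
}

/* After: the context is an explicit argument, mirroring rpc_pton(&init_net, ...). */
static int resolve_name(const struct net_ctx *net, const char *host,
			char *out, size_t outlen)
{
	int n = snprintf(out, outlen, "%s%s", host, net->dns_suffix);
	return (n < 0 || (size_t)n >= outlen) ? -1 : 0;
}

int main(void)
{
	struct net_ctx container_net = { "netns-a", ".cluster.internal" };
	char buf[128];

	if (resolve_name_global("server1", buf, sizeof(buf)) == 0)
		printf("implicit context:      %s\n", buf);

	/* Callers that used to rely on the global must now say which
	 * namespace they mean; passing &init_net keeps the old behaviour. */
	if (resolve_name(&init_net, "server1", buf, sizeof(buf)) == 0)
		printf("explicit init_net:     %s\n", buf);

	if (resolve_name(&container_net, "server1", buf, sizeof(buf)) == 0)
		printf("explicit container ns: %s\n", buf);
	return 0;
}

Keeping the first step purely mechanical, with every caller spelling out &init_net, is what lets the later patches in the series substitute the real per-cache or per-client namespace one call site at a time.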
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/dns_resolve.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index 7edc62a8a64f..be9a530987b3 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -224,7 +224,7 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen) len = qword_get(&buf, buf1, sizeof(buf1)); if (len <= 0) goto out; - key.addrlen = rpc_pton(&init_net, buf1, len, + key.addrlen = rpc_pton(cd->net, buf1, len, (struct sockaddr *)&key.addr, sizeof(key.addr)); -- cgit From 170942726b16a1dfcc605f0b510b9663b66fa7a3 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Thu, 19 Jan 2012 19:05:57 +0400 Subject: NFS: decode destination address in proper network namespace context This patch replaces "init_net" with NFS client's owner net in rpc_pton() call in decode_ds_addr(). Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayoutdev.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index 0d8b9523a3cb..6eb59b044bfc 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -378,7 +378,7 @@ out: * Currently only supports ipv4, ipv6 and one multi-path address. */ static struct nfs4_pnfs_ds_addr * -decode_ds_addr(struct xdr_stream *streamp, gfp_t gfp_flags) +decode_ds_addr(struct net *net, struct xdr_stream *streamp, gfp_t gfp_flags) { struct nfs4_pnfs_ds_addr *da = NULL; char *buf, *portstr; @@ -457,7 +457,7 @@ decode_ds_addr(struct xdr_stream *streamp, gfp_t gfp_flags) INIT_LIST_HEAD(&da->da_node); - if (!rpc_pton(&init_net, buf, portstr-buf, (struct sockaddr *)&da->da_addr, + if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr, sizeof(da->da_addr))) { dprintk("%s: error parsing address %s\n", __func__, buf); goto out_free_da; @@ -625,7 +625,8 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) mp_count = be32_to_cpup(p); /* multipath count */ for (j = 0; j < mp_count; j++) { - da = decode_ds_addr(&stream, gfp_flags); + da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net, + &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } -- cgit From ec7652aaf261b7dcb368344369df1e99886c7cd2 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 6 Dec 2011 16:42:40 +0300 Subject: SUNRPC: register RPC stats /proc entries in passed network namespace context This patch makes it possible to create NFS program entry ("/proc/net/rpc/nfs") in passed network namespace context instead of hard-coded "init_net". 
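
Two of the patches above (the DNS cache and data-server address ones) take that second step: rather than passing &init_net, they derive the namespace from the object that owns the operation, cd->net for the cache entry and NFS_SERVER(ino)->nfs_client->net for the data-server address. Below is a small userspace analogue of that ownership pattern; struct net_ctx, struct dns_cache, dns_cache_create() and cache_parse_entry() are invented for the example and are not kernel interfaces.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical namespace handle; a stand-in for the kernel's struct net. */
struct net_ctx {
	const char *name;
};

/* The long-lived object remembers the namespace it belongs to, the way the
 * cache in the hunk above is consulted through cd->net. */
struct dns_cache {
	const struct net_ctx *net;
	/* ... cached entries would live here ... */
};

static struct dns_cache *dns_cache_create(const struct net_ctx *net)
{
	struct dns_cache *cd = calloc(1, sizeof(*cd));

	if (cd)
		cd->net = net;	/* record the owner once, at creation time */
	return cd;
}

/* Later operations derive the context from the object they are handed,
 * instead of reaching for a global default. */
static void cache_parse_entry(const struct dns_cache *cd, const char *entry)
{
	printf("parsing \"%s\" in namespace %s\n", entry, cd->net->name);
}

int main(void)
{
	struct net_ctx init_net = { "init_net" };
	struct net_ctx container = { "netns-b" };
	struct dns_cache *a = dns_cache_create(&init_net);
	struct dns_cache *b = dns_cache_create(&container);

	if (!a || !b)
		return 1;
	cache_parse_entry(a, "server1 10.0.0.1");
	cache_parse_entry(b, "server1 192.168.7.1");
	free(a);
	free(b);
	return 0;
}

Recording the owning context once at creation time is what lets each later call site stay independent of any global default.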
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 65486e652943..d2c760e193f4 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1625,14 +1625,14 @@ static int __init init_nfs_fs(void) goto out0; #ifdef CONFIG_PROC_FS - rpc_proc_register(&nfs_rpcstat); + rpc_proc_register(&init_net, &nfs_rpcstat); #endif if ((err = register_nfs_fs()) != 0) goto out; return 0; out: #ifdef CONFIG_PROC_FS - rpc_proc_unregister("nfs"); + rpc_proc_unregister(&init_net, "nfs"); #endif nfs_destroy_directcache(); out0: @@ -1671,7 +1671,7 @@ static void __exit exit_nfs_fs(void) nfs_dns_resolver_destroy(); nfs_idmap_quit(); #ifdef CONFIG_PROC_FS - rpc_proc_unregister("nfs"); + rpc_proc_unregister(&init_net, "nfs"); #endif nfs_cleanup_cb_ident_idr(); unregister_nfs_fs(); -- cgit From 246590f56c9f281d60b7dd7efa0818307e65600d Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 6 Dec 2011 16:42:49 +0300 Subject: SUNRPC: register service stats /proc entries in passed network namespace context This patch makes it possible to create NFSd program entry ("/proc/net/rpc/nfsd") in passed network namespace context instead of hard-coded "init_net". Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfsd/stats.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c index a2e2402b2afb..6d4521feb6e3 100644 --- a/fs/nfsd/stats.c +++ b/fs/nfsd/stats.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "nfsd.h" @@ -94,11 +95,11 @@ static const struct file_operations nfsd_proc_fops = { void nfsd_stat_init(void) { - svc_proc_register(&nfsd_svcstats, &nfsd_proc_fops); + svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_fops); } void nfsd_stat_shutdown(void) { - svc_proc_unregister("nfsd"); + svc_proc_unregister(&init_net, "nfsd"); } -- cgit From babea479b75a9ea3d84ace6d880513e18397a8bb Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 20 Jan 2012 17:19:56 +0400 Subject: NFS: remove unused nfs4_find_client_no_ident function Looks like this function survived after some cleanup patch without a reason. Now it's not called or referenced and I believe, that it can be simply removed. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 27 --------------------------- fs/nfs/internal.h | 1 - 2 files changed, 28 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index df60d9971b95..34c8d1cbf06e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1196,33 +1196,6 @@ error: } #ifdef CONFIG_NFS_V4 -/* - * NFSv4.0 callback thread helper - * - * Find a client by IP address, protocol version, and minorversion - * - * Called from the pg_authenticate method. The callback identifier - * is not used as it has not been decoded. 
- * - * Returns NULL if no such client - */ -struct nfs_client * -nfs4_find_client_no_ident(const struct sockaddr *addr) -{ - struct nfs_client *clp; - - spin_lock(&nfs_client_lock); - list_for_each_entry(clp, &nfs_client_list, cl_share_link) { - if (nfs4_cb_match_client(addr, clp, 0) == false) - continue; - atomic_inc(&clp->cl_count); - spin_unlock(&nfs_client_lock); - return clp; - } - spin_unlock(&nfs_client_lock); - return NULL; -} - /* * NFSv4.0 callback thread helper * diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 2b9836fe4434..eda4cde40fb2 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -149,7 +149,6 @@ extern struct rpc_program nfs_program; extern void nfs_cleanup_cb_ident_idr(void); extern void nfs_put_client(struct nfs_client *); -extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *); extern struct nfs_client *nfs4_find_client_ident(int); extern struct nfs_client * nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *); -- cgit From 4cb54ca2069903121e4c03ec427147c47bed5755 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 20 Jan 2012 16:50:53 +0400 Subject: SUNRPC: search for service transports in network namespace context Service transports are parametrized by network namespace. And thus lookup of transport instance have to take network namespace into account. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust Acked-by: J. Bruce Fields --- fs/lockd/svc.c | 2 +- fs/nfsd/nfsctl.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index c061b9aa7ddb..ff379ff7761f 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -193,7 +193,7 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name, { struct svc_xprt *xprt; - xprt = svc_find_xprt(serv, name, family, 0); + xprt = svc_find_xprt(serv, name, &init_net, family, 0); if (xprt == NULL) return svc_create_xprt(serv, name, &init_net, family, port, SVC_SOCK_DEFAULTS); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 330352d379b6..64c24af8d7ea 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -722,7 +722,7 @@ static ssize_t __write_ports_addxprt(char *buf) nfsd_serv->sv_nrthreads--; return 0; out_close: - xprt = svc_find_xprt(nfsd_serv, transport, PF_INET, port); + xprt = svc_find_xprt(nfsd_serv, transport, &init_net, PF_INET, port); if (xprt != NULL) { svc_close_xprt(xprt); svc_xprt_put(xprt); @@ -748,7 +748,7 @@ static ssize_t __write_ports_delxprt(char *buf) if (port < 1 || port > USHRT_MAX || nfsd_serv == NULL) return -EINVAL; - xprt = svc_find_xprt(nfsd_serv, transport, AF_UNSPEC, port); + xprt = svc_find_xprt(nfsd_serv, transport, &init_net, AF_UNSPEC, port); if (xprt == NULL) return -ENOTCONN; -- cgit From a613fa168afc19179a7547fbba45644c5b6912bf Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 20 Jan 2012 13:53:56 -0500 Subject: SUNRPC: constify the rpc_program Signed-off-by: Trond Myklebust --- fs/lockd/clnt4xdr.c | 2 +- fs/lockd/clntxdr.c | 8 ++++---- fs/lockd/mon.c | 8 ++++---- fs/nfs/client.c | 8 ++++---- fs/nfs/internal.h | 2 +- fs/nfs/mount_clnt.c | 10 +++++----- fs/nfs/nfs2xdr.c | 2 +- fs/nfs/nfs3xdr.c | 4 ++-- fs/nfs/nfs4xdr.c | 2 +- fs/nfsd/nfs4callback.c | 6 +++--- 10 files changed, 26 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c index f848b52c67b1..3ddcbb1c0a43 100644 --- a/fs/lockd/clnt4xdr.c +++ b/fs/lockd/clnt4xdr.c @@ -598,7 +598,7 @@ static struct rpc_procinfo 
nlm4_procedures[] = { PROC(GRANTED_RES, res, norep), }; -struct rpc_version nlm_version4 = { +const struct rpc_version nlm_version4 = { .number = 4, .nrprocs = ARRAY_SIZE(nlm4_procedures), .procs = nlm4_procedures, diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c index 180ac34feb9a..3d35e3e80c1c 100644 --- a/fs/lockd/clntxdr.c +++ b/fs/lockd/clntxdr.c @@ -596,19 +596,19 @@ static struct rpc_procinfo nlm_procedures[] = { PROC(GRANTED_RES, res, norep), }; -static struct rpc_version nlm_version1 = { +static const struct rpc_version nlm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; -static struct rpc_version nlm_version3 = { +static const struct rpc_version nlm_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; -static struct rpc_version *nlm_versions[] = { +static const struct rpc_version *nlm_versions[] = { [1] = &nlm_version1, [3] = &nlm_version3, #ifdef CONFIG_LOCKD_V4 @@ -618,7 +618,7 @@ static struct rpc_version *nlm_versions[] = { static struct rpc_stat nlm_rpc_stats; -struct rpc_program nlm_program = { +const struct rpc_program nlm_program = { .name = "lockd", .number = NLM_PROGRAM, .nrvers = ARRAY_SIZE(nlm_versions), diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 65ba36b80a9e..c196030e530a 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -47,7 +47,7 @@ struct nsm_res { u32 state; }; -static struct rpc_program nsm_program; +static const struct rpc_program nsm_program; static LIST_HEAD(nsm_handles); static DEFINE_SPINLOCK(nsm_lock); @@ -534,19 +534,19 @@ static struct rpc_procinfo nsm_procedures[] = { }, }; -static struct rpc_version nsm_version1 = { +static const struct rpc_version nsm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nsm_procedures), .procs = nsm_procedures }; -static struct rpc_version * nsm_version[] = { +static const struct rpc_version *nsm_version[] = { [1] = &nsm_version1, }; static struct rpc_stat nsm_stats; -static struct rpc_program nsm_program = { +static const struct rpc_program nsm_program = { .name = "statd", .number = NSM_PROGRAM, .nrvers = ARRAY_SIZE(nsm_version), diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 34c8d1cbf06e..98af1cb28ee3 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -89,7 +89,7 @@ static bool nfs4_disable_idmapping = true; /* * RPC cruft for NFS */ -static struct rpc_version *nfs_version[5] = { +static const struct rpc_version *nfs_version[5] = { [2] = &nfs_version2, #ifdef CONFIG_NFS_V3 [3] = &nfs_version3, @@ -99,7 +99,7 @@ static struct rpc_version *nfs_version[5] = { #endif }; -struct rpc_program nfs_program = { +const struct rpc_program nfs_program = { .name = "nfs", .number = NFS_PROGRAM, .nrvers = ARRAY_SIZE(nfs_version), @@ -115,11 +115,11 @@ struct rpc_stat nfs_rpcstat = { #ifdef CONFIG_NFS_V3_ACL static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program }; -static struct rpc_version * nfsacl_version[] = { +static const struct rpc_version *nfsacl_version[] = { [3] = &nfsacl_version3, }; -struct rpc_program nfsacl_program = { +const struct rpc_program nfsacl_program = { .name = "nfsacl", .number = NFS_ACL_PROGRAM, .nrvers = ARRAY_SIZE(nfsacl_version), diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index eda4cde40fb2..cdb121d3c6f4 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -145,7 +145,7 @@ extern int nfs_mount(struct nfs_mount_request *info); extern void nfs_umount(const struct nfs_mount_request *info); /* client.c */ -extern struct rpc_program nfs_program; +extern const struct rpc_program nfs_program; 
extern void nfs_cleanup_cb_ident_idr(void); extern void nfs_put_client(struct nfs_client *); diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 4fbe3a8e5e6b..b37ca34af903 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -67,7 +67,7 @@ enum { MOUNTPROC3_EXPORT = 5, }; -static struct rpc_program mnt_program; +static const struct rpc_program mnt_program; /* * Defined by OpenGroup XNFS Version 3W, chapter 8 @@ -488,19 +488,19 @@ static struct rpc_procinfo mnt3_procedures[] = { }; -static struct rpc_version mnt_version1 = { +static const struct rpc_version mnt_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(mnt_procedures), .procs = mnt_procedures, }; -static struct rpc_version mnt_version3 = { +static const struct rpc_version mnt_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(mnt3_procedures), .procs = mnt3_procedures, }; -static struct rpc_version *mnt_version[] = { +static const struct rpc_version *mnt_version[] = { NULL, &mnt_version1, NULL, @@ -509,7 +509,7 @@ static struct rpc_version *mnt_version[] = { static struct rpc_stat mnt_stats; -static struct rpc_program mnt_program = { +static const struct rpc_program mnt_program = { .name = "mount", .number = NFS_MNT_PROGRAM, .nrvers = ARRAY_SIZE(mnt_version), diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 792cb13a4304..1f56000fabbd 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -1150,7 +1150,7 @@ struct rpc_procinfo nfs_procedures[] = { PROC(STATFS, fhandle, statfsres, 0), }; -struct rpc_version nfs_version2 = { +const struct rpc_version nfs_version2 = { .number = 2, .nrprocs = ARRAY_SIZE(nfs_procedures), .procs = nfs_procedures diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 183c6b123d0f..a77cc9a3ce55 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -2461,7 +2461,7 @@ struct rpc_procinfo nfs3_procedures[] = { PROC(COMMIT, commit, commit, 5), }; -struct rpc_version nfs_version3 = { +const struct rpc_version nfs_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(nfs3_procedures), .procs = nfs3_procedures @@ -2489,7 +2489,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { }, }; -struct rpc_version nfsacl_version3 = { +const struct rpc_version nfsacl_version3 = { .number = 3, .nrprocs = sizeof(nfs3_acl_procedures)/ sizeof(nfs3_acl_procedures[0]), diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 95e92e438407..4633d405a94c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7109,7 +7109,7 @@ struct rpc_procinfo nfs4_procedures[] = { #endif /* CONFIG_NFS_V4_1 */ }; -struct rpc_version nfs_version4 = { +const struct rpc_version nfs_version4 = { .number = 4, .nrprocs = ARRAY_SIZE(nfs4_procedures), .procs = nfs4_procedures diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 426ccb171650..0e262f32ac41 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -605,18 +605,18 @@ static struct rpc_version nfs_cb_version4 = { .procs = nfs4_cb_procedures }; -static struct rpc_version *nfs_cb_version[] = { +static const struct rpc_version *nfs_cb_version[] = { &nfs_cb_version4, }; -static struct rpc_program cb_program; +static const struct rpc_program cb_program; static struct rpc_stat cb_stats = { .program = &cb_program }; #define NFS4_CALLBACK 0x40000000 -static struct rpc_program cb_program = { +static const struct rpc_program cb_program = { .name = "nfs4_cb", .number = NFS4_CALLBACK, .nrvers = ARRAY_SIZE(nfs_cb_version), -- cgit From 4601df20fb3bf2b87e248abc622b8a7e4c3059fb Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 20 Jan 2012 18:47:05 -0500 
Subject: NFSv4: Avoid thundering herd issues with nfs_release_seqid Store a pointer to the rpc_task in struct nfs_seqid so that we can wake up only that request that is able to grab the lock after we've released it. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4state.c | 21 +++++++++++++++------ 2 files changed, 16 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index c45c21a5470f..b133b50dec9a 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -71,6 +71,7 @@ struct nfs_seqid_counter { struct nfs_seqid { struct nfs_seqid_counter *sequence; struct list_head list; + struct rpc_task *task; }; static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f0e9881c2aa2..7d098604802c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -916,20 +916,28 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_m if (new != NULL) { new->sequence = counter; INIT_LIST_HEAD(&new->list); + new->task = NULL; } return new; } void nfs_release_seqid(struct nfs_seqid *seqid) { - if (!list_empty(&seqid->list)) { - struct nfs_seqid_counter *sequence = seqid->sequence; + struct nfs_seqid_counter *sequence; - spin_lock(&sequence->lock); - list_del_init(&seqid->list); - spin_unlock(&sequence->lock); - rpc_wake_up(&sequence->wait); + if (list_empty(&seqid->list)) + return; + sequence = seqid->sequence; + spin_lock(&sequence->lock); + list_del_init(&seqid->list); + if (!list_empty(&sequence->list)) { + struct nfs_seqid *next; + + next = list_first_entry(&sequence->list, + struct nfs_seqid, list); + rpc_wake_up_queued_task(&sequence->wait, next->task); } + spin_unlock(&sequence->lock); } void nfs_free_seqid(struct nfs_seqid *seqid) @@ -1000,6 +1008,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) int status = 0; spin_lock(&sequence->lock); + seqid->task = task; if (list_empty(&seqid->list)) list_add_tail(&seqid->list, &sequence->list); if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid) -- cgit From 7d9dea915fe333357912bce2d624ee848dfbd890 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 20 Jan 2012 18:57:02 -0500 Subject: NFS: Use kcalloc() when allocating arrays Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/extents.c | 2 +- fs/nfs/pnfs.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c index 1abac09f7cd5..1f9a6032796b 100644 --- a/fs/nfs/blocklayout/extents.c +++ b/fs/nfs/blocklayout/extents.c @@ -147,7 +147,7 @@ static int _preload_range(struct pnfs_inval_markings *marks, count = (int)(end - start) / (int)tree->mtt_step_size; /* Pre-malloc what memory we might need */ - storage = kmalloc(sizeof(*storage) * count, GFP_NOFS); + storage = kcalloc(count, sizeof(*storage), GFP_NOFS); if (!storage) return -ENOMEM; for (i = 0; i < count; i++) { diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 17149a490065..92927878c2f8 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -590,7 +590,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; max_pages = max_resp_sz >> PAGE_SHIFT; - pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); + pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); if (!pages) goto out_err_free; -- cgit From d36b7cf7c626749efc75f49fb0468e8c3c0c1bbd Mon Sep 17 00:00:00 2001 From: 
Benny Halevy Date: Sun, 22 Jan 2012 16:03:39 +0200 Subject: pnfs: clean up initiate_file_draining layout lookup Fixes the following compiler warning: fs/nfs/callback_proc.c: In function 'do_callback_layoutrecall': fs/nfs/callback_proc.c:115:26: warning: 'lo' may be used uninitialized in this function Reported-by: Jim Rees Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 56 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 54cea8ad5a76..0e6e63f55db4 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -108,42 +108,62 @@ int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nf #if defined(CONFIG_NFS_V4_1) -static u32 initiate_file_draining(struct nfs_client *clp, - struct cb_layoutrecallargs *args) +/* + * Lookup a layout by filehandle. + * + * Note: gets a refcount on the layout hdr and on its respective inode. + * Caller must put the layout hdr and the inode. + * + * TODO: keep track of all layouts (and delegations) in a hash table + * hashed by filehandle. + */ +static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp, struct nfs_fh *fh) { struct nfs_server *server; - struct pnfs_layout_hdr *lo; struct inode *ino; - bool found = false; - u32 rv = NFS4ERR_NOMATCHING_LAYOUT; - LIST_HEAD(free_me_list); + struct pnfs_layout_hdr *lo; - spin_lock(&clp->cl_lock); - rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { list_for_each_entry(lo, &server->layouts, plh_layouts) { - if (nfs_compare_fh(&args->cbl_fh, - &NFS_I(lo->plh_inode)->fh)) + if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh)) continue; ino = igrab(lo->plh_inode); if (!ino) continue; - found = true; - /* Without this, layout can be freed as soon - * as we release cl_lock. - */ get_layout_hdr(lo); - break; + return lo; } - if (found) - break; } + + return NULL; +} + +static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, struct nfs_fh *fh) +{ + struct pnfs_layout_hdr *lo; + + spin_lock(&clp->cl_lock); + rcu_read_lock(); + lo = get_layout_by_fh_locked(clp, fh); rcu_read_unlock(); spin_unlock(&clp->cl_lock); - if (!found) + return lo; +} + +static u32 initiate_file_draining(struct nfs_client *clp, + struct cb_layoutrecallargs *args) +{ + struct inode *ino; + struct pnfs_layout_hdr *lo; + u32 rv = NFS4ERR_NOMATCHING_LAYOUT; + LIST_HEAD(free_me_list); + + lo = get_layout_by_fh(clp, &args->cbl_fh); + if (!lo) return NFS4ERR_NOMATCHING_LAYOUT; + ino = lo->plh_inode; spin_lock(&ino->i_lock); if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || mark_matching_lsegs_invalid(lo, &free_me_list, -- cgit From c15c928f36a2710746c2b945067215f436f45544 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 24 Jan 2012 16:35:00 +0000 Subject: nfs: remove unneeded NULL pointer check in nfs4_remote_mount "data" is never NULL here. Reported-by: Eric Paris Signed-off-by: Jeff Layton Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index b79f2a11c29e..8e210b2c16d7 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2667,8 +2667,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags, if (!s->s_root) { /* initial superblock/root creation */ nfs4_fill_super(s); - nfs_fscache_get_super_cookie( - s, data ? 
data->fscache_uniq : NULL, NULL); + nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL); } mntroot = nfs4_get_root(s, mntfh, dev_name); -- cgit From 8b7e3f49ddda0d43c5bc8de404c1dc7e7a13cc80 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 30 Jan 2012 15:43:56 -0500 Subject: NFSv4: Don't decode fs_locations if we didn't ask for them... Currently, the server can potentially cause us to Oops by returning an fs_locations request that we didn't actually request. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4633d405a94c..ca288d115b54 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3561,6 +3561,10 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st status = 0; if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) goto out; + status = -EIO; + /* Ignore borken servers that return unrequested attrs */ + if (unlikely(res == NULL)) + goto out; dprintk("%s: fsroot ", __func__); status = decode_pathname(xdr, &res->fs_path); if (unlikely(status != 0)) @@ -4295,6 +4299,7 @@ xdr_error: static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fattr *fattr, struct nfs_fh *fh, + struct nfs4_fs_locations *fs_loc, const struct nfs_server *server) { int status; @@ -4342,9 +4347,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, goto xdr_error; fattr->valid |= status; - status = decode_attr_fs_locations(xdr, bitmap, container_of(fattr, - struct nfs4_fs_locations, - fattr)); + status = decode_attr_fs_locations(xdr, bitmap, fs_loc); if (status < 0) goto xdr_error; fattr->valid |= status; @@ -4408,7 +4411,8 @@ xdr_error: } static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr, - struct nfs_fh *fh, const struct nfs_server *server) + struct nfs_fh *fh, struct nfs4_fs_locations *fs_loc, + const struct nfs_server *server) { __be32 *savep; uint32_t attrlen, @@ -4427,7 +4431,7 @@ static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fat if (status < 0) goto xdr_error; - status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server); + status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, fs_loc, server); if (status < 0) goto xdr_error; @@ -4440,7 +4444,7 @@ xdr_error: static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, const struct nfs_server *server) { - return decode_getfattr_generic(xdr, fattr, NULL, server); + return decode_getfattr_generic(xdr, fattr, NULL, NULL, server); } /* @@ -6580,8 +6584,9 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, if (status) goto out; xdr_enter_page(xdr, PAGE_SIZE); - status = decode_getfattr(xdr, &res->fs_locations->fattr, - res->fs_locations->server); + status = decode_getfattr_generic(xdr, &res->fs_locations->fattr, + NULL, res->fs_locations, + res->fs_locations->server); out: return status; } @@ -6961,7 +6966,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, goto out_overflow; if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, - entry->server) < 0) + NULL, entry->server) < 0) goto out_overflow; if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) entry->ino = entry->fattr->mounted_on_fileid; -- cgit From a4980e7840176b4baa60715c32c5994b084ea9a6 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 30 Jan 2012 15:43:56 -0500 Subject: NFSv4: ACCESS validation doesn't require a 
full attribute refresh We only really need to check the change attribute, so let's just use the server->cache_consistency_bitmask. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 828a76590af9..1bb0be36a726 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2512,7 +2512,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry struct nfs_server *server = NFS_SERVER(inode); struct nfs4_accessargs args = { .fh = NFS_FH(inode), - .bitmask = server->attr_bitmask, + .bitmask = server->cache_consistency_bitmask, }; struct nfs4_accessres res = { .server = server, -- cgit From 51f72f4a0f92e4abde33a8bca0fac9667575d035 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 30 Jan 2012 20:09:33 -0800 Subject: sysctl: An easier to read version of find_subdir Suggested-by: Lucian Adrian Grijincu Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 27e265ba1afe..ebe8b3076db7 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -833,9 +833,9 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir, entry = find_entry(&head, dir, name, namelen); if (!entry) return ERR_PTR(-ENOENT); - if (S_ISDIR(entry->mode)) - return container_of(head, struct ctl_dir, header); - return ERR_PTR(-ENOTDIR); + if (!S_ISDIR(entry->mode)) + return ERR_PTR(-ENOTDIR); + return container_of(head, struct ctl_dir, header); } static struct ctl_dir *new_dir(struct ctl_table_set *set, -- cgit From 0eb97f38d2bfaea289b44c5140a7b04e7b369bad Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 30 Jan 2012 20:37:51 -0800 Subject: sysctl: Correct error return from get_subdir When insert_header fails ensure we return the proper error value from get_subdir. In practice nothing cares, but there is no need to be sloppy. Reported-by: Lucian Adrian Grijincu Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index ebe8b3076db7..722ec116208d 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -869,6 +869,7 @@ static struct ctl_dir *get_subdir(struct ctl_dir *dir, { struct ctl_table_set *set = dir->header.set; struct ctl_dir *subdir, *new = NULL; + int err; spin_lock(&sysctl_lock); subdir = find_subdir(dir, name, namelen); @@ -890,7 +891,9 @@ static struct ctl_dir *get_subdir(struct ctl_dir *dir, if (PTR_ERR(subdir) != -ENOENT) goto failed; - if (insert_header(dir, &new->header)) + err = insert_header(dir, &new->header); + subdir = ERR_PTR(err); + if (err) goto failed; subdir = new; found: -- cgit From 60f126d93b210ae708e2a5bb4a3be2121831f2a0 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 30 Jan 2012 21:23:52 -0800 Subject: sysctl: Comments to make the code clearer. Document get_subdir and that find_subdir alwasy takes a reference. Suggested-by: Lucian Adrian Grijincu Signed-off-by: Eric W. 
Biederman --- fs/proc/proc_sysctl.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 722ec116208d..e5601dc24088 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -73,6 +73,7 @@ static int namecmp(const char *name1, int len1, const char *name2, int len2) return cmp; } +/* Called under sysctl_lock */ static struct ctl_table *find_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { @@ -864,6 +865,18 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set, return new; } +/** + * get_subdir - find or create a subdir with the specified name. + * @dir: Directory to create the subdirectory in + * @name: The name of the subdirectory to find or create + * @namelen: The length of name + * + * Takes a directory with an elevated reference count so we know that + * if we drop the lock the directory will not go away. Upon success + * the reference is moved from @dir to the returned subdirectory. + * Upon error an error code is returned and the reference on @dir is + * simply dropped. + */ static struct ctl_dir *get_subdir(struct ctl_dir *dir, const char *name, int namelen) { @@ -885,12 +898,14 @@ static struct ctl_dir *get_subdir(struct ctl_dir *dir, if (!new) goto failed; + /* Was the subdir added while we dropped the lock? */ subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; + /* Nope. Use the our freshly made directory entry. */ err = insert_header(dir, &new->header); subdir = ERR_PTR(err); if (err) @@ -1190,6 +1205,7 @@ struct ctl_table_header *__register_sysctl_table( spin_lock(&sysctl_lock); dir = &set->dir; + /* Reference moved down the diretory tree get_subdir */ dir->header.nreg++; spin_unlock(&sysctl_lock); -- cgit From 4e75732035d7e97e001bdf6e3149d3967c0221de Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 30 Jan 2012 21:24:59 -0800 Subject: sysctl: Don't call sysctl_follow_link unless we are a link. There are no functional changes. Just code motion to make it clear that we don't follow a link between sysctl roots unless the directory entry actually is a link. Suggested-by: Lucian Adrian Grijincu Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index e5601dc24088..a7708b7c957f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -451,10 +451,12 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, if (!p) goto out; - ret = sysctl_follow_link(&h, &p, current->nsproxy); - err = ERR_PTR(ret); - if (ret) - goto out; + if (S_ISLNK(p->mode)) { + ret = sysctl_follow_link(&h, &p, current->nsproxy); + err = ERR_PTR(ret); + if (ret) + goto out; + } err = ERR_PTR(-ENOMEM); inode = proc_sys_make_inode(dir->i_sb, h ? 
h : head, p); @@ -601,10 +603,12 @@ static int proc_sys_link_fill_cache(struct file *filp, void *dirent, int err, ret = 0; head = sysctl_head_grab(head); - /* It is not an error if we can not follow the link ignore it */ - err = sysctl_follow_link(&head, &table, current->nsproxy); - if (err) - goto out; + if (S_ISLNK(table->mode)) { + /* It is not an error if we can not follow the link ignore it */ + err = sysctl_follow_link(&head, &table, current->nsproxy); + if (err) + goto out; + } ret = proc_sys_fill_cache(filp, dirent, filldir, head, table); out: @@ -950,10 +954,6 @@ static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_dir *dir; int ret; - /* Get out quickly if not a link */ - if (!S_ISLNK((*pentry)->mode)) - return 0; - ret = 0; spin_lock(&sysctl_lock); root = (*pentry)->data; -- cgit From b9957308452afcf58e656db834f44df10d7b1662 Mon Sep 17 00:00:00 2001 From: Amit Sahrawat Date: Mon, 16 Jan 2012 12:24:36 +0000 Subject: xfs: kill the unused XFS_BB_FSB_OFFSET macro Removing the macro, as this is no more needed in the code. Tried to find the reference when it was last used - but the usage for this seemed to have been dropped long time ago. Signed-off-by: Amit Sahrawat Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_sb.h | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index cb6ae715814a..f429d9d5d325 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h @@ -529,7 +529,6 @@ static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp) #define XFS_BB_TO_FSB(mp,bb) \ (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log) #define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log) -#define XFS_BB_FSB_OFFSET(mp,bb) ((bb) & ((mp)->m_bsize - 1)) /* * File system block to byte conversions. -- cgit From 6967b964c1012231f338445f20f877e680cd4cb8 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Mon, 23 Jan 2012 17:31:25 +0000 Subject: Define a new function xfs_this_quota_on() Create a new function xfs_this_quota_on() that takes a xfs_mount data structure and a disk quota type and returns true if the specified type of quota is ON in the xfs_mount data structure. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_dquot.c | 4 ++-- fs/xfs/xfs_dquot.h | 17 +++++++++++++---- 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index b4ff40b5f918..4c8b3d2cc961 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -358,7 +358,7 @@ xfs_qm_dqalloc( * Return if this type of quotas is turned off while we didn't * have an inode lock */ - if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { xfs_iunlock(quotip, XFS_ILOCK_EXCL); return (ESRCH); } @@ -460,7 +460,7 @@ xfs_qm_dqtobp( dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; xfs_ilock(quotip, XFS_ILOCK_SHARED); - if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { /* * Return if this type of quotas is turned off while we * didn't have the quota inode lock. 
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index a1d91d8f1802..1c48489423e4 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -115,6 +115,19 @@ static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp) mutex_unlock(&dqp->q_qlock); } +static inline int xfs_this_quota_on(struct xfs_mount *mp, int type) +{ + switch (type & XFS_DQ_ALLTYPES) { + case XFS_DQ_USER: + return XFS_IS_UQUOTA_ON(mp); + case XFS_DQ_GROUP: + case XFS_DQ_PROJ: + return XFS_IS_OQUOTA_ON(mp); + default: + return 0; + } +} + #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) @@ -125,10 +138,6 @@ static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp) XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \ XFS_DQ_TO_QINF(dqp)->qi_gquotaip) -#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \ - (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ - (XFS_IS_OQUOTA_ON((d)->q_mount)))) - extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint, uint, struct xfs_dquot **); extern void xfs_qm_dqdestroy(xfs_dquot_t *); -- cgit From 36731410834e08c7d15c3980abd6cc4c563c2e87 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Mon, 23 Jan 2012 17:31:30 +0000 Subject: Define a new function xfs_inode_dquot() Define a new function xfs_inode_dquot() that takes a inode pointer and a disk quota type and returns the quota pointer for the specified quota type. This simplifies the xfs_qm_dqget() error path significantly. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_dquot.c | 33 +++++++++------------------------ fs/xfs/xfs_dquot.h | 13 +++++++++++++ 2 files changed, 22 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 4c8b3d2cc961..bf4fe8637f3d 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -723,7 +723,7 @@ xfs_qm_dqget( uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */ xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ { - xfs_dquot_t *dqp; + xfs_dquot_t *dqp, *dqp1; xfs_dqhash_t *h; uint version; int error; @@ -750,10 +750,7 @@ xfs_qm_dqget( type == XFS_DQ_GROUP); if (ip) { ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - if (type == XFS_DQ_USER) - ASSERT(ip->i_udquot == NULL); - else - ASSERT(ip->i_gdquot == NULL); + ASSERT(xfs_inode_dquot(ip, type) == NULL); } #endif @@ -819,30 +816,18 @@ restart: * A dquot could be attached to this inode by now, since * we had dropped the ilock. 
*/ - if (type == XFS_DQ_USER) { - if (!XFS_IS_UQUOTA_ON(mp)) { - /* inode stays locked on return */ - xfs_qm_dqdestroy(dqp); - return XFS_ERROR(ESRCH); - } - if (ip->i_udquot) { + if (xfs_this_quota_on(mp, type)) { + dqp1 = xfs_inode_dquot(ip, type); + if (dqp1) { xfs_qm_dqdestroy(dqp); - dqp = ip->i_udquot; + dqp = dqp1; xfs_dqlock(dqp); goto dqret; } } else { - if (!XFS_IS_OQUOTA_ON(mp)) { - /* inode stays locked on return */ - xfs_qm_dqdestroy(dqp); - return XFS_ERROR(ESRCH); - } - if (ip->i_gdquot) { - xfs_qm_dqdestroy(dqp); - dqp = ip->i_gdquot; - xfs_dqlock(dqp); - goto dqret; - } + /* inode stays locked on return */ + xfs_qm_dqdestroy(dqp); + return XFS_ERROR(ESRCH); } } diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 1c48489423e4..48a795b141b6 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -128,6 +128,19 @@ static inline int xfs_this_quota_on(struct xfs_mount *mp, int type) } } +static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type) +{ + switch (type & XFS_DQ_ALLTYPES) { + case XFS_DQ_USER: + return ip->i_udquot; + case XFS_DQ_GROUP: + case XFS_DQ_PROJ: + return ip->i_gdquot; + default: + return NULL; + } +} + #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) -- cgit From 6bd92a239fc71ea26eb1dab3aece5eaaa99d4ef7 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Mon, 23 Jan 2012 17:31:37 +0000 Subject: Change xfs_sb_from_disk() interface to take a mount pointer Change xfs_sb_from_disk() interface to take a mount pointer instead of a superblock pointer. This is to print mount point specific error messages in future fixes. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_log_recover.c | 2 +- fs/xfs/xfs_mount.c | 6 ++++-- fs/xfs/xfs_mount.h | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 15ff5392fb65..403825eb5c16 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3695,7 +3695,7 @@ xlog_do_recover( /* Convert superblock from on-disk format */ sbp = &log->l_mp->m_sb; - xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); + xfs_sb_from_disk(log->l_mp, XFS_BUF_TO_SBP(bp)); ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); ASSERT(xfs_sb_good_version(sbp)); xfs_buf_relse(bp); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index e07f8528c5ef..1ffead4b2296 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -553,9 +553,11 @@ out_unwind: void xfs_sb_from_disk( - xfs_sb_t *to, + struct xfs_mount *mp, xfs_dsb_t *from) { + struct xfs_sb *to = &mp->m_sb; + to->sb_magicnum = be32_to_cpu(from->sb_magicnum); to->sb_blocksize = be32_to_cpu(from->sb_blocksize); to->sb_dblocks = be64_to_cpu(from->sb_dblocks); @@ -693,7 +695,7 @@ reread: * Initialize the mount structure from the superblock. * But first do some basic consistency checking. 
*/ - xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); + xfs_sb_from_disk(mp, XFS_BUF_TO_SBP(bp)); error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags); if (error) { if (loud) diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 19f69e232509..c082e44dad2d 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -395,7 +395,7 @@ extern void xfs_set_low_space_thresholds(struct xfs_mount *); extern void xfs_mod_sb(struct xfs_trans *, __int64_t); extern int xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t, xfs_agnumber_t *); -extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *); +extern void xfs_sb_from_disk(struct xfs_mount *, struct xfs_dsb *); extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t); #endif /* __XFS_MOUNT_H__ */ -- cgit From 4177af3a8a6f119484c7903845c6693d7381c13e Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Mon, 23 Jan 2012 17:31:43 +0000 Subject: Define new macro XFS_ALL_QUOTA_ACTIVE and simply some usage Define new macro XFS_ALL_QUOTA_ACTIVE and simply some usage of quota macros. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_qm.c | 2 +- fs/xfs/xfs_quota.h | 2 ++ fs/xfs/xfs_super.c | 7 +++---- 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 671f37eae1c7..1b2f5b37eac4 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -1499,7 +1499,7 @@ xfs_qm_quotacheck( * quotachecked status, since we won't be doing accounting for * that type anymore. */ - mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD); + mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD; mp->m_qflags |= flags; error_return: diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index 8a0807e0f979..b50ec5b95d5a 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h @@ -174,6 +174,8 @@ typedef struct xfs_qoff_logformat { #define XFS_UQUOTA_ACTIVE 0x0100 /* uquotas are being turned off */ #define XFS_PQUOTA_ACTIVE 0x0200 /* pquotas are being turned off */ #define XFS_GQUOTA_ACTIVE 0x0400 /* gquotas are being turned off */ +#define XFS_ALL_QUOTA_ACTIVE \ + (XFS_UQUOTA_ACTIVE | XFS_PQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE) /* * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index ee5b695c99a7..5e0d43f231a4 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -324,10 +324,9 @@ xfs_parseargs( } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { mp->m_flags |= XFS_MOUNT_FILESTREAMS; } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { - mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | - XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | - XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | - XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD); + mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; + mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; + mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE; } else if (!strcmp(this_char, MNTOPT_QUOTA) || !strcmp(this_char, MNTOPT_UQUOTA) || !strcmp(this_char, MNTOPT_USRQUOTA)) { -- cgit From 8112b9830a056c3f42423e4e8e914ac9f7162dce Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Sun, 22 Jan 2012 23:27:00 +0900 Subject: reiserfs: fix printk typo in lbalance.c Correct spelling "entry_cout" to "entry_count" in fs/reiserfs/lbalance.c Signed-off-by: Masanari Iida Signed-off-by: Jiri Kosina --- fs/reiserfs/lbalance.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index 03d85cbf90bf..b43d01556313 100644 --- a/fs/reiserfs/lbalance.c +++ 
b/fs/reiserfs/lbalance.c @@ -975,7 +975,7 @@ static int leaf_cut_entries(struct buffer_head *bh, remove */ RFALSE(!is_direntry_le_ih(ih), "10180: item is not directory item"); RFALSE(I_ENTRY_COUNT(ih) < from + del_count, - "10185: item contains not enough entries: entry_cout = %d, from = %d, to delete = %d", + "10185: item contains not enough entries: entry_count = %d, from = %d, to delete = %d", I_ENTRY_COUNT(ih), from, del_count); if (del_count == 0) -- cgit From 982a598ff68acad37647baba06668054568eee49 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 24 Jan 2012 02:29:36 +0900 Subject: ntfs: fix printk typos in mft.c Correct two spelling errors "dealocate" to "deallocate" in fs/ntfs/mft.c Signed-off-by: Jiri Kosina --- fs/ntfs/mft.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 382857f9c7db..862f7ff57b78 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -1367,7 +1367,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol) ntfs_error(vol->sb, "Failed to merge runlists for mft " "bitmap."); if (ntfs_cluster_free_from_rl(vol, rl2)) { - ntfs_error(vol->sb, "Failed to dealocate " + ntfs_error(vol->sb, "Failed to deallocate " "allocated cluster.%s", es); NVolSetErrors(vol); } @@ -1805,7 +1805,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol) ntfs_error(vol->sb, "Failed to merge runlists for mft data " "attribute."); if (ntfs_cluster_free_from_rl(vol, rl2)) { - ntfs_error(vol->sb, "Failed to dealocate clusters " + ntfs_error(vol->sb, "Failed to deallocate clusters " "from the mft data attribute.%s", es); NVolSetErrors(vol); } -- cgit From 1cab0652ba985d11b67645bd344c39ebb6cd28a2 Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Tue, 31 Jan 2012 10:39:29 -0500 Subject: NFS: Pass a stateid to test_stateid() and free_stateid() This takes the guesswork out of what stateid to use. The caller is expected to figure this out and pass in the correct one. 
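
This is the usual cure for a callee that guesses: as the follow-up patch further below observes, the old helpers ended up testing the delegation stateid no matter which stateid the caller actually held. The sketch below is a self-contained userspace illustration of the before and after shapes; stateid_t, struct state and both test functions are invented stand-ins, not the NFS data structures.

#include <stdio.h>
#include <string.h>

/* Invented 16-byte identifier, standing in for an NFSv4 stateid. */
typedef struct {
	unsigned char data[16];
} stateid_t;

/* A state object owning several distinct ids, loosely modelled on the
 * delegation, open and lock stateids that NFSv4 state tracks. */
struct state {
	stateid_t deleg_stateid;
	stateid_t open_stateid;
	stateid_t lock_stateid;
};

/* Before: the callee guesses which id was meant, so it can only ever
 * check one of them regardless of what the caller holds. */
static int test_stateid_guess(const struct state *st)
{
	return st->deleg_stateid.data[0] != 0;	/* always the delegation id */
}

/* After: the caller names the exact id it wants validated. */
static int test_stateid(const stateid_t *sid)
{
	return sid->data[0] != 0;
}

int main(void)
{
	struct state st;

	memset(&st, 0, sizeof(st));
	st.open_stateid.data[0] = 1;	/* pretend only the open id is valid */

	printf("guessing callee:  %d (it only ever looked at the delegation id)\n",
	       test_stateid_guess(&st));
	printf("explicit open id: %d\n", test_stateid(&st.open_stateid));
	printf("explicit lock id: %d\n", test_stateid(&st.lock_stateid));
	return 0;
}

Passing the exact identifier moves the decision to the one place that has the information, which is what allows the next patch to validate delegation, open and lock stateids separately during recovery.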
Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 37 ++++++++++++++++++++++--------------- fs/nfs/nfs4xdr.c | 3 ++- 2 files changed, 24 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1bb0be36a726..8491d775e23c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -82,8 +82,8 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs4_state *state); #ifdef CONFIG_NFS_V4_1 -static int nfs41_test_stateid(struct nfs_server *, struct nfs4_state *); -static int nfs41_free_stateid(struct nfs_server *, struct nfs4_state *); +static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *); +static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *); #endif /* Prevent leaks of NFSv4 errors into userland */ static int nfs4_map_errors(int err) @@ -1728,10 +1728,10 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st int status; struct nfs_server *server = NFS_SERVER(state->inode); - status = nfs41_test_stateid(server, state); + status = nfs41_test_stateid(server, &state->stateid); if (status == NFS_OK) return 0; - nfs41_free_stateid(server, state); + nfs41_free_stateid(server, &state->stateid); return nfs4_open_expired(sp, state); } #endif @@ -4509,10 +4509,10 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques int status; struct nfs_server *server = NFS_SERVER(state->inode); - status = nfs41_test_stateid(server, state); + status = nfs41_test_stateid(server, &state->stateid); if (status == NFS_OK) return 0; - nfs41_free_stateid(server, state); + nfs41_free_stateid(server, &state->stateid); return nfs4_lock_expired(state, request); } #endif @@ -6142,10 +6142,12 @@ out_freepage: out: return err; } -static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state) + +static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) { + int status; struct nfs41_test_stateid_args args = { - .stateid = &state->stateid, + .stateid = stateid, }; struct nfs41_test_stateid_res res; struct rpc_message msg = { @@ -6153,26 +6155,31 @@ static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *sta .rpc_argp = &args, .rpc_resp = &res, }; + nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); - return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); + status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); + + if (status == NFS_OK) + return res.status; + return status; } -static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state) +static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, - _nfs41_test_stateid(server, state), + _nfs41_test_stateid(server, stateid), &exception); } while (exception.retry); return err; } -static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *state) +static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs41_free_stateid_args args = { - .stateid = &state->stateid, + .stateid = stateid, }; struct nfs41_free_stateid_res res; struct rpc_message msg = { @@ -6185,13 +6192,13 @@ static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *stat return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, 
&res.seq_res, 1); } -static int nfs41_free_stateid(struct nfs_server *server, struct nfs4_state *state) +static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) { struct nfs4_exception exception = { }; int err; do { err = nfs4_handle_exception(server, - _nfs4_free_stateid(server, state), + _nfs4_free_stateid(server, stateid), &exception); } while (exception.retry); return err; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ca288d115b54..5d1caac0656d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5671,7 +5671,8 @@ static int decode_test_stateid(struct xdr_stream *xdr, if (unlikely(!p)) goto out_overflow; res->status = be32_to_cpup(p++); - return res->status; + + return status; out_overflow: print_overflow_msg(__func__, xdr); out: -- cgit From b01dd1d8fae6178cbec374b90da2e4a3b8dce9ba Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Tue, 31 Jan 2012 10:39:30 -0500 Subject: NFS: Call test_stateid() and free_stateid() with correct stateids I noticed that test_stateid() was always using the same stateid for open and lock recovery. After poking around a bit, I discovered that it was always testing with a delegation stateid (even if there was no delegation present). I figured this wasn't correct, so now delegation and open stateids are tested during open_expired() and lock stateids are tested during lock_expired(). Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 58 ++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 47 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8491d775e23c..aaaf98ba8956 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1723,15 +1723,32 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta } #if defined(CONFIG_NFS_V4_1) -static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) +static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags) { - int status; + int status = NFS_OK; struct nfs_server *server = NFS_SERVER(state->inode); - status = nfs41_test_stateid(server, &state->stateid); - if (status == NFS_OK) - return 0; - nfs41_free_stateid(server, &state->stateid); + if (state->flags & flags) { + status = nfs41_test_stateid(server, stateid); + if (status != NFS_OK) { + nfs41_free_stateid(server, stateid); + state->flags &= ~flags; + } + } + return status; +} + +static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) +{ + int deleg_status, open_status; + int deleg_flags = 1 << NFS_DELEGATED_STATE; + int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE); + + deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags); + open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags); + + if ((deleg_status == NFS_OK) && (open_status == NFS_OK)) + return NFS_OK; return nfs4_open_expired(sp, state); } #endif @@ -4504,15 +4521,34 @@ out: } #if defined(CONFIG_NFS_V4_1) -static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) +static int nfs41_check_expired_locks(struct nfs4_state *state) { - int status; + int status, ret = NFS_OK; + struct nfs4_lock_state *lsp; struct nfs_server *server = NFS_SERVER(state->inode); - status = nfs41_test_stateid(server, &state->stateid); + list_for_each_entry(lsp, &state->lock_states, ls_locks) { + if (lsp->ls_flags & NFS_LOCK_INITIALIZED) { + 
status = nfs41_test_stateid(server, &lsp->ls_stateid); + if (status != NFS_OK) { + nfs41_free_stateid(server, &lsp->ls_stateid); + lsp->ls_flags &= ~NFS_LOCK_INITIALIZED; + ret = status; + } + } + }; + + return ret; +} + +static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) +{ + int status = NFS_OK; + + if (test_bit(LK_STATE_IN_USE, &state->flags)) + status = nfs41_check_expired_locks(state); if (status == NFS_OK) - return 0; - nfs41_free_stateid(server, &state->stateid); + return status; return nfs4_lock_expired(state, request); } #endif -- cgit From f9fd2d9c1f3b512c9794abbbd76c77a6e6de57aa Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Thu, 26 Jan 2012 13:32:22 -0500 Subject: NFS: printks in fs/nfs/ should start with NFS: Messages like "Got error -10052 from the server on DESTROY_SESSION. Session has been destroyed regardless" can be confusing to users who aren't very familiar with NFS. NOTE: This patch ignores any printks() that start by printing __func__ - that will be in a separate patch. Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 3 ++- fs/nfs/nfs4filelayout.c | 2 +- fs/nfs/nfs4proc.c | 2 +- fs/nfs/nfs4state.c | 2 +- fs/nfs/nfs4xdr.c | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index ff084d258c41..91b1e2a82146 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -175,7 +175,8 @@ int nfs_idmap_init(void) struct key *keyring; int ret = 0; - printk(KERN_NOTICE "Registering the %s key type\n", key_type_id_resolver.name); + printk(KERN_NOTICE "NFS: Registering the %s key type\n", + key_type_id_resolver.name); cred = prepare_kernel_cred(NULL); if (!cred) diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index b4f8f9624afa..9a058b8c2888 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -575,7 +575,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, goto out_err_free; fl->fh_array[i]->size = be32_to_cpup(p++); if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { - printk(KERN_ERR "Too big fh %d received %d\n", + printk(KERN_ERR "NFS: Too big fh %d received %d\n", i, fl->fh_array[i]->size); goto out_err_free; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index aaaf98ba8956..34e525549f85 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5406,7 +5406,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session) if (status) printk(KERN_WARNING - "Got error %d from the server on DESTROY_SESSION. " + "NFS: Got error %d from the server on DESTROY_SESSION. " "Session has been destroyed regardless...\n", status); dprintk("<-- nfs4_proc_destroy_session\n"); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 7d098604802c..b43a65d7faca 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1764,7 +1764,7 @@ static void nfs4_state_manager(struct nfs_client *clp) } while (atomic_read(&clp->cl_count) > 1); return; out_error: - printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s" + printk(KERN_WARNING "NFS: state manager failed on NFSv4 server %s" " with error %d\n", clp->cl_hostname, -status); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 5d1caac0656d..2adcc979e5df 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1023,7 +1023,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const * Now we backfill the bitmap and the attribute buffer length. 
*/ if (len != ((char *)p - (char *)q) + 4) { - printk(KERN_ERR "nfs: Attr length error, %u != %Zu\n", + printk(KERN_ERR "NFS: Attr length error, %u != %Zu\n", len, ((char *)p - (char *)q) + 4); BUG(); } -- cgit From a030889a01d1bea921e1a7501010b7b891d2abd2 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Thu, 26 Jan 2012 13:32:23 -0500 Subject: NFS: start printks w/ NFS: even if __func__ shown This patch addresses printks that have some context to show that they are from fs/nfs/, but for the sake of consistency now start with NFS: Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayoutdev.c | 2 +- fs/nfs/blocklayout/blocklayoutdm.c | 2 +- fs/nfs/callback.c | 2 +- fs/nfs/callback_xdr.c | 6 +++--- fs/nfs/idmap.c | 6 ++++-- fs/nfs/inode.c | 2 +- fs/nfs/nfs4filelayout.c | 6 ++++-- fs/nfs/nfs4filelayoutdev.c | 10 +++++----- fs/nfs/nfs4proc.c | 7 ++++--- fs/nfs/nfs4state.c | 12 ++++++------ fs/nfs/nfs4xdr.c | 10 +++++----- fs/nfs/objlayout/objio_osd.c | 6 +++--- fs/nfs/objlayout/objlayout.c | 6 +++--- fs/nfs/pnfs.c | 14 +++++++------- 14 files changed, 48 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index 94ed978860c0..b48f782a94ad 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -46,7 +46,7 @@ static int decode_sector_number(__be32 **rp, sector_t *sp) *rp = xdr_decode_hyper(*rp, &s); if (s & 0x1ff) { - printk(KERN_WARNING "%s: sector not aligned\n", __func__); + printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__); return -1; } *sp = s >> SECTOR_SHIFT; diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 970490f556de..a0f588fa49c1 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -91,7 +91,7 @@ static void nfs4_blk_metadev_release(struct pnfs_block_dev *bdev) dprintk("%s Releasing\n", __func__); rv = nfs4_blkdev_put(bdev->bm_mdev); if (rv) - printk(KERN_ERR "%s nfs4_blkdev_put returns %d\n", + printk(KERN_ERR "NFS: %s nfs4_blkdev_put returns %d\n", __func__, rv); dev_remove(bdev->net, bdev->bm_mdev->bd_dev); diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index d81040a7efc4..4a122ae71762 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -85,7 +85,7 @@ nfs4_callback_svc(void *vrqstp) } if (err < 0) { if (err != preverr) { - printk(KERN_WARNING "%s: unexpected error " + printk(KERN_WARNING "NFS: %s: unexpected error " "from svc_recv (%d)\n", __func__, err); preverr = err; } diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index d50b2742f23b..2f45aa717423 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -73,7 +73,7 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) p = xdr_inline_decode(xdr, nbytes); if (unlikely(p == NULL)) - printk(KERN_WARNING "NFSv4 callback reply buffer overflowed!\n"); + printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n"); return p; } @@ -155,7 +155,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound return status; /* We do not like overly long tags! 
*/ if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) { - printk("NFSv4 CALLBACK %s: client sent tag of length %u\n", + printk("NFS: NFSv4 CALLBACK %s: client sent tag of length %u\n", __func__, hdr->taglen); return htonl(NFS4ERR_RESOURCE); } @@ -167,7 +167,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound if (hdr->minorversion <= 1) { hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */ } else { - printk(KERN_WARNING "%s: NFSv4 server callback with " + printk(KERN_WARNING "NFS: %s: NFSv4 server callback with " "illegal minor version %u!\n", __func__, hdr->minorversion); return htonl(NFS4ERR_MINOR_VERS_MISMATCH); diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 91b1e2a82146..62264e0b1ddb 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -558,11 +558,13 @@ static int __rpc_pipefs_event(struct nfs_client *clp, unsigned long event, * here. */ if (rpc_rmdir(parent)) - printk(KERN_ERR "%s: failed to remove clnt dir!\n", __func__); + printk(KERN_ERR "NFS: %s: failed to remove " + "clnt dir!\n", __func__); } break; default: - printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); + printk(KERN_ERR "NFS: %s: unknown event: %ld\n", __func__, + event); return -ENOTSUPP; } return err; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d2c760e193f4..028464bcbe0e 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1407,7 +1407,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) /* * Big trouble! The inode has become a different object. */ - printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n", + printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n", __func__, inode->i_ino, inode->i_mode, fattr->mode); out_err: /* diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 9a058b8c2888..79be7acc9bae 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -367,7 +367,8 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync) idx = nfs4_fl_calc_ds_index(lseg, j); ds = nfs4_fl_prepare_ds(lseg, idx); if (!ds) { - printk(KERN_ERR "%s: prepare_ds failed, use MDS\n", __func__); + printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n", + __func__); set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags); set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); return PNFS_NOT_ATTEMPTED; @@ -797,7 +798,8 @@ static int filelayout_initiate_commit(struct nfs_write_data *data, int how) idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); ds = nfs4_fl_prepare_ds(lseg, idx); if (!ds) { - printk(KERN_ERR "%s: prepare_ds failed, use MDS\n", __func__); + printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n", + __func__); set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags); set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); prepare_to_resend_writes(data); diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index 6eb59b044bfc..80fce8dade2e 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -554,7 +554,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) cnt = be32_to_cpup(p); dprintk("%s stripe count %d\n", __func__, cnt); if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) { - printk(KERN_WARNING "%s: stripe count %d greater than " + printk(KERN_WARNING "NFS: %s: stripe count %d greater than " "supported maximum %d\n", __func__, cnt, NFS4_PNFS_MAX_STRIPE_CNT); goto out_err_free_scratch; @@ -585,7 +585,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) num = 
be32_to_cpup(p); dprintk("%s ds_num %u\n", __func__, num); if (num > NFS4_PNFS_MAX_MULTI_CNT) { - printk(KERN_WARNING "%s: multipath count %d greater than " + printk(KERN_WARNING "NFS: %s: multipath count %d greater than " "supported maximum %d\n", __func__, num, NFS4_PNFS_MAX_MULTI_CNT); goto out_err_free_stripe_indices; @@ -593,7 +593,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) /* validate stripe indices are all < num */ if (max_stripe_index >= num) { - printk(KERN_WARNING "%s: stripe index %u >= num ds %u\n", + printk(KERN_WARNING "NFS: %s: stripe index %u >= num ds %u\n", __func__, max_stripe_index, num); goto out_err_free_stripe_indices; } @@ -687,7 +687,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_fl new = decode_device(inode, dev, gfp_flags); if (!new) { - printk(KERN_WARNING "%s: Could not decode or add device\n", + printk(KERN_WARNING "NFS: %s: Could not decode or add device\n", __func__); return NULL; } @@ -836,7 +836,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; if (ds == NULL) { - printk(KERN_ERR "%s: No data server for offset index %d\n", + printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", __func__, ds_idx); return NULL; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 34e525549f85..482ed97189c9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4584,7 +4584,8 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock /* Note: we always want to sleep here! */ request->fl_flags = fl_flags | FL_SLEEP; if (do_vfs_lock(request->fl_file, request) < 0) - printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); + printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " + "manager!\n", __func__); out_unlock: up_read(&nfsi->rwsem); out: @@ -4664,8 +4665,8 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); switch (err) { default: - printk(KERN_ERR "%s: unhandled error %d.\n", - __func__, err); + printk(KERN_ERR "NFS: %s: unhandled error " + "%d.\n", __func__, err); case 0: case -ESTALE: goto out; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index b43a65d7faca..4e37818a34ef 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1138,8 +1138,8 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: goto out; default: - printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n", - __func__, status); + printk(KERN_ERR "NFS: %s: unhandled error %d. " + "Zeroing state\n", __func__, status); case -ENOMEM: case -NFS4ERR_DENIED: case -NFS4ERR_RECLAIM_BAD: @@ -1185,8 +1185,8 @@ restart: spin_lock(&state->state_lock); list_for_each_entry(lock, &state->lock_states, ls_locks) { if (!(lock->ls_flags & NFS_LOCK_INITIALIZED)) - printk("%s: Lock reclaim failed!\n", - __func__); + printk("NFS: %s: Lock reclaim " + "failed!\n", __func__); } spin_unlock(&state->state_lock); nfs4_put_open_state(state); @@ -1195,8 +1195,8 @@ restart: } switch (status) { default: - printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n", - __func__, status); + printk(KERN_ERR "NFS: %s: unhandled error %d. 
" + "Zeroing state\n", __func__, status); case -ENOENT: case -ENOMEM: case -ESTALE: diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 2adcc979e5df..ae7834366712 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4468,8 +4468,8 @@ static int decode_first_pnfs_layout_type(struct xdr_stream *xdr, return 0; } if (num > 1) - printk(KERN_INFO "%s: Warning: Multiple pNFS layout drivers " - "per filesystem not supported\n", __func__); + printk(KERN_INFO "NFS: %s: Warning: Multiple pNFS layout " + "drivers per filesystem not supported\n", __func__); /* Decode and set first layout type, move xdr->p past unused types */ p = xdr_inline_decode(xdr, num * 4); @@ -5290,8 +5290,8 @@ static int decode_chan_attrs(struct xdr_stream *xdr, attrs->max_reqs = be32_to_cpup(p++); nr_attrs = be32_to_cpup(p); if (unlikely(nr_attrs > 1)) { - printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n", - __func__, nr_attrs); + printk(KERN_WARNING "NFS: %s: Invalid rdma channel attrs " + "count %u\n", __func__, nr_attrs); return -EINVAL; } if (nr_attrs == 1) { @@ -5448,7 +5448,7 @@ static int decode_getdevicelist(struct xdr_stream *xdr, dprintk("%s: num_dev %d\n", __func__, res->num_devs); if (res->num_devs > NFS4_PNFS_GETDEVLIST_MAXNUM) { - printk(KERN_ERR "%s too many result dev_num %u\n", + printk(KERN_ERR "NFS: %s too many result dev_num %u\n", __func__, res->num_devs); return -EIO; } diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 55d01280a609..405a62bdb9b4 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -582,10 +582,10 @@ objlayout_init(void) if (ret) printk(KERN_INFO - "%s: Registering OSD pNFS Layout Driver failed: error=%d\n", + "NFS: %s: Registering OSD pNFS Layout Driver failed: error=%d\n", __func__, ret); else - printk(KERN_INFO "%s: Registered OSD pNFS Layout Driver\n", + printk(KERN_INFO "NFS: %s: Registered OSD pNFS Layout Driver\n", __func__); return ret; } @@ -594,7 +594,7 @@ static void __exit objlayout_exit(void) { pnfs_unregister_layoutdriver(&objlayout_type); - printk(KERN_INFO "%s: Unregistered OSD pNFS Layout Driver\n", + printk(KERN_INFO "NFS: %s: Unregistered OSD pNFS Layout Driver\n", __func__); } diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index b3c29039f5b8..2bd185277adb 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c @@ -490,9 +490,9 @@ encode_accumulated_error(struct objlayout *objlay, __be32 *p) if (!ioerr->oer_errno) continue; - printk(KERN_ERR "%s: err[%d]: errno=%d is_write=%d " - "dev(%llx:%llx) par=0x%llx obj=0x%llx " - "offset=0x%llx length=0x%llx\n", + printk(KERN_ERR "NFS: %s: err[%d]: errno=%d " + "is_write=%d dev(%llx:%llx) par=0x%llx " + "obj=0x%llx offset=0x%llx length=0x%llx\n", __func__, i, ioerr->oer_errno, ioerr->oer_iswrite, _DEVID_LO(&ioerr->oer_component.oid_device_id), diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 92927878c2f8..a53421604bc4 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -101,8 +101,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, goto out_no_driver; if (!(server->nfs_client->cl_exchange_flags & (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { - printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__, - id, server->nfs_client->cl_exchange_flags); + printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n", + __func__, id, server->nfs_client->cl_exchange_flags); goto out_no_driver; } ld_type = find_pnfs_driver(id); @@ -122,8 +122,8 @@ 
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, server->pnfs_curr_ld = ld_type; if (ld_type->set_layoutdriver && ld_type->set_layoutdriver(server, mntfh)) { - printk(KERN_ERR "%s: Error initializing pNFS layout driver %u.\n", - __func__, id); + printk(KERN_ERR "NFS: %s: Error initializing pNFS layout " + "driver %u.\n", __func__, id); module_put(ld_type->owner); goto out_no_driver; } @@ -143,11 +143,11 @@ pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type) struct pnfs_layoutdriver_type *tmp; if (ld_type->id == 0) { - printk(KERN_ERR "%s id 0 is reserved\n", __func__); + printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__); return status; } if (!ld_type->alloc_lseg || !ld_type->free_lseg) { - printk(KERN_ERR "%s Layout driver must provide " + printk(KERN_ERR "NFS: %s Layout driver must provide " "alloc_lseg and free_lseg.\n", __func__); return status; } @@ -160,7 +160,7 @@ pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type) dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id, ld_type->name); } else { - printk(KERN_ERR "%s Module with id %d already loaded!\n", + printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n", __func__, ld_type->id); } spin_unlock(&pnfs_spinlock); -- cgit From 2d3fe01c36a9b881fae89c5bdf4085a4d7d53ae1 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 3 Feb 2012 15:45:40 -0500 Subject: NFS: Fix comparison between DS address lists data_server_cache entries should only be treated as the same if the address list hasn't changed. A new entry will be made when an MDS changes an address list (as seen by GETDEVINFO). The old entry will be freed once all references are gone. Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayoutdev.c | 71 ++++++++++++++++------------------------------ 1 file changed, 24 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index 80fce8dade2e..41677f0bf792 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -108,58 +108,40 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) return false; } -/* - * Lookup DS by addresses. The first matching address returns true. - * nfs4_ds_cache_lock is held - */ -static struct nfs4_pnfs_ds * -_data_server_lookup_locked(struct list_head *dsaddrs) +bool +_same_data_server_addrs_locked(const struct list_head *dsaddrs1, + const struct list_head *dsaddrs2) { - struct nfs4_pnfs_ds *ds; struct nfs4_pnfs_ds_addr *da1, *da2; - list_for_each_entry(da1, dsaddrs, da_node) { - list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) { - list_for_each_entry(da2, &ds->ds_addrs, da_node) { - if (same_sockaddr( - (struct sockaddr *)&da1->da_addr, - (struct sockaddr *)&da2->da_addr)) - return ds; - } - } + /* step through both lists, comparing as we go */ + for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), + da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); + da1 != NULL && da2 != NULL; + da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), + da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { + if (!same_sockaddr((struct sockaddr *)&da1->da_addr, + (struct sockaddr *)&da2->da_addr)) + return false; } - return NULL; + if (da1 == NULL && da2 == NULL) + return true; + + return false; } /* - * Compare two lists of addresses. + * Lookup DS by addresses. 
nfs4_ds_cache_lock is held */ -static bool -_data_server_match_all_addrs_locked(struct list_head *dsaddrs1, - struct list_head *dsaddrs2) +static struct nfs4_pnfs_ds * +_data_server_lookup_locked(const struct list_head *dsaddrs) { - struct nfs4_pnfs_ds_addr *da1, *da2; - size_t count1 = 0, - count2 = 0; - - list_for_each_entry(da1, dsaddrs1, da_node) - count1++; - - list_for_each_entry(da2, dsaddrs2, da_node) { - bool found = false; - count2++; - list_for_each_entry(da1, dsaddrs1, da_node) { - if (same_sockaddr((struct sockaddr *)&da1->da_addr, - (struct sockaddr *)&da2->da_addr)) { - found = true; - break; - } - } - if (!found) - return false; - } + struct nfs4_pnfs_ds *ds; - return (count1 == count2); + list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) + if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) + return ds; + return NULL; } /* @@ -356,11 +338,6 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) dprintk("%s add new data server %s\n", __func__, ds->ds_remotestr); } else { - if (!_data_server_match_all_addrs_locked(&tmp_ds->ds_addrs, - dsaddrs)) { - dprintk("%s: multipath address mismatch: %s != %s", - __func__, tmp_ds->ds_remotestr, remotestr); - } kfree(remotestr); kfree(ds); atomic_inc(&tmp_ds->ds_count); -- cgit From e6499c6f4b5f56a16f8b8ef60529c1da28b13aea Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Thu, 26 Jan 2012 16:54:23 -0500 Subject: NFS: Fall back on old idmapper if request_key() fails This patch removes the CONFIG_NFS_USE_NEW_IDMAPPER compile option. First, the idmapper will attempt to map the id using /sbin/request-key and nfsidmap. If this fails (if /etc/request-key.conf is not configured properly) then the idmapper will call the legacy code to perform the mapping. I left a comment stating where the legacy code begins to make it easier for somebody to remove in the future. Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/Kconfig | 11 ------- fs/nfs/idmap.c | 91 +++++++++++++++++++++++---------------------------------- fs/nfs/sysctl.c | 2 -- 3 files changed, 37 insertions(+), 67 deletions(-) (limited to 'fs') diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index dbcd82126aed..021d2cf6938a 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -132,14 +132,3 @@ config NFS_USE_KERNEL_DNS select DNS_RESOLVER select KEYS default y - -config NFS_USE_NEW_IDMAPPER - bool "Use the new idmapper upcall routine" - depends on NFS_V4 && KEYS - help - Say Y here if you want NFS to use the new idmapper upcall functions. - You will need /sbin/request-key (usually provided by the keyutils - package). For details, read - . - - If you are unsure, say N. 
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 62264e0b1ddb..e0ecd5a7e19a 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -142,8 +142,6 @@ static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen) return snprintf(buf, buflen, "%u", id); } -#ifdef CONFIG_NFS_USE_NEW_IDMAPPER - #include #include #include @@ -169,7 +167,7 @@ struct key_type key_type_id_resolver = { .read = user_read, }; -int nfs_idmap_init(void) +static int nfs_idmap_init_keyring(void) { struct cred *cred; struct key *keyring; @@ -211,7 +209,7 @@ failed_put_cred: return ret; } -void nfs_idmap_quit(void) +static void nfs_idmap_quit_keyring(void) { key_revoke(id_resolver_cache->thread_keyring); unregister_key_type(&key_type_id_resolver); @@ -328,43 +326,7 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, return ret; } -int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) -{ - if (nfs_map_string_to_numeric(name, namelen, uid)) - return 0; - return nfs_idmap_lookup_id(name, namelen, "uid", uid); -} - -int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *gid) -{ - if (nfs_map_string_to_numeric(name, namelen, gid)) - return 0; - return nfs_idmap_lookup_id(name, namelen, "gid", gid); -} - -int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen) -{ - int ret = -EINVAL; - - if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) - ret = nfs_idmap_lookup_name(uid, "user", buf, buflen); - if (ret < 0) - ret = nfs_map_numeric_to_string(uid, buf, buflen); - return ret; -} -int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, size_t buflen) -{ - int ret = -EINVAL; - - if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) - ret = nfs_idmap_lookup_name(gid, "group", buf, buflen); - if (ret < 0) - ret = nfs_map_numeric_to_string(gid, buf, buflen); - return ret; -} - -#else /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */ - +/* idmap classic begins here */ #include #include #include @@ -600,12 +562,21 @@ static struct notifier_block nfs_idmap_block = { int nfs_idmap_init(void) { - return rpc_pipefs_notifier_register(&nfs_idmap_block); + int ret; + ret = nfs_idmap_init_keyring(); + if (ret != 0) + goto out; + ret = rpc_pipefs_notifier_register(&nfs_idmap_block); + if (ret != 0) + nfs_idmap_quit_keyring(); +out: + return ret; } void nfs_idmap_quit(void) { rpc_pipefs_notifier_unregister(&nfs_idmap_block); + nfs_idmap_quit_keyring(); } /* @@ -930,19 +901,27 @@ static unsigned int fnvhash32(const void *buf, size_t buflen) int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) { struct idmap *idmap = server->nfs_client->cl_idmap; + int ret = -EINVAL; if (nfs_map_string_to_numeric(name, namelen, uid)) return 0; - return nfs_idmap_id(idmap, &idmap->idmap_user_hash, name, namelen, uid); + ret = nfs_idmap_lookup_id(name, namelen, "uid", uid); + if (ret < 0) + ret = nfs_idmap_id(idmap, &idmap->idmap_user_hash, name, namelen, uid); + return ret; } -int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) +int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *gid) { struct idmap *idmap = server->nfs_client->cl_idmap; + int ret = -EINVAL; - if (nfs_map_string_to_numeric(name, namelen, uid)) + if (nfs_map_string_to_numeric(name, namelen, gid)) return 0; - return nfs_idmap_id(idmap, &idmap->idmap_group_hash, name, namelen, uid); + ret = 
nfs_idmap_lookup_id(name, namelen, "gid", gid); + if (ret < 0) + ret = nfs_idmap_id(idmap, &idmap->idmap_group_hash, name, namelen, gid); + return ret; } int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen) @@ -950,22 +929,26 @@ int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, s struct idmap *idmap = server->nfs_client->cl_idmap; int ret = -EINVAL; - if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) - ret = nfs_idmap_name(idmap, &idmap->idmap_user_hash, uid, buf); + if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) { + ret = nfs_idmap_lookup_name(uid, "user", buf, buflen); + if (ret < 0) + ret = nfs_idmap_name(idmap, &idmap->idmap_user_hash, uid, buf); + } if (ret < 0) ret = nfs_map_numeric_to_string(uid, buf, buflen); return ret; } -int nfs_map_gid_to_group(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen) +int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, size_t buflen) { struct idmap *idmap = server->nfs_client->cl_idmap; int ret = -EINVAL; - if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) - ret = nfs_idmap_name(idmap, &idmap->idmap_group_hash, uid, buf); + if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) { + ret = nfs_idmap_lookup_name(gid, "group", buf, buflen); + if (ret < 0) + ret = nfs_idmap_name(idmap, &idmap->idmap_group_hash, gid, buf); + } if (ret < 0) - ret = nfs_map_numeric_to_string(uid, buf, buflen); + ret = nfs_map_numeric_to_string(gid, buf, buflen); return ret; } - -#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */ diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index 978aaeb8a093..ad4d2e787b20 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -32,7 +32,6 @@ static ctl_table nfs_cb_sysctls[] = { .extra1 = (int *)&nfs_set_port_min, .extra2 = (int *)&nfs_set_port_max, }, -#ifndef CONFIG_NFS_USE_NEW_IDMAPPER { .procname = "idmap_cache_timeout", .data = &nfs_idmap_cache_timeout, @@ -40,7 +39,6 @@ static ctl_table nfs_cb_sysctls[] = { .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, -#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */ #endif { .procname = "nfs_mountpoint_timeout", -- cgit From 3cd0f37a2cc9e4d6188df10041a2441eaa41d991 Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Thu, 26 Jan 2012 16:54:24 -0500 Subject: NFS: Keep idmapper include files in one place Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 66 +++++++++++++++++++++++++++------------------------------- 1 file changed, 31 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index e0ecd5a7e19a..83f7d42d5c76 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -39,6 +39,37 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* include files needed by legacy idmapper */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nfs4_fs.h" +#include "internal.h" + +#define NFS_UINT_MAXLEN 11 +#define IDMAP_HASH_SZ 128 + +/* Default cache timeout is 10 minutes */ +unsigned int nfs_idmap_cache_timeout = 600 * HZ; +const struct cred *id_resolver_cache; + /** * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields @@ -142,21 +173,6 @@ static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen) return snprintf(buf, buflen, "%u", id); } -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define NFS_UINT_MAXLEN 11 - -const struct cred 
*id_resolver_cache; - struct key_type key_type_id_resolver = { .name = "id_resolver", .instantiate = user_instantiate, @@ -327,26 +343,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, } /* idmap classic begins here */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "nfs4_fs.h" -#include "internal.h" - -#define IDMAP_HASH_SZ 128 - -/* Default cache timeout is 10 minutes */ -unsigned int nfs_idmap_cache_timeout = 600 * HZ; - static int param_set_idmap_timeout(const char *val, struct kernel_param *kp) { char *endp; -- cgit From 6b13168b36b6a7f603d962c232f1f2f325705832 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 23 Jan 2012 17:26:05 +0000 Subject: NFS: make nfs_client_list per net ns This patch splits global list of NFS clients into per-net-ns array of lists. This looks more strict and clearer. BTW, this patch also makes "/proc/fs/nfsfs/servers" entry content depends on /proc mount owner pid namespace. See below for details. NOTE: few words about how was /proc/fs/nfsfs/ entries content show per network namespace done. This is a little bit tricky and not the best is could be. But it's cheap (proper fix for /proc conteinerization is a hard nut to crack). The idea is simple: take proper network namespace from pid namespace child reaper nsproxy of /proc/ mount creator. This actually means, that if there are 2 containers with different net namespace sharing pid namespace, then read of /proc/fs/nfsfs/ entries will always return content, taken from net namespace of pid namespace creator task (and thus second namespace set wil be unvisible). Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 38 +++++++++++++++++++++++++++----------- fs/nfs/idmap.c | 5 ++--- fs/nfs/inode.c | 1 + fs/nfs/internal.h | 2 +- fs/nfs/netns.h | 1 + 5 files changed, 32 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 98af1cb28ee3..058eb9bcfa9d 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -39,6 +39,8 @@ #include #include #include +#include +#include #include @@ -49,11 +51,11 @@ #include "internal.h" #include "fscache.h" #include "pnfs.h" +#include "netns.h" #define NFSDBG_FACILITY NFSDBG_CLIENT DEFINE_SPINLOCK(nfs_client_lock); -LIST_HEAD(nfs_client_list); static LIST_HEAD(nfs_volume_list); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); #ifdef CONFIG_NFS_V4 @@ -464,8 +466,9 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat { struct nfs_client *clp; const struct sockaddr *sap = data->addr; + struct nfs_net *nn = net_generic(data->net, nfs_net_id); - list_for_each_entry(clp, &nfs_client_list, cl_share_link) { + list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; /* Don't match clients that failed to initialise properly */ if (clp->cl_cons_state < 0) @@ -483,9 +486,6 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat /* Match the full socket address */ if (!nfs_sockaddr_cmp(sap, clap)) continue; - /* Match network namespace */ - if (clp->net != data->net) - continue; atomic_inc(&clp->cl_count); return clp; @@ -506,6 +506,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, { struct nfs_client *clp, *new = NULL; int error; + struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id); dprintk("--> nfs_get_client(%s,v%u)\n", cl_init->hostname ?: "", 
cl_init->rpc_ops->version); @@ -531,7 +532,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, /* install a new client and return with it unready */ install_client: clp = new; - list_add(&clp->cl_share_link, &nfs_client_list); + list_add(&clp->cl_share_link, &nn->nfs_client_list); spin_unlock(&nfs_client_lock); error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr, @@ -1227,9 +1228,10 @@ nfs4_find_client_sessionid(const struct sockaddr *addr, struct nfs4_sessionid *sid) { struct nfs_client *clp; + struct nfs_net *nn = net_generic(&init_net, nfs_net_id); spin_lock(&nfs_client_lock); - list_for_each_entry(clp, &nfs_client_list, cl_share_link) { + list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { if (nfs4_cb_match_client(addr, clp, 1) == false) continue; @@ -1757,6 +1759,13 @@ out_free_server: return ERR_PTR(error); } +void nfs_clients_init(struct net *net) +{ + struct nfs_net *nn = net_generic(net, nfs_net_id); + + INIT_LIST_HEAD(&nn->nfs_client_list); +} + #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_fs_nfs; @@ -1810,13 +1819,15 @@ static int nfs_server_list_open(struct inode *inode, struct file *file) { struct seq_file *m; int ret; + struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info; + struct net *net = pid_ns->child_reaper->nsproxy->net_ns; ret = seq_open(file, &nfs_server_list_ops); if (ret < 0) return ret; m = file->private_data; - m->private = PDE(inode)->data; + m->private = net; return 0; } @@ -1826,9 +1837,11 @@ static int nfs_server_list_open(struct inode *inode, struct file *file) */ static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) { + struct nfs_net *nn = net_generic(m->private, nfs_net_id); + /* lock the list against modification */ spin_lock(&nfs_client_lock); - return seq_list_start_head(&nfs_client_list, *_pos); + return seq_list_start_head(&nn->nfs_client_list, *_pos); } /* @@ -1836,7 +1849,9 @@ static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) */ static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos) { - return seq_list_next(v, &nfs_client_list, pos); + struct nfs_net *nn = net_generic(p->private, nfs_net_id); + + return seq_list_next(v, &nn->nfs_client_list, pos); } /* @@ -1853,9 +1868,10 @@ static void nfs_server_list_stop(struct seq_file *p, void *v) static int nfs_server_list_show(struct seq_file *m, void *v) { struct nfs_client *clp; + struct nfs_net *nn = net_generic(m->private, nfs_net_id); /* display header on line 1 */ - if (v == &nfs_client_list) { + if (v == &nn->nfs_client_list) { seq_puts(m, "NV SERVER PORT USE HOSTNAME\n"); return 0; } diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 83f7d42d5c76..2f78f0ce2664 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -532,13 +532,12 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct super_block *sb = ptr; + struct nfs_net *nn = net_generic(sb->s_fs_info, nfs_net_id); struct nfs_client *clp; int error = 0; spin_lock(&nfs_client_lock); - list_for_each_entry(clp, &nfs_client_list, cl_share_link) { - if (clp->net != sb->s_fs_info) - continue; + list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { if (clp->rpc_ops != &nfs_v4_clientops) continue; error = __rpc_pipefs_event(clp, event, sb); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 028464bcbe0e..0365b84cc2c7 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1558,6 +1558,7 @@ EXPORT_SYMBOL_GPL(nfs_net_id); static int nfs_net_init(struct net *net) { + nfs_clients_init(net); return 
nfs_dns_resolver_cache_init(net); } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index cdb121d3c6f4..a9ae8069fff9 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -146,6 +146,7 @@ extern void nfs_umount(const struct nfs_mount_request *info); /* client.c */ extern const struct rpc_program nfs_program; +extern void nfs_clients_init(struct net *net); extern void nfs_cleanup_cb_ident_idr(void); extern void nfs_put_client(struct nfs_client *); @@ -183,7 +184,6 @@ static inline void nfs_fs_proc_exit(void) #endif #ifdef CONFIG_NFS_V4 extern spinlock_t nfs_client_lock; -extern struct list_head nfs_client_list; #endif /* nfs4namespace.c */ diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index 39ae4cad5b4b..feb33c3f9a56 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -7,6 +7,7 @@ struct nfs_net { struct cache_detail *nfs_dns_resolve; struct rpc_pipe *bl_device_pipe; + struct list_head nfs_client_list; }; extern int nfs_net_id; -- cgit From c25d32b26361ce0814fef2281f164866c18c8692 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 23 Jan 2012 17:26:14 +0000 Subject: NFS: make nfs_volume_list per net ns This patch splits the global list of NFS servers into a per-net-ns array of lists. This looks stricter and clearer. BTW, this patch also makes the "/proc/fs/nfsfs/volumes" content depend on the pid namespace of the /proc mount owner. See below for details. NOTE: a few words about how showing the /proc/fs/nfsfs/ entries per network namespace was done. This is a little bit tricky and not the best it could be. But it's cheap (a proper fix for /proc containerization is a hard nut to crack). The idea is simple: take the network namespace from the pid namespace child reaper's nsproxy of the /proc mount creator. This actually means that if there are 2 containers with different net namespaces sharing a pid namespace, then reading the /proc/fs/nfsfs/ entries will always return content taken from the net namespace of the pid namespace creator task (and thus the second namespace set will be invisible).
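For readers who have not met the per-net-ns pattern before, the mechanism both of these list-splitting patches rely on is the kernel's generic per-namespace storage: a module reserves a slot through pernet_operations, and later resolves its private state from a struct net pointer with net_generic(). A minimal sketch, using purely illustrative demo_* names rather than the actual fs/nfs symbols, looks like this:

/* Sketch only: illustrative per-network-namespace state; demo_* names are made up. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct demo_net {                            /* one instance per struct net */
        struct list_head client_list;        /* namespace-local client list */
        struct list_head volume_list;        /* namespace-local volume list */
        spinlock_t client_lock;              /* protects only this namespace's lists */
};

static int demo_net_id;                      /* slot handed out by the pernet core */

static __net_init int demo_net_init(struct net *net)
{
        struct demo_net *dn = net_generic(net, demo_net_id);

        INIT_LIST_HEAD(&dn->client_list);
        INIT_LIST_HEAD(&dn->volume_list);
        spin_lock_init(&dn->client_lock);
        return 0;
}

static __net_exit void demo_net_exit(struct net *net)
{
        /* namespace teardown: the lists above would be walked and cleaned up here */
}

static struct pernet_operations demo_net_ops = {
        .init = demo_net_init,
        .exit = demo_net_exit,
        .id   = &demo_net_id,
        .size = sizeof(struct demo_net),     /* allocated per namespace by the core */
};

/* registered once at module init: register_pernet_subsys(&demo_net_ops); */

The NFS patches above follow the same shape: nfs_net_id names the slot, and struct nfs_net in fs/nfs/netns.h carries the per-namespace lists.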
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 20 ++++++++++++++------ fs/nfs/netns.h | 1 + 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 058eb9bcfa9d..d58e8386e6bc 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -56,7 +56,6 @@ #define NFSDBG_FACILITY NFSDBG_CLIENT DEFINE_SPINLOCK(nfs_client_lock); -static LIST_HEAD(nfs_volume_list); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); #ifdef CONFIG_NFS_V4 static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */ @@ -1036,10 +1035,11 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve static void nfs_server_insert_lists(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; + struct nfs_net *nn = net_generic(clp->net, nfs_net_id); spin_lock(&nfs_client_lock); list_add_tail_rcu(&server->client_link, &clp->cl_superblocks); - list_add_tail(&server->master_link, &nfs_volume_list); + list_add_tail(&server->master_link, &nn->nfs_volume_list); clear_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state); spin_unlock(&nfs_client_lock); @@ -1764,6 +1764,7 @@ void nfs_clients_init(struct net *net) struct nfs_net *nn = net_generic(net, nfs_net_id); INIT_LIST_HEAD(&nn->nfs_client_list); + INIT_LIST_HEAD(&nn->nfs_volume_list); } #ifdef CONFIG_PROC_FS @@ -1900,13 +1901,15 @@ static int nfs_volume_list_open(struct inode *inode, struct file *file) { struct seq_file *m; int ret; + struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info; + struct net *net = pid_ns->child_reaper->nsproxy->net_ns; ret = seq_open(file, &nfs_volume_list_ops); if (ret < 0) return ret; m = file->private_data; - m->private = PDE(inode)->data; + m->private = net; return 0; } @@ -1916,9 +1919,11 @@ static int nfs_volume_list_open(struct inode *inode, struct file *file) */ static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) { + struct nfs_net *nn = net_generic(m->private, nfs_net_id); + /* lock the list against modification */ spin_lock(&nfs_client_lock); - return seq_list_start_head(&nfs_volume_list, *_pos); + return seq_list_start_head(&nn->nfs_volume_list, *_pos); } /* @@ -1926,7 +1931,9 @@ static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) */ static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos) { - return seq_list_next(v, &nfs_volume_list, pos); + struct nfs_net *nn = net_generic(p->private, nfs_net_id); + + return seq_list_next(v, &nn->nfs_volume_list, pos); } /* @@ -1945,9 +1952,10 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) struct nfs_server *server; struct nfs_client *clp; char dev[8], fsid[17]; + struct nfs_net *nn = net_generic(m->private, nfs_net_id); /* display header on line 1 */ - if (v == &nfs_volume_list) { + if (v == &nn->nfs_volume_list) { seq_puts(m, "NV SERVER PORT DEV FSID FSC\n"); return 0; } diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index feb33c3f9a56..0fbd4e017d27 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -8,6 +8,7 @@ struct nfs_net { struct cache_detail *nfs_dns_resolve; struct rpc_pipe *bl_device_pipe; struct list_head nfs_client_list; + struct list_head nfs_volume_list; }; extern int nfs_net_id; -- cgit From 28cd1b3f262dba56b5e335ba668e342d530f6129 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 23 Jan 2012 17:26:22 +0000 Subject: NFS: make cb_ident_idr per net ns This patch makes ID's infrastructure network namespace aware. 
This was done mainly because of nfs_client_lock, which is desired to be per network namespace, but protects NFS clients ID's. NOTE: NFS client's net pointer have to be set prior to ID initialization, proper assignment was moved. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/callback_xdr.c | 2 +- fs/nfs/client.c | 28 ++++++++++++++++++---------- fs/nfs/inode.c | 2 +- fs/nfs/internal.h | 4 ++-- fs/nfs/netns.h | 3 +++ 5 files changed, 25 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 2f45aa717423..e14af46bd2c6 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -876,7 +876,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r return rpc_garbage_args; if (hdr_arg.minorversion == 0) { - cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident); + cps.clp = nfs4_find_client_ident(rqstp->rq_xprt->xpt_net, hdr_arg.cb_ident); if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) return rpc_drop_reply; } diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d58e8386e6bc..f51b2795ce07 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -58,7 +58,6 @@ DEFINE_SPINLOCK(nfs_client_lock); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); #ifdef CONFIG_NFS_V4 -static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */ /* * Get a unique NFSv4.0 callback identifier which will be used @@ -67,14 +66,15 @@ static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) { int ret = 0; + struct nfs_net *nn = net_generic(clp->net, nfs_net_id); if (clp->rpc_ops->version != 4 || minorversion != 0) return ret; retry: - if (!idr_pre_get(&cb_ident_idr, GFP_KERNEL)) + if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL)) return -ENOMEM; spin_lock(&nfs_client_lock); - ret = idr_get_new(&cb_ident_idr, clp, &clp->cl_cb_ident); + ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident); spin_unlock(&nfs_client_lock); if (ret == -EAGAIN) goto retry; @@ -173,6 +173,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_ clp->cl_rpcclient = ERR_PTR(-EINVAL); clp->cl_proto = cl_init->proto; + clp->net = cl_init->net; #ifdef CONFIG_NFS_V4 err = nfs_get_cb_ident_idr(clp, cl_init->minorversion); @@ -191,7 +192,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_ if (!IS_ERR(cred)) clp->cl_machine_cred = cred; nfs_fscache_get_client_cookie(clp); - clp->net = cl_init->net; return clp; @@ -236,16 +236,20 @@ static void nfs4_shutdown_client(struct nfs_client *clp) } /* idr_remove_all is not needed as all id's are removed by nfs_put_client */ -void nfs_cleanup_cb_ident_idr(void) +void nfs_cleanup_cb_ident_idr(struct net *net) { - idr_destroy(&cb_ident_idr); + struct nfs_net *nn = net_generic(net, nfs_net_id); + + idr_destroy(&nn->cb_ident_idr); } /* nfs_client_lock held */ static void nfs_cb_idr_remove_locked(struct nfs_client *clp) { + struct nfs_net *nn = net_generic(clp->net, nfs_net_id); + if (clp->cl_cb_ident) - idr_remove(&cb_ident_idr, clp->cl_cb_ident); + idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident); } static void pnfs_init_server(struct nfs_server *server) @@ -263,7 +267,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp) { } -void nfs_cleanup_cb_ident_idr(void) +void nfs_cleanup_cb_ident_idr(struct net *net) { } @@ -1203,12 +1207,13 @@ error: * Find a client by callback identifier */ struct nfs_client * 
-nfs4_find_client_ident(int cb_ident) +nfs4_find_client_ident(struct net *net, int cb_ident) { struct nfs_client *clp; + struct nfs_net *nn = net_generic(net, nfs_net_id); spin_lock(&nfs_client_lock); - clp = idr_find(&cb_ident_idr, cb_ident); + clp = idr_find(&nn->cb_ident_idr, cb_ident); if (clp) atomic_inc(&clp->cl_count); spin_unlock(&nfs_client_lock); @@ -1765,6 +1770,9 @@ void nfs_clients_init(struct net *net) INIT_LIST_HEAD(&nn->nfs_client_list); INIT_LIST_HEAD(&nn->nfs_volume_list); +#ifdef CONFIG_NFS_V4 + idr_init(&nn->cb_ident_idr); +#endif } #ifdef CONFIG_PROC_FS diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0365b84cc2c7..6c662598f885 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1565,6 +1565,7 @@ static int nfs_net_init(struct net *net) static void nfs_net_exit(struct net *net) { nfs_dns_resolver_cache_destroy(net); + nfs_cleanup_cb_ident_idr(net); } static struct pernet_operations nfs_net_ops = { @@ -1674,7 +1675,6 @@ static void __exit exit_nfs_fs(void) #ifdef CONFIG_PROC_FS rpc_proc_unregister(&init_net, "nfs"); #endif - nfs_cleanup_cb_ident_idr(); unregister_nfs_fs(); nfs_fs_proc_exit(); nfsiod_stop(); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index a9ae8069fff9..958fff2927c0 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -148,9 +148,9 @@ extern void nfs_umount(const struct nfs_mount_request *info); extern const struct rpc_program nfs_program; extern void nfs_clients_init(struct net *net); -extern void nfs_cleanup_cb_ident_idr(void); +extern void nfs_cleanup_cb_ident_idr(struct net *); extern void nfs_put_client(struct nfs_client *); -extern struct nfs_client *nfs4_find_client_ident(int); +extern struct nfs_client *nfs4_find_client_ident(struct net *, int); extern struct nfs_client * nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *); extern struct nfs_server *nfs_create_server( diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index 0fbd4e017d27..547cc9525ba2 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -9,6 +9,9 @@ struct nfs_net { struct rpc_pipe *bl_device_pipe; struct list_head nfs_client_list; struct list_head nfs_volume_list; +#ifdef CONFIG_NFS_V4 + struct idr cb_ident_idr; /* Protected by nfs_client_lock */ +#endif }; extern int nfs_net_id; -- cgit From dc03085834a4530b2514708a643cd3fe38f35b21 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 23 Jan 2012 17:26:31 +0000 Subject: NFS: make nfs_client_lock per net ns This patch makes nfs_clients_lock allocated per network namespace. All items it protects are already network namespace aware. 
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 51 +++++++++++++++++++++++++++++---------------------- fs/nfs/idmap.c | 4 ++-- fs/nfs/internal.h | 3 --- fs/nfs/netns.h | 1 + 4 files changed, 32 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f51b2795ce07..9e11d2988830 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -55,7 +55,6 @@ #define NFSDBG_FACILITY NFSDBG_CLIENT -DEFINE_SPINLOCK(nfs_client_lock); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); #ifdef CONFIG_NFS_V4 @@ -73,9 +72,9 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) retry: if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL)) return -ENOMEM; - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident); - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); if (ret == -EAGAIN) goto retry; return ret; @@ -313,15 +312,18 @@ static void nfs_free_client(struct nfs_client *clp) */ void nfs_put_client(struct nfs_client *clp) { + struct nfs_net *nn; + if (!clp) return; dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count)); + nn = net_generic(clp->net, nfs_net_id); - if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) { + if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) { list_del(&clp->cl_share_link); nfs_cb_idr_remove_locked(clp); - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); BUG_ON(!list_empty(&clp->cl_superblocks)); @@ -516,7 +518,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, /* see if the client already exists */ do { - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); clp = nfs_match_client(cl_init); if (clp) @@ -524,7 +526,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, if (new) goto install_client; - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); new = nfs_alloc_client(cl_init); } while (!IS_ERR(new)); @@ -536,7 +538,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, install_client: clp = new; list_add(&clp->cl_share_link, &nn->nfs_client_list); - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr, authflavour, noresvport); @@ -551,7 +553,7 @@ install_client: * - make sure it's ready before returning */ found_client: - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); if (new) nfs_free_client(new); @@ -1041,24 +1043,25 @@ static void nfs_server_insert_lists(struct nfs_server *server) struct nfs_client *clp = server->nfs_client; struct nfs_net *nn = net_generic(clp->net, nfs_net_id); - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); list_add_tail_rcu(&server->client_link, &clp->cl_superblocks); list_add_tail(&server->master_link, &nn->nfs_volume_list); clear_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state); - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); } static void nfs_server_remove_lists(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; + struct nfs_net *nn = net_generic(clp->net, nfs_net_id); - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); list_del_rcu(&server->client_link); if (clp && list_empty(&clp->cl_superblocks)) set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state); list_del(&server->master_link); - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); synchronize_rcu(); } @@ -1212,11 +1215,11 @@ 
nfs4_find_client_ident(struct net *net, int cb_ident) struct nfs_client *clp; struct nfs_net *nn = net_generic(net, nfs_net_id); - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); clp = idr_find(&nn->cb_ident_idr, cb_ident); if (clp) atomic_inc(&clp->cl_count); - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); return clp; } @@ -1235,7 +1238,7 @@ nfs4_find_client_sessionid(const struct sockaddr *addr, struct nfs_client *clp; struct nfs_net *nn = net_generic(&init_net, nfs_net_id); - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { if (nfs4_cb_match_client(addr, clp, 1) == false) continue; @@ -1249,10 +1252,10 @@ nfs4_find_client_sessionid(const struct sockaddr *addr, continue; atomic_inc(&clp->cl_count); - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); return clp; } - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); return NULL; } @@ -1849,7 +1852,7 @@ static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) struct nfs_net *nn = net_generic(m->private, nfs_net_id); /* lock the list against modification */ - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); return seq_list_start_head(&nn->nfs_client_list, *_pos); } @@ -1868,7 +1871,9 @@ static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos) */ static void nfs_server_list_stop(struct seq_file *p, void *v) { - spin_unlock(&nfs_client_lock); + struct nfs_net *nn = net_generic(p->private, nfs_net_id); + + spin_unlock(&nn->nfs_client_lock); } /* @@ -1930,7 +1935,7 @@ static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) struct nfs_net *nn = net_generic(m->private, nfs_net_id); /* lock the list against modification */ - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); return seq_list_start_head(&nn->nfs_volume_list, *_pos); } @@ -1949,7 +1954,9 @@ static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos) */ static void nfs_volume_list_stop(struct seq_file *p, void *v) { - spin_unlock(&nfs_client_lock); + struct nfs_net *nn = net_generic(p->private, nfs_net_id); + + spin_unlock(&nn->nfs_client_lock); } /* diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 2f78f0ce2664..d2afcd8354ef 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -536,7 +536,7 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, struct nfs_client *clp; int error = 0; - spin_lock(&nfs_client_lock); + spin_lock(&nn->nfs_client_lock); list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { if (clp->rpc_ops != &nfs_v4_clientops) continue; @@ -544,7 +544,7 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, if (error) break; } - spin_unlock(&nfs_client_lock); + spin_unlock(&nn->nfs_client_lock); return error; } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 958fff2927c0..b38b73347af5 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -182,9 +182,6 @@ static inline void nfs_fs_proc_exit(void) { } #endif -#ifdef CONFIG_NFS_V4 -extern spinlock_t nfs_client_lock; -#endif /* nfs4namespace.c */ #ifdef CONFIG_NFS_V4 diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index 547cc9525ba2..7baad89ae60e 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -12,6 +12,7 @@ struct nfs_net { #ifdef CONFIG_NFS_V4 struct idr cb_ident_idr; /* Protected by nfs_client_lock */ #endif + spinlock_t nfs_client_lock; }; extern int nfs_net_id; -- cgit From bc224f539dcce7805d4bfb68a92f0fe8bb102c22 
Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Thu, 26 Jan 2012 15:11:33 +0400 Subject: NFS: pass proper net rpc_pton() in nfs_dns_resolve_name() Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/dns_resolve.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index be9a530987b3..fcd8f1d7430f 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -20,7 +20,7 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL); if (ip_len > 0) - ret = rpc_pton(&init_net, ip_addr, ip_len, sa, salen); + ret = rpc_pton(net, ip_addr, ip_len, sa, salen); else ret = -ESRCH; kfree(ip_addr); -- cgit From c7add9a9720ff5be4715f7a0bb0d9578b2e8534e Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Thu, 26 Jan 2012 15:11:49 +0400 Subject: NFS: search for client session id in proper network namespace Network namespace is taken from request transport and passed as a part of cb_process_state structure. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/callback.h | 1 + fs/nfs/callback_proc.c | 2 +- fs/nfs/callback_xdr.c | 1 + fs/nfs/client.c | 4 ++-- fs/nfs/internal.h | 3 ++- 5 files changed, 7 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index c89d3b9e483c..197e0d3754c2 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -39,6 +39,7 @@ struct cb_process_state { __be32 drc_status; struct nfs_client *clp; int slotid; + struct net *net; }; struct cb_compound_hdr_arg { diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 0e6e63f55db4..f71978d107d0 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -461,7 +461,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, int i; __be32 status = htonl(NFS4ERR_BADSESSION); - clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid); + clp = nfs4_find_client_sessionid(cps->net, args->csa_addr, &args->csa_sessionid); if (clp == NULL) goto out; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index e14af46bd2c6..2e372240d028 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -861,6 +861,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r .drc_status = 0, .clp = NULL, .slotid = -1, + .net = rqstp->rq_xprt->xpt_net, }; unsigned int nops = 0; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 9e11d2988830..2328dcbf6c0b 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1232,11 +1232,11 @@ nfs4_find_client_ident(struct net *net, int cb_ident) * Returns NULL if no such client */ struct nfs_client * -nfs4_find_client_sessionid(const struct sockaddr *addr, +nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr, struct nfs4_sessionid *sid) { struct nfs_client *clp; - struct nfs_net *nn = net_generic(&init_net, nfs_net_id); + struct nfs_net *nn = net_generic(net, nfs_net_id); spin_lock(&nn->nfs_client_lock); list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b38b73347af5..0c3648a947d1 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -152,7 +152,8 @@ extern void nfs_cleanup_cb_ident_idr(struct net *); extern void nfs_put_client(struct nfs_client *); extern struct nfs_client *nfs4_find_client_ident(struct net *, int); extern struct nfs_client * -nfs4_find_client_sessionid(const struct 
sockaddr *, struct nfs4_sessionid *); +nfs4_find_client_sessionid(struct net *, const struct sockaddr *, + struct nfs4_sessionid *); extern struct nfs_server *nfs_create_server( const struct nfs_parsed_mount_data *, struct nfs_fh *); -- cgit From b48e127884b117b429d3473577b9dc3f2b42b8eb Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Thu, 26 Jan 2012 15:11:57 +0400 Subject: NFS: pass current net to rpc_pton() while parsing mount options Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 8e210b2c16d7..94667848af9a 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1408,7 +1408,7 @@ static int nfs_parse_mount_options(char *raw, if (string == NULL) goto out_nomem; mnt->nfs_server.addrlen = - rpc_pton(&init_net, string, strlen(string), + rpc_pton(mnt->net, string, strlen(string), (struct sockaddr *) &mnt->nfs_server.address, sizeof(mnt->nfs_server.address)); @@ -1430,7 +1430,7 @@ static int nfs_parse_mount_options(char *raw, if (string == NULL) goto out_nomem; mnt->mount_server.addrlen = - rpc_pton(&init_net, string, strlen(string), + rpc_pton(mnt->net, string, strlen(string), (struct sockaddr *) &mnt->mount_server.address, sizeof(mnt->mount_server.address)); -- cgit From 33faaa380e9ec4b93503cae8c9969fb599f0f283 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Thu, 26 Jan 2012 15:12:05 +0400 Subject: NFS: pass transport net to rpc_pton() while parse server name Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/nfs4namespace.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 48a9acdbaeb6..667ea7406fd3 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -97,11 +97,11 @@ static size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa, size_t salen, struct nfs_server *server) { ssize_t ret; + struct net *net = server->client->cl_xprt->xprt_net; - ret = rpc_pton(&init_net, string, len, sa, salen); + ret = rpc_pton(net, string, len, sa, salen); if (ret == 0) { - ret = nfs_dns_resolve_name(server->client->cl_xprt->xprt_net, - string, len, sa, salen); + ret = nfs_dns_resolve_name(net, string, len, sa, salen); if (ret < 0) ret = 0; } -- cgit From 17347d03c008e2f504c33bb4905cdad0abc01319 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Thu, 26 Jan 2012 15:11:41 +0400 Subject: NFS: build fixed in case of NFS_USE_NEW_IDMAPPER is undefined Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index d2afcd8354ef..5a5566fa1619 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -62,6 +62,7 @@ #include #include "nfs4_fs.h" #include "internal.h" +#include "netns.h" #define NFS_UINT_MAXLEN 11 #define IDMAP_HASH_SZ 128 -- cgit From 934e7d44b810691ae5aefa3308b97a402aac1a55 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 7 Feb 2012 22:21:45 +0900 Subject: btrfs: Fix typo in free-space-cache.c Correct spelling "cace" to "cache" in fs/btrfs/free-space-cache.c Signed-off-by: Masanari Iida Signed-off-by: Jiri Kosina --- fs/btrfs/free-space-cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index c2f20594c9f7..7f4f30253571 100644 --- 
a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1067,7 +1067,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, spin_unlock(&block_group->lock); ret = 0; #ifdef DEBUG - printk(KERN_ERR "btrfs: failed to write free space cace " + printk(KERN_ERR "btrfs: failed to write free space cache " "for block group %llu\n", block_group->key.objectid); #endif } -- cgit From 42ea19790e82498e14a24e97b7cf2a83d89203b6 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Wed, 8 Feb 2012 20:39:39 +0900 Subject: jffs2: Fix typo in compr.c Correct spelling "modul" to "module" in fs/jffs2/compr.c Signed-off-by: Masanari Iida Signed-off-by: Jiri Kosina --- fs/jffs2/compr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c index 5b6c9d1a2fb9..96ed3c9ec3fc 100644 --- a/fs/jffs2/compr.c +++ b/fs/jffs2/compr.c @@ -340,7 +340,7 @@ int jffs2_unregister_compressor(struct jffs2_compressor *comp) if (comp->usecount) { spin_unlock(&jffs2_compressor_list_lock); - printk(KERN_WARNING "JFFS2: Compressor modul is in use. Unregister failed.\n"); + printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n"); return -1; } list_del(&comp->list); -- cgit From 3e93b8dfd9dd8735152e59913a2bde226f83d43e Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 5 Feb 2012 01:29:47 +0100 Subject: BTRFS: Don't include disk-io.h twice in check-integrity.c Once should be enough. Signed-off-by: Jesper Juhl Signed-off-by: Jiri Kosina --- fs/btrfs/check-integrity.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index b669a7d8e499..064b29bd1600 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -89,7 +89,6 @@ #include "disk-io.h" #include "transaction.h" #include "extent_io.h" -#include "disk-io.h" #include "volumes.h" #include "print-tree.h" #include "locking.h" -- cgit From 92b2e5b31dd2ad2c9273578c2289d17f417fe32d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Feb 2012 13:57:20 +0000 Subject: xfs: use a normal shrinker for the dquot freelist Stop reusing dquots from the freelist when allocating new ones directly, and implement a shrinker that actually follows the specifications for the interface. The shrinker implementation is still highly suboptimal at this point, but we can gradually work on it. This also fixes a bug in the previous lock ordering, where we would take the hash and dqlist locks inside of the freelist lock against the normal lock ordering. This is only solvable by introducing the dispose list, and thus not when using direct reclaim of unused dquots for new allocations. As a side-effect the quota upper bound and used-to-free ratio values in /proc/fs/xfs/xqm are set to 0 as these values don't make any sense in the new world order.
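The dispose-list approach described above is a pattern worth seeing in isolation: reclaim candidates are taken off the shared freelist while the list lock is held and parked on a private list, and the teardown work that may sleep then runs with no locks held. The following is only a generic sketch of that shape, not the XFS code; cache_item, cache_lru, cache_lock, ITEM_FREEING, item_is_reclaimable() and item_free() are invented names.

#include <linux/list.h>
#include <linux/spinlock.h>

#define ITEM_FREEING	0x1		/* fences off new lookups */

struct cache_item {
	struct list_head lru;
	unsigned int	 flags;
};

static LIST_HEAD(cache_lru);
static DEFINE_SPINLOCK(cache_lock);

static bool item_is_reclaimable(struct cache_item *item);	/* defined elsewhere */
static void item_free(struct cache_item *item);			/* defined elsewhere */

static void cache_shrink_scan(int nr_to_scan)
{
	LIST_HEAD(dispose);
	struct cache_item *item;

	spin_lock(&cache_lock);
	while (nr_to_scan-- > 0 && !list_empty(&cache_lru)) {
		item = list_first_entry(&cache_lru, struct cache_item, lru);
		if (!item_is_reclaimable(item)) {
			/* Busy: rotate it to the tail so we don't spin on it. */
			list_move_tail(&item->lru, &cache_lru);
			continue;
		}
		item->flags |= ITEM_FREEING;
		list_move_tail(&item->lru, &dispose);
	}
	spin_unlock(&cache_lock);

	/* Expensive teardown happens here, outside the list lock. */
	while (!list_empty(&dispose)) {
		item = list_first_entry(&dispose, struct cache_item, lru);
		list_del_init(&item->lru);
		item_free(item);
	}
}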
Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers (cherry picked from commit 04da0c8196ac0b12fb6b84f4b7a51ad2fa56d869) --- fs/xfs/kmem.h | 6 -- fs/xfs/xfs_dquot.c | 103 +++++------------- fs/xfs/xfs_qm.c | 291 +++++++++++++++++++------------------------------- fs/xfs/xfs_qm.h | 14 --- fs/xfs/xfs_qm_stats.c | 4 +- fs/xfs/xfs_trace.h | 5 +- 6 files changed, 141 insertions(+), 282 deletions(-) (limited to 'fs') diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index 292eff198030..ab7c53fe346e 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h @@ -110,10 +110,4 @@ kmem_zone_destroy(kmem_zone_t *zone) extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); -static inline int -kmem_shake_allow(gfp_t gfp_mask) -{ - return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)); -} - #endif /* __XFS_SUPPORT_KMEM_H__ */ diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index bf4fe8637f3d..6d7faa87b41c 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -62,82 +62,6 @@ int xfs_dqerror_mod = 33; static struct lock_class_key xfs_dquot_other_class; -/* - * Allocate and initialize a dquot. We don't always allocate fresh memory; - * we try to reclaim a free dquot if the number of incore dquots are above - * a threshold. - * The only field inside the core that gets initialized at this point - * is the d_id field. The idea is to fill in the entire q_core - * when we read in the on disk dquot. - */ -STATIC xfs_dquot_t * -xfs_qm_dqinit( - xfs_mount_t *mp, - xfs_dqid_t id, - uint type) -{ - xfs_dquot_t *dqp; - boolean_t brandnewdquot; - - brandnewdquot = xfs_qm_dqalloc_incore(&dqp); - dqp->dq_flags = type; - dqp->q_core.d_id = cpu_to_be32(id); - dqp->q_mount = mp; - - /* - * No need to re-initialize these if this is a reclaimed dquot. - */ - if (brandnewdquot) { - INIT_LIST_HEAD(&dqp->q_freelist); - mutex_init(&dqp->q_qlock); - init_waitqueue_head(&dqp->q_pinwait); - - /* - * Because we want to use a counting completion, complete - * the flush completion once to allow a single access to - * the flush completion without blocking. - */ - init_completion(&dqp->q_flush); - complete(&dqp->q_flush); - - trace_xfs_dqinit(dqp); - } else { - /* - * Only the q_core portion was zeroed in dqreclaim_one(). - * So, we need to reset others. - */ - dqp->q_nrefs = 0; - dqp->q_blkno = 0; - INIT_LIST_HEAD(&dqp->q_mplist); - INIT_LIST_HEAD(&dqp->q_hashlist); - dqp->q_bufoffset = 0; - dqp->q_fileoffset = 0; - dqp->q_transp = NULL; - dqp->q_gdquot = NULL; - dqp->q_res_bcount = 0; - dqp->q_res_icount = 0; - dqp->q_res_rtbcount = 0; - atomic_set(&dqp->q_pincount, 0); - dqp->q_hash = NULL; - ASSERT(list_empty(&dqp->q_freelist)); - - trace_xfs_dqreuse(dqp); - } - - /* - * In either case we need to make sure group quotas have a different - * lock class than user quotas, to make sure lockdep knows we can - * locks of one of each at the same time. 
- */ - if (!(type & XFS_DQ_USER)) - lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); - - /* - * log item gets initialized later - */ - return (dqp); -} - /* * This is called to free all the memory associated with a dquot */ @@ -567,7 +491,32 @@ xfs_qm_dqread( int error; int cancelflags = 0; - dqp = xfs_qm_dqinit(mp, id, type); + + dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); + + dqp->dq_flags = type; + dqp->q_core.d_id = cpu_to_be32(id); + dqp->q_mount = mp; + INIT_LIST_HEAD(&dqp->q_freelist); + mutex_init(&dqp->q_qlock); + init_waitqueue_head(&dqp->q_pinwait); + + /* + * Because we want to use a counting completion, complete + * the flush completion once to allow a single access to + * the flush completion without blocking. + */ + init_completion(&dqp->q_flush); + complete(&dqp->q_flush); + + /* + * Make sure group quotas have a different lock class than user + * quotas. + */ + if (!(type & XFS_DQ_USER)) + lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); + + atomic_inc(&xfs_Gqm->qm_totaldquots); trace_xfs_dqread(dqp); diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 1b2f5b37eac4..c872feaf3697 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -50,7 +50,6 @@ */ struct mutex xfs_Gqm_lock; struct xfs_qm *xfs_Gqm; -uint ndquot; kmem_zone_t *qm_dqzone; kmem_zone_t *qm_dqtrxzone; @@ -93,7 +92,6 @@ xfs_Gqm_init(void) goto out_free_udqhash; hsize /= sizeof(xfs_dqhash_t); - ndquot = hsize << 8; xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); xqm->qm_dqhashmask = hsize - 1; @@ -137,7 +135,6 @@ xfs_Gqm_init(void) xqm->qm_dqtrxzone = qm_dqtrxzone; atomic_set(&xqm->qm_totaldquots, 0); - xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; xqm->qm_nrefs = 0; return xqm; @@ -1600,216 +1597,150 @@ xfs_qm_init_quotainos( return 0; } +STATIC void +xfs_qm_dqfree_one( + struct xfs_dquot *dqp) +{ + struct xfs_mount *mp = dqp->q_mount; + struct xfs_quotainfo *qi = mp->m_quotainfo; + mutex_lock(&dqp->q_hash->qh_lock); + list_del_init(&dqp->q_hashlist); + dqp->q_hash->qh_version++; + mutex_unlock(&dqp->q_hash->qh_lock); -/* - * Pop the least recently used dquot off the freelist and recycle it. - */ -STATIC struct xfs_dquot * -xfs_qm_dqreclaim_one(void) + mutex_lock(&qi->qi_dqlist_lock); + list_del_init(&dqp->q_mplist); + qi->qi_dquots--; + qi->qi_dqreclaims++; + mutex_unlock(&qi->qi_dqlist_lock); + + xfs_qm_dqdestroy(dqp); +} + +STATIC void +xfs_qm_dqreclaim_one( + struct xfs_dquot *dqp, + struct list_head *dispose_list) { - struct xfs_dquot *dqp; - int restarts = 0; + struct xfs_mount *mp = dqp->q_mount; + int error; - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); -restart: - list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { - struct xfs_mount *mp = dqp->q_mount; + if (!xfs_dqlock_nowait(dqp)) + goto out_busy; - if (!xfs_dqlock_nowait(dqp)) - continue; + /* + * This dquot has acquired a reference in the meantime remove it from + * the freelist and try again. + */ + if (dqp->q_nrefs) { + xfs_dqunlock(dqp); - /* - * This dquot has already been grabbed by dqlookup. - * Remove it from the freelist and try again. - */ - if (dqp->q_nrefs) { - trace_xfs_dqreclaim_want(dqp); - XQM_STATS_INC(xqmstats.xs_qm_dqwants); - - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; - restarts++; - goto dqunlock; - } + trace_xfs_dqreclaim_want(dqp); + XQM_STATS_INC(xqmstats.xs_qm_dqwants); - ASSERT(dqp->q_hash); - ASSERT(!list_empty(&dqp->q_mplist)); + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + return; + } - /* - * Try to grab the flush lock. 
If this dquot is in the process - * of getting flushed to disk, we don't want to reclaim it. - */ - if (!xfs_dqflock_nowait(dqp)) - goto dqunlock; + ASSERT(dqp->q_hash); + ASSERT(!list_empty(&dqp->q_mplist)); - /* - * We have the flush lock so we know that this is not in the - * process of being flushed. So, if this is dirty, flush it - * DELWRI so that we don't get a freelist infested with - * dirty dquots. - */ - if (XFS_DQ_IS_DIRTY(dqp)) { - int error; + /* + * Try to grab the flush lock. If this dquot is in the process of + * getting flushed to disk, we don't want to reclaim it. + */ + if (!xfs_dqflock_nowait(dqp)) + goto out_busy; - trace_xfs_dqreclaim_dirty(dqp); + /* + * We have the flush lock so we know that this is not in the + * process of being flushed. So, if this is dirty, flush it + * DELWRI so that we don't get a freelist infested with + * dirty dquots. + */ + if (XFS_DQ_IS_DIRTY(dqp)) { + trace_xfs_dqreclaim_dirty(dqp); - /* - * We flush it delayed write, so don't bother - * releasing the freelist lock. - */ - error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK); - if (error) { - xfs_warn(mp, "%s: dquot %p flush failed", - __func__, dqp); - } - goto dqunlock; + /* + * We flush it delayed write, so don't bother releasing the + * freelist lock. + */ + error = xfs_qm_dqflush(dqp, 0); + if (error) { + xfs_warn(mp, "%s: dquot %p flush failed", + __func__, dqp); } - xfs_dqfunlock(dqp); /* - * Prevent lookup now that we are going to reclaim the dquot. - * Once XFS_DQ_FREEING is set lookup won't touch the dquot, - * thus we can drop the lock now. + * Give the dquot another try on the freelist, as the + * flushing will take some time. */ - dqp->dq_flags |= XFS_DQ_FREEING; - xfs_dqunlock(dqp); - - mutex_lock(&dqp->q_hash->qh_lock); - list_del_init(&dqp->q_hashlist); - dqp->q_hash->qh_version++; - mutex_unlock(&dqp->q_hash->qh_lock); - - mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); - list_del_init(&dqp->q_mplist); - mp->m_quotainfo->qi_dquots--; - mp->m_quotainfo->qi_dqreclaims++; - mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); + goto out_busy; + } + xfs_dqfunlock(dqp); - ASSERT(dqp->q_nrefs == 0); - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; + /* + * Prevent lookups now that we are past the point of no return. + */ + dqp->dq_flags |= XFS_DQ_FREEING; + xfs_dqunlock(dqp); - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - return dqp; -dqunlock: - xfs_dqunlock(dqp); - if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) - break; - goto restart; - } + ASSERT(dqp->q_nrefs == 0); + list_move_tail(&dqp->q_freelist, dispose_list); + xfs_Gqm->qm_dqfrlist_cnt--; - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - return NULL; -} + trace_xfs_dqreclaim_done(dqp); + XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); + return; -/* - * Traverse the freelist of dquots and attempt to reclaim a maximum of - * 'howmany' dquots. This operation races with dqlookup(), and attempts to - * favor the lookup function ... - */ -STATIC int -xfs_qm_shake_freelist( - int howmany) -{ - int nreclaimed = 0; - xfs_dquot_t *dqp; +out_busy: + xfs_dqunlock(dqp); - if (howmany <= 0) - return 0; + /* + * Move the dquot to the tail of the list so that we don't spin on it. 
+ */ + list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); - while (nreclaimed < howmany) { - dqp = xfs_qm_dqreclaim_one(); - if (!dqp) - return nreclaimed; - xfs_qm_dqdestroy(dqp); - nreclaimed++; - } - return nreclaimed; + trace_xfs_dqreclaim_busy(dqp); + XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); } -/* - * The kmem_shake interface is invoked when memory is running low. - */ -/* ARGSUSED */ STATIC int xfs_qm_shake( - struct shrinker *shrink, - struct shrink_control *sc) + struct shrinker *shrink, + struct shrink_control *sc) { - int ndqused, nfree, n; - gfp_t gfp_mask = sc->gfp_mask; - - if (!kmem_shake_allow(gfp_mask)) - return 0; - if (!xfs_Gqm) - return 0; - - nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */ - /* incore dquots in all f/s's */ - ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; - - ASSERT(ndqused >= 0); + int nr_to_scan = sc->nr_to_scan; + LIST_HEAD (dispose_list); + struct xfs_dquot *dqp; - if (nfree <= ndqused && nfree < ndquot) + if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) return 0; + if (!nr_to_scan) + goto out; - ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ - n = nfree - ndqused - ndquot; /* # over target */ - - return xfs_qm_shake_freelist(MAX(nfree, n)); -} - - -/*------------------------------------------------------------------*/ - -/* - * Return a new incore dquot. Depending on the number of - * dquots in the system, we either allocate a new one on the kernel heap, - * or reclaim a free one. - * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed - * to reclaim an existing one from the freelist. - */ -boolean_t -xfs_qm_dqalloc_incore( - xfs_dquot_t **O_dqpp) -{ - xfs_dquot_t *dqp; - - /* - * Check against high water mark to see if we want to pop - * a nincompoop dquot off the freelist. - */ - if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { - /* - * Try to recycle a dquot from the freelist. - */ - if ((dqp = xfs_qm_dqreclaim_one())) { - XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); - /* - * Just zero the core here. The rest will get - * reinitialized by caller. XXX we shouldn't even - * do this zero ... - */ - memset(&dqp->q_core, 0, sizeof(dqp->q_core)); - *O_dqpp = dqp; - return B_FALSE; - } - XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); + while (!list_empty(&xfs_Gqm->qm_dqfrlist)) { + if (nr_to_scan-- <= 0) + break; + dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot, + q_freelist); + xfs_qm_dqreclaim_one(dqp, &dispose_list); } + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - /* - * Allocate a brand new dquot on the kernel heap and return it - * to the caller to initialize. - */ - ASSERT(xfs_Gqm->qm_dqzone != NULL); - *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); - atomic_inc(&xfs_Gqm->qm_totaldquots); - - return B_TRUE; + while (!list_empty(&dispose_list)) { + dqp = list_first_entry(&dispose_list, struct xfs_dquot, + q_freelist); + list_del_init(&dqp->q_freelist); + xfs_qm_dqfree_one(dqp); + } +out: + return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure; } - /* * Start a transaction and write the incore superblock changes to * disk. flags parameter indicates which fields have changed. 
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 9b4f3adefbc5..9a9b997e1a0a 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -26,23 +26,11 @@ struct xfs_qm; struct xfs_inode; -extern uint ndquot; extern struct mutex xfs_Gqm_lock; extern struct xfs_qm *xfs_Gqm; extern kmem_zone_t *qm_dqzone; extern kmem_zone_t *qm_dqtrxzone; -/* - * Ditto, for xfs_qm_dqreclaim_one. - */ -#define XFS_QM_RECLAIM_MAX_RESTARTS 4 - -/* - * Ideal ratio of free to in use dquots. Quota manager makes an attempt - * to keep this balance. - */ -#define XFS_QM_DQFREE_RATIO 2 - /* * Dquot hashtable constants/threshold values. */ @@ -74,7 +62,6 @@ typedef struct xfs_qm { int qm_dqfrlist_cnt; atomic_t qm_totaldquots; /* total incore dquots */ uint qm_nrefs; /* file systems with quota on */ - int qm_dqfree_ratio;/* ratio of free to inuse dquots */ kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ } xfs_qm_t; @@ -143,7 +130,6 @@ extern int xfs_qm_quotacheck(xfs_mount_t *); extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); /* dquot stuff */ -extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); diff --git a/fs/xfs/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c index 8671a0b32644..5729ba570877 100644 --- a/fs/xfs/xfs_qm_stats.c +++ b/fs/xfs/xfs_qm_stats.c @@ -42,9 +42,9 @@ static int xqm_proc_show(struct seq_file *m, void *v) { /* maximum; incore; ratio free to inuse; freelist */ seq_printf(m, "%d\t%d\t%d\t%u\n", - ndquot, + 0, xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, - xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, + 0, xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0); return 0; } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 6b6df5802e95..bb134a819930 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -733,11 +733,10 @@ DEFINE_EVENT(xfs_dquot_class, name, \ DEFINE_DQUOT_EVENT(xfs_dqadjust); DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); -DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_done); DEFINE_DQUOT_EVENT(xfs_dqattach_found); DEFINE_DQUOT_EVENT(xfs_dqattach_get); -DEFINE_DQUOT_EVENT(xfs_dqinit); -DEFINE_DQUOT_EVENT(xfs_dqreuse); DEFINE_DQUOT_EVENT(xfs_dqalloc); DEFINE_DQUOT_EVENT(xfs_dqtobp_read); DEFINE_DQUOT_EVENT(xfs_dqread); -- cgit From f65020a83ad570c1788f7d8ece67f3487166576b Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Mon, 13 Feb 2012 20:51:05 +0000 Subject: XFS: xfs_trans_add_item() - don't assign in ASSERT() when compare is intended It looks to me like the two ASSERT()s in xfs_trans_add_item() really want to do a compare (==) rather than assignment (=). This patch changes it from the latter to the former. 
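To make the failure mode concrete, here is a tiny userspace illustration of the same mistake (hypothetical code, not from the kernel): an assignment inside an assertion is almost always truthy, so the check never trips, it silently overwrites the field it was meant to verify, and once assertions are compiled out the stray write disappears as well, so debug and production builds diverge.

#include <assert.h>

struct item {
	void *owner;
};

static void check_owner(struct item *it, void *expected)
{
	/* BUG: '=' assigns. This "passes" whenever expected is non-NULL
	 * and clobbers it->owner as a side effect.
	 */
	assert(it->owner = expected);

	/* Intended check: compare, don't assign. */
	assert(it->owner == expected);
}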
Signed-off-by: Jesper Juhl Signed-off-by: Ben Myers (cherry picked from commit 05293485a0b6b1f803e8a3c0ff188c38f6969985) --- fs/xfs/xfs_trans.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 329b06aba1c2..7adcdf15ae0c 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1151,8 +1151,8 @@ xfs_trans_add_item( { struct xfs_log_item_desc *lidp; - ASSERT(lip->li_mountp = tp->t_mountp); - ASSERT(lip->li_ailp = tp->t_mountp->m_ail); + ASSERT(lip->li_mountp == tp->t_mountp); + ASSERT(lip->li_ailp == tp->t_mountp->m_ail); lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS); -- cgit From 4040153087478993cbf0809f444400a3c808074c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 13 Feb 2012 03:58:52 +0000 Subject: security: trim security.h Trim security.h Signed-off-by: Al Viro Signed-off-by: James Morris --- fs/nfs/client.c | 1 + fs/proc/proc_sysctl.c | 2 ++ fs/quota/dquot.c | 1 + fs/super.c | 1 + 4 files changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 31778f74357d..d4f772ebd1ef 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index a6b62173d4c3..67bbf6e4e197 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -6,7 +6,9 @@ #include #include #include +#include #include +#include #include "internal.h" static const struct dentry_operations proc_sys_dentry_operations; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 46741970371b..8b4f12b33f57 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -71,6 +71,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/super.c b/fs/super.c index 6015c02296b7..18660532909e 100644 --- a/fs/super.c +++ b/fs/super.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "internal.h" -- cgit From 45d43c291e9a922d7b432b0dbcb1d8fb70d8410f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 6 Feb 2012 19:38:51 -0500 Subject: NFSv4.1: Convert slotid from u8 to u32 It is perfectly legal to negotiate up to 2^32-1 slots in the protocol, and with 10GigE, we are already seeing that 255 slots is far too limiting. Signed-off-by: Trond Myklebust --- fs/nfs/callback.h | 2 +- fs/nfs/callback_xdr.c | 6 +++--- fs/nfs/nfs4proc.c | 42 ++++++++++++++++++++---------------------- 3 files changed, 24 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 197e0d3754c2..a5527c90a5aa 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -38,7 +38,7 @@ enum nfs4_callback_opnum { struct cb_process_state { __be32 drc_status; struct nfs_client *clp; - int slotid; + u32 slotid; struct net *net; }; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 2e372240d028..5466829c7e77 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -759,14 +759,14 @@ static void nfs4_callback_free_slot(struct nfs4_session *session) * Let the state manager know callback processing done. 
* A single slot, so highest used slotid is either 0 or -1 */ - tbl->highest_used_slotid = -1; + tbl->highest_used_slotid = NFS4_NO_SLOT; nfs4_check_drain_bc_complete(session); spin_unlock(&tbl->slot_tbl_lock); } static void nfs4_cb_free_slot(struct cb_process_state *cps) { - if (cps->slotid != -1) + if (cps->slotid != NFS4_NO_SLOT) nfs4_callback_free_slot(cps->clp->cl_session); } @@ -860,7 +860,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r struct cb_process_state cps = { .drc_status = 0, .clp = NULL, - .slotid = -1, + .slotid = NFS4_NO_SLOT, .net = rqstp->rq_xprt->xpt_net, }; unsigned int nops = 0; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 482ed97189c9..f3f56f4a3b72 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -360,16 +360,14 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp * When updating highest_used_slotid there may be "holes" in the bitmap * so we need to scan down from highest_used_slotid to 0 looking for the now * highest slotid in use. - * If none found, highest_used_slotid is set to -1. + * If none found, highest_used_slotid is set to NFS4_NO_SLOT. * * Must be called while holding tbl->slot_tbl_lock */ static void -nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid) +nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid) { - int slotid = free_slotid; - - BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE); + BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE); /* clear used bit in bitmap */ __clear_bit(slotid, tbl->used_slots); @@ -379,10 +377,10 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid) if (slotid < tbl->max_slots) tbl->highest_used_slotid = slotid; else - tbl->highest_used_slotid = -1; + tbl->highest_used_slotid = NFS4_NO_SLOT; } - dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__, - free_slotid, tbl->highest_used_slotid); + dprintk("%s: slotid %u highest_used_slotid %d\n", __func__, + slotid, tbl->highest_used_slotid); } bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy) @@ -402,7 +400,7 @@ static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) return; } - if (ses->fc_slot_table.highest_used_slotid != -1) + if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT) return; dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__); @@ -415,7 +413,7 @@ static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) void nfs4_check_drain_bc_complete(struct nfs4_session *ses) { if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) || - ses->bc_slot_table.highest_used_slotid != -1) + ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT) return; dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__); complete(&ses->bc_slot_table.complete); @@ -510,25 +508,25 @@ static int nfs4_sequence_done(struct rpc_task *task, * nfs4_find_slot looks for an unset bit in the used_slots bitmap. * If found, we mark the slot as used, update the highest_used_slotid, * and respectively set up the sequence operation args. - * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise. + * The slot number is returned if found, or NFS4_NO_SLOT otherwise. * * Note: must be called with under the slot_tbl_lock. 
*/ -static u8 +static u32 nfs4_find_slot(struct nfs4_slot_table *tbl) { - int slotid; - u8 ret_id = NFS4_MAX_SLOT_TABLE; - BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE); + u32 slotid; + u32 ret_id = NFS4_NO_SLOT; - dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n", + dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n", __func__, tbl->used_slots[0], tbl->highest_used_slotid, tbl->max_slots); slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots); if (slotid >= tbl->max_slots) goto out; __set_bit(slotid, tbl->used_slots); - if (slotid > tbl->highest_used_slotid) + if (slotid > tbl->highest_used_slotid || + tbl->highest_used_slotid == NFS4_NO_SLOT) tbl->highest_used_slotid = slotid; ret_id = slotid; out: @@ -555,7 +553,7 @@ int nfs41_setup_sequence(struct nfs4_session *session, { struct nfs4_slot *slot; struct nfs4_slot_table *tbl; - u8 slotid; + u32 slotid; dprintk("--> %s\n", __func__); /* slot already allocated? */ @@ -583,7 +581,7 @@ int nfs41_setup_sequence(struct nfs4_session *session, } slotid = nfs4_find_slot(tbl); - if (slotid == NFS4_MAX_SLOT_TABLE) { + if (slotid == NFS4_NO_SLOT) { rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); dprintk("<-- %s: no free slots\n", __func__); @@ -5144,7 +5142,7 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl, spin_lock(&tbl->slot_tbl_lock); tbl->max_slots = max_slots; tbl->slots = slot; - tbl->highest_used_slotid = -1; /* no slot is currently used */ + tbl->highest_used_slotid = NFS4_NO_SLOT; /* no slot is currently used */ spin_unlock(&tbl->slot_tbl_lock); dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, tbl, tbl->slots, tbl->max_slots); @@ -5196,13 +5194,13 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) return NULL; tbl = &session->fc_slot_table; - tbl->highest_used_slotid = -1; + tbl->highest_used_slotid = NFS4_NO_SLOT; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); init_completion(&tbl->complete); tbl = &session->bc_slot_table; - tbl->highest_used_slotid = -1; + tbl->highest_used_slotid = NFS4_NO_SLOT; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); init_completion(&tbl->complete); -- cgit From ef159e9177cc5a09e6174796dde0b2d243ddf28b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 6 Feb 2012 19:50:40 -0500 Subject: NFSv4.1: Add a module parameter to set the number of session slots Add the module parameter 'max_session_slots' to set the initial number of slots that the NFSv4.1 client will attempt to negotiate with the server. 
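A usage note, inferred from the patch rather than stated in the changelog: because the parameter is declared with module_param(max_session_slots, ushort, 0644), it should be settable at module load time and visible through sysfs afterwards. Assuming the NFSv4 client code is built into the nfs module, that would look roughly like "modprobe nfs max_session_slots=64", or a later read or write of /sys/module/nfs/parameters/max_session_slots; a changed value should only influence sessions negotiated after the change.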
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f3f56f4a3b72..0b3316541734 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -72,6 +72,8 @@ #define NFS4_MAX_LOOP_ON_RECOVER (10) +static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE; + struct nfs4_opendata; static int _nfs4_proc_open(struct nfs4_opendata *data); static int _nfs4_recover_proc_open(struct nfs4_opendata *data); @@ -5245,7 +5247,7 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) args->fc_attrs.max_rqst_sz = mxrqst_sz; args->fc_attrs.max_resp_sz = mxresp_sz; args->fc_attrs.max_ops = NFS4_MAX_OPS; - args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs; + args->fc_attrs.max_reqs = max_session_slots; dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " "max_ops=%u max_reqs=%u\n", @@ -6390,6 +6392,10 @@ const struct xattr_handler *nfs4_xattr_handlers[] = { NULL }; +module_param(max_session_slots, ushort, 0644); +MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 " + "requests the client will negotiate"); + /* * Local variables: * c-basic-offset: 8 -- cgit From c228fa2038a33bb3b87f567482124f452e162a71 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 31 Jan 2012 15:07:48 +0400 Subject: Lockd: create permanent lockd sockets in current network namespace This patch parametrizes Lockd permanent sockets creation routine by network namespace context. It also replaces hard-coded init_net with current network namespace context in Lockd sockets creation routines. This approach looks safe, because Lockd is created during NFS mount (or NFS server start) and thus socket is required exactly in current network namespace context. But in the same time it means, that Lockd sockets inherits first Lockd requester network namespace. This issue will be fixed in further patches of the series. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/lockd/svc.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index ff379ff7761f..cba35984dde7 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -189,27 +189,29 @@ lockd(void *vrqstp) } static int create_lockd_listener(struct svc_serv *serv, const char *name, - const int family, const unsigned short port) + struct net *net, const int family, + const unsigned short port) { struct svc_xprt *xprt; - xprt = svc_find_xprt(serv, name, &init_net, family, 0); + xprt = svc_find_xprt(serv, name, net, family, 0); if (xprt == NULL) - return svc_create_xprt(serv, name, &init_net, family, port, + return svc_create_xprt(serv, name, net, family, port, SVC_SOCK_DEFAULTS); svc_xprt_put(xprt); return 0; } -static int create_lockd_family(struct svc_serv *serv, const int family) +static int create_lockd_family(struct svc_serv *serv, struct net *net, + const int family) { int err; - err = create_lockd_listener(serv, "udp", family, nlm_udpport); + err = create_lockd_listener(serv, "udp", net, family, nlm_udpport); if (err < 0) return err; - return create_lockd_listener(serv, "tcp", family, nlm_tcpport); + return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport); } /* @@ -222,16 +224,16 @@ static int create_lockd_family(struct svc_serv *serv, const int family) * Returns zero if all listeners are available; otherwise a * negative errno value is returned. 
*/ -static int make_socks(struct svc_serv *serv) +static int make_socks(struct svc_serv *serv, struct net *net) { static int warned; int err; - err = create_lockd_family(serv, PF_INET); + err = create_lockd_family(serv, net, PF_INET); if (err < 0) goto out_err; - err = create_lockd_family(serv, PF_INET6); + err = create_lockd_family(serv, net, PF_INET6); if (err < 0 && err != -EAFNOSUPPORT) goto out_err; @@ -252,6 +254,7 @@ int lockd_up(void) { struct svc_serv *serv; int error = 0; + struct net *net = current->nsproxy->net_ns; mutex_lock(&nlmsvc_mutex); /* @@ -275,7 +278,7 @@ int lockd_up(void) goto out; } - error = make_socks(serv); + error = make_socks(serv, net); if (error < 0) goto destroy_and_out; -- cgit From a9c5d73a8d8cb37601f8c39b35b9b4128e1a5254 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 31 Jan 2012 15:07:57 +0400 Subject: Lockd: pernet usage counter introduced Lockd is going to be shared between network namespaces - i.e. going to be able to handle lock requests from different network namespaces. This means, that network namespace related resources have to be allocated not once (like now), but for every network namespace context, from which service is requested to operate. This patch implements Lockd per-net users accounting. New per-net counter is used to determine, when per-net resources have to be freed. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/lockd/netns.h | 12 ++++++++++++ fs/lockd/svc.c | 45 ++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 54 insertions(+), 3 deletions(-) create mode 100644 fs/lockd/netns.h (limited to 'fs') diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h new file mode 100644 index 000000000000..ce227e0fbc5c --- /dev/null +++ b/fs/lockd/netns.h @@ -0,0 +1,12 @@ +#ifndef __LOCKD_NETNS_H__ +#define __LOCKD_NETNS_H__ + +#include + +struct lockd_net { + unsigned int nlmsvc_users; +}; + +extern int lockd_net_id; + +#endif diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index cba35984dde7..73c9ebf09301 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -35,6 +35,8 @@ #include #include +#include "netns.h" + #define NLMDBG_FACILITY NLMDBG_SVC #define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE) #define ALLOWED_SIGS (sigmask(SIGKILL)) @@ -50,6 +52,8 @@ static struct task_struct *nlmsvc_task; static struct svc_rqst *nlmsvc_rqst; unsigned long nlmsvc_timeout; +int lockd_net_id; + /* * These can be set at insmod time (useful for NFS as root filesystem), * and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003 @@ -316,8 +320,12 @@ int lockd_up(void) destroy_and_out: svc_destroy(serv); out: - if (!error) + if (!error) { + struct lockd_net *ln = net_generic(net, lockd_net_id); + + ln->nlmsvc_users++; nlmsvc_users++; + } mutex_unlock(&nlmsvc_mutex); return error; } @@ -500,24 +508,55 @@ module_param_call(nlm_tcpport, param_set_port, param_get_int, module_param(nsm_use_hostnames, bool, 0644); module_param(nlm_max_connections, uint, 0644); +static int lockd_init_net(struct net *net) +{ + return 0; +} + +static void lockd_exit_net(struct net *net) +{ +} + +static struct pernet_operations lockd_net_ops = { + .init = lockd_init_net, + .exit = lockd_exit_net, + .id = &lockd_net_id, + .size = sizeof(struct lockd_net), +}; + + /* * Initialising and terminating the module. */ static int __init init_nlm(void) { + int err; + #ifdef CONFIG_SYSCTL + err = -ENOMEM; nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root); - return nlm_sysctl_table ? 
0 : -ENOMEM; -#else + if (nlm_sysctl_table == NULL) + goto err_sysctl; +#endif + err = register_pernet_subsys(&lockd_net_ops); + if (err) + goto err_pernet; return 0; + +err_pernet: +#ifdef CONFIG_SYSCTL + unregister_sysctl_table(nlm_sysctl_table); #endif +err_sysctl: + return err; } static void __exit exit_nlm(void) { /* FIXME: delete all NLM clients */ nlm_shutdown_hosts(); + unregister_pernet_subsys(&lockd_net_ops); #ifdef CONFIG_SYSCTL unregister_sysctl_table(nlm_sysctl_table); #endif -- cgit From bb2224df5ffe4f864f5b696199b17db1ce77bc0a Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 31 Jan 2012 15:08:05 +0400 Subject: Lockd: per-net up and down routines introduced This patch introduces per-net Lockd initialization and destruction routines. The logic is the same as in the global Lockd up and down routines. The solution is probably not the best one, but at least it looks clear. The per-net "up" routine is called only if lockd is already running; if per-net resources have not been allocated yet, the service is registered with the local portmapper and lockd sockets are created. The per-net "down" routine is called on every lockd_down() call as long as the global users counter is not zero. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/lockd/svc.c | 47 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 73c9ebf09301..90dec426bfd8 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -251,6 +251,45 @@ out_err: return err; } +static int lockd_up_net(struct net *net) +{ + struct lockd_net *ln = net_generic(net, lockd_net_id); + struct svc_serv *serv = nlmsvc_rqst->rq_server; + int error; + + if (ln->nlmsvc_users) + return 0; + + error = svc_rpcb_setup(serv, net); + if (error) + goto err_rpcb; + + error = make_socks(serv, net); + if (error < 0) + goto err_socks; + return 0; + +err_socks: + svc_rpcb_cleanup(serv, net); +err_rpcb: + return error; +} + +static void lockd_down_net(struct net *net) +{ + struct lockd_net *ln = net_generic(net, lockd_net_id); + struct svc_serv *serv = nlmsvc_rqst->rq_server; + + if (ln->nlmsvc_users) { + if (--ln->nlmsvc_users == 0) + svc_shutdown_net(serv, net); + } else { + printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n", + nlmsvc_task, net); + BUG(); + } +} + /* * Bring up the lockd process if it's not already up. */ @@ -264,8 +303,10 @@ int lockd_up(void) /* * Check whether we're already up and running. */ - if (nlmsvc_rqst) + if (nlmsvc_rqst) { + error = lockd_up_net(net); goto out; + } /* * Sanity check: if there's no pid, @@ -339,8 +380,10 @@ lockd_down(void) { mutex_lock(&nlmsvc_mutex); if (nlmsvc_users) { - if (--nlmsvc_users) + if (--nlmsvc_users) { + lockd_down_net(current->nsproxy->net_ns); goto out; + } } else { printk(KERN_ERR "lockd_down: no users! task=%p\n", nlmsvc_task); -- cgit From 66697bfd6aec0a9ca9331c1aa544ac20324a7561 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 31 Jan 2012 15:08:13 +0400 Subject: LockD: make nlm hosts network namespace aware This object depends on the RPC client, and thus on the network namespace. So let's perform its allocation and lookup in network namespace context.
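Since almost every patch in this stretch leans on the same per-network-namespace plumbing, a condensed sketch of the pattern may help; it is illustrative only, and the foo_* names are invented. A subsystem registers a pernet_operations with an id and a size, the networking core allocates that much private storage for every struct net, and net_generic(net, id) retrieves it wherever a namespace pointer is at hand.

#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct foo_net {
	spinlock_t	lock;		/* protects per-namespace state */
	unsigned int	users;
};

static int foo_net_id;

static __net_init int foo_init_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	spin_lock_init(&fn->lock);
	fn->users = 0;
	return 0;
}

static __net_exit void foo_exit_net(struct net *net)
{
	/* Tear down whatever hangs off this namespace's foo_net. */
}

static struct pernet_operations foo_net_ops = {
	.init	= foo_init_net,
	.exit	= foo_exit_net,
	.id	= &foo_net_id,
	.size	= sizeof(struct foo_net),
};

/* Call register_pernet_subsys(&foo_net_ops) at module init and
 * unregister_pernet_subsys(&foo_net_ops) at module exit.
 */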
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/lockd/clntlock.c | 3 ++- fs/lockd/host.c | 16 ++++++++++++++-- fs/nfs/client.c | 1 + 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 8d4ea8351e3d..ba1dc2eebd1e 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -62,7 +62,8 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen, nlm_init->protocol, nlm_version, - nlm_init->hostname, nlm_init->noresvport); + nlm_init->hostname, nlm_init->noresvport, + nlm_init->net); if (host == NULL) { lockd_down(); return ERR_PTR(-ENOLCK); diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 6f29836ec0cb..9ebd91dc42c3 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -17,6 +17,8 @@ #include #include +#include + #include #define NLMDBG_FACILITY NLMDBG_HOSTCACHE @@ -54,6 +56,7 @@ struct nlm_lookup_host_info { const char *hostname; /* remote's hostname */ const size_t hostname_len; /* it's length */ const int noresvport; /* use non-priv port */ + struct net *net; /* network namespace to bind */ }; /* @@ -155,6 +158,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni, INIT_LIST_HEAD(&host->h_reclaim); host->h_nsmhandle = nsm; host->h_addrbuf = nsm->sm_addrbuf; + host->net = ni->net; out: return host; @@ -206,7 +210,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const unsigned short protocol, const u32 version, const char *hostname, - int noresvport) + int noresvport, + struct net *net) { struct nlm_lookup_host_info ni = { .server = 0, @@ -217,6 +222,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, .hostname = hostname, .hostname_len = strlen(hostname), .noresvport = noresvport, + .net = net, }; struct hlist_head *chain; struct hlist_node *pos; @@ -231,6 +237,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, chain = &nlm_client_hosts[nlm_hash_address(sap)]; hlist_for_each_entry(host, pos, chain, h_hash) { + if (host->net != net) + continue; if (!rpc_cmp_addr(nlm_addr(host), sap)) continue; @@ -318,6 +326,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, struct nsm_handle *nsm = NULL; struct sockaddr *src_sap = svc_daddr(rqstp); size_t src_len = rqstp->rq_daddrlen; + struct net *net = rqstp->rq_xprt->xpt_net; struct nlm_lookup_host_info ni = { .server = 1, .sap = svc_addr(rqstp), @@ -326,6 +335,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, .version = rqstp->rq_vers, .hostname = hostname, .hostname_len = hostname_len, + .net = net, }; dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__, @@ -339,6 +349,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; hlist_for_each_entry(host, pos, chain, h_hash) { + if (host->net != net) + continue; if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) continue; @@ -431,7 +443,7 @@ nlm_bind_host(struct nlm_host *host) .to_retries = 5U, }; struct rpc_create_args args = { - .net = &init_net, + .net = host->net, .protocol = host->h_proto, .address = nlm_addr(host), .addrsize = host->h_addrlen, diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 2328dcbf6c0b..1a5cd49dff80 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -707,6 +707,7 @@ static int nfs_start_lockd(struct nfs_server *server) .nfs_version = clp->rpc_ops->version, .noresvport = server->flags & 
NFS_MOUNT_NORESVPORT ? 1 : 0, + .net = clp->net, }; if (nlm_init.nfs_version > 3) -- cgit From 0e1cb5c0aad1c37a4eee6db45f52c0b3869db2cc Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 31 Jan 2012 15:08:21 +0400 Subject: LockD: make NSM network namespace aware NLM host is network namespace aware now. So NSM have to take it into account. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/lockd/mon.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index c196030e530a..7ef14b3c5bee 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -62,14 +62,14 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm) return (struct sockaddr *)&nsm->sm_addr; } -static struct rpc_clnt *nsm_create(void) +static struct rpc_clnt *nsm_create(struct net *net) { struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_LOOPBACK), }; struct rpc_create_args args = { - .net = &init_net, + .net = net, .protocol = XPRT_TRANSPORT_UDP, .address = (struct sockaddr *)&sin, .addrsize = sizeof(sin), @@ -83,7 +83,8 @@ static struct rpc_clnt *nsm_create(void) return rpc_create(&args); } -static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res) +static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res, + struct net *net) { struct rpc_clnt *clnt; int status; @@ -99,7 +100,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res) .rpc_resp = res, }; - clnt = nsm_create(); + clnt = nsm_create(net); if (IS_ERR(clnt)) { status = PTR_ERR(clnt); dprintk("lockd: failed to create NSM upcall transport, " @@ -149,7 +150,7 @@ int nsm_monitor(const struct nlm_host *host) */ nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf; - status = nsm_mon_unmon(nsm, NSMPROC_MON, &res); + status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host->net); if (unlikely(res.status != 0)) status = -EIO; if (unlikely(status < 0)) { @@ -183,7 +184,7 @@ void nsm_unmonitor(const struct nlm_host *host) && nsm->sm_monitored && !nsm->sm_sticky) { dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name); - status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res); + status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host->net); if (res.status != 0) status = -EIO; if (status < 0) -- cgit From 3b64739fb928c34b13db6b5adcb0d3efb19e78be Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 31 Jan 2012 15:08:29 +0400 Subject: Lockd: shutdown NLM hosts in network namespace context Lockd now managed in network namespace context. And this patch introduces network namespace related NLM hosts shutdown in case of releasing per-net Lockd resources. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 26 +++++++++++++++++++------- fs/lockd/svc.c | 4 +++- 2 files changed, 22 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 9ebd91dc42c3..eb75ca7c2d6e 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -565,12 +565,8 @@ void nlm_host_rebooted(const struct nlm_reboot *info) nsm_release(nsm); } -/* - * Shut down the hosts module. - * Note that this routine is called only at server shutdown time. 
- */ void -nlm_shutdown_hosts(void) +nlm_shutdown_hosts_net(struct net *net) { struct hlist_head *chain; struct hlist_node *pos; @@ -582,6 +578,8 @@ nlm_shutdown_hosts(void) /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts...\n"); for_each_host(host, pos, chain, nlm_server_hosts) { + if (net && host->net != net) + continue; host->h_expires = jiffies - 1; if (host->h_rpcclnt) { rpc_shutdown_client(host->h_rpcclnt); @@ -592,15 +590,29 @@ nlm_shutdown_hosts(void) /* Then, perform a garbage collection pass */ nlm_gc_hosts(); mutex_unlock(&nlm_host_mutex); +} + +/* + * Shut down the hosts module. + * Note that this routine is called only at server shutdown time. + */ +void +nlm_shutdown_hosts(void) +{ + struct hlist_head *chain; + struct hlist_node *pos; + struct nlm_host *host; + + nlm_shutdown_hosts_net(NULL); /* complain if any hosts are left */ if (nrhosts != 0) { printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); dprintk("lockd: %lu hosts left:\n", nrhosts); for_each_host(host, pos, chain, nlm_server_hosts) { - dprintk(" %s (cnt %d use %d exp %ld)\n", + dprintk(" %s (cnt %d use %d exp %ld net %p)\n", host->h_name, atomic_read(&host->h_count), - host->h_inuse, host->h_expires); + host->h_inuse, host->h_expires, host->net); } } } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 90dec426bfd8..2774e1013b34 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -281,8 +281,10 @@ static void lockd_down_net(struct net *net) struct svc_serv *serv = nlmsvc_rqst->rq_server; if (ln->nlmsvc_users) { - if (--ln->nlmsvc_users == 0) + if (--ln->nlmsvc_users == 0) { + nlm_shutdown_hosts_net(net); svc_shutdown_net(serv, net); + } } else { printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n", nlmsvc_task, net); -- cgit From 4c03ae4a897b52e0e8fc38749606549eaa20d5b7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 7 Feb 2012 00:05:11 -0500 Subject: NFS: Initialise the nfs_net->nfs_client_lock Ensure that we initialise the nfs_net->nfs_client_lock spinlock. Also ensure that nfs_server_remove_lists() doesn't try to dereference server->nfs_client before that is initialised. Signed-off-by: Trond Myklebust Cc: Stanislav Kinsbursky --- fs/nfs/client.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 1a5cd49dff80..6f6267cb6bad 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1055,11 +1055,14 @@ static void nfs_server_insert_lists(struct nfs_server *server) static void nfs_server_remove_lists(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; - struct nfs_net *nn = net_generic(clp->net, nfs_net_id); + struct nfs_net *nn; + if (clp == NULL) + return; + nn = net_generic(clp->net, nfs_net_id); spin_lock(&nn->nfs_client_lock); list_del_rcu(&server->client_link); - if (clp && list_empty(&clp->cl_superblocks)) + if (list_empty(&clp->cl_superblocks)) set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state); list_del(&server->master_link); spin_unlock(&nn->nfs_client_lock); @@ -1777,6 +1780,7 @@ void nfs_clients_init(struct net *net) #ifdef CONFIG_NFS_V4 idr_init(&nn->cb_ident_idr); #endif + spin_lock_init(&nn->nfs_client_lock); } #ifdef CONFIG_PROC_FS -- cgit From b6d1e83b4ea6cb369bdd490871f00651decdb509 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 7 Feb 2012 19:53:19 +0400 Subject: NFS: fix nfs4_find_client_sessionid() arguments list It does not compile when CONFIG_NFS_V4_1 is not set.
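The rule this fix restores is worth stating: when a function has a real implementation under one Kconfig option and a stub under the other, the two must keep exactly the same prototype, or one of the two configurations stops building. A generic sketch of that shape, using hypothetical names rather than the actual NFS declarations:

struct net;
struct sockaddr;
struct obj;

#ifdef CONFIG_FEATURE_X		/* hypothetical config option */
struct obj *feature_lookup(struct net *net, const struct sockaddr *addr);
#else
/* Stub for !CONFIG_FEATURE_X builds: same argument list, same return
 * type, so callers compile unchanged either way.
 */
static inline struct obj *
feature_lookup(struct net *net, const struct sockaddr *addr)
{
	return NULL;
}
#endif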
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 6f6267cb6bad..d0f850ffeb19 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1266,7 +1266,7 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr, #else /* CONFIG_NFS_V4_1 */ struct nfs_client * -nfs4_find_client_sessionid(const struct sockaddr *addr, +nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr, struct nfs4_sessionid *sid) { return NULL; -- cgit From 7ced286e0ade171af89d32c22b1590e1ca480542 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Tue, 7 Feb 2012 11:49:11 -0500 Subject: NFS: add mount options 'v4.0' and 'v4.1' Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 94667848af9a..d18a90ba165f 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -80,7 +80,7 @@ enum { Opt_cto, Opt_nocto, Opt_ac, Opt_noac, Opt_lock, Opt_nolock, - Opt_v2, Opt_v3, Opt_v4, + Opt_v2, Opt_v3, Opt_v4, Opt_v4_0, Opt_v4_1, Opt_udp, Opt_tcp, Opt_rdma, Opt_acl, Opt_noacl, Opt_rdirplus, Opt_nordirplus, @@ -136,6 +136,8 @@ static const match_table_t nfs_mount_option_tokens = { { Opt_v2, "v2" }, { Opt_v3, "v3" }, { Opt_v4, "v4" }, + { Opt_v4_0, "v4.0" }, + { Opt_v4_1, "v4.1" }, { Opt_udp, "udp" }, { Opt_tcp, "tcp" }, { Opt_rdma, "rdma" }, @@ -1172,6 +1174,16 @@ static int nfs_parse_mount_options(char *raw, mnt->flags &= ~NFS_MOUNT_VER3; mnt->version = 4; break; + case Opt_v4_0: + mnt->flags &= ~NFS_MOUNT_VER3; + mnt->version = 4; + mnt->minorversion = 0; + break; + case Opt_v4_1: + mnt->flags &= ~NFS_MOUNT_VER3; + mnt->version = 4; + mnt->minorversion = 1; + break; case Opt_udp: mnt->flags &= ~NFS_MOUNT_TCP; mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP; -- cgit From d073e9b541e1ac3f52d72c3a153855d9a9ee3278 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 7 Feb 2012 14:59:05 -0500 Subject: NFSv4: Reduce the footprint of the idmapper Instead of pre-allocating the storage for all the strings, we can significantly reduce the size of that table by doing the allocation when we do the downcall. 
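A rough back-of-the-envelope figure makes the motivation concrete (the numbers are inferred, not taken from the changelog): with IDMAP_HASH_SZ defined as 128, as seen earlier in this series, and assuming the historical IDMAP_NAMESZ of 128 bytes, each hash table used to embed 128 * 128 = 16 KB of name storage, so the user and group tables together cost roughly 32 KB per nfs_client whether or not the legacy idmapper ever resolved a name. Turning ih_name into a const char * shrinks each entry to a pointer, and the kmalloc(namelen + 1) in the downcall path allocates only what the cached strings actually need.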
Signed-off-by: Trond Myklebust Reviewed-by: Jeff Layton --- fs/nfs/idmap.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 5a5566fa1619..fff79481218c 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -362,7 +362,7 @@ struct idmap_hashent { unsigned long ih_expires; __u32 ih_id; size_t ih_namelen; - char ih_name[IDMAP_NAMESZ]; + const char *ih_name; }; struct idmap_hashtable { @@ -482,12 +482,17 @@ void nfs_idmap_delete(struct nfs_client *clp) { struct idmap *idmap = clp->cl_idmap; + int i; if (!idmap) return; nfs_idmap_unregister(clp, idmap->idmap_pipe); rpc_destroy_pipe_data(idmap->idmap_pipe); clp->cl_idmap = NULL; + for (i = 0; i < ARRAY_SIZE(idmap->idmap_user_hash.h_entries); i++) + kfree(idmap->idmap_user_hash.h_entries[i].ih_name); + for (i = 0; i < ARRAY_SIZE(idmap->idmap_group_hash.h_entries); i++) + kfree(idmap->idmap_group_hash.h_entries[i].ih_name); kfree(idmap); } @@ -634,9 +639,14 @@ static void idmap_update_entry(struct idmap_hashent *he, const char *name, size_t namelen, __u32 id) { + char *str = kmalloc(namelen + 1, GFP_KERNEL); + if (str == NULL) + return; + kfree(he->ih_name); he->ih_id = id; - memcpy(he->ih_name, name, namelen); - he->ih_name[namelen] = '\0'; + memcpy(str, name, namelen); + str[namelen] = '\0'; + he->ih_name = str; he->ih_namelen = namelen; he->ih_expires = jiffies + nfs_idmap_cache_timeout; } -- cgit From e3da87066f950076fe274b58f0d0adc7d9f9d412 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 8 Feb 2012 13:21:38 -0500 Subject: NFSv4: The idmapper now depends on keyring functionality Add the appropriate 'select KEYS' to the NFSv4 Kconfig entry. Signed-off-by: Trond Myklebust --- fs/nfs/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 021d2cf6938a..ee86cfcd6c33 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -64,6 +64,7 @@ config NFS_V4 bool "NFS client support for NFS version 4" depends on NFS_FS select SUNRPC_GSS + select KEYS help This option enables support for version 4 of the NFS protocol (RFC 3530) in the kernel's NFS client. @@ -130,5 +131,4 @@ config NFS_USE_KERNEL_DNS bool depends on NFS_V4 && !NFS_USE_LEGACY_DNS select DNS_RESOLVER - select KEYS default y -- cgit From 685f50f9188ac1e8244d0340a9d6ea36b6136cec Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 8 Feb 2012 13:39:15 -0500 Subject: NFSv4: Further reduce the footprint of the idmapper Don't allocate the legacy idmapper tables until we actually need them. 
Signed-off-by: Trond Myklebust Reviewed-by: Jeff Layton --- fs/nfs/idmap.c | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index fff79481218c..b5c6d8eb7e03 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -367,7 +367,7 @@ struct idmap_hashent { struct idmap_hashtable { __u8 h_type; - struct idmap_hashent h_entries[IDMAP_HASH_SZ]; + struct idmap_hashent *h_entries; }; struct idmap { @@ -478,21 +478,40 @@ nfs_idmap_new(struct nfs_client *clp) return 0; } +static void +idmap_alloc_hashtable(struct idmap_hashtable *h) +{ + if (h->h_entries != NULL) + return; + h->h_entries = kcalloc(IDMAP_HASH_SZ, + sizeof(*h->h_entries), + GFP_KERNEL); +} + +static void +idmap_free_hashtable(struct idmap_hashtable *h) +{ + int i; + + if (h->h_entries == NULL) + return; + for (i = 0; i < IDMAP_HASH_SZ; i++) + kfree(h->h_entries[i].ih_name); + kfree(h->h_entries); +} + void nfs_idmap_delete(struct nfs_client *clp) { struct idmap *idmap = clp->cl_idmap; - int i; if (!idmap) return; nfs_idmap_unregister(clp, idmap->idmap_pipe); rpc_destroy_pipe_data(idmap->idmap_pipe); clp->cl_idmap = NULL; - for (i = 0; i < ARRAY_SIZE(idmap->idmap_user_hash.h_entries); i++) - kfree(idmap->idmap_user_hash.h_entries[i].ih_name); - for (i = 0; i < ARRAY_SIZE(idmap->idmap_group_hash.h_entries); i++) - kfree(idmap->idmap_group_hash.h_entries[i].ih_name); + idmap_free_hashtable(&idmap->idmap_user_hash); + idmap_free_hashtable(&idmap->idmap_group_hash); kfree(idmap); } @@ -586,6 +605,8 @@ void nfs_idmap_quit(void) static inline struct idmap_hashent * idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len) { + if (h->h_entries == NULL) + return NULL; return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ]; } @@ -594,6 +615,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len) { struct idmap_hashent *he = idmap_name_hash(h, name, len); + if (he == NULL) + return NULL; if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0) return NULL; if (time_after(jiffies, he->ih_expires)) @@ -604,6 +627,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len) static inline struct idmap_hashent * idmap_id_hash(struct idmap_hashtable* h, __u32 id) { + if (h->h_entries == NULL) + return NULL; return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ]; } @@ -611,6 +636,9 @@ static struct idmap_hashent * idmap_lookup_id(struct idmap_hashtable *h, __u32 id) { struct idmap_hashent *he = idmap_id_hash(h, id); + + if (he == NULL) + return NULL; if (he->ih_id != id || he->ih_namelen == 0) return NULL; if (time_after(jiffies, he->ih_expires)) @@ -626,12 +654,14 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id) static inline struct idmap_hashent * idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len) { + idmap_alloc_hashtable(h); return idmap_name_hash(h, name, len); } static inline struct idmap_hashent * idmap_alloc_id(struct idmap_hashtable *h, __u32 id) { + idmap_alloc_hashtable(h); return idmap_id_hash(h, id); } -- cgit From 571b7554016941ef0f0c3c61be72561e2bc55f5e Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Wed, 1 Feb 2012 14:06:41 -0500 Subject: NFS: dont allow minorversion= opt when vers != 4 Don't allow invalid 'vers' and 'minorversion' combinations in mount options, such as "vers=3,minorversion=1". 
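Concretely (example option strings, not from the changelog): a mount using "-o vers=4,minorversion=1", or the "v4.1" shorthand added earlier in this series, is still accepted, while "-o vers=3,minorversion=1" is now rejected during option parsing with the new "NFS: mount option vers=3 does not support minorversion=1" message; before this check the combination was not caught at parse time.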
Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index d18a90ba165f..d05024a18984 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1531,6 +1531,9 @@ static int nfs_parse_mount_options(char *raw, if (!sloppy && invalid_option) return 0; + if (mnt->minorversion && mnt->version != 4) + goto out_minorversion_mismatch; + /* * verify that any proto=/mountproto= options match the address * familiies in the addr=/mountaddr= options. @@ -1564,6 +1567,10 @@ out_invalid_address: out_invalid_value: printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p); return 0; +out_minorversion_mismatch: + printk(KERN_INFO "NFS: mount option vers=%u does not support " + "minorversion=%u\n", mnt->version, mnt->minorversion); + return 0; out_nomem: printk(KERN_INFO "NFS: not enough memory to parse option\n"); return 0; -- cgit From b4b9a0c1c89464dabafef974960f509ce33ae1c0 Mon Sep 17 00:00:00 2001 From: Vitaliy Gusev Date: Wed, 15 Feb 2012 19:38:25 +0400 Subject: nfs41: Verify channel's attributes accordingly to RFC v2 ca_maxoperations: For the backchannel, the server MUST NOT change the value the client offers. For the fore channel, the server MAY change the requested value. ca_maxrequests: For the backchannel, the server MUST NOT change the value the client offers. For the fore channel, the server MAY change the requested value. Signed-off-by: Vitaliy Gusev Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0b3316541734..87c584dd88b1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5287,6 +5287,8 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args return -EINVAL; if (rcvd->max_reqs == 0) return -EINVAL; + if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) + rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; return 0; } @@ -5302,9 +5304,9 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) return -EINVAL; /* These would render the backchannel useless: */ - if (rcvd->max_ops == 0) + if (rcvd->max_ops != sent->max_ops) return -EINVAL; - if (rcvd->max_reqs == 0) + if (rcvd->max_reqs != sent->max_reqs) return -EINVAL; return 0; } -- cgit From d7c32675021bd750d8e0e726f2f81f746e8cab01 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 15 Feb 2012 16:35:17 -0500 Subject: nfs: Clean up debugging in nfs_follow_mountpoint() Clean up: Fix a debugging message which had an obsolete function name in it (nfs_follow_mountpoint). 
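The underlying idiom is to let the compiler supply the function name via __func__ instead of hard-coding it, so a later rename cannot leave a stale name in the log output. A small self-contained sketch, assuming a dprintk-like GNU-style variadic macro (illustrative, not the kernel's dprintk):

    #include <stdio.h>

    /* Stand-in for dprintk(): always prefix with the *current* function name. */
    #define dbg(fmt, ...) \
        fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)

    static void *do_automount(int fail)
    {
        static int dummy;
        void *mnt = fail ? NULL : (void *)&dummy;

        if (mnt == NULL)
            dbg("error %d\n", -2);   /* error path logged distinctly */
        else
            dbg("= %p\n", mnt);
        return mnt;
    }

    int main(void)
    {
        do_automount(0);
        do_automount(1);
        return 0;
    }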
Introduced by commit 36d43a43 "NFS: Use d_automount() rather than abusing follow_link()" (January 14, 2011) Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/namespace.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 8102391bb374..1807866bb3ab 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -276,7 +276,10 @@ out: nfs_free_fattr(fattr); nfs_free_fhandle(fh); out_nofree: - dprintk("<-- nfs_follow_mountpoint() = %p\n", mnt); + if (IS_ERR(mnt)) + dprintk("<-- %s(): error %ld\n", __func__, PTR_ERR(mnt)); + else + dprintk("<-- %s() = %p\n", __func__, mnt); return mnt; } -- cgit From b6bf6e7d6f6fae1ddcae9e02dfe676bdc8fe892c Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Fri, 17 Feb 2012 13:05:23 -0500 Subject: NFSv4.1 set highest_used_slotid to NFS4_NO_SLOT Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 4e37818a34ef..c1111a37dc14 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -212,7 +212,7 @@ static void nfs4_end_drain_session(struct nfs_client *clp) static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl) { spin_lock(&tbl->slot_tbl_lock); - if (tbl->highest_used_slotid != -1) { + if (tbl->highest_used_slotid != NFS4_NO_SLOT) { INIT_COMPLETION(tbl->complete); spin_unlock(&tbl->slot_tbl_lock); return wait_for_completion_interruptible(&tbl->complete); -- cgit From 0a702195234eb77c4097148285cccf7f095de9cf Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 17 Feb 2012 13:15:24 -0500 Subject: NFS: include filelayout DS rpc stats in mountstats Include RPC statistics from all data servers in /proc/self/mountstats for pNFS filelayout mounts. 
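The mechanism is an optional per-completion hook slotted into an existing ops table, invoked by the core path only when a layout provides it. A generic userspace sketch of that shape (illustrative names, not the RPC layer):

    #include <stdio.h>

    struct task;

    struct call_ops {
        void (*done)(struct task *t, void *data);
        void (*count_stats)(struct task *t, void *data); /* optional hook */
        void (*release)(void *data);
    };

    struct task {
        const struct call_ops *ops;
        void *data;
    };

    /* Core completion path: the stats hook is invoked only when provided. */
    static void task_complete(struct task *t)
    {
        if (t->ops->done)
            t->ops->done(t, t->data);
        if (t->ops->count_stats)
            t->ops->count_stats(t, t->data);
        if (t->ops->release)
            t->ops->release(t->data);
    }

    static void read_done(struct task *t, void *data)  { printf("read done\n"); }
    static void read_stats(struct task *t, void *data) { printf("stats counted\n"); }
    static void read_release(void *data)               { printf("released\n"); }

    static const struct call_ops read_call_ops = {
        .done        = read_done,
        .count_stats = read_stats,   /* new hook slotted into the table */
        .release     = read_release,
    };

    int main(void)
    {
        struct task t = { .ops = &read_call_ops, .data = NULL };

        task_complete(&t);
        return 0;
    }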
Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 79be7acc9bae..47e8f3435d38 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -33,6 +33,8 @@ #include #include +#include + #include "internal.h" #include "nfs4filelayout.h" @@ -189,6 +191,13 @@ static void filelayout_read_call_done(struct rpc_task *task, void *data) rdata->mds_ops->rpc_call_done(task, data); } +static void filelayout_read_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_read_data *rdata = (struct nfs_read_data *)data; + + rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics); +} + static void filelayout_read_release(void *data) { struct nfs_read_data *rdata = (struct nfs_read_data *)data; @@ -268,6 +277,13 @@ static void filelayout_write_call_done(struct rpc_task *task, void *data) wdata->mds_ops->rpc_call_done(task, data); } +static void filelayout_write_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_write_data *wdata = (struct nfs_write_data *)data; + + rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics); +} + static void filelayout_write_release(void *data) { struct nfs_write_data *wdata = (struct nfs_write_data *)data; @@ -288,18 +304,21 @@ static void filelayout_commit_release(void *data) struct rpc_call_ops filelayout_read_call_ops = { .rpc_call_prepare = filelayout_read_prepare, .rpc_call_done = filelayout_read_call_done, + .rpc_count_stats = filelayout_read_count_stats, .rpc_release = filelayout_read_release, }; struct rpc_call_ops filelayout_write_call_ops = { .rpc_call_prepare = filelayout_write_prepare, .rpc_call_done = filelayout_write_call_done, + .rpc_count_stats = filelayout_write_count_stats, .rpc_release = filelayout_write_release, }; struct rpc_call_ops filelayout_commit_call_ops = { .rpc_call_prepare = filelayout_write_prepare, .rpc_call_done = filelayout_write_call_done, + .rpc_count_stats = filelayout_write_count_stats, .rpc_release = filelayout_commit_release, }; -- cgit From 9937347a1ee6a67e450cc9e90750ce0b10abfe75 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 19 Feb 2012 08:44:07 +0100 Subject: NFS: Ensure that the nfs_client 'net' field is always set Currently, the nfs_parsed_mount_data->net field is initialised in the nfs_parse_mount_options() function, which means that it only gets set if we're using text based mounts. The legacy binary mount interface is therefore broken. Fix is to initialise the ->net field in nfs_alloc_parsed_mount_data. 
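The general pattern is to put per-field defaults in the one allocation helper so every entry point, text-based or binary, starts from a fully initialised structure. A minimal sketch under that assumption (illustrative names and values, not the NFS code):

    #include <stdio.h>
    #include <stdlib.h>

    struct parsed_mount_data {
        unsigned int version;
        unsigned int timeout;
        const char *net_ns;     /* stand-in for the ->net reference */
    };

    /*
     * All defaults live in the allocator, so the text option parser and
     * the legacy binary interface both see an initialised ->net_ns.
     */
    static struct parsed_mount_data *alloc_parsed_mount_data(unsigned int version)
    {
        struct parsed_mount_data *d = calloc(1, sizeof(*d));

        if (d) {
            d->version = version;
            d->timeout = 600;
            d->net_ns  = "init_net";   /* default namespace, illustrative */
        }
        return d;
    }

    static void parse_text_options(struct parsed_mount_data *d, const char *raw)
    {
        /* only overrides what the user actually asked for */
        (void)d;
        (void)raw;
    }

    int main(void)
    {
        struct parsed_mount_data *d = alloc_parsed_mount_data(4);

        if (!d)
            return 1;
        parse_text_options(d, "vers=4");
        printf("net_ns=%s timeout=%u\n", d->net_ns, d->timeout);
        free(d);
        return 0;
    }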
Signed-off-by: Trond Myklebust Cc: Stanislav Kinsbursky --- fs/nfs/super.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index d05024a18984..6708f3044eb0 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -911,6 +911,7 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve data->auth_flavor_len = 1; data->version = version; data->minorversion = 0; + data->net = current->nsproxy->net_ns; security_init_mnt_opts(&data->lsm_opts); } return data; @@ -1110,8 +1111,6 @@ static int nfs_parse_mount_options(char *raw, free_secdata(secdata); - mnt->net = current->nsproxy->net_ns; - while ((p = strsep(&raw, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; unsigned long option; -- cgit From abd96698613eb27415e7028b6100be930920adc6 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 19 Feb 2012 08:46:49 +0100 Subject: NFS: Ensure struct nfs_client holds a reference to the net namespace Otherwise we have no guarantee that the net namespace won't just disappear from underneath us once the task that created it is destroyed. Signed-off-by: Trond Myklebust Cc: Stanislav Kinsbursky --- fs/nfs/client.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d0f850ffeb19..8563585cccec 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -172,7 +172,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_ clp->cl_rpcclient = ERR_PTR(-EINVAL); clp->cl_proto = cl_init->proto; - clp->net = cl_init->net; + clp->net = get_net(cl_init->net); #ifdef CONFIG_NFS_V4 err = nfs_get_cb_ident_idr(clp, cl_init->minorversion); @@ -300,6 +300,7 @@ static void nfs_free_client(struct nfs_client *clp) nfs4_deviceid_purge_client(clp); + put_net(clp->net); kfree(clp->cl_hostname); kfree(clp->server_scope); kfree(clp); -- cgit From 0cc785ecbf6c04c1ef01c311accee859c856a6b9 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Sat, 11 Feb 2012 21:35:12 +0900 Subject: cramfs: Fix typo in inode.c Correct spelling "endianess" to "endianness" in fs/cramfs/inode.c Signed-off-by: Masanari Iida Signed-off-by: Jiri Kosina --- fs/cramfs/inode.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index a2ee8f9f5a38..04d51f9333d7 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -257,10 +257,10 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) /* Do sanity checks on the superblock */ if (super.magic != CRAMFS_MAGIC) { - /* check for wrong endianess */ + /* check for wrong endianness */ if (super.magic == CRAMFS_MAGIC_WEND) { if (!silent) - printk(KERN_ERR "cramfs: wrong endianess\n"); + printk(KERN_ERR "cramfs: wrong endianness\n"); goto out; } @@ -270,7 +270,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) mutex_unlock(&read_mutex); if (super.magic != CRAMFS_MAGIC) { if (super.magic == CRAMFS_MAGIC_WEND && !silent) - printk(KERN_ERR "cramfs: wrong endianess\n"); + printk(KERN_ERR "cramfs: wrong endianness\n"); else if (!silent) printk(KERN_ERR "cramfs: wrong magic\n"); goto out; -- cgit From a80581d0d1b11b2d4bbb9333c1cac5416714112d Mon Sep 17 00:00:00 2001 From: "Justin P. Mattock" Date: Sat, 11 Feb 2012 05:55:58 -0800 Subject: Typos: change aditional to additional. The below patch fixes some typos "aditional" to "additional", and also fixes a comment with another word mispelled. 
Signed-off-by: Justin P. Mattock Signed-off-by: Jiri Kosina --- fs/ntfs/layout.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h index faece7190866..809c0e6d8e09 100644 --- a/fs/ntfs/layout.h +++ b/fs/ntfs/layout.h @@ -2008,14 +2008,14 @@ typedef struct { * * When a directory is small enough to fit inside the index root then this * is the only attribute describing the directory. When the directory is too - * large to fit in the index root, on the other hand, two aditional attributes + * large to fit in the index root, on the other hand, two additional attributes * are present: an index allocation attribute, containing sub-nodes of the B+ * directory tree (see below), and a bitmap attribute, describing which virtual * cluster numbers (vcns) in the index allocation attribute are in use by an * index block. * * NOTE: The root directory (FILE_root) contains an entry for itself. Other - * dircetories do not contain entries for themselves, though. + * directories do not contain entries for themselves, though. */ typedef struct { ATTR_TYPE type; /* Type of the indexed attribute. Is -- cgit From d0a3fe67e30261bb2018d2a06f33ff3303438c8e Mon Sep 17 00:00:00 2001 From: Mitsuo Hayasaka Date: Mon, 6 Feb 2012 12:50:07 +0000 Subject: xfs: change available ranges of softlimit and hardlimit in quota check In general, quota allows us to use disk blocks and inodes up to each limit, that is, they are available if they don't exceed their limitations. Current xfs sets their available ranges to lower than them except disk inode quota check. So, this patch changes the ranges to not beyond them. Signed-off-by: Mitsuo Hayasaka Cc: Ben Myers Cc: Alex Elder Cc: Christoph Hellwig Reviewed-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers (cherry picked from commit 20f12d8ac01917d96860f352f67eddd912df0afb) --- fs/xfs/xfs_dquot.c | 24 ++++++++++++------------ fs/xfs/xfs_log_recover.c | 6 +++--- fs/xfs/xfs_qm_syscalls.c | 4 ++-- fs/xfs/xfs_trans_dquot.c | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 6d7faa87b41c..4e268edcf3f6 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -139,10 +139,10 @@ xfs_qm_adjust_dqtimers( if (!d->d_btimer) { if ((d->d_blk_softlimit && - (be64_to_cpu(d->d_bcount) >= + (be64_to_cpu(d->d_bcount) > be64_to_cpu(d->d_blk_softlimit))) || (d->d_blk_hardlimit && - (be64_to_cpu(d->d_bcount) >= + (be64_to_cpu(d->d_bcount) > be64_to_cpu(d->d_blk_hardlimit)))) { d->d_btimer = cpu_to_be32(get_seconds() + mp->m_quotainfo->qi_btimelimit); @@ -151,10 +151,10 @@ xfs_qm_adjust_dqtimers( } } else { if ((!d->d_blk_softlimit || - (be64_to_cpu(d->d_bcount) < + (be64_to_cpu(d->d_bcount) <= be64_to_cpu(d->d_blk_softlimit))) && (!d->d_blk_hardlimit || - (be64_to_cpu(d->d_bcount) < + (be64_to_cpu(d->d_bcount) <= be64_to_cpu(d->d_blk_hardlimit)))) { d->d_btimer = 0; } @@ -162,10 +162,10 @@ xfs_qm_adjust_dqtimers( if (!d->d_itimer) { if ((d->d_ino_softlimit && - (be64_to_cpu(d->d_icount) >= + (be64_to_cpu(d->d_icount) > be64_to_cpu(d->d_ino_softlimit))) || (d->d_ino_hardlimit && - (be64_to_cpu(d->d_icount) >= + (be64_to_cpu(d->d_icount) > be64_to_cpu(d->d_ino_hardlimit)))) { d->d_itimer = cpu_to_be32(get_seconds() + mp->m_quotainfo->qi_itimelimit); @@ -174,10 +174,10 @@ xfs_qm_adjust_dqtimers( } } else { if ((!d->d_ino_softlimit || - (be64_to_cpu(d->d_icount) < + (be64_to_cpu(d->d_icount) <= be64_to_cpu(d->d_ino_softlimit))) 
&& (!d->d_ino_hardlimit || - (be64_to_cpu(d->d_icount) < + (be64_to_cpu(d->d_icount) <= be64_to_cpu(d->d_ino_hardlimit)))) { d->d_itimer = 0; } @@ -185,10 +185,10 @@ xfs_qm_adjust_dqtimers( if (!d->d_rtbtimer) { if ((d->d_rtb_softlimit && - (be64_to_cpu(d->d_rtbcount) >= + (be64_to_cpu(d->d_rtbcount) > be64_to_cpu(d->d_rtb_softlimit))) || (d->d_rtb_hardlimit && - (be64_to_cpu(d->d_rtbcount) >= + (be64_to_cpu(d->d_rtbcount) > be64_to_cpu(d->d_rtb_hardlimit)))) { d->d_rtbtimer = cpu_to_be32(get_seconds() + mp->m_quotainfo->qi_rtbtimelimit); @@ -197,10 +197,10 @@ xfs_qm_adjust_dqtimers( } } else { if ((!d->d_rtb_softlimit || - (be64_to_cpu(d->d_rtbcount) < + (be64_to_cpu(d->d_rtbcount) <= be64_to_cpu(d->d_rtb_softlimit))) && (!d->d_rtb_hardlimit || - (be64_to_cpu(d->d_rtbcount) < + (be64_to_cpu(d->d_rtbcount) <= be64_to_cpu(d->d_rtb_hardlimit)))) { d->d_rtbtimer = 0; } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 403825eb5c16..8a3d8aedd1f4 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1981,7 +1981,7 @@ xfs_qm_dqcheck( if (!errs && ddq->d_id) { if (ddq->d_blk_softlimit && - be64_to_cpu(ddq->d_bcount) >= + be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit)) { if (!ddq->d_btimer) { if (flags & XFS_QMOPT_DOWARN) @@ -1992,7 +1992,7 @@ xfs_qm_dqcheck( } } if (ddq->d_ino_softlimit && - be64_to_cpu(ddq->d_icount) >= + be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit)) { if (!ddq->d_itimer) { if (flags & XFS_QMOPT_DOWARN) @@ -2003,7 +2003,7 @@ xfs_qm_dqcheck( } } if (ddq->d_rtb_softlimit && - be64_to_cpu(ddq->d_rtbcount) >= + be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit)) { if (!ddq->d_rtbtimer) { if (flags & XFS_QMOPT_DOWARN) diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index eafbcff81f3a..711a86e39ff0 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -813,11 +813,11 @@ xfs_qm_export_dquot( (XFS_IS_OQUOTA_ENFORCED(mp) && (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && dst->d_id != 0) { - if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) && + if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) && (dst->d_blk_softlimit > 0)) { ASSERT(dst->d_btimer != 0); } - if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) && + if (((int) dst->d_icount > (int) dst->d_ino_softlimit) && (dst->d_ino_softlimit > 0)) { ASSERT(dst->d_itimer != 0); } diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 4d00ee67792d..85255536b4b6 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -649,12 +649,12 @@ xfs_trans_dqresv( * nblks. */ if (hardlimit > 0ULL && - hardlimit <= nblks + *resbcountp) { + hardlimit < nblks + *resbcountp) { xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); goto error_return; } if (softlimit > 0ULL && - softlimit <= nblks + *resbcountp) { + softlimit < nblks + *resbcountp) { if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { xfs_quota_warn(mp, dqp, -- cgit From 33e0edafd78d83273c14b14501cff063fac528e5 Mon Sep 17 00:00:00 2001 From: Mitsuo Hayasaka Date: Mon, 6 Feb 2012 12:50:30 +0000 Subject: xfs: make inode quota check more general The xfs checks quota when reserving disk blocks and inodes. In the block reservation, it checks if the total number of blocks including current usage and new reservation exceed quota. In the inode reservation, it checks using the total number of inodes including only current usage without new reservation. 
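For illustration, the two styles of check can be contrasted in a few lines of plain C (illustrative names, not the XFS code). The block-style check folds the requested reservation into the total and only trips when the total strictly exceeds the limit, so sitting exactly at the limit stays legal; the old inode-style check looks at current usage alone.

    #include <stdio.h>

    typedef unsigned long long qcnt_t;

    /* Block-style check: the requested reservation is part of the total. */
    static int blocks_would_exceed(qcnt_t used, qcnt_t request, qcnt_t limit)
    {
        return limit && used + request > limit;   /* limit == 0: no limit */
    }

    /* Old inode-style check: only current usage is compared. */
    static int inodes_would_exceed(qcnt_t used, qcnt_t limit)
    {
        return limit && used >= limit;
    }

    int main(void)
    {
        qcnt_t limit = 100;

        /* 99 in use, asking for 5 more: the total would be 104 */
        printf("block-style: %s\n",
               blocks_would_exceed(99, 5, limit) ? "denied" : "allowed");
        printf("inode-style: %s\n",
               inodes_would_exceed(99, limit) ? "denied" : "allowed");
        return 0;
    }

As the sketch shows, the usage-only form lets a large reservation slip past the limit, which is what this series converges away from.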
However, this inode quota check works well since the caller of xfs_trans_dquot() always sets the argument of the number of new inode reservation to 1 or 0 and inode is reserved one by one in current xfs. To make it more general, this patch changes it to the same way as the block quota check. Signed-off-by: Mitsuo Hayasaka Cc: Ben Myers Cc: Alex Elder Cc: Christoph Hellwig Reviewed-by: Mark Tinguely Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers (cherry picked from commit c922bbc819324558e61402a7a76c10c550ca61bc) --- fs/xfs/xfs_trans_dquot.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 85255536b4b6..c4ba366d24e6 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -677,11 +677,13 @@ xfs_trans_dqresv( if (!softlimit) softlimit = q->qi_isoftlimit; - if (hardlimit > 0ULL && count >= hardlimit) { + if (hardlimit > 0ULL && + hardlimit < ninos + count) { xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); goto error_return; } - if (softlimit > 0ULL && count >= softlimit) { + if (softlimit > 0ULL && + softlimit < ninos + count) { if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { xfs_quota_warn(mp, dqp, -- cgit From 162573937679ff36c9acd54268c047199dab564e Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Thu, 16 Feb 2012 01:15:00 +0000 Subject: fadump: Introduce cleanup routine to invalidate /proc/vmcore. With the firmware-assisted dump support we don't require a reboot when we are in second kernel after crash. The second kernel after crash is a normal kernel boot and has knowledge about entire system RAM with the page tables initialized for entire system RAM. Hence once the dump is saved to disk, we can just release the reserved memory area for general use and continue with second kernel as production kernel. Hence when we release the reserved memory that contains dump data, the '/proc/vmcore' will not be valid anymore. Hence this patch introduces a cleanup routine that invalidates and removes the /proc/vmcore file. This routine will be invoked before we release the reserved dump memory area. Signed-off-by: Mahesh Salgaonkar Signed-off-by: Benjamin Herrenschmidt --- fs/proc/vmcore.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'fs') diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index b0f450a2bb7c..0d5071d29985 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -700,3 +700,26 @@ static int __init vmcore_init(void) return 0; } module_init(vmcore_init) + +/* Cleanup function for vmcore module. */ +void vmcore_cleanup(void) +{ + struct list_head *pos, *next; + + if (proc_vmcore) { + remove_proc_entry(proc_vmcore->name, proc_vmcore->parent); + proc_vmcore = NULL; + } + + /* clear the vmcore list. */ + list_for_each_safe(pos, next, &vmcore_list) { + struct vmcore *m; + + m = list_entry(pos, struct vmcore, list); + list_del(&m->list); + kfree(m); + } + kfree(elfcorebuf); + elfcorebuf = NULL; +} +EXPORT_SYMBOL_GPL(vmcore_cleanup); -- cgit From 70b5437653d9c6c8de287affd38836cce98ebde5 Mon Sep 17 00:00:00 2001 From: Mitsuo Hayasaka Date: Mon, 6 Feb 2012 12:51:05 +0000 Subject: xfs: cleanup quota check on disk blocks and inodes reservations This patch is a cleanup of quota check on disk blocks and inodes reservations, and changes it as follows. (1) add a total_count variable to store the total number of current usages and new reservations for disk blocks and inodes, respectively. 
(2) make it more readable to check if the local variables softlimit and hardlimit are positive. It has been changed as follows. if (softlimit > 0ULL) -> if (softlimit) if (hardlimit > 0ULL) -> if (hardlimit) This is because they are defined as xfs_qcnt_t which is unsigned. Signed-off-by: Mitsuo Hayasaka Cc: Ben Myers Cc: Alex Elder Cc: Christoph Hellwig Reviewed-by: Mark Tinguely Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_trans_dquot.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index c4ba366d24e6..877fe6367d2d 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -605,7 +605,7 @@ xfs_trans_dqresv( time_t timer; xfs_qwarncnt_t warns; xfs_qwarncnt_t warnlimit; - xfs_qcnt_t count; + xfs_qcnt_t total_count; xfs_qcnt_t *resbcountp; xfs_quotainfo_t *q = mp->m_quotainfo; @@ -648,13 +648,12 @@ xfs_trans_dqresv( * hardlimit or exceed the timelimit if we allocate * nblks. */ - if (hardlimit > 0ULL && - hardlimit < nblks + *resbcountp) { + total_count = *resbcountp + nblks; + if (hardlimit && total_count > hardlimit) { xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); goto error_return; } - if (softlimit > 0ULL && - softlimit < nblks + *resbcountp) { + if (softlimit && total_count > softlimit) { if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { xfs_quota_warn(mp, dqp, @@ -666,7 +665,7 @@ xfs_trans_dqresv( } } if (ninos > 0) { - count = be64_to_cpu(dqp->q_core.d_icount); + total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; timer = be32_to_cpu(dqp->q_core.d_itimer); warns = be16_to_cpu(dqp->q_core.d_iwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; @@ -677,13 +676,11 @@ xfs_trans_dqresv( if (!softlimit) softlimit = q->qi_isoftlimit; - if (hardlimit > 0ULL && - hardlimit < ninos + count) { + if (hardlimit && total_count > hardlimit) { xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); goto error_return; } - if (softlimit > 0ULL && - softlimit < ninos + count) { + if (softlimit && total_count > softlimit) { if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { xfs_quota_warn(mp, dqp, -- cgit From 09a423a3d6c70905f1090f01aadb8e6abff527ce Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:20 +0000 Subject: xfs: split tail_lsn assignments from log space wakeups Currently xfs_log_move_tail has a tail_lsn argument that is horribly overloaded: it may contain either an actual lsn to assign to the log tail, 0 as a special case to use the last sync LSN, or 1 to indicate that no tail LSN assignment should be performed, and we should opportunisticly wake up at one task waiting for log space even if we did not move the LSN. Remove the tail lsn assigned from xfs_log_move_tail and make the two callers use xlog_assign_tail_lsn instead of the current variant of partially using the code in xfs_log_move_tail and partially opencoding it. Note that means we grow an addition lock roundtrip on the AIL lock for each bulk update or delete, which is still far less than what we had before introducing the bulk operations. If this proves to be a problem we can still add a variant of xlog_assign_tail_lsn that expects the lock to be held already. Also rename the remainder of xfs_log_move_tail to xfs_log_space_wake as that name describes its functionality much better. 
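The shape of that fix is a familiar one: replace a parameter with magic values by separate functions whose names say what they do. A compact sketch of the before/after calling convention, with illustrative stand-in types rather than the real XFS structures:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Before: one entry point whose argument meant three different things:
     *     move_tail(log, lsn)  - assign this LSN as the new tail
     *     move_tail(log, 0)    - assign the last sync LSN as the tail
     *     move_tail(log, 1)    - assign nothing, just wake a waiter
     * After: each caller states what it wants explicitly.
     */
    struct log_state {
        unsigned long long tail_lsn;
        unsigned long long last_sync_lsn;
    };

    /* One job: recompute the tail, falling back to the last sync LSN. */
    static unsigned long long assign_tail_lsn(struct log_state *log,
                                              unsigned long long ail_min_lsn)
    {
        log->tail_lsn = ail_min_lsn ? ail_min_lsn : log->last_sync_lsn;
        return log->tail_lsn;
    }

    /* The other job: wake waiters; "opportunistic" is now an explicit flag. */
    static void log_space_wake(struct log_state *log, bool opportunistic)
    {
        printf("wake waiters (tail=%llu, opportunistic=%d)\n",
               log->tail_lsn, (int)opportunistic);
    }

    int main(void)
    {
        struct log_state log = { .tail_lsn = 0, .last_sync_lsn = 4711 };

        assign_tail_lsn(&log, 0);      /* was: move_tail(&log, 0) */
        log_space_wake(&log, false);

        log_space_wake(&log, true);    /* was: move_tail(&log, 1) */
        return 0;
    }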
Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 74 +++++++++++++++++++++----------------------------- fs/xfs/xfs_log.h | 5 ++-- fs/xfs/xfs_log_priv.h | 1 - fs/xfs/xfs_trans_ail.c | 45 ++++++++---------------------- 4 files changed, 45 insertions(+), 80 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e2cc3568c299..372642d39872 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -760,37 +760,35 @@ xfs_log_item_init( INIT_LIST_HEAD(&item->li_cil); } +/* + * Wake up processes waiting for log space after we have moved the log tail. + * + * If opportunistic is set wake up one waiter even if we do not have enough + * free space by our strict accounting. + */ void -xfs_log_move_tail(xfs_mount_t *mp, - xfs_lsn_t tail_lsn) +xfs_log_space_wake( + struct xfs_mount *mp, + bool opportunistic) { - xlog_ticket_t *tic; - xlog_t *log = mp->m_log; - int need_bytes, free_bytes; + struct xlog_ticket *tic; + struct log *log = mp->m_log; + int need_bytes, free_bytes; if (XLOG_FORCED_SHUTDOWN(log)) return; - if (tail_lsn == 0) - tail_lsn = atomic64_read(&log->l_last_sync_lsn); - - /* tail_lsn == 1 implies that we weren't passed a valid value. */ - if (tail_lsn != 1) - atomic64_set(&log->l_tail_lsn, tail_lsn); - if (!list_empty_careful(&log->l_writeq)) { -#ifdef DEBUG - if (log->l_flags & XLOG_ACTIVE_RECOVERY) - panic("Recovery problem"); -#endif + ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); + spin_lock(&log->l_grant_write_lock); free_bytes = xlog_space_left(log, &log->l_grant_write_head); list_for_each_entry(tic, &log->l_writeq, t_queue) { ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); - if (free_bytes < tic->t_unit_res && tail_lsn != 1) + if (free_bytes < tic->t_unit_res && !opportunistic) break; - tail_lsn = 0; + opportunistic = false; free_bytes -= tic->t_unit_res; trace_xfs_log_regrant_write_wake_up(log, tic); wake_up(&tic->t_wait); @@ -799,10 +797,8 @@ xfs_log_move_tail(xfs_mount_t *mp, } if (!list_empty_careful(&log->l_reserveq)) { -#ifdef DEBUG - if (log->l_flags & XLOG_ACTIVE_RECOVERY) - panic("Recovery problem"); -#endif + ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); + spin_lock(&log->l_grant_reserve_lock); free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); list_for_each_entry(tic, &log->l_reserveq, t_queue) { @@ -810,9 +806,9 @@ xfs_log_move_tail(xfs_mount_t *mp, need_bytes = tic->t_unit_res*tic->t_cnt; else need_bytes = tic->t_unit_res; - if (free_bytes < need_bytes && tail_lsn != 1) + if (free_bytes < need_bytes && !opportunistic) break; - tail_lsn = 0; + opportunistic = false; free_bytes -= need_bytes; trace_xfs_log_grant_wake_up(log, tic); wake_up(&tic->t_wait); @@ -867,21 +863,7 @@ xfs_log_need_covered(xfs_mount_t *mp) return needed; } -/****************************************************************************** - * - * local routines - * - ****************************************************************************** - */ - -/* xfs_trans_tail_ail returns 0 when there is nothing in the list. - * The log manager must keep track of the last LR which was committed - * to disk. The lsn of this LR will become the new tail_lsn whenever - * xfs_trans_tail_ail returns 0. If we don't do this, we run into - * the situation where stuff could be written into the log but nothing - * was ever in the AIL when asked. Eventually, we panic since the - * tail hits the head. - * +/* * We may be holding the log iclog lock upon entering this routine. 
*/ xfs_lsn_t @@ -891,10 +873,17 @@ xlog_assign_tail_lsn( xfs_lsn_t tail_lsn; struct log *log = mp->m_log; + /* + * To make sure we always have a valid LSN for the log tail we keep + * track of the last LSN which was committed in log->l_last_sync_lsn, + * and use that when the AIL was empty and xfs_ail_min_lsn returns 0. + * + * If the AIL has been emptied we also need to wake any process + * waiting for this condition. + */ tail_lsn = xfs_ail_min_lsn(mp->m_ail); if (!tail_lsn) tail_lsn = atomic64_read(&log->l_last_sync_lsn); - atomic64_set(&log->l_tail_lsn, tail_lsn); return tail_lsn; } @@ -2759,9 +2748,8 @@ xlog_ungrant_log_space(xlog_t *log, trace_xfs_log_ungrant_exit(log, ticket); - xfs_log_move_tail(log->l_mp, 1); -} /* xlog_ungrant_log_space */ - + xfs_log_space_wake(log->l_mp, true); +} /* * Flush iclog to disk if this is the last reference to the given iclog and diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 2aee3b22d29c..58d858074e6b 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -160,8 +160,9 @@ int xfs_log_mount(struct xfs_mount *mp, xfs_daddr_t start_block, int num_bblocks); int xfs_log_mount_finish(struct xfs_mount *mp); -void xfs_log_move_tail(struct xfs_mount *mp, - xfs_lsn_t tail_lsn); +xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); +void xfs_log_space_wake(struct xfs_mount *mp, + bool opportunistic); int xfs_log_notify(struct xfs_mount *mp, struct xlog_in_core *iclog, xfs_log_callback_t *callback_entry); diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 2d3b6a498d63..785905e3cf03 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -545,7 +545,6 @@ typedef struct log { #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) /* common routines */ -extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); extern int xlog_recover(xlog_t *log); extern int xlog_recover_finish(xlog_t *log); extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index ed9252bcdac9..c9234956bcb2 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -643,15 +643,15 @@ xfs_trans_unlocked_item( * at the tail, it doesn't matter what result we get back. This * is slightly racy because since we were just unlocked, we could * go to sleep between the call to xfs_ail_min and the call to - * xfs_log_move_tail, have someone else lock us, commit to us disk, + * xfs_log_space_wake, have someone else lock us, commit to us disk, * move us out of the tail of the AIL, and then we wake up. However, - * the call to xfs_log_move_tail() doesn't do anything if there's + * the call to xfs_log_space_wake() doesn't do anything if there's * not enough free space to wake people up so we're safe calling it. 
*/ min_lip = xfs_ail_min(ailp); if (min_lip == lip) - xfs_log_move_tail(ailp->xa_mount, 1); + xfs_log_space_wake(ailp->xa_mount, true); } /* xfs_trans_unlocked_item */ /* @@ -685,7 +685,6 @@ xfs_trans_ail_update_bulk( xfs_lsn_t lsn) __releases(ailp->xa_lock) { xfs_log_item_t *mlip; - xfs_lsn_t tail_lsn; int mlip_changed = 0; int i; LIST_HEAD(tmp); @@ -712,22 +711,12 @@ xfs_trans_ail_update_bulk( if (!list_empty(&tmp)) xfs_ail_splice(ailp, cur, &tmp, lsn); + spin_unlock(&ailp->xa_lock); - if (!mlip_changed) { - spin_unlock(&ailp->xa_lock); - return; + if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { + xlog_assign_tail_lsn(ailp->xa_mount); + xfs_log_space_wake(ailp->xa_mount, false); } - - /* - * It is not safe to access mlip after the AIL lock is dropped, so we - * must get a copy of li_lsn before we do so. This is especially - * important on 32-bit platforms where accessing and updating 64-bit - * values like li_lsn is not atomic. - */ - mlip = xfs_ail_min(ailp); - tail_lsn = mlip->li_lsn; - spin_unlock(&ailp->xa_lock); - xfs_log_move_tail(ailp->xa_mount, tail_lsn); } /* @@ -758,7 +747,6 @@ xfs_trans_ail_delete_bulk( int nr_items) __releases(ailp->xa_lock) { xfs_log_item_t *mlip; - xfs_lsn_t tail_lsn; int mlip_changed = 0; int i; @@ -785,23 +773,12 @@ xfs_trans_ail_delete_bulk( if (mlip == lip) mlip_changed = 1; } + spin_unlock(&ailp->xa_lock); - if (!mlip_changed) { - spin_unlock(&ailp->xa_lock); - return; + if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { + xlog_assign_tail_lsn(ailp->xa_mount); + xfs_log_space_wake(ailp->xa_mount, false); } - - /* - * It is not safe to access mlip after the AIL lock is dropped, so we - * must get a copy of li_lsn before we do so. This is especially - * important on 32-bit platforms where accessing and updating 64-bit - * values like li_lsn is not atomic. It is possible we've emptied the - * AIL here, so if that is the case, pass an LSN of 0 to the tail move. - */ - mlip = xfs_ail_min(ailp); - tail_lsn = mlip ? mlip->li_lsn : 0; - spin_unlock(&ailp->xa_lock); - xfs_log_move_tail(ailp->xa_mount, tail_lsn); } /* -- cgit From 3af1de753b3caf9fa3762b4b1b85d833c121847e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:21 +0000 Subject: xfs: do exact log space wakeups in xlog_ungrant_log_space The only reason that xfs_log_space_wake had to do opportunistic wakeups was that the old xfs_log_move_tail calling convention didn't allow for exact wakeups when not updating the log tail LSN. Since this issue has been fixed we can do exact wakeups now. Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 372642d39872..9161e8a76e77 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -2748,7 +2748,7 @@ xlog_ungrant_log_space(xlog_t *log, trace_xfs_log_ungrant_exit(log, ticket); - xfs_log_space_wake(log->l_mp, true); + xfs_log_space_wake(log->l_mp, false); } /* -- cgit From 5b03ff1b2444ddf7b8084b7505101e97257aff5a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:22 +0000 Subject: xfs: remove xfs_trans_unlocked_item There is no reason to wake up log space waiters when unlocking inodes or dquots, and the commit log has no explanation for this function either. 
Given that we now have exact log space wakeups everywhere we can assume the reason for this function was to paper over log space races in earlier XFS versions. Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_dquot.c | 11 ----------- fs/xfs/xfs_dquot.h | 3 +-- fs/xfs/xfs_iget.c | 13 +------------ fs/xfs/xfs_inode.h | 4 +--- fs/xfs/xfs_inode_item.c | 6 +----- fs/xfs/xfs_trans_ail.c | 44 -------------------------------------------- fs/xfs/xfs_trans_buf.c | 25 +------------------------ fs/xfs/xfs_trans_priv.h | 3 --- 8 files changed, 5 insertions(+), 104 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 4e268edcf3f6..71e615fef174 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -1076,17 +1076,6 @@ xfs_qm_dqflush( } -void -xfs_dqunlock( - xfs_dquot_t *dqp) -{ - xfs_dqunlock_nonotify(dqp); - if (dqp->q_logitem.qli_dquot == dqp) { - xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp, - &dqp->q_logitem.qli_item); - } -} - /* * Lock two xfs_dquot structures. * diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 48a795b141b6..60b0d72b0241 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -110,7 +110,7 @@ static inline void xfs_dqlock(struct xfs_dquot *dqp) mutex_lock(&dqp->q_qlock); } -static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp) +static inline void xfs_dqunlock(struct xfs_dquot *dqp) { mutex_unlock(&dqp->q_qlock); } @@ -166,7 +166,6 @@ extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, extern void xfs_qm_dqput(xfs_dquot_t *); extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *); -extern void xfs_dqunlock(struct xfs_dquot *); extern void xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp); static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp) diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 8c3e46394d48..19dcfb2aac9a 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -642,8 +642,7 @@ xfs_iunlock( (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); - ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY | - XFS_LOCK_DEP_MASK)) == 0); + ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); ASSERT(lock_flags != 0); if (lock_flags & XFS_IOLOCK_EXCL) @@ -656,16 +655,6 @@ xfs_iunlock( else if (lock_flags & XFS_ILOCK_SHARED) mrunlock_shared(&ip->i_lock); - if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) && - !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) { - /* - * Let the AIL know that this item has been unlocked in case - * it is in the AIL and anyone is waiting on it. Don't do - * this if the caller has asked us not to. 
- */ - xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp, - (xfs_log_item_t*)(ip->i_itemp)); - } trace_xfs_iunlock(ip, lock_flags, _RET_IP_); } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 2f27b7454085..eda493780395 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -422,7 +422,6 @@ static inline int xfs_isiflocked(struct xfs_inode *ip) #define XFS_IOLOCK_SHARED (1<<1) #define XFS_ILOCK_EXCL (1<<2) #define XFS_ILOCK_SHARED (1<<3) -#define XFS_IUNLOCK_NONOTIFY (1<<4) #define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \ | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED) @@ -431,8 +430,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip) { XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \ { XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \ { XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \ - { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \ - { XFS_IUNLOCK_NONOTIFY, "IUNLOCK_NONOTIFY" } + { XFS_ILOCK_SHARED, "ILOCK_SHARED" } /* diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 91d71dcd4852..adc8a261b5d0 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -596,11 +596,7 @@ xfs_inode_item_trylock( /* Stale items should force out the iclog */ if (ip->i_flags & XFS_ISTALE) { xfs_ifunlock(ip); - /* - * we hold the AIL lock - notify the unlock routine of this - * so it doesn't try to get the lock again. - */ - xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); + xfs_iunlock(ip, XFS_ILOCK_SHARED); return XFS_ITEM_PINNED; } diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index c9234956bcb2..9d5fc089ea2e 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -610,50 +610,6 @@ xfs_ail_push_all( xfs_ail_push(ailp, threshold_lsn); } -/* - * This is to be called when an item is unlocked that may have - * been in the AIL. It will wake up the first member of the AIL - * wait list if this item's unlocking might allow it to progress. - * If the item is in the AIL, then we need to get the AIL lock - * while doing our checking so we don't race with someone going - * to sleep waiting for this event in xfs_trans_push_ail(). - */ -void -xfs_trans_unlocked_item( - struct xfs_ail *ailp, - xfs_log_item_t *lip) -{ - xfs_log_item_t *min_lip; - - /* - * If we're forcibly shutting down, we may have - * unlocked log items arbitrarily. The last thing - * we want to do is to move the tail of the log - * over some potentially valid data. - */ - if (!(lip->li_flags & XFS_LI_IN_AIL) || - XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { - return; - } - - /* - * This is the one case where we can call into xfs_ail_min() - * without holding the AIL lock because we only care about the - * case where we are at the tail of the AIL. If the object isn't - * at the tail, it doesn't matter what result we get back. This - * is slightly racy because since we were just unlocked, we could - * go to sleep between the call to xfs_ail_min and the call to - * xfs_log_space_wake, have someone else lock us, commit to us disk, - * move us out of the tail of the AIL, and then we wake up. However, - * the call to xfs_log_space_wake() doesn't do anything if there's - * not enough free space to wake people up so we're safe calling it. - */ - min_lip = xfs_ail_min(ailp); - - if (min_lip == lip) - xfs_log_space_wake(ailp->xa_mount, true); -} /* xfs_trans_unlocked_item */ - /* * xfs_trans_ail_update - bulk AIL insertion operation. 
* diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 475a4ded4f41..1302d1d95a58 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -463,19 +463,7 @@ xfs_trans_brelse(xfs_trans_t *tp, * Default to a normal brelse() call if the tp is NULL. */ if (tp == NULL) { - struct xfs_log_item *lip = bp->b_fspriv; - ASSERT(bp->b_transp == NULL); - - /* - * If there's a buf log item attached to the buffer, - * then let the AIL know that the buffer is being - * unlocked. - */ - if (lip != NULL && lip->li_type == XFS_LI_BUF) { - bip = bp->b_fspriv; - xfs_trans_unlocked_item(bip->bli_item.li_ailp, lip); - } xfs_buf_relse(bp); return; } @@ -550,21 +538,10 @@ xfs_trans_brelse(xfs_trans_t *tp, ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL)); ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF)); xfs_buf_item_relse(bp); - bip = NULL; - } - bp->b_transp = NULL; - - /* - * If we've still got a buf log item on the buffer, then - * tell the AIL that the buffer is being unlocked. - */ - if (bip != NULL) { - xfs_trans_unlocked_item(bip->bli_item.li_ailp, - (xfs_log_item_t*)bip); } + bp->b_transp = NULL; xfs_buf_relse(bp); - return; } /* diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 44820b9fcb43..8ab2ced415f1 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -104,9 +104,6 @@ void xfs_ail_push(struct xfs_ail *, xfs_lsn_t); void xfs_ail_push_all(struct xfs_ail *); xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp); -void xfs_trans_unlocked_item(struct xfs_ail *, - xfs_log_item_t *); - struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp, struct xfs_ail_cursor *cur, xfs_lsn_t lsn); -- cgit From cfb7cdca0aca5ee2e2ef491284bf1edc3b581885 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:23 +0000 Subject: xfs: cleanup xfs_log_space_wake Remove the now unused opportunistic parameter, and use the the xlog_writeq_wake and xlog_reserveq_wake helpers now that we don't have to care about the opportunistic wakeups. Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 35 +++++------------------------------ fs/xfs/xfs_log.h | 3 +-- fs/xfs/xfs_trans_ail.c | 4 ++-- 3 files changed, 8 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 9161e8a76e77..2db39df5a57d 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -762,18 +762,13 @@ xfs_log_item_init( /* * Wake up processes waiting for log space after we have moved the log tail. - * - * If opportunistic is set wake up one waiter even if we do not have enough - * free space by our strict accounting. 
*/ void xfs_log_space_wake( - struct xfs_mount *mp, - bool opportunistic) + struct xfs_mount *mp) { - struct xlog_ticket *tic; struct log *log = mp->m_log; - int need_bytes, free_bytes; + int free_bytes; if (XLOG_FORCED_SHUTDOWN(log)) return; @@ -783,16 +778,7 @@ xfs_log_space_wake( spin_lock(&log->l_grant_write_lock); free_bytes = xlog_space_left(log, &log->l_grant_write_head); - list_for_each_entry(tic, &log->l_writeq, t_queue) { - ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); - - if (free_bytes < tic->t_unit_res && !opportunistic) - break; - opportunistic = false; - free_bytes -= tic->t_unit_res; - trace_xfs_log_regrant_write_wake_up(log, tic); - wake_up(&tic->t_wait); - } + xlog_writeq_wake(log, &free_bytes); spin_unlock(&log->l_grant_write_lock); } @@ -801,18 +787,7 @@ xfs_log_space_wake( spin_lock(&log->l_grant_reserve_lock); free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); - list_for_each_entry(tic, &log->l_reserveq, t_queue) { - if (tic->t_flags & XLOG_TIC_PERM_RESERV) - need_bytes = tic->t_unit_res*tic->t_cnt; - else - need_bytes = tic->t_unit_res; - if (free_bytes < need_bytes && !opportunistic) - break; - opportunistic = false; - free_bytes -= need_bytes; - trace_xfs_log_grant_wake_up(log, tic); - wake_up(&tic->t_wait); - } + xlog_reserveq_wake(log, &free_bytes); spin_unlock(&log->l_grant_reserve_lock); } } @@ -2748,7 +2723,7 @@ xlog_ungrant_log_space(xlog_t *log, trace_xfs_log_ungrant_exit(log, ticket); - xfs_log_space_wake(log->l_mp, false); + xfs_log_space_wake(log->l_mp); } /* diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 58d858074e6b..fe32c6927877 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -161,8 +161,7 @@ int xfs_log_mount(struct xfs_mount *mp, int num_bblocks); int xfs_log_mount_finish(struct xfs_mount *mp); xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); -void xfs_log_space_wake(struct xfs_mount *mp, - bool opportunistic); +void xfs_log_space_wake(struct xfs_mount *mp); int xfs_log_notify(struct xfs_mount *mp, struct xlog_in_core *iclog, xfs_log_callback_t *callback_entry); diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 9d5fc089ea2e..1dead07f092c 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -671,7 +671,7 @@ xfs_trans_ail_update_bulk( if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { xlog_assign_tail_lsn(ailp->xa_mount); - xfs_log_space_wake(ailp->xa_mount, false); + xfs_log_space_wake(ailp->xa_mount); } } @@ -733,7 +733,7 @@ xfs_trans_ail_delete_bulk( if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { xlog_assign_tail_lsn(ailp->xa_mount); - xfs_log_space_wake(ailp->xa_mount, false); + xfs_log_space_wake(ailp->xa_mount); } } -- cgit From 14a7235fba4302a82d61150eda92ec90d3ae9cfb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:24 +0000 Subject: xfs: remove log space waitqueues The tic->t_wait waitqueues can never have more than a single waiter on them, so we can easily replace them with a task_struct pointer and wake_up_process. 
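The pattern here is that each ticket has exactly one sleeper, so the wait can be a per-ticket wakeup handle rather than a shared waitqueue. As a rough userspace analogy only, with POSIX semaphores standing in for schedule()/wake_up_process() (not the kernel code):

    /* build: cc -pthread sketch.c */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Each ticket carries its own wakeup handle -- one waiter per ticket. */
    struct ticket {
        int id;
        sem_t wakeup;
    };

    static struct ticket tickets[2];

    static void *waiter(void *arg)
    {
        struct ticket *tic = arg;

        printf("ticket %d: sleeping\n", tic->id);
        sem_wait(&tic->wakeup);            /* analogous to schedule() */
        printf("ticket %d: woken directly\n", tic->id);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];
        int i;

        for (i = 0; i < 2; i++) {
            tickets[i].id = i;
            sem_init(&tickets[i].wakeup, 0, 0);
            pthread_create(&t[i], NULL, waiter, &tickets[i]);
        }

        sleep(1);
        /* The waker wakes exactly the waiters it picks, no shared queue. */
        for (i = 0; i < 2; i++)
            sem_post(&tickets[i].wakeup);  /* analogous to wake_up_process() */

        for (i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        return 0;
    }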
Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 24 +++++++++++++++--------- fs/xfs/xfs_log_priv.h | 2 +- 2 files changed, 16 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 2db39df5a57d..02a35fba5eae 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -169,7 +169,7 @@ xlog_reserveq_wake( *free_bytes -= need_bytes; trace_xfs_log_grant_wake_up(log, tic); - wake_up(&tic->t_wait); + wake_up_process(tic->t_task); } return true; @@ -193,7 +193,7 @@ xlog_writeq_wake( *free_bytes -= need_bytes; trace_xfs_log_regrant_write_wake_up(log, tic); - wake_up(&tic->t_wait); + wake_up_process(tic->t_task); } return true; @@ -212,10 +212,13 @@ xlog_reserveq_wait( goto shutdown; xlog_grant_push_ail(log, need_bytes); + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(&log->l_grant_reserve_lock); + XFS_STATS_INC(xs_sleep_logspace); - trace_xfs_log_grant_sleep(log, tic); - xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); + trace_xfs_log_grant_sleep(log, tic); + schedule(); trace_xfs_log_grant_wake(log, tic); spin_lock(&log->l_grant_reserve_lock); @@ -243,10 +246,13 @@ xlog_writeq_wait( goto shutdown; xlog_grant_push_ail(log, need_bytes); + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(&log->l_grant_write_lock); + XFS_STATS_INC(xs_sleep_logspace); - trace_xfs_log_regrant_write_sleep(log, tic); - xlog_wait(&tic->t_wait, &log->l_grant_write_lock); + trace_xfs_log_regrant_write_sleep(log, tic); + schedule(); trace_xfs_log_regrant_write_wake(log, tic); spin_lock(&log->l_grant_write_lock); @@ -3276,6 +3282,7 @@ xlog_ticket_alloc( } atomic_set(&tic->t_ref, 1); + tic->t_task = current; INIT_LIST_HEAD(&tic->t_queue); tic->t_unit_res = unit_bytes; tic->t_curr_res = unit_bytes; @@ -3287,7 +3294,6 @@ xlog_ticket_alloc( tic->t_trans_type = 0; if (xflags & XFS_LOG_PERM_RESERV) tic->t_flags |= XLOG_TIC_PERM_RESERV; - init_waitqueue_head(&tic->t_wait); xlog_tic_reset_res(tic); @@ -3615,12 +3621,12 @@ xfs_log_force_umount( */ spin_lock(&log->l_grant_reserve_lock); list_for_each_entry(tic, &log->l_reserveq, t_queue) - wake_up(&tic->t_wait); + wake_up_process(tic->t_task); spin_unlock(&log->l_grant_reserve_lock); spin_lock(&log->l_grant_write_lock); list_for_each_entry(tic, &log->l_writeq, t_queue) - wake_up(&tic->t_wait); + wake_up_process(tic->t_task); spin_unlock(&log->l_grant_write_lock); if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 785905e3cf03..d8c5e47bbc2f 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -239,8 +239,8 @@ typedef struct xlog_res { } xlog_res_t; typedef struct xlog_ticket { - wait_queue_head_t t_wait; /* ticket wait queue */ struct list_head t_queue; /* reserve/write queue */ + struct task_struct *t_task; /* task that owns this ticket */ xlog_tid_t t_tid; /* transaction identifier : 4 */ atomic_t t_ref; /* ticket reference count : 4 */ int t_curr_res; /* current reservation in bytes : 4 */ -- cgit From 28496968a6ac37c8b8c44b5156e633c581bb8378 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:25 +0000 Subject: xfs: add the xlog_grant_head structure Add a new data structure to allow sharing code between the log grant and regrant code. 
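The consolidation folds the duplicated lock/waiter-list/grant triples into one structure with a single init helper used for both heads. A minimal userspace sketch of that refactoring, with a pthread mutex standing in for the spinlock and illustrative names throughout:

    #include <pthread.h>
    #include <stdio.h>

    struct waiter {
        struct waiter *next;
        int need_bytes;
    };

    /* Reserve head and write head have identical shape, so share it. */
    struct grant_head {
        pthread_mutex_t lock;
        struct waiter *waiters;        /* head of a simple waiter list */
        unsigned long long grant;
    };

    struct log_state {
        struct grant_head reserve_head;
        struct grant_head write_head;
    };

    static void grant_head_init(struct grant_head *head)
    {
        pthread_mutex_init(&head->lock, NULL);
        head->waiters = NULL;
        head->grant = 0;
    }

    int main(void)
    {
        struct log_state log;

        /* one helper, two heads -- no duplicated field-by-field setup */
        grant_head_init(&log.reserve_head);
        grant_head_init(&log.write_head);
        printf("reserve grant=%llu write grant=%llu\n",
               log.reserve_head.grant, log.write_head.grant);
        return 0;
    }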
Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 112 +++++++++++++++++++++++------------------------ fs/xfs/xfs_log_priv.h | 23 +++++----- fs/xfs/xfs_log_recover.c | 4 +- fs/xfs/xfs_trace.h | 8 ++-- 4 files changed, 74 insertions(+), 73 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 02a35fba5eae..ad0cac378e9a 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -158,7 +158,7 @@ xlog_reserveq_wake( struct xlog_ticket *tic; int need_bytes; - list_for_each_entry(tic, &log->l_reserveq, t_queue) { + list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue) { if (tic->t_flags & XLOG_TIC_PERM_RESERV) need_bytes = tic->t_unit_res * tic->t_cnt; else @@ -183,7 +183,7 @@ xlog_writeq_wake( struct xlog_ticket *tic; int need_bytes; - list_for_each_entry(tic, &log->l_writeq, t_queue) { + list_for_each_entry(tic, &log->l_write_head.waiters, t_queue) { ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); need_bytes = tic->t_unit_res; @@ -205,7 +205,7 @@ xlog_reserveq_wait( struct xlog_ticket *tic, int need_bytes) { - list_add_tail(&tic->t_queue, &log->l_reserveq); + list_add_tail(&tic->t_queue, &log->l_reserve_head.waiters); do { if (XLOG_FORCED_SHUTDOWN(log)) @@ -213,7 +213,7 @@ xlog_reserveq_wait( xlog_grant_push_ail(log, need_bytes); __set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock(&log->l_grant_reserve_lock); + spin_unlock(&log->l_reserve_head.lock); XFS_STATS_INC(xs_sleep_logspace); @@ -221,10 +221,10 @@ xlog_reserveq_wait( schedule(); trace_xfs_log_grant_wake(log, tic); - spin_lock(&log->l_grant_reserve_lock); + spin_lock(&log->l_reserve_head.lock); if (XLOG_FORCED_SHUTDOWN(log)) goto shutdown; - } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes); + } while (xlog_space_left(log, &log->l_reserve_head.grant) < need_bytes); list_del_init(&tic->t_queue); return 0; @@ -239,7 +239,7 @@ xlog_writeq_wait( struct xlog_ticket *tic, int need_bytes) { - list_add_tail(&tic->t_queue, &log->l_writeq); + list_add_tail(&tic->t_queue, &log->l_write_head.waiters); do { if (XLOG_FORCED_SHUTDOWN(log)) @@ -247,7 +247,7 @@ xlog_writeq_wait( xlog_grant_push_ail(log, need_bytes); __set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock(&log->l_grant_write_lock); + spin_unlock(&log->l_write_head.lock); XFS_STATS_INC(xs_sleep_logspace); @@ -255,10 +255,10 @@ xlog_writeq_wait( schedule(); trace_xfs_log_regrant_write_wake(log, tic); - spin_lock(&log->l_grant_write_lock); + spin_lock(&log->l_write_head.lock); if (XLOG_FORCED_SHUTDOWN(log)) goto shutdown; - } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes); + } while (xlog_space_left(log, &log->l_write_head.grant) < need_bytes); list_del_init(&tic->t_queue); return 0; @@ -779,22 +779,22 @@ xfs_log_space_wake( if (XLOG_FORCED_SHUTDOWN(log)) return; - if (!list_empty_careful(&log->l_writeq)) { + if (!list_empty_careful(&log->l_write_head.waiters)) { ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); - spin_lock(&log->l_grant_write_lock); - free_bytes = xlog_space_left(log, &log->l_grant_write_head); + spin_lock(&log->l_write_head.lock); + free_bytes = xlog_space_left(log, &log->l_write_head.grant); xlog_writeq_wake(log, &free_bytes); - spin_unlock(&log->l_grant_write_lock); + spin_unlock(&log->l_write_head.lock); } - if (!list_empty_careful(&log->l_reserveq)) { + if (!list_empty_careful(&log->l_reserve_head.waiters)) { ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); - spin_lock(&log->l_grant_reserve_lock); - 
free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); + spin_lock(&log->l_reserve_head.lock); + free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); xlog_reserveq_wake(log, &free_bytes); - spin_unlock(&log->l_grant_reserve_lock); + spin_unlock(&log->l_reserve_head.lock); } } @@ -1070,12 +1070,12 @@ xlog_alloc_log(xfs_mount_t *mp, xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ - xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0); - xlog_assign_grant_head(&log->l_grant_write_head, 1, 0); - INIT_LIST_HEAD(&log->l_reserveq); - INIT_LIST_HEAD(&log->l_writeq); - spin_lock_init(&log->l_grant_reserve_lock); - spin_lock_init(&log->l_grant_write_lock); + xlog_assign_grant_head(&log->l_reserve_head.grant, 1, 0); + xlog_assign_grant_head(&log->l_write_head.grant, 1, 0); + INIT_LIST_HEAD(&log->l_reserve_head.waiters); + INIT_LIST_HEAD(&log->l_write_head.waiters); + spin_lock_init(&log->l_reserve_head.lock); + spin_lock_init(&log->l_write_head.lock); error = EFSCORRUPTED; if (xfs_sb_version_hassector(&mp->m_sb)) { @@ -1250,7 +1250,7 @@ xlog_grant_push_ail( ASSERT(BTOBB(need_bytes) < log->l_logBBsize); - free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); + free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); free_blocks = BTOBBT(free_bytes); /* @@ -1382,8 +1382,8 @@ xlog_sync(xlog_t *log, roundoff < BBTOB(1))); /* move grant heads by roundoff in sync */ - xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff); - xlog_grant_add_space(log, &log->l_grant_write_head, roundoff); + xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); + xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); /* put cycle number in every block */ xlog_pack_data(log, iclog, roundoff); @@ -2547,8 +2547,8 @@ restart: * path. Hence any lock will be globally hot if we take it unconditionally on * every pass. * - * As tickets are only ever moved on and off the reserveq under the - * l_grant_reserve_lock, we only need to take that lock if we are going to add + * As tickets are only ever moved on and off the l_reserve.waiters under the + * l_reserve.lock, we only need to take that lock if we are going to add * the ticket to the queue and sleep. We can avoid taking the lock if the ticket * was never added to the reserveq because the t_queue list head will be empty * and we hold the only reference to it so it can safely be checked unlocked. 
@@ -2574,23 +2574,23 @@ xlog_grant_log_space( need_bytes = tic->t_unit_res; if (tic->t_flags & XFS_LOG_PERM_RESERV) need_bytes *= tic->t_ocnt; - free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); - if (!list_empty_careful(&log->l_reserveq)) { - spin_lock(&log->l_grant_reserve_lock); + free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); + if (!list_empty_careful(&log->l_reserve_head.waiters)) { + spin_lock(&log->l_reserve_head.lock); if (!xlog_reserveq_wake(log, &free_bytes) || free_bytes < need_bytes) error = xlog_reserveq_wait(log, tic, need_bytes); - spin_unlock(&log->l_grant_reserve_lock); + spin_unlock(&log->l_reserve_head.lock); } else if (free_bytes < need_bytes) { - spin_lock(&log->l_grant_reserve_lock); + spin_lock(&log->l_reserve_head.lock); error = xlog_reserveq_wait(log, tic, need_bytes); - spin_unlock(&log->l_grant_reserve_lock); + spin_unlock(&log->l_reserve_head.lock); } if (error) return error; - xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); - xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); + xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); + xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); trace_xfs_log_grant_exit(log, tic); xlog_verify_grant_tail(log); return 0; @@ -2627,23 +2627,23 @@ xlog_regrant_write_log_space( * otherwise try to get some space for this transaction. */ need_bytes = tic->t_unit_res; - free_bytes = xlog_space_left(log, &log->l_grant_write_head); - if (!list_empty_careful(&log->l_writeq)) { - spin_lock(&log->l_grant_write_lock); + free_bytes = xlog_space_left(log, &log->l_write_head.grant); + if (!list_empty_careful(&log->l_write_head.waiters)) { + spin_lock(&log->l_write_head.lock); if (!xlog_writeq_wake(log, &free_bytes) || free_bytes < need_bytes) error = xlog_writeq_wait(log, tic, need_bytes); - spin_unlock(&log->l_grant_write_lock); + spin_unlock(&log->l_write_head.lock); } else if (free_bytes < need_bytes) { - spin_lock(&log->l_grant_write_lock); + spin_lock(&log->l_write_head.lock); error = xlog_writeq_wait(log, tic, need_bytes); - spin_unlock(&log->l_grant_write_lock); + spin_unlock(&log->l_write_head.lock); } if (error) return error; - xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); + xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); trace_xfs_log_regrant_write_exit(log, tic); xlog_verify_grant_tail(log); return 0; @@ -2665,9 +2665,9 @@ xlog_regrant_reserve_log_space(xlog_t *log, if (ticket->t_cnt > 0) ticket->t_cnt--; - xlog_grant_sub_space(log, &log->l_grant_reserve_head, + xlog_grant_sub_space(log, &log->l_reserve_head.grant, ticket->t_curr_res); - xlog_grant_sub_space(log, &log->l_grant_write_head, + xlog_grant_sub_space(log, &log->l_write_head.grant, ticket->t_curr_res); ticket->t_curr_res = ticket->t_unit_res; xlog_tic_reset_res(ticket); @@ -2678,7 +2678,7 @@ xlog_regrant_reserve_log_space(xlog_t *log, if (ticket->t_cnt > 0) return; - xlog_grant_add_space(log, &log->l_grant_reserve_head, + xlog_grant_add_space(log, &log->l_reserve_head.grant, ticket->t_unit_res); trace_xfs_log_regrant_reserve_exit(log, ticket); @@ -2724,8 +2724,8 @@ xlog_ungrant_log_space(xlog_t *log, bytes += ticket->t_unit_res*ticket->t_cnt; } - xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes); - xlog_grant_sub_space(log, &log->l_grant_write_head, bytes); + xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); + xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); trace_xfs_log_ungrant_exit(log, ticket); @@ -3349,7 
+3349,7 @@ xlog_verify_grant_tail( int tail_cycle, tail_blocks; int cycle, space; - xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space); + xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); if (tail_cycle != cycle) { if (cycle - 1 != tail_cycle && @@ -3619,15 +3619,15 @@ xfs_log_force_umount( * we don't enqueue anything once the SHUTDOWN flag is set, and this * action is protected by the grant locks. */ - spin_lock(&log->l_grant_reserve_lock); - list_for_each_entry(tic, &log->l_reserveq, t_queue) + spin_lock(&log->l_reserve_head.lock); + list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue) wake_up_process(tic->t_task); - spin_unlock(&log->l_grant_reserve_lock); + spin_unlock(&log->l_reserve_head.lock); - spin_lock(&log->l_grant_write_lock); - list_for_each_entry(tic, &log->l_writeq, t_queue) + spin_lock(&log->l_write_head.lock); + list_for_each_entry(tic, &log->l_write_head.waiters, t_queue) wake_up_process(tic->t_task); - spin_unlock(&log->l_grant_write_lock); + spin_unlock(&log->l_write_head.lock); if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { ASSERT(!logerror); diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index d8c5e47bbc2f..eba4ec925a45 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -469,6 +469,16 @@ struct xfs_cil { #define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3) #define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4)) +/* + * ticket grant locks, queues and accounting have their own cachlines + * as these are quite hot and can be operated on concurrently. + */ +struct xlog_grant_head { + spinlock_t lock ____cacheline_aligned_in_smp; + struct list_head waiters; + atomic64_t grant; +}; + /* * The reservation head lsn is not made up of a cycle number and block number. * Instead, it uses a cycle number and byte number. Logs don't expect to @@ -520,17 +530,8 @@ typedef struct log { /* lsn of 1st LR with unflushed * buffers */ atomic64_t l_tail_lsn ____cacheline_aligned_in_smp; - /* - * ticket grant locks, queues and accounting have their own cachlines - * as these are quite hot and can be operated on concurrently. 
- */ - spinlock_t l_grant_reserve_lock ____cacheline_aligned_in_smp; - struct list_head l_reserveq; - atomic64_t l_grant_reserve_head; - - spinlock_t l_grant_write_lock ____cacheline_aligned_in_smp; - struct list_head l_writeq; - atomic64_t l_grant_write_head; + struct xlog_grant_head l_reserve_head; + struct xlog_grant_head l_write_head; /* The following field are used for debugging; need to hold icloglock */ #ifdef DEBUG diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 8a3d8aedd1f4..7c75c7374d5a 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -965,9 +965,9 @@ xlog_find_tail( log->l_curr_cycle++; atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); - xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle, + xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle, BBTOB(log->l_curr_block)); - xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle, + xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle, BBTOB(log->l_curr_block)); /* diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index bb134a819930..a80e30854fce 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -782,12 +782,12 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, __entry->curr_res = tic->t_curr_res; __entry->unit_res = tic->t_unit_res; __entry->flags = tic->t_flags; - __entry->reserveq = list_empty(&log->l_reserveq); - __entry->writeq = list_empty(&log->l_writeq); - xlog_crack_grant_head(&log->l_grant_reserve_head, + __entry->reserveq = list_empty(&log->l_reserve_head.waiters); + __entry->writeq = list_empty(&log->l_write_head.waiters); + xlog_crack_grant_head(&log->l_reserve_head.grant, &__entry->grant_reserve_cycle, &__entry->grant_reserve_bytes); - xlog_crack_grant_head(&log->l_grant_write_head, + xlog_crack_grant_head(&log->l_write_head.grant, &__entry->grant_write_cycle, &__entry->grant_write_bytes); __entry->curr_cycle = log->l_curr_cycle; -- cgit From c303c5b8c3b8eace41c4fba26205b50c0f8e4ca0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:26 +0000 Subject: xfs: add xlog_grant_head_init Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index ad0cac378e9a..30fec0a2a213 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -150,6 +150,15 @@ xlog_grant_add_space( } while (head_val != old); } +STATIC void +xlog_grant_head_init( + struct xlog_grant_head *head) +{ + xlog_assign_grant_head(&head->grant, 1, 0); + INIT_LIST_HEAD(&head->waiters); + spin_lock_init(&head->lock); +} + STATIC bool xlog_reserveq_wake( struct log *log, @@ -1070,12 +1079,9 @@ xlog_alloc_log(xfs_mount_t *mp, xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ - xlog_assign_grant_head(&log->l_reserve_head.grant, 1, 0); - xlog_assign_grant_head(&log->l_write_head.grant, 1, 0); - INIT_LIST_HEAD(&log->l_reserve_head.waiters); - INIT_LIST_HEAD(&log->l_write_head.waiters); - spin_lock_init(&log->l_reserve_head.lock); - spin_lock_init(&log->l_write_head.lock); + + xlog_grant_head_init(&log->l_reserve_head); + xlog_grant_head_init(&log->l_write_head); error = EFSCORRUPTED; if (xfs_sb_version_hassector(&mp->m_sb)) { -- cgit 
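The xlog_grant_head consolidation above is what makes the shared helpers in the following commits possible. The snippet below is a minimal user-space sketch of that pattern, not kernel code: a pthread mutex and a plain counter stand in for spinlock_t and atomic64_t, and only the field layout and the xlog_grant_head_init() name are taken from the patches; everything else is illustrative.

/* Minimal user-space sketch of the consolidated grant head.
 * pthread_mutex_t stands in for spinlock_t and a plain long for the
 * atomic64_t grant value; the real kernel code differs in both. */
#include <pthread.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list->prev = list;
}

struct xlog_grant_head {
	pthread_mutex_t  lock;      /* protects waiters */
	struct list_head waiters;   /* queued tickets */
	long             grant;     /* packed cycle/bytes in XFS proper */
};

/* One helper replaces the duplicated reserve/write head setup. */
static void xlog_grant_head_init(struct xlog_grant_head *head)
{
	head->grant = 0;
	INIT_LIST_HEAD(&head->waiters);
	pthread_mutex_init(&head->lock, NULL);
}

int main(void)
{
	struct xlog_grant_head reserve_head, write_head;

	/* Mirrors xlog_alloc_log() after the patch: both heads share one helper. */
	xlog_grant_head_init(&reserve_head);
	xlog_grant_head_init(&write_head);

	printf("grant heads initialised: reserve=%ld write=%ld\n",
	       reserve_head.grant, write_head.grant);
	return 0;
}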
From a79bf2d75b8f96bcdb6714138cd53cb3c358669c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:27 +0000 Subject: xfs: add xlog_grant_head_wake_all Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 30fec0a2a213..a0d1376b3d48 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -159,6 +159,18 @@ xlog_grant_head_init( spin_lock_init(&head->lock); } +STATIC void +xlog_grant_head_wake_all( + struct xlog_grant_head *head) +{ + struct xlog_ticket *tic; + + spin_lock(&head->lock); + list_for_each_entry(tic, &head->waiters, t_queue) + wake_up_process(tic->t_task); + spin_unlock(&head->lock); +} + STATIC bool xlog_reserveq_wake( struct log *log, @@ -3557,7 +3569,6 @@ xfs_log_force_umount( struct xfs_mount *mp, int logerror) { - xlog_ticket_t *tic; xlog_t *log; int retval; @@ -3625,15 +3636,8 @@ xfs_log_force_umount( * we don't enqueue anything once the SHUTDOWN flag is set, and this * action is protected by the grant locks. */ - spin_lock(&log->l_reserve_head.lock); - list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue) - wake_up_process(tic->t_task); - spin_unlock(&log->l_reserve_head.lock); - - spin_lock(&log->l_write_head.lock); - list_for_each_entry(tic, &log->l_write_head.waiters, t_queue) - wake_up_process(tic->t_task); - spin_unlock(&log->l_write_head.lock); + xlog_grant_head_wake_all(&log->l_reserve_head); + xlog_grant_head_wake_all(&log->l_write_head); if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { ASSERT(!logerror); -- cgit From 23ee3df349b8b8fd153bd02fccf08b31aec5bce3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:28 +0000 Subject: xfs: share code for grant head waiting Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 63 ++++++++++++++++-------------------------------------- fs/xfs/xfs_trace.h | 2 -- 2 files changed, 18 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a0d1376b3d48..c2d13827582b 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -221,12 +221,13 @@ xlog_writeq_wake( } STATIC int -xlog_reserveq_wait( +xlog_grant_head_wait( struct log *log, + struct xlog_grant_head *head, struct xlog_ticket *tic, int need_bytes) { - list_add_tail(&tic->t_queue, &log->l_reserve_head.waiters); + list_add_tail(&tic->t_queue, &head->waiters); do { if (XLOG_FORCED_SHUTDOWN(log)) @@ -234,7 +235,7 @@ xlog_reserveq_wait( xlog_grant_push_ail(log, need_bytes); __set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock(&log->l_reserve_head.lock); + spin_unlock(&head->lock); XFS_STATS_INC(xs_sleep_logspace); @@ -242,44 +243,10 @@ xlog_reserveq_wait( schedule(); trace_xfs_log_grant_wake(log, tic); - spin_lock(&log->l_reserve_head.lock); + spin_lock(&head->lock); if (XLOG_FORCED_SHUTDOWN(log)) goto shutdown; - } while (xlog_space_left(log, &log->l_reserve_head.grant) < need_bytes); - - list_del_init(&tic->t_queue); - return 0; -shutdown: - list_del_init(&tic->t_queue); - return XFS_ERROR(EIO); -} - -STATIC int -xlog_writeq_wait( - struct log *log, - struct xlog_ticket *tic, - int need_bytes) -{ - list_add_tail(&tic->t_queue, &log->l_write_head.waiters); - - do { - if (XLOG_FORCED_SHUTDOWN(log)) - goto shutdown; - xlog_grant_push_ail(log, need_bytes); - - 
__set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock(&log->l_write_head.lock); - - XFS_STATS_INC(xs_sleep_logspace); - - trace_xfs_log_regrant_write_sleep(log, tic); - schedule(); - trace_xfs_log_regrant_write_wake(log, tic); - - spin_lock(&log->l_write_head.lock); - if (XLOG_FORCED_SHUTDOWN(log)) - goto shutdown; - } while (xlog_space_left(log, &log->l_write_head.grant) < need_bytes); + } while (xlog_space_left(log, &head->grant) < need_bytes); list_del_init(&tic->t_queue); return 0; @@ -2596,12 +2563,15 @@ xlog_grant_log_space( if (!list_empty_careful(&log->l_reserve_head.waiters)) { spin_lock(&log->l_reserve_head.lock); if (!xlog_reserveq_wake(log, &free_bytes) || - free_bytes < need_bytes) - error = xlog_reserveq_wait(log, tic, need_bytes); + free_bytes < need_bytes) { + error = xlog_grant_head_wait(log, &log->l_reserve_head, + tic, need_bytes); + } spin_unlock(&log->l_reserve_head.lock); } else if (free_bytes < need_bytes) { spin_lock(&log->l_reserve_head.lock); - error = xlog_reserveq_wait(log, tic, need_bytes); + error = xlog_grant_head_wait(log, &log->l_reserve_head, tic, + need_bytes); spin_unlock(&log->l_reserve_head.lock); } if (error) @@ -2649,12 +2619,15 @@ xlog_regrant_write_log_space( if (!list_empty_careful(&log->l_write_head.waiters)) { spin_lock(&log->l_write_head.lock); if (!xlog_writeq_wake(log, &free_bytes) || - free_bytes < need_bytes) - error = xlog_writeq_wait(log, tic, need_bytes); + free_bytes < need_bytes) { + error = xlog_grant_head_wait(log, &log->l_write_head, + tic, need_bytes); + } spin_unlock(&log->l_write_head.lock); } else if (free_bytes < need_bytes) { spin_lock(&log->l_write_head.lock); - error = xlog_writeq_wait(log, tic, need_bytes); + error = xlog_grant_head_wait(log, &log->l_write_head, tic, + need_bytes); spin_unlock(&log->l_write_head.lock); } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index a80e30854fce..7ee18e91bf89 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -837,8 +837,6 @@ DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); -- cgit From e179840d74606ab1935c83fe5ad9d93c95ddc956 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:29 +0000 Subject: xfs: share code for grant head wakeups Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 50 ++++++++++++++++++++------------------------------ fs/xfs/xfs_trace.h | 1 - 2 files changed, 20 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index c2d13827582b..685997548fb8 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -171,49 +171,39 @@ xlog_grant_head_wake_all( spin_unlock(&head->lock); } -STATIC bool -xlog_reserveq_wake( +static inline int +xlog_ticket_reservation( struct log *log, - int *free_bytes) + struct xlog_grant_head *head, + struct xlog_ticket *tic) { - struct xlog_ticket *tic; - int need_bytes; - - list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue) { + if (head == &log->l_write_head) { + ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); + return tic->t_unit_res; + } else { if 
(tic->t_flags & XLOG_TIC_PERM_RESERV) - need_bytes = tic->t_unit_res * tic->t_cnt; + return tic->t_unit_res * tic->t_cnt; else - need_bytes = tic->t_unit_res; - - if (*free_bytes < need_bytes) - return false; - *free_bytes -= need_bytes; - - trace_xfs_log_grant_wake_up(log, tic); - wake_up_process(tic->t_task); + return tic->t_unit_res; } - - return true; } STATIC bool -xlog_writeq_wake( +xlog_grant_head_wake( struct log *log, + struct xlog_grant_head *head, int *free_bytes) { struct xlog_ticket *tic; int need_bytes; - list_for_each_entry(tic, &log->l_write_head.waiters, t_queue) { - ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); - - need_bytes = tic->t_unit_res; - + list_for_each_entry(tic, &head->waiters, t_queue) { + need_bytes = xlog_ticket_reservation(log, head, tic); if (*free_bytes < need_bytes) return false; - *free_bytes -= need_bytes; - trace_xfs_log_regrant_write_wake_up(log, tic); + *free_bytes -= need_bytes; + trace_xfs_log_grant_wake_up(log, tic); wake_up_process(tic->t_task); } @@ -772,7 +762,7 @@ xfs_log_space_wake( spin_lock(&log->l_write_head.lock); free_bytes = xlog_space_left(log, &log->l_write_head.grant); - xlog_writeq_wake(log, &free_bytes); + xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); spin_unlock(&log->l_write_head.lock); } @@ -781,7 +771,7 @@ xfs_log_space_wake( spin_lock(&log->l_reserve_head.lock); free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); - xlog_reserveq_wake(log, &free_bytes); + xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); spin_unlock(&log->l_reserve_head.lock); } } @@ -2562,7 +2552,7 @@ xlog_grant_log_space( free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); if (!list_empty_careful(&log->l_reserve_head.waiters)) { spin_lock(&log->l_reserve_head.lock); - if (!xlog_reserveq_wake(log, &free_bytes) || + if (!xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes) || free_bytes < need_bytes) { error = xlog_grant_head_wait(log, &log->l_reserve_head, tic, need_bytes); @@ -2618,7 +2608,7 @@ xlog_regrant_write_log_space( free_bytes = xlog_space_left(log, &log->l_write_head.grant); if (!list_empty_careful(&log->l_write_head.waiters)) { spin_lock(&log->l_write_head.lock); - if (!xlog_writeq_wake(log, &free_bytes) || + if (!xlog_grant_head_wake(log, &log->l_write_head, &free_bytes) || free_bytes < need_bytes) { error = xlog_grant_head_wait(log, &log->l_write_head, tic, need_bytes); diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 7ee18e91bf89..482bebf8c7ef 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -837,7 +837,6 @@ DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub); -- cgit From 42ceedb3caffe67c4ec0dfbb78ce410832d429b9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:30 +0000 Subject: xfs: share code for grant head availability checks Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 133 +++++++++++++++++++++++++------------------------------ 1 file changed, 60 insertions(+), 73 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 685997548fb8..c6a29a05c60a 100644 --- 
a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -245,6 +245,60 @@ shutdown: return XFS_ERROR(EIO); } +/* + * Atomically get the log space required for a log ticket. + * + * Once a ticket gets put onto head->waiters, it will only return after the + * needed reservation is satisfied. + * + * This function is structured so that it has a lock free fast path. This is + * necessary because every new transaction reservation will come through this + * path. Hence any lock will be globally hot if we take it unconditionally on + * every pass. + * + * As tickets are only ever moved on and off head->waiters under head->lock, we + * only need to take that lock if we are going to add the ticket to the queue + * and sleep. We can avoid taking the lock if the ticket was never added to + * head->waiters because the t_queue list head will be empty and we hold the + * only reference to it so it can safely be checked unlocked. + */ +STATIC int +xlog_grant_head_check( + struct log *log, + struct xlog_grant_head *head, + struct xlog_ticket *tic, + int *need_bytes) +{ + int free_bytes; + int error = 0; + + ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); + + /* + * If there are other waiters on the queue then give them a chance at + * logspace before us. Wake up the first waiters, if we do not wake + * up all the waiters then go to sleep waiting for more free space, + * otherwise try to get some space for this transaction. + */ + *need_bytes = xlog_ticket_reservation(log, head, tic); + free_bytes = xlog_space_left(log, &head->grant); + if (!list_empty_careful(&head->waiters)) { + spin_lock(&head->lock); + if (!xlog_grant_head_wake(log, head, &free_bytes) || + free_bytes < *need_bytes) { + error = xlog_grant_head_wait(log, head, tic, + *need_bytes); + } + spin_unlock(&head->lock); + } else if (free_bytes < *need_bytes) { + spin_lock(&head->lock); + error = xlog_grant_head_wait(log, head, tic, *need_bytes); + spin_unlock(&head->lock); + } + + return error; +} + static void xlog_tic_reset_res(xlog_ticket_t *tic) { @@ -2511,59 +2565,18 @@ restart: return 0; } /* xlog_state_get_iclog_space */ -/* - * Atomically get the log space required for a log ticket. - * - * Once a ticket gets put onto the reserveq, it will only return after the - * needed reservation is satisfied. - * - * This function is structured so that it has a lock free fast path. This is - * necessary because every new transaction reservation will come through this - * path. Hence any lock will be globally hot if we take it unconditionally on - * every pass. - * - * As tickets are only ever moved on and off the l_reserve.waiters under the - * l_reserve.lock, we only need to take that lock if we are going to add - * the ticket to the queue and sleep. We can avoid taking the lock if the ticket - * was never added to the reserveq because the t_queue list head will be empty - * and we hold the only reference to it so it can safely be checked unlocked. - */ STATIC int xlog_grant_log_space( struct log *log, struct xlog_ticket *tic) { - int free_bytes, need_bytes; + int need_bytes; int error = 0; - ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); - trace_xfs_log_grant_enter(log, tic); - /* - * If there are other waiters on the queue then give them a chance at - * logspace before us. Wake up the first waiters, if we do not wake - * up all the waiters then go to sleep waiting for more free space, - * otherwise try to get some space for this transaction. 
- */ - need_bytes = tic->t_unit_res; - if (tic->t_flags & XFS_LOG_PERM_RESERV) - need_bytes *= tic->t_ocnt; - free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); - if (!list_empty_careful(&log->l_reserve_head.waiters)) { - spin_lock(&log->l_reserve_head.lock); - if (!xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes) || - free_bytes < need_bytes) { - error = xlog_grant_head_wait(log, &log->l_reserve_head, - tic, need_bytes); - } - spin_unlock(&log->l_reserve_head.lock); - } else if (free_bytes < need_bytes) { - spin_lock(&log->l_reserve_head.lock); - error = xlog_grant_head_wait(log, &log->l_reserve_head, tic, - need_bytes); - spin_unlock(&log->l_reserve_head.lock); - } + error = xlog_grant_head_check(log, &log->l_reserve_head, tic, + &need_bytes); if (error) return error; @@ -2576,16 +2589,13 @@ xlog_grant_log_space( /* * Replenish the byte reservation required by moving the grant write head. - * - * Similar to xlog_grant_log_space, the function is structured to have a lock - * free fast path. */ STATIC int xlog_regrant_write_log_space( struct log *log, struct xlog_ticket *tic) { - int free_bytes, need_bytes; + int need_bytes; int error = 0; tic->t_curr_res = tic->t_unit_res; @@ -2594,33 +2604,10 @@ xlog_regrant_write_log_space( if (tic->t_cnt > 0) return 0; - ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); - trace_xfs_log_regrant_write_enter(log, tic); - /* - * If there are other waiters on the queue then give them a chance at - * logspace before us. Wake up the first waiters, if we do not wake - * up all the waiters then go to sleep waiting for more free space, - * otherwise try to get some space for this transaction. - */ - need_bytes = tic->t_unit_res; - free_bytes = xlog_space_left(log, &log->l_write_head.grant); - if (!list_empty_careful(&log->l_write_head.waiters)) { - spin_lock(&log->l_write_head.lock); - if (!xlog_grant_head_wake(log, &log->l_write_head, &free_bytes) || - free_bytes < need_bytes) { - error = xlog_grant_head_wait(log, &log->l_write_head, - tic, need_bytes); - } - spin_unlock(&log->l_write_head.lock); - } else if (free_bytes < need_bytes) { - spin_lock(&log->l_write_head.lock); - error = xlog_grant_head_wait(log, &log->l_write_head, tic, - need_bytes); - spin_unlock(&log->l_write_head.lock); - } - + error = xlog_grant_head_check(log, &log->l_write_head, tic, + &need_bytes); if (error) return error; -- cgit From 9006fb91cfdf22812923f0536c7531c429c1aeab Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:31:31 +0000 Subject: xfs: split and cleanup xfs_log_reserve Split the log regrant case out of xfs_log_reserve into a separate function, and merge xlog_grant_log_space and xlog_regrant_write_log_space into their respective callers. Also replace the XFS_LOG_PERM_RESERV flag, which easily got misused before the previous cleanups with a simple boolean parameter. 
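In caller terms the split works out as sketched below: a transaction that already owns a ticket (a permanent reservation being rolled) goes through xfs_log_regrant(), while a new transaction takes xfs_log_reserve() with a plain boolean instead of the old XFS_LOG_PERM_RESERV flag. This is a stand-alone sketch only; the ticket fields and the stub bodies are placeholders, and just the dispatch logic and the permanent flag mirror the patch.

/* Stand-alone sketch of the reserve/regrant dispatch; the stubs below
 * are placeholders, not the XFS implementations. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct xlog_ticket {
	int t_unit_res;		/* placeholder fields */
	int t_cnt;
};

/* Stub: grant fresh log space and hand back a ticket. */
static int xfs_log_reserve_stub(int unit_bytes, int cnt,
				struct xlog_ticket **ticp, bool permanent)
{
	static struct xlog_ticket tic;	/* placeholder allocation */

	tic.t_unit_res = unit_bytes;
	tic.t_cnt = cnt;
	*ticp = &tic;
	printf("reserve: %d x %d bytes, permanent=%d\n", cnt, unit_bytes, permanent);
	return 0;
}

/* Stub: replenish an existing (permanent) reservation. */
static int xfs_log_regrant_stub(struct xlog_ticket *tic)
{
	printf("regrant: %d bytes\n", tic->t_unit_res);
	return 0;
}

/* Mirrors the new xfs_trans_reserve() logic: regrant if a ticket already
 * exists, otherwise take a fresh reservation. */
static int trans_reserve_log(struct xlog_ticket **ticp, int logspace,
			     int logcount, bool permanent)
{
	if (*ticp != NULL)
		return xfs_log_regrant_stub(*ticp);
	return xfs_log_reserve_stub(logspace, logcount, ticp, permanent);
}

int main(void)
{
	struct xlog_ticket *tic = NULL;

	trans_reserve_log(&tic, 4096, 2, true);	/* first reservation */
	trans_reserve_log(&tic, 4096, 2, true);	/* rolled transaction: regrant */
	return 0;
}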
Signed-off-by: Christoph Hellwig Reviewed-by: Mark Tinguely Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_log.c | 265 +++++++++++++++++++++++--------------------------- fs/xfs/xfs_log.h | 12 +-- fs/xfs/xfs_log_priv.h | 2 +- fs/xfs/xfs_trace.h | 11 +-- fs/xfs/xfs_trans.c | 31 +++--- 5 files changed, 151 insertions(+), 170 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index c6a29a05c60a..98a9cb5ffd17 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -67,15 +67,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t *log, int eventual_size); STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); -/* local functions to manipulate grant head */ -STATIC int xlog_grant_log_space(xlog_t *log, - xlog_ticket_t *xtic); STATIC void xlog_grant_push_ail(struct log *log, int need_bytes); STATIC void xlog_regrant_reserve_log_space(xlog_t *log, xlog_ticket_t *ticket); -STATIC int xlog_regrant_write_log_space(xlog_t *log, - xlog_ticket_t *ticket); STATIC void xlog_ungrant_log_space(xlog_t *log, xlog_ticket_t *ticket); @@ -323,6 +318,128 @@ xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type) tic->t_res_num++; } +/* + * Replenish the byte reservation required by moving the grant write head. + */ +int +xfs_log_regrant( + struct xfs_mount *mp, + struct xlog_ticket *tic) +{ + struct log *log = mp->m_log; + int need_bytes; + int error = 0; + + if (XLOG_FORCED_SHUTDOWN(log)) + return XFS_ERROR(EIO); + + XFS_STATS_INC(xs_try_logspace); + + /* + * This is a new transaction on the ticket, so we need to change the + * transaction ID so that the next transaction has a different TID in + * the log. Just add one to the existing tid so that we can see chains + * of rolling transactions in the log easily. + */ + tic->t_tid++; + + xlog_grant_push_ail(log, tic->t_unit_res); + + tic->t_curr_res = tic->t_unit_res; + xlog_tic_reset_res(tic); + + if (tic->t_cnt > 0) + return 0; + + trace_xfs_log_regrant(log, tic); + + error = xlog_grant_head_check(log, &log->l_write_head, tic, + &need_bytes); + if (error) + goto out_error; + + xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); + trace_xfs_log_regrant_exit(log, tic); + xlog_verify_grant_tail(log); + return 0; + +out_error: + /* + * If we are failing, make sure the ticket doesn't have any current + * reservations. We don't want to add this back when the ticket/ + * transaction gets cancelled. + */ + tic->t_curr_res = 0; + tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ + return error; +} + +/* + * Reserve log space and return a ticket corresponding the reservation. + * + * Each reservation is going to reserve extra space for a log record header. + * When writes happen to the on-disk log, we don't subtract the length of the + * log record header from any reservation. By wasting space in each + * reservation, we prevent over allocation problems. 
+ */ +int +xfs_log_reserve( + struct xfs_mount *mp, + int unit_bytes, + int cnt, + struct xlog_ticket **ticp, + __uint8_t client, + bool permanent, + uint t_type) +{ + struct log *log = mp->m_log; + struct xlog_ticket *tic; + int need_bytes; + int error = 0; + + ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); + + if (XLOG_FORCED_SHUTDOWN(log)) + return XFS_ERROR(EIO); + + XFS_STATS_INC(xs_try_logspace); + + ASSERT(*ticp == NULL); + tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, + KM_SLEEP | KM_MAYFAIL); + if (!tic) + return XFS_ERROR(ENOMEM); + + tic->t_trans_type = t_type; + *ticp = tic; + + xlog_grant_push_ail(log, tic->t_unit_res * tic->t_cnt); + + trace_xfs_log_reserve(log, tic); + + error = xlog_grant_head_check(log, &log->l_reserve_head, tic, + &need_bytes); + if (error) + goto out_error; + + xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); + xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); + trace_xfs_log_reserve_exit(log, tic); + xlog_verify_grant_tail(log); + return 0; + +out_error: + /* + * If we are failing, make sure the ticket doesn't have any current + * reservations. We don't want to add this back when the ticket/ + * transaction gets cancelled. + */ + tic->t_curr_res = 0; + tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ + return error; +} + + /* * NOTES: * @@ -432,88 +549,6 @@ xfs_log_release_iclog( return 0; } -/* - * 1. Reserve an amount of on-disk log space and return a ticket corresponding - * to the reservation. - * 2. Potentially, push buffers at tail of log to disk. - * - * Each reservation is going to reserve extra space for a log record header. - * When writes happen to the on-disk log, we don't subtract the length of the - * log record header from any reservation. By wasting space in each - * reservation, we prevent over allocation problems. - */ -int -xfs_log_reserve( - struct xfs_mount *mp, - int unit_bytes, - int cnt, - struct xlog_ticket **ticket, - __uint8_t client, - uint flags, - uint t_type) -{ - struct log *log = mp->m_log; - struct xlog_ticket *internal_ticket; - int retval = 0; - - ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); - - if (XLOG_FORCED_SHUTDOWN(log)) - return XFS_ERROR(EIO); - - XFS_STATS_INC(xs_try_logspace); - - - if (*ticket != NULL) { - ASSERT(flags & XFS_LOG_PERM_RESERV); - internal_ticket = *ticket; - - /* - * this is a new transaction on the ticket, so we need to - * change the transaction ID so that the next transaction has a - * different TID in the log. Just add one to the existing tid - * so that we can see chains of rolling transactions in the log - * easily. - */ - internal_ticket->t_tid++; - - trace_xfs_log_reserve(log, internal_ticket); - - xlog_grant_push_ail(log, internal_ticket->t_unit_res); - retval = xlog_regrant_write_log_space(log, internal_ticket); - } else { - /* may sleep if need to allocate more tickets */ - internal_ticket = xlog_ticket_alloc(log, unit_bytes, cnt, - client, flags, - KM_SLEEP|KM_MAYFAIL); - if (!internal_ticket) - return XFS_ERROR(ENOMEM); - internal_ticket->t_trans_type = t_type; - *ticket = internal_ticket; - - trace_xfs_log_reserve(log, internal_ticket); - - xlog_grant_push_ail(log, - (internal_ticket->t_unit_res * - internal_ticket->t_cnt)); - retval = xlog_grant_log_space(log, internal_ticket); - } - - if (unlikely(retval)) { - /* - * If we are failing, make sure the ticket doesn't have any - * current reservations. We don't want to add this back - * when the ticket/ transaction gets cancelled. 
- */ - internal_ticket->t_curr_res = 0; - /* ungrant will give back unit_res * t_cnt. */ - internal_ticket->t_cnt = 0; - } - - return retval; -} - - /* * Mount a log filesystem * @@ -2565,58 +2600,6 @@ restart: return 0; } /* xlog_state_get_iclog_space */ -STATIC int -xlog_grant_log_space( - struct log *log, - struct xlog_ticket *tic) -{ - int need_bytes; - int error = 0; - - trace_xfs_log_grant_enter(log, tic); - - error = xlog_grant_head_check(log, &log->l_reserve_head, tic, - &need_bytes); - if (error) - return error; - - xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); - xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); - trace_xfs_log_grant_exit(log, tic); - xlog_verify_grant_tail(log); - return 0; -} - -/* - * Replenish the byte reservation required by moving the grant write head. - */ -STATIC int -xlog_regrant_write_log_space( - struct log *log, - struct xlog_ticket *tic) -{ - int need_bytes; - int error = 0; - - tic->t_curr_res = tic->t_unit_res; - xlog_tic_reset_res(tic); - - if (tic->t_cnt > 0) - return 0; - - trace_xfs_log_regrant_write_enter(log, tic); - - error = xlog_grant_head_check(log, &log->l_write_head, tic, - &need_bytes); - if (error) - return error; - - xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); - trace_xfs_log_regrant_write_exit(log, tic); - xlog_verify_grant_tail(log); - return 0; -} - /* The first cnt-1 times through here we don't need to * move the grant write head because the permanent * reservation has reserved cnt times the unit amount. @@ -3156,7 +3139,7 @@ xlog_ticket_alloc( int unit_bytes, int cnt, char client, - uint xflags, + bool permanent, int alloc_flags) { struct xlog_ticket *tic; @@ -3260,7 +3243,7 @@ xlog_ticket_alloc( tic->t_clientid = client; tic->t_flags = XLOG_TIC_INITED; tic->t_trans_type = 0; - if (xflags & XFS_LOG_PERM_RESERV) + if (permanent) tic->t_flags |= XLOG_TIC_PERM_RESERV; xlog_tic_reset_res(tic); diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index fe32c6927877..2c622bedb302 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -52,15 +52,6 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) */ #define XFS_LOG_REL_PERM_RESERV 0x1 -/* - * Flags to xfs_log_reserve() - * - * XFS_LOG_PERM_RESERV: Permanent reservation. When writes are - * performed against this type of reservation, the reservation - * is not decreased. Long running transactions should use this. 
- */ -#define XFS_LOG_PERM_RESERV 0x2 - /* * Flags to xfs_log_force() * @@ -172,8 +163,9 @@ int xfs_log_reserve(struct xfs_mount *mp, int count, struct xlog_ticket **ticket, __uint8_t clientid, - uint flags, + bool permanent, uint t_type); +int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic); int xfs_log_unmount_write(struct xfs_mount *mp); void xfs_log_unmount(struct xfs_mount *mp); int xfs_log_force_umount(struct xfs_mount *mp, int logerror); diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index eba4ec925a45..2152900b79d4 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -552,7 +552,7 @@ extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); extern kmem_zone_t *xfs_log_ticket_zone; struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes, - int count, char client, uint xflags, + int count, char client, bool permanent, int alloc_flags); diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 482bebf8c7ef..3b369c1277f0 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -826,17 +826,14 @@ DEFINE_EVENT(xfs_loggrant_class, name, \ TP_ARGS(log, tic)) DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); -DEFINE_LOGGRANT_EVENT(xfs_log_reserve); DEFINE_LOGGRANT_EVENT(xfs_log_umount_write); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep); DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake); DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); +DEFINE_LOGGRANT_EVENT(xfs_log_reserve); +DEFINE_LOGGRANT_EVENT(xfs_log_reserve_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub); diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 7adcdf15ae0c..103b00c90004 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -681,7 +681,6 @@ xfs_trans_reserve( uint flags, uint logcount) { - int log_flags; int error = 0; int rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; @@ -707,24 +706,32 @@ xfs_trans_reserve( * Reserve the log space needed for this transaction. 
*/ if (logspace > 0) { - ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace)); - ASSERT((tp->t_log_count == 0) || - (tp->t_log_count == logcount)); + bool permanent = false; + + ASSERT(tp->t_log_res == 0 || tp->t_log_res == logspace); + ASSERT(tp->t_log_count == 0 || tp->t_log_count == logcount); + if (flags & XFS_TRANS_PERM_LOG_RES) { - log_flags = XFS_LOG_PERM_RESERV; tp->t_flags |= XFS_TRANS_PERM_LOG_RES; + permanent = true; } else { ASSERT(tp->t_ticket == NULL); ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES)); - log_flags = 0; } - error = xfs_log_reserve(tp->t_mountp, logspace, logcount, - &tp->t_ticket, - XFS_TRANSACTION, log_flags, tp->t_type); - if (error) { - goto undo_blocks; + if (tp->t_ticket != NULL) { + ASSERT(flags & XFS_TRANS_PERM_LOG_RES); + error = xfs_log_regrant(tp->t_mountp, tp->t_ticket); + } else { + error = xfs_log_reserve(tp->t_mountp, logspace, + logcount, &tp->t_ticket, + XFS_TRANSACTION, permanent, + tp->t_type); } + + if (error) + goto undo_blocks; + tp->t_log_res = logspace; tp->t_log_count = logcount; } @@ -752,6 +759,8 @@ xfs_trans_reserve( */ undo_log: if (logspace > 0) { + int log_flags; + if (flags & XFS_TRANS_PERM_LOG_RES) { log_flags = XFS_LOG_REL_PERM_RESERV; } else { -- cgit From 93518dd2ebafcc761a8637b2877008cfd748c202 Mon Sep 17 00:00:00 2001 From: Masami Ichikawa Date: Tue, 21 Feb 2012 07:43:50 +0900 Subject: sysfs: Fix memory leak in sysfs_sd_setsecdata(). This patch fixies follwing two memory leak patterns that reported by kmemleak. sysfs_sd_setsecdata() is called during sys_lsetxattr() operation. It checks sd->s_iattr is NULL or not. Then if it is NULL, it calls sysfs_init_inode_attrs() to allocate memory. That code is this. iattrs = sd->s_iattr; if (!iattrs) iattrs = sysfs_init_inode_attrs(sd); The iattrs recieves sysfs_init_inode_attrs()'s result, but sd->s_iattr doesn't know the address. so it needs to set correct address to sd->s_iattr to free memory in other function. unreferenced object 0xffff880250b73e60 (size 32): comm "systemd", pid 1, jiffies 4294683888 (age 94.553s) hex dump (first 32 bytes): 73 79 73 74 65 6d 5f 75 3a 6f 62 6a 65 63 74 5f system_u:object_ 72 3a 73 79 73 66 73 5f 74 3a 73 30 00 00 00 00 r:sysfs_t:s0.... backtrace: [] kmemleak_alloc+0x73/0x98 [] __kmalloc+0x100/0x12c [] context_struct_to_string+0x106/0x210 [] security_sid_to_context_core+0x10b/0x129 [] security_sid_to_context+0x10/0x12 [] selinux_inode_getsecurity+0x7d/0xa8 [] selinux_inode_getsecctx+0x22/0x2e [] security_inode_getsecctx+0x16/0x18 [] sysfs_setxattr+0x96/0x117 [] __vfs_setxattr_noperm+0x73/0xd9 [] vfs_setxattr+0x83/0xa1 [] setxattr+0xcf/0x101 [] sys_lsetxattr+0x6a/0x8f [] system_call_fastpath+0x16/0x1b [] 0xffffffffffffffff unreferenced object 0xffff88024163c5a0 (size 96): comm "systemd", pid 1, jiffies 4294683888 (age 94.553s) hex dump (first 32 bytes): 00 00 00 00 ed 41 00 00 00 00 00 00 00 00 00 00 .....A.......... 00 00 00 00 00 00 00 00 0c 64 42 4f 00 00 00 00 .........dBO.... 
backtrace: [] kmemleak_alloc+0x73/0x98 [] kmem_cache_alloc_trace+0xc4/0xee [] sysfs_init_inode_attrs+0x2a/0x83 [] sysfs_setxattr+0xbf/0x117 [] __vfs_setxattr_noperm+0x73/0xd9 [] vfs_setxattr+0x83/0xa1 [] setxattr+0xcf/0x101 [] sys_lsetxattr+0x6a/0x8f [] system_call_fastpath+0x16/0x1b [] 0xffffffffffffffff ` Signed-off-by: Masami Ichikawa Cc: stable Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/inode.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 4291fd1617ab..cc7ea5de2fdd 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -136,12 +136,13 @@ static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *sec void *old_secdata; size_t old_secdata_len; - iattrs = sd->s_iattr; - if (!iattrs) - iattrs = sysfs_init_inode_attrs(sd); - if (!iattrs) - return -ENOMEM; + if (!sd->s_iattr) { + sd->s_iattr = sysfs_init_inode_attrs(sd); + if (!sd->s_iattr) + return -ENOMEM; + } + iattrs = sd->s_iattr; old_secdata = iattrs->ia_secdata; old_secdata_len = iattrs->ia_secdata_len; -- cgit From ad637a10f444fc66b1f6d4a28fe30d4c61ed0161 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 16 Feb 2012 22:01:00 +0000 Subject: xfs: only take the ILOCK in xfs_reclaim_inode() At the end of xfs_reclaim_inode(), the inode is locked in order to we wait for a possible concurrent lookup to complete before the inode is freed. This synchronization step was taking both the ILOCK and the IOLOCK, but the latter was causing lockdep to produce reports of the possibility of deadlock. It turns out that there's no need to acquire the IOLOCK at this point anyway. It may have been required in some earlier version of the code, but there should be no need to take the IOLOCK in xfs_iget(), so there's no (longer) any need to get it here for synchronization. Add an assertion in xfs_iget() as a reminder of this assumption. Dave Chinner diagnosed this on IRC, and Christoph Hellwig suggested no longer including the IOLOCK. I just put together the patch. Signed-off-by: Alex Elder Reviewed-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_iget.c | 9 +++++++++ fs/xfs/xfs_sync.c | 10 ++++------ 2 files changed, 13 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 19dcfb2aac9a..37f22dad5f59 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -418,6 +418,15 @@ xfs_iget( xfs_perag_t *pag; xfs_agino_t agino; + /* + * xfs_reclaim_inode() uses the ILOCK to ensure an inode + * doesn't get freed while it's being referenced during a + * radix tree traversal here. It assumes this function + * aqcuires only the ILOCK (and therefore it has no need to + * involve the IOLOCK in this synchronization). + */ + ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); + /* reject inode numbers outside existing AGs */ if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) return EINVAL; diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index 40b75eecd2b4..71bf846b7280 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c @@ -913,17 +913,15 @@ reclaim: * can reference the inodes in the cache without taking references. * * We make that OK here by ensuring that we wait until the inode is - * unlocked after the lookup before we go ahead and free it. We get - * both the ilock and the iolock because the code may need to drop the - * ilock one but will still hold the iolock. + * unlocked after the lookup before we go ahead and free it. 
*/ - xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqdetach(ip); - xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inode_free(ip); - return error; + return error; } /* -- cgit From 7df529af5fb4b4064f8cd62629e259ac79c0b4ca Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 26 Feb 2012 17:34:22 -0500 Subject: NFSv4.1: Don't call nfs4_deviceid_purge_client() unless we're NFSv4.1 Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 7 ++++--- fs/nfs/pnfs.h | 3 --- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 8563585cccec..592b5583aa3a 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -204,8 +204,11 @@ error_0: #ifdef CONFIG_NFS_V4_1 static void nfs4_shutdown_session(struct nfs_client *clp) { - if (nfs4_has_session(clp)) + if (nfs4_has_session(clp)) { + nfs4_deviceid_purge_client(clp); nfs4_destroy_session(clp->cl_session); + } + } #else /* CONFIG_NFS_V4_1 */ static void nfs4_shutdown_session(struct nfs_client *clp) @@ -298,8 +301,6 @@ static void nfs_free_client(struct nfs_client *clp) if (clp->cl_machine_cred != NULL) put_rpccred(clp->cl_machine_cred); - nfs4_deviceid_purge_client(clp); - put_net(clp->net); kfree(clp->cl_hostname); kfree(clp->server_scope); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 53d593a0a4f2..8088d51f495e 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -426,9 +426,6 @@ static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync) return 0; } -static inline void nfs4_deviceid_purge_client(struct nfs_client *ncl) -{ -} #endif /* CONFIG_NFS_V4_1 */ #endif /* FS_NFS_PNFS_H */ -- cgit From e9dbca8d732e20b8d31a3094a8669c014e7ee262 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 27 Feb 2012 22:05:37 +0400 Subject: NFS: release per-net clients lock before calling PipeFS dentries creation v3: 1) Lookup for client is performed from the beginning of the list on each PipeFS event handling operation. Lockdep is sad otherwise, because inode mutex is taken on PipeFS dentry creation, which can be called on mount notification, where this per-net client lock is taken on clients list walk. 
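The pattern adopted here is a generic one: take the list lock only long enough to find the next matching client and bump its reference count, drop the lock, do the PipeFS dentry work, then put the reference and restart the walk from the head of the list (restarting is what makes dropping the lock safe). The sketch below shows that shape in plain user-space C; the client structure, the handled flag and the helper bodies are inventions for illustration, and only the overall get/use/put-and-restart pattern mirrors nfs_get_client_for_event() and rpc_pipefs_event().

/* User-space sketch of "find under lock, reference, work unlocked, restart".
 * Everything here (client struct, handled flag, helpers) is illustrative. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct client {
	int id;
	int refcount;
	bool handled;		/* stands in for "pipe dentry already present" */
	struct client *next;
};

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
static struct client c2 = { 2, 1, false, NULL };
static struct client c1 = { 1, 1, false, &c2 };
static struct client *client_list = &c1;

/* Walk from the head each time: the lock is dropped between calls, so any
 * list position remembered from a previous pass may no longer be valid. */
static struct client *get_client_for_event(void)
{
	struct client *clp;

	pthread_mutex_lock(&client_lock);
	for (clp = client_list; clp; clp = clp->next) {
		if (clp->handled)
			continue;
		clp->refcount++;	/* pin it before dropping the lock */
		pthread_mutex_unlock(&client_lock);
		return clp;
	}
	pthread_mutex_unlock(&client_lock);
	return NULL;
}

static void put_client(struct client *clp)
{
	pthread_mutex_lock(&client_lock);
	clp->refcount--;
	pthread_mutex_unlock(&client_lock);
}

int main(void)
{
	struct client *clp;

	while ((clp = get_client_for_event()) != NULL) {
		/* "dentry creation" happens here, without the list lock held */
		printf("handling client %d\n", clp->id);
		clp->handled = true;
		put_client(clp);
	}
	return 0;
}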
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b5c6d8eb7e03..d4db3b6f4b8e 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -553,23 +553,41 @@ static int __rpc_pipefs_event(struct nfs_client *clp, unsigned long event, return err; } -static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, - void *ptr) +static struct nfs_client *nfs_get_client_for_event(struct net *net, int event) { - struct super_block *sb = ptr; - struct nfs_net *nn = net_generic(sb->s_fs_info, nfs_net_id); + struct nfs_net *nn = net_generic(net, nfs_net_id); + struct dentry *cl_dentry; struct nfs_client *clp; - int error = 0; spin_lock(&nn->nfs_client_lock); list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { if (clp->rpc_ops != &nfs_v4_clientops) continue; + cl_dentry = clp->cl_idmap->idmap_pipe->dentry; + if (((event == RPC_PIPEFS_MOUNT) && cl_dentry) || + ((event == RPC_PIPEFS_UMOUNT) && !cl_dentry)) + continue; + atomic_inc(&clp->cl_count); + spin_unlock(&nn->nfs_client_lock); + return clp; + } + spin_unlock(&nn->nfs_client_lock); + return NULL; +} + +static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct super_block *sb = ptr; + struct nfs_client *clp; + int error = 0; + + while ((clp = nfs_get_client_for_event(sb->s_fs_info, event))) { error = __rpc_pipefs_event(clp, event, sb); + nfs_put_client(clp); if (error) break; } - spin_unlock(&nn->nfs_client_lock); return error; } -- cgit From a245769f254bbbea868e2cf8dc42daa061cd276f Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 20 Jan 2012 10:38:36 +0000 Subject: GFS2: glock statistics gathering The stats are divided into two sets: those relating to the super block and those relating to an individual glock. The super block stats are done on a per cpu basis in order to try and reduce the overhead of gathering them. They are also further divided by glock type. In the case of both the super block and glock statistics, the same information is gathered in each case. The super block statistics are used to provide default values for most of the glock statistics, so that newly created glocks should have, as far as possible, a sensible starting point. The statistics are divided into three pairs of mean and variance, plus two counters. The mean/variance pairs are smoothed exponential estimates and the algorithm used is one which will be very familiar to those used to calculation of round trip times in network code. The three pairs of mean/variance measure the following things: 1. DLM lock time (non-blocking requests) 2. DLM lock time (blocking requests) 3. Inter-request time (again to the DLM) A non-blocking request is one which will complete right away, whatever the state of the DLM lock in question. That currently means any requests when (a) the current state of the lock is exclusive (b) the requested state is either null or unlocked or (c) the "try lock" flag is set. A blocking request covers all the other lock requests. There are two counters. The first is there primarily to show how many lock requests have been made, and thus how much data has gone into the mean/variance calculations. The other counter is counting queueing of holders at the top layer of the glock code. Hopefully that number will be a lot larger than the number of dlm lock requests issued. So why gather these statistics? 
There are several reasons we'd like to get a better idea of these timings: 1. To be able to better set the glock "min hold time" 2. To spot performance issues more easily 3. To improve the algorithm for selecting resource groups for allocation (to base it on lock wait time, rather than blindly using a "try lock") Due to the smoothing action of the updates, a step change in some input quantity being sampled will only fully be taken into account after 8 samples (or 4 for the variance) and this needs to be carefully considered when interpreting the results. Knowing both the time it takes a lock request to complete and the average time between lock requests for a glock means we can compute the total percentage of the time for which the node is able to use a glock vs. time that the rest of the cluster has its share. That will be very useful when setting the lock min hold time. The other point to remember is that all times are in nanoseconds. Great care has been taken to ensure that we measure exactly the quantities that we want, as accurately as possible. There are always inaccuracies in any measuring system, but I hope this is as accurate as we can reasonably make it. Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 210 +++++++++++++++++++++++++++++++++++++++++++++++++-- fs/gfs2/incore.h | 49 ++++++++++-- fs/gfs2/lock_dlm.c | 123 ++++++++++++++++++++++++++++-- fs/gfs2/ops_fstype.c | 8 ++ fs/gfs2/trace_gfs2.h | 60 ++++++++++++++- 5 files changed, 431 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 351a3e797789..dab2526071cc 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "gfs2.h" #include "incore.h" @@ -543,6 +544,11 @@ __acquires(&gl->gl_spin) do_error(gl, 0); /* Fail queued try locks */ } gl->gl_req = target; + set_bit(GLF_BLOCKING, &gl->gl_flags); + if ((gl->gl_req == LM_ST_UNLOCKED) || + (gl->gl_state == LM_ST_EXCLUSIVE) || + (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) + clear_bit(GLF_BLOCKING, &gl->gl_flags); spin_unlock(&gl->gl_spin); if (glops->go_xmote_th) glops->go_xmote_th(gl); @@ -744,6 +750,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, return -ENOMEM; atomic_inc(&sdp->sd_glock_disposal); + gl->gl_sbd = sdp; gl->gl_flags = 0; gl->gl_name = name; atomic_set(&gl->gl_ref, 1); @@ -752,12 +759,17 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, gl->gl_demote_state = LM_ST_EXCLUSIVE; gl->gl_hash = hash; gl->gl_ops = glops; - snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number); + gl->gl_dstamp = ktime_set(0, 0); + preempt_disable(); + /* We use the global stats to estimate the initial per-glock stats */ + gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; + preempt_enable(); + gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; + gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); gl->gl_lksb.sb_lvbptr = gl->gl_lvb; gl->gl_tchange = jiffies; gl->gl_object = NULL; - gl->gl_sbd = sdp; gl->gl_hold_time = GL_GLOCK_DFT_HOLD; INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); INIT_WORK(&gl->gl_delete, delete_work_func); @@ -999,6 +1011,8 @@ fail: } set_bit(GLF_QUEUED, &gl->gl_flags); trace_gfs2_glock_queue(gh, 1); + gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); + gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); if (likely(insert_pt == NULL)) { list_add_tail(&gh->gh_list, &gl->gl_holders); if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) @@ -1658,6 +1672,8 @@ static const char 
*gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'L'; if (gl->gl_object) *p++ = 'o'; + if (test_bit(GLF_BLOCKING, gflags)) + *p++ = 'b'; *p = 0; return buf; } @@ -1714,8 +1730,78 @@ out: return error; } +static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr) +{ + struct gfs2_glock *gl = iter_ptr; + + seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n", + gl->gl_name.ln_type, + (unsigned long long)gl->gl_name.ln_number, + (long long)gl->gl_stats.stats[GFS2_LKS_SRTT], + (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], + (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], + (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], + (long long)gl->gl_stats.stats[GFS2_LKS_SIRT], + (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], + (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], + (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); + return 0; +} + +static const char *gfs2_gltype[] = { + "type", + "reserved", + "nondisk", + "inode", + "rgrp", + "meta", + "iopen", + "flock", + "plock", + "quota", + "journal", +}; + +static const char *gfs2_stype[] = { + [GFS2_LKS_SRTT] = "srtt", + [GFS2_LKS_SRTTVAR] = "srttvar", + [GFS2_LKS_SRTTB] = "srttb", + [GFS2_LKS_SRTTVARB] = "srttvarb", + [GFS2_LKS_SIRT] = "sirt", + [GFS2_LKS_SIRTVAR] = "sirtvar", + [GFS2_LKS_DCOUNT] = "dlm", + [GFS2_LKS_QCOUNT] = "queue", +}; + +#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype)) + +static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) +{ + struct gfs2_glock_iter *gi = seq->private; + struct gfs2_sbd *sdp = gi->sdp; + unsigned index = gi->hash >> 3; + unsigned subindex = gi->hash & 0x07; + s64 value; + int i; + + if (index == 0 && subindex != 0) + return 0; + seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], + (index == 0) ? 
"cpu": gfs2_stype[subindex]); + for_each_possible_cpu(i) { + const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); + if (index == 0) { + value = i; + } else { + value = lkstats->lkstats[index - 1].stats[subindex]; + } + seq_printf(seq, " %15lld", (long long)value); + } + seq_putc(seq, '\n'); + return 0; +} int __init gfs2_glock_init(void) { @@ -1828,6 +1914,35 @@ static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) return dump_glock(seq, iter_ptr); } +static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct gfs2_glock_iter *gi = seq->private; + + gi->hash = *pos; + if (*pos >= GFS2_NR_SBSTATS) + return NULL; + preempt_disable(); + return SEQ_START_TOKEN; +} + +static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, + loff_t *pos) +{ + struct gfs2_glock_iter *gi = seq->private; + (*pos)++; + gi->hash++; + if (gi->hash >= GFS2_NR_SBSTATS) { + preempt_enable(); + return NULL; + } + return SEQ_START_TOKEN; +} + +static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) +{ + preempt_enable(); +} + static const struct seq_operations gfs2_glock_seq_ops = { .start = gfs2_glock_seq_start, .next = gfs2_glock_seq_next, @@ -1835,7 +1950,21 @@ static const struct seq_operations gfs2_glock_seq_ops = { .show = gfs2_glock_seq_show, }; -static int gfs2_debugfs_open(struct inode *inode, struct file *file) +static const struct seq_operations gfs2_glstats_seq_ops = { + .start = gfs2_glock_seq_start, + .next = gfs2_glock_seq_next, + .stop = gfs2_glock_seq_stop, + .show = gfs2_glstats_seq_show, +}; + +static const struct seq_operations gfs2_sbstats_seq_ops = { + .start = gfs2_sbstats_seq_start, + .next = gfs2_sbstats_seq_next, + .stop = gfs2_sbstats_seq_stop, + .show = gfs2_sbstats_seq_show, +}; + +static int gfs2_glocks_open(struct inode *inode, struct file *file) { int ret = seq_open_private(file, &gfs2_glock_seq_ops, sizeof(struct gfs2_glock_iter)); @@ -1847,9 +1976,49 @@ static int gfs2_debugfs_open(struct inode *inode, struct file *file) return ret; } -static const struct file_operations gfs2_debug_fops = { +static int gfs2_glstats_open(struct inode *inode, struct file *file) +{ + int ret = seq_open_private(file, &gfs2_glstats_seq_ops, + sizeof(struct gfs2_glock_iter)); + if (ret == 0) { + struct seq_file *seq = file->private_data; + struct gfs2_glock_iter *gi = seq->private; + gi->sdp = inode->i_private; + } + return ret; +} + +static int gfs2_sbstats_open(struct inode *inode, struct file *file) +{ + int ret = seq_open_private(file, &gfs2_sbstats_seq_ops, + sizeof(struct gfs2_glock_iter)); + if (ret == 0) { + struct seq_file *seq = file->private_data; + struct gfs2_glock_iter *gi = seq->private; + gi->sdp = inode->i_private; + } + return ret; +} + +static const struct file_operations gfs2_glocks_fops = { + .owner = THIS_MODULE, + .open = gfs2_glocks_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +static const struct file_operations gfs2_glstats_fops = { .owner = THIS_MODULE, - .open = gfs2_debugfs_open, + .open = gfs2_glstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +static const struct file_operations gfs2_sbstats_fops = { + .owner = THIS_MODULE, + .open = gfs2_sbstats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, @@ -1863,20 +2032,45 @@ int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) sdp->debugfs_dentry_glocks = debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, - 
&gfs2_debug_fops); + &gfs2_glocks_fops); if (!sdp->debugfs_dentry_glocks) - return -ENOMEM; + goto fail; + + sdp->debugfs_dentry_glstats = debugfs_create_file("glstats", + S_IFREG | S_IRUGO, + sdp->debugfs_dir, sdp, + &gfs2_glstats_fops); + if (!sdp->debugfs_dentry_glstats) + goto fail; + + sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats", + S_IFREG | S_IRUGO, + sdp->debugfs_dir, sdp, + &gfs2_sbstats_fops); + if (!sdp->debugfs_dentry_sbstats) + goto fail; return 0; +fail: + gfs2_delete_debugfs_file(sdp); + return -ENOMEM; } void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) { - if (sdp && sdp->debugfs_dir) { + if (sdp->debugfs_dir) { if (sdp->debugfs_dentry_glocks) { debugfs_remove(sdp->debugfs_dentry_glocks); sdp->debugfs_dentry_glocks = NULL; } + if (sdp->debugfs_dentry_glstats) { + debugfs_remove(sdp->debugfs_dentry_glstats); + sdp->debugfs_dentry_glstats = NULL; + } + if (sdp->debugfs_dentry_sbstats) { + debugfs_remove(sdp->debugfs_dentry_sbstats); + sdp->debugfs_dentry_sbstats = NULL; + } debugfs_remove(sdp->debugfs_dir); sdp->debugfs_dir = NULL; } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 97742a7ea9cc..4d546df58ac9 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -19,6 +19,8 @@ #include #include #include +#include +#include #define DIO_WAIT 0x00000010 #define DIO_METADATA 0x00000020 @@ -204,6 +206,22 @@ struct gfs2_glock_operations { #define GLOF_ASPACE 1 }; +enum { + GFS2_LKS_SRTT = 0, /* Non blocking smoothed round trip time */ + GFS2_LKS_SRTTVAR = 1, /* Non blocking smoothed variance */ + GFS2_LKS_SRTTB = 2, /* Blocking smoothed round trip time */ + GFS2_LKS_SRTTVARB = 3, /* Blocking smoothed variance */ + GFS2_LKS_SIRT = 4, /* Smoothed Inter-request time */ + GFS2_LKS_SIRTVAR = 5, /* Smoothed Inter-request variance */ + GFS2_LKS_DCOUNT = 6, /* Count of dlm requests */ + GFS2_LKS_QCOUNT = 7, /* Count of gfs2_holder queues */ + GFS2_NR_LKSTATS +}; + +struct gfs2_lkstats { + s64 stats[GFS2_NR_LKSTATS]; +}; + enum { /* States */ HIF_HOLDER = 6, /* Set for gh that "holds" the glock */ @@ -238,10 +256,12 @@ enum { GLF_QUEUED = 12, GLF_LRU = 13, GLF_OBJECT = 14, /* Used only for tracing */ + GLF_BLOCKING = 15, }; struct gfs2_glock { struct hlist_bl_node gl_list; + struct gfs2_sbd *gl_sbd; unsigned long gl_flags; /* GLF_... */ struct lm_lockname gl_name; atomic_t gl_ref; @@ -261,16 +281,14 @@ struct gfs2_glock { struct list_head gl_holders; const struct gfs2_glock_operations *gl_ops; - char gl_strname[GDLM_STRNAME_BYTES]; + ktime_t gl_dstamp; + struct gfs2_lkstats gl_stats; struct dlm_lksb gl_lksb; char gl_lvb[32]; unsigned long gl_tchange; void *gl_object; struct list_head gl_lru; - - struct gfs2_sbd *gl_sbd; - struct list_head gl_ail_list; atomic_t gl_ail_count; atomic_t gl_revokes; @@ -560,8 +578,14 @@ struct lm_lockstruct { uint32_t *ls_recover_result; /* result of last jid recovery */ }; +struct gfs2_pcpu_lkstats { + /* One struct for each glock type */ + struct gfs2_lkstats lkstats[10]; +}; + struct gfs2_sbd { struct super_block *sd_vfs; + struct gfs2_pcpu_lkstats __percpu *sd_lkstats; struct kobject sd_kobj; unsigned long sd_flags; /* SDF_... 
*/ struct gfs2_sb_host sd_sb; @@ -725,8 +749,23 @@ struct gfs2_sbd { unsigned long sd_last_warning; struct dentry *debugfs_dir; /* debugfs directory */ - struct dentry *debugfs_dentry_glocks; /* for debugfs */ + struct dentry *debugfs_dentry_glocks; + struct dentry *debugfs_dentry_glstats; + struct dentry *debugfs_dentry_sbstats; }; +static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) +{ + gl->gl_stats.stats[which]++; +} + +static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which) +{ + const struct gfs2_sbd *sdp = gl->gl_sbd; + preempt_disable(); + this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++; + preempt_enable(); +} + #endif /* __INCORE_DOT_H__ */ diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 8944d1e32ab5..f8411bd1b805 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -18,14 +18,106 @@ #include "glock.h" #include "util.h" #include "sys.h" +#include "trace_gfs2.h" extern struct workqueue_struct *gfs2_control_wq; +/** + * gfs2_update_stats - Update time based stats + * @mv: Pointer to mean/variance structure to update + * @sample: New data to include + * + * @delta is the difference between the current rtt sample and the + * running average srtt. We add 1/8 of that to the srtt in order to + * update the current srtt estimate. The varience estimate is a bit + * more complicated. We subtract the abs value of the @delta from + * the current variance estimate and add 1/4 of that to the running + * total. + * + * Note that the index points at the array entry containing the smoothed + * mean value, and the variance is always in the following entry + * + * Reference: TCP/IP Illustrated, vol 2, p. 831,832 + * All times are in units of integer nanoseconds. Unlike the TCP/IP case, + * they are not scaled fixed point. + */ + +static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index, + s64 sample) +{ + s64 delta = sample - s->stats[index]; + s->stats[index] += (delta >> 3); + index++; + s->stats[index] += ((abs64(delta) - s->stats[index]) >> 2); +} + +/** + * gfs2_update_reply_times - Update locking statistics + * @gl: The glock to update + * + * This assumes that gl->gl_dstamp has been set earlier. + * + * The rtt (lock round trip time) is an estimate of the time + * taken to perform a dlm lock request. We update it on each + * reply from the dlm. + * + * The blocking flag is set on the glock for all dlm requests + * which may potentially block due to lock requests from other nodes. + * DLM requests where the current lock state is exclusive, the + * requested state is null (or unlocked) or where the TRY or + * TRY_1CB flags are set are classified as non-blocking. All + * other DLM requests are counted as (potentially) blocking. + */ +static inline void gfs2_update_reply_times(struct gfs2_glock *gl) +{ + struct gfs2_pcpu_lkstats *lks; + const unsigned gltype = gl->gl_name.ln_type; + unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ? + GFS2_LKS_SRTTB : GFS2_LKS_SRTT; + s64 rtt; + + preempt_disable(); + rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); + lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats); + gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ + gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */ + preempt_enable(); + + trace_gfs2_glock_lock_time(gl, rtt); +} + +/** + * gfs2_update_request_times - Update locking statistics + * @gl: The glock to update + * + * The irt (lock inter-request times) measures the average time + * between requests to the dlm. 
It is updated immediately before + * each dlm call. + */ + +static inline void gfs2_update_request_times(struct gfs2_glock *gl) +{ + struct gfs2_pcpu_lkstats *lks; + const unsigned gltype = gl->gl_name.ln_type; + ktime_t dstamp; + s64 irt; + + preempt_disable(); + dstamp = gl->gl_dstamp; + gl->gl_dstamp = ktime_get_real(); + irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp)); + lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats); + gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */ + gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */ + preempt_enable(); +} + static void gdlm_ast(void *arg) { struct gfs2_glock *gl = arg; unsigned ret = gl->gl_state; + gfs2_update_reply_times(gl); BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) @@ -111,7 +203,7 @@ static int make_mode(const unsigned int lmstate) static u32 make_flags(const u32 lkid, const unsigned int gfs_flags, const int req) { - u32 lkf = 0; + u32 lkf = DLM_LKF_VALBLK; if (gfs_flags & LM_FLAG_TRY) lkf |= DLM_LKF_NOQUEUE; @@ -138,26 +230,43 @@ static u32 make_flags(const u32 lkid, const unsigned int gfs_flags, if (lkid != 0) lkf |= DLM_LKF_CONVERT; - lkf |= DLM_LKF_VALBLK; - return lkf; } +static void gfs2_reverse_hex(char *c, u64 value) +{ + while (value) { + *c-- = hex_asc[value & 0x0f]; + value >>= 4; + } +} + static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, unsigned int flags) { struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; int req; u32 lkf; + char strname[GDLM_STRNAME_BYTES] = ""; req = make_mode(req_state); lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req); - + gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); + gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); + if (gl->gl_lksb.sb_lkid) { + gfs2_update_request_times(gl); + } else { + memset(strname, ' ', GDLM_STRNAME_BYTES - 1); + strname[GDLM_STRNAME_BYTES - 1] = '\0'; + gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type); + gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number); + gl->gl_dstamp = ktime_get_real(); + } /* * Submit the actual lock request. 
*/ - return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, gl->gl_strname, + return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname, GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast); } @@ -172,6 +281,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl) return; } + clear_bit(GLF_BLOCKING, &gl->gl_flags); + gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); + gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); + gfs2_update_request_times(gl); error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, NULL, gl); if (error) { diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 24f609c9ef91..a55baa7f3239 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -68,6 +68,12 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) sb->s_fs_info = sdp; sdp->sd_vfs = sb; + sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats); + if (!sdp->sd_lkstats) { + kfree(sdp); + return NULL; + } + set_bit(SDF_NOJOURNALID, &sdp->sd_flags); gfs2_tune_init(&sdp->sd_tune); @@ -1221,6 +1227,7 @@ fail_sys: gfs2_sys_fs_del(sdp); fail: gfs2_delete_debugfs_file(sdp); + free_percpu(sdp->sd_lkstats); kfree(sdp); sb->s_fs_info = NULL; return error; @@ -1393,6 +1400,7 @@ static void gfs2_kill_sb(struct super_block *sb) shrink_dcache_sb(sb); kill_block_super(sb); gfs2_delete_debugfs_file(sdp); + free_percpu(sdp->sd_lkstats); kfree(sdp); } diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h index 5d07609ec57d..dfa89cd75534 100644 --- a/fs/gfs2/trace_gfs2.h +++ b/fs/gfs2/trace_gfs2.h @@ -11,6 +11,7 @@ #include #include #include +#include #include "incore.h" #include "glock.h" @@ -43,7 +44,8 @@ {(1UL << GLF_FROZEN), "F" }, \ {(1UL << GLF_QUEUED), "q" }, \ {(1UL << GLF_LRU), "L" }, \ - {(1UL << GLF_OBJECT), "o" }) + {(1UL << GLF_OBJECT), "o" }, \ + {(1UL << GLF_BLOCKING), "b" }) #ifndef NUMPTY #define NUMPTY @@ -236,6 +238,62 @@ TRACE_EVENT(gfs2_glock_queue, glock_trace_name(__entry->state)) ); +/* DLM sends a reply to GFS2 */ +TRACE_EVENT(gfs2_glock_lock_time, + + TP_PROTO(const struct gfs2_glock *gl, s64 tdiff), + + TP_ARGS(gl, tdiff), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( u64, glnum ) + __field( u32, gltype ) + __field( int, status ) + __field( char, flags ) + __field( s64, tdiff ) + __field( s64, srtt ) + __field( s64, srttvar ) + __field( s64, srttb ) + __field( s64, srttvarb ) + __field( s64, sirt ) + __field( s64, sirtvar ) + __field( s64, dcount ) + __field( s64, qcount ) + ), + + TP_fast_assign( + __entry->dev = gl->gl_sbd->sd_vfs->s_dev; + __entry->glnum = gl->gl_name.ln_number; + __entry->gltype = gl->gl_name.ln_type; + __entry->status = gl->gl_lksb.sb_status; + __entry->flags = gl->gl_lksb.sb_flags; + __entry->tdiff = tdiff; + __entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT]; + __entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR]; + __entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB]; + __entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB]; + __entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT]; + __entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR]; + __entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT]; + __entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT]; + ), + + TP_printk("%u,%u glock %d:%lld status:%d flags:%02x tdiff:%lld srtt:%lld/%lld srttb:%lld/%lld sirt:%lld/%lld dcnt:%lld qcnt:%lld", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype, + (unsigned long long)__entry->glnum, + __entry->status, __entry->flags, + (long long)__entry->tdiff, + (long long)__entry->srtt, + (long long)__entry->srttvar, + (long long)__entry->srttb, + (long 
long)__entry->srttvarb, + (long long)__entry->sirt, + (long long)__entry->sirtvar, + (long long)__entry->dcount, + (long long)__entry->qcount) +); + /* Section 2 - Log/journal * * Objectives: -- cgit From 47ac5537a794fc71f89d51af492a945bd233f70c Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 3 Feb 2012 15:21:59 +0000 Subject: GFS2: Move two functions from log.c to lops.c gfs2_log_get_buf() and gfs2_log_fake_buf() are both used only in lops.c, so move them next to their callers and they can then become static. Signed-off-by: Steven Whitehouse --- fs/gfs2/log.c | 101 +++------------------------------------------------------ fs/gfs2/log.h | 5 +-- fs/gfs2/lops.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+), 101 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 756fae9eaf8f..4d31379265cb 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -358,7 +358,7 @@ retry: return 0; } -static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn) +u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn) { struct gfs2_journal_extent *je; @@ -467,8 +467,8 @@ static unsigned int current_tail(struct gfs2_sbd *sdp) void gfs2_log_incr_head(struct gfs2_sbd *sdp) { - if (sdp->sd_log_flush_head == sdp->sd_log_tail) - BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head); + BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) && + (sdp->sd_log_flush_head != sdp->sd_log_head)); if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) { sdp->sd_log_flush_head = 0; @@ -476,99 +476,6 @@ void gfs2_log_incr_head(struct gfs2_sbd *sdp) } } -/** - * gfs2_log_write_endio - End of I/O for a log buffer - * @bh: The buffer head - * @uptodate: I/O Status - * - */ - -static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate) -{ - struct gfs2_sbd *sdp = bh->b_private; - bh->b_private = NULL; - - end_buffer_write_sync(bh, uptodate); - if (atomic_dec_and_test(&sdp->sd_log_in_flight)) - wake_up(&sdp->sd_log_flush_wait); -} - -/** - * gfs2_log_get_buf - Get and initialize a buffer to use for log control data - * @sdp: The GFS2 superblock - * - * Returns: the buffer_head - */ - -struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp) -{ - u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head); - struct buffer_head *bh; - - bh = sb_getblk(sdp->sd_vfs, blkno); - lock_buffer(bh); - memset(bh->b_data, 0, bh->b_size); - set_buffer_uptodate(bh); - clear_buffer_dirty(bh); - gfs2_log_incr_head(sdp); - atomic_inc(&sdp->sd_log_in_flight); - bh->b_private = sdp; - bh->b_end_io = gfs2_log_write_endio; - - return bh; -} - -/** - * gfs2_fake_write_endio - - * @bh: The buffer head - * @uptodate: The I/O Status - * - */ - -static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate) -{ - struct buffer_head *real_bh = bh->b_private; - struct gfs2_bufdata *bd = real_bh->b_private; - struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd; - - end_buffer_write_sync(bh, uptodate); - free_buffer_head(bh); - unlock_buffer(real_bh); - brelse(real_bh); - if (atomic_dec_and_test(&sdp->sd_log_in_flight)) - wake_up(&sdp->sd_log_flush_wait); -} - -/** - * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log - * @sdp: the filesystem - * @data: the data the buffer_head should point to - * - * Returns: the log buffer descriptor - */ - -struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp, - struct buffer_head *real) -{ - u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head); - struct buffer_head *bh; - - bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL); 
- atomic_set(&bh->b_count, 1); - bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock); - set_bh_page(bh, real->b_page, bh_offset(real)); - bh->b_blocknr = blkno; - bh->b_size = sdp->sd_sb.sb_bsize; - bh->b_bdev = sdp->sd_vfs->s_bdev; - bh->b_private = real; - bh->b_end_io = gfs2_fake_write_endio; - - gfs2_log_incr_head(sdp); - atomic_inc(&sdp->sd_log_in_flight); - - return bh; -} - static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) { unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail); @@ -592,7 +499,7 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) { - u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head); + u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); struct buffer_head *bh; struct gfs2_log_header *lh; unsigned int tail; diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index ab0621698b73..ff07454b582c 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -53,10 +53,7 @@ extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); extern void gfs2_log_incr_head(struct gfs2_sbd *sdp); - -extern struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp); -extern struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp, - struct buffer_head *real); +extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn); extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl); extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd); diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 0301be655b12..8e323c4b7983 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -143,6 +143,98 @@ static inline __be64 *bh_ptr_end(struct buffer_head *bh) return (__force __be64 *)(bh->b_data + bh->b_size); } +/** + * gfs2_log_write_endio - End of I/O for a log buffer + * @bh: The buffer head + * @uptodate: I/O Status + * + */ + +static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate) +{ + struct gfs2_sbd *sdp = bh->b_private; + bh->b_private = NULL; + + end_buffer_write_sync(bh, uptodate); + if (atomic_dec_and_test(&sdp->sd_log_in_flight)) + wake_up(&sdp->sd_log_flush_wait); +} + +/** + * gfs2_log_get_buf - Get and initialize a buffer to use for log control data + * @sdp: The GFS2 superblock + * + * tReturns: the buffer_head + */ + +static struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp) +{ + u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); + struct buffer_head *bh; + + bh = sb_getblk(sdp->sd_vfs, blkno); + lock_buffer(bh); + memset(bh->b_data, 0, bh->b_size); + set_buffer_uptodate(bh); + clear_buffer_dirty(bh); + gfs2_log_incr_head(sdp); + atomic_inc(&sdp->sd_log_in_flight); + bh->b_private = sdp; + bh->b_end_io = gfs2_log_write_endio; + + return bh; +} + +/** + * gfs2_fake_write_endio - + * @bh: The buffer head + * @uptodate: The I/O Status + * + */ + +static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate) +{ + struct buffer_head *real_bh = bh->b_private; + struct gfs2_bufdata *bd = real_bh->b_private; + struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd; + + end_buffer_write_sync(bh, uptodate); + free_buffer_head(bh); + unlock_buffer(real_bh); + brelse(real_bh); + if (atomic_dec_and_test(&sdp->sd_log_in_flight)) + wake_up(&sdp->sd_log_flush_wait); +} + +/** + * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log + * @sdp: the 
filesystem + * @data: the data the buffer_head should point to + * + * Returns: the log buffer descriptor + */ + +static struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp, + struct buffer_head *real) +{ + u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); + struct buffer_head *bh; + + bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL); + atomic_set(&bh->b_count, 1); + bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock); + set_bh_page(bh, real->b_page, bh_offset(real)); + bh->b_blocknr = blkno; + bh->b_size = sdp->sd_sb.sb_bsize; + bh->b_bdev = sdp->sd_vfs->s_bdev; + bh->b_private = real; + bh->b_end_io = gfs2_fake_write_endio; + + gfs2_log_incr_head(sdp); + atomic_inc(&sdp->sd_log_in_flight); + + return bh; +} static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type) { -- cgit From 66fc061bda3526650328b73f69985da3518c4256 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 8 Feb 2012 12:58:32 +0000 Subject: GFS2: FITRIM ioctl support The FITRIM ioctl provides an alternative way to send discard requests to the underlying device. Using the discard mount option results in every freed block generating a discard request to the block device. This can be slow, since many block devices can only process discard requests of larger sizes, and also such operations can be time consuming. Rather than using the discard mount option, FITRIM allows a sweep of the filesystem on an occasional basis, and also to optionally avoid sending down discard requests for smaller regions. In GFS2 FITRIM will work at resource group granularity. There is a flag for each resource group which keeps track of which resource groups have been trimmed. This flag is reset whenever a deallocation occurs in the resource group, and set whenever a successful FITRIM of that resource group has taken place. This helps to reduce repeated discard requests for the same block ranges, again improving performance. 
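As a usage aside (not part of the patch): FITRIM is driven from user space the same way fstrim(8) drives it, through the generic ioctl and struct fstrim_range from linux/fs.h. A minimal sketch, assuming only those standard uapi definitions and nothing GFS2-specific, could look like the program below; on return the kernel writes the number of bytes actually trimmed back into range.len, which is what the copy_to_user() at the end of gfs2_fitrim() below supplies.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>           /* FITRIM, struct fstrim_range */

    int main(int argc, char **argv)
    {
            struct fstrim_range range;
            int fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <any file or dir on the fs>\n", argv[0]);
                    return 1;
            }
            fd = open(argv[1], O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            memset(&range, 0, sizeof(range));
            range.start = 0;
            range.len = (__u64)-1;  /* sweep the whole filesystem */
            range.minlen = 0;       /* no minimum extent size */
            if (ioctl(fd, FITRIM, &range) < 0) {
                    perror("FITRIM");
                    close(fd);
                    return 1;
            }
            /* the kernel reports how much was actually discarded */
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
            close(fd);
            return 0;
    }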
Signed-off-by: Steven Whitehouse --- fs/gfs2/file.c | 2 + fs/gfs2/inode.c | 4 +- fs/gfs2/lops.c | 2 +- fs/gfs2/rgrp.c | 164 +++++++++++++++++++++++++++++++++++++++++++++++--------- fs/gfs2/rgrp.h | 10 ++-- fs/gfs2/super.c | 2 +- fs/gfs2/xattr.c | 4 +- 7 files changed, 152 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index c5fb3597f696..310f2fb6f7ea 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -313,6 +313,8 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return gfs2_get_flags(filp, (u32 __user *)arg); case FS_IOC_SETFLAGS: return gfs2_set_flags(filp, (u32 __user *)arg); + case FITRIM: + return gfs2_fitrim(filp, (void __user *)arg); } return -ENOTTY; } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 56987460cdae..c98a60ee6dfd 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -1036,7 +1036,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry) gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); - rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); + rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); if (!rgd) goto out_inodes; @@ -1255,7 +1255,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, * this is the case of the target file already existing * so we unlink before doing the rename */ - nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr); + nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr, 1); if (nrgd) gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++); } diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 8e323c4b7983..fe369bd9e10c 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -76,7 +76,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd) if (bi->bi_clone == 0) return; if (sdp->sd_args.ar_discard) - gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi); + gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL); memcpy(bi->bi_clone + bi->bi_offset, bd->bd_bh->b_data + bi->bi_offset, bi->bi_len); clear_bit(GBF_FULL, &bi->bi_flags); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 49ada95209d0..1446b4e0ac73 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -327,23 +327,31 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block) * Returns: The resource group, or NULL if not found */ -struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk) +struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact) { - struct rb_node **newn; + struct rb_node *n, *next; struct gfs2_rgrpd *cur; spin_lock(&sdp->sd_rindex_spin); - newn = &sdp->sd_rindex_tree.rb_node; - while (*newn) { - cur = rb_entry(*newn, struct gfs2_rgrpd, rd_node); + n = sdp->sd_rindex_tree.rb_node; + while (n) { + cur = rb_entry(n, struct gfs2_rgrpd, rd_node); + next = NULL; if (blk < cur->rd_addr) - newn = &((*newn)->rb_left); + next = n->rb_left; else if (blk >= cur->rd_data0 + cur->rd_data) - newn = &((*newn)->rb_right); - else { + next = n->rb_right; + if (next == NULL) { spin_unlock(&sdp->sd_rindex_spin); + if (exact) { + if (blk < cur->rd_addr) + return NULL; + if (blk >= cur->rd_data0 + cur->rd_data) + return NULL; + } return cur; } + n = next; } spin_unlock(&sdp->sd_rindex_spin); @@ -810,9 +818,9 @@ void gfs2_rgrp_go_unlock(struct gfs2_holder *gh) } -void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, +int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, struct buffer_head *bh, - const struct gfs2_bitmap *bi) + const struct gfs2_bitmap *bi, unsigned minlen, 
u64 *ptrimmed) { struct super_block *sb = sdp->sd_vfs; struct block_device *bdev = sb->s_bdev; @@ -823,11 +831,19 @@ void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, sector_t nr_sects = 0; int rv; unsigned int x; + u32 trimmed = 0; + u8 diff; for (x = 0; x < bi->bi_len; x++) { - const u8 *orig = bh->b_data + bi->bi_offset + x; - const u8 *clone = bi->bi_clone + bi->bi_offset + x; - u8 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1)); + const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data; + clone += bi->bi_offset; + clone += x; + if (bh) { + const u8 *orig = bh->b_data + bi->bi_offset + x; + diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1)); + } else { + diff = ~(*clone | (*clone >> 1)); + } diff &= 0x55; if (diff == 0) continue; @@ -838,11 +854,14 @@ void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, if (nr_sects == 0) goto start_new_extent; if ((start + nr_sects) != blk) { - rv = blkdev_issue_discard(bdev, start, - nr_sects, GFP_NOFS, - 0); - if (rv) - goto fail; + if (nr_sects >= minlen) { + rv = blkdev_issue_discard(bdev, + start, nr_sects, + GFP_NOFS, 0); + if (rv) + goto fail; + trimmed += nr_sects; + } nr_sects = 0; start_new_extent: start = blk; @@ -853,15 +872,108 @@ start_new_extent: blk += sects_per_blk; } } - if (nr_sects) { + if (nr_sects >= minlen) { rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0); if (rv) goto fail; + trimmed += nr_sects; } - return; + if (ptrimmed) + *ptrimmed = trimmed; + return 0; + fail: - fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv); + if (sdp->sd_args.ar_discard) + fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv); sdp->sd_args.ar_discard = 0; + return -EIO; +} + +/** + * gfs2_fitrim - Generate discard requests for unused bits of the filesystem + * @filp: Any file on the filesystem + * @argp: Pointer to the arguments (also used to pass result) + * + * Returns: 0 on success, otherwise error code + */ + +int gfs2_fitrim(struct file *filp, void __user *argp) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct gfs2_sbd *sdp = GFS2_SB(inode); + struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev); + struct buffer_head *bh; + struct gfs2_rgrpd *rgd; + struct gfs2_rgrpd *rgd_end; + struct gfs2_holder gh; + struct fstrim_range r; + int ret = 0; + u64 amt; + u64 trimmed = 0; + unsigned int x; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (!blk_queue_discard(q)) + return -EOPNOTSUPP; + + ret = gfs2_rindex_update(sdp); + if (ret) + return ret; + + if (argp == NULL) { + r.start = 0; + r.len = ULLONG_MAX; + r.minlen = 0; + } else if (copy_from_user(&r, argp, sizeof(r))) + return -EFAULT; + + rgd = gfs2_blk2rgrpd(sdp, r.start, 0); + rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0); + + while (1) { + + ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh); + if (ret) + goto out; + + if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) { + /* Trim each bitmap in the rgrp */ + for (x = 0; x < rgd->rd_length; x++) { + struct gfs2_bitmap *bi = rgd->rd_bits + x; + ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt); + if (ret) { + gfs2_glock_dq_uninit(&gh); + goto out; + } + trimmed += amt; + } + + /* Mark rgrp as having been trimmed */ + ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0); + if (ret == 0) { + bh = rgd->rd_bits[0].bi_bh; + rgd->rd_flags |= GFS2_RGF_TRIMMED; + gfs2_trans_add_bh(rgd->rd_gl, bh, 1); + gfs2_rgrp_out(rgd, bh->b_data); + gfs2_trans_end(sdp); + } + } + 
gfs2_glock_dq_uninit(&gh); + + if (rgd == rgd_end) + break; + + rgd = gfs2_rgrpd_get_next(rgd); + } + +out: + r.len = trimmed << 9; + if (argp && copy_to_user(argp, &r, sizeof(r))) + return -EFAULT; + + return ret; } /** @@ -1008,7 +1120,7 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) rgd = begin = ip->i_rgd; else - rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal); + rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); if (rgd == NULL) return -EBADSLT; @@ -1293,7 +1405,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, u32 length, rgrp_blk, buf_blk; unsigned int buf; - rgd = gfs2_blk2rgrpd(sdp, bstart); + rgd = gfs2_blk2rgrpd(sdp, bstart, 1); if (!rgd) { if (gfs2_consist(sdp)) fs_err(sdp, "block = %llu\n", (unsigned long long)bstart); @@ -1474,7 +1586,7 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) return; trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE); rgd->rd_free += blen; - + rgd->rd_flags &= ~GFS2_RGF_TRIMMED; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); @@ -1567,7 +1679,7 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) return error; error = -EINVAL; - rgd = gfs2_blk2rgrpd(sdp, no_addr); + rgd = gfs2_blk2rgrpd(sdp, no_addr, 1); if (!rgd) goto fail; @@ -1610,7 +1722,7 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block)) rgd = ip->i_rgd; else - rgd = gfs2_blk2rgrpd(sdp, block); + rgd = gfs2_blk2rgrpd(sdp, block, 1); if (!rgd) { fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block); return; diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index ceec9106cdf4..b4b10f4de25f 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h @@ -11,6 +11,7 @@ #define __RGRP_DOT_H__ #include +#include struct gfs2_rgrpd; struct gfs2_sbd; @@ -18,7 +19,7 @@ struct gfs2_holder; extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd); -extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk); +extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact); extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp); extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd); @@ -62,8 +63,9 @@ extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state); extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist); extern u64 gfs2_ri_total(struct gfs2_sbd *sdp); extern int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl); -extern void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, - struct buffer_head *bh, - const struct gfs2_bitmap *bi); +extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, + struct buffer_head *bh, + const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed); +extern int gfs2_fitrim(struct file *filp, void __user *argp); #endif /* __RGRP_DOT_H__ */ diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 4553ce515f62..f3faf72fa7ae 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1417,7 +1417,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip) if (error) goto out; - rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); + rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); if (!rgd) { gfs2_consist_inode(ip); error = -EIO; diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index e9636591b5d5..2e5ba425cae7 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c @@ 
-251,7 +251,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, if (!blks) return 0; - rgd = gfs2_blk2rgrpd(sdp, bn); + rgd = gfs2_blk2rgrpd(sdp, bn, 1); if (!rgd) { gfs2_consist_inode(ip); return -EIO; @@ -1439,7 +1439,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip) struct gfs2_holder gh; int error; - rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr); + rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1); if (!rgd) { gfs2_consist_inode(ip); return -EIO; -- cgit From 4a36d08d0d1cba0581d1656739102ce936f26557 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Tue, 14 Feb 2012 14:49:57 -0500 Subject: GFS2: Sort the ordered write list This patch sorts the ordered write list for GFS2 writes. This increases the throughput for simultaneous writes. For example, if you have ten processes, all doing: dd if=/dev/zero of=/mnt/gfs2/fileX on different files, the throughput will be much better. Signed-off-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/log.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'fs') diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 4d31379265cb..b8fe7b739c27 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "gfs2.h" #include "incore.h" @@ -566,6 +567,20 @@ static void log_flush_commit(struct gfs2_sbd *sdp) log_write_header(sdp, 0, 0); } +int bd_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct gfs2_bufdata *bda, *bdb; + + bda = list_entry(a, struct gfs2_bufdata, bd_le.le_list); + bdb = list_entry(b, struct gfs2_bufdata, bd_le.le_list); + + if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr) + return -1; + if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr) + return 1; + return 0; +} + static void gfs2_ordered_write(struct gfs2_sbd *sdp) { struct gfs2_bufdata *bd; @@ -573,6 +588,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp) LIST_HEAD(written); gfs2_log_lock(sdp); + list_sort(NULL, &sdp->sd_log_le_ordered, &bd_cmp); while (!list_empty(&sdp->sd_log_le_ordered)) { bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list); list_move(&bd->bd_le.le_list, &written); -- cgit From 08728f2d8b0ebf01618d3d63e69966f7d43859b9 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 21 Feb 2012 11:14:00 +0000 Subject: GFS2: Make bd_cmp() static Add missing static to bd_cmp() Signed-off-by: Steven Whitehouse --- fs/gfs2/log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index b8fe7b739c27..2b9f0d9b1b28 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -567,7 +567,7 @@ static void log_flush_commit(struct gfs2_sbd *sdp) log_write_header(sdp, 0, 0); } -int bd_cmp(void *priv, struct list_head *a, struct list_head *b) +static int bd_cmp(void *priv, struct list_head *a, struct list_head *b) { struct gfs2_bufdata *bda, *bdb; -- cgit From 630d9c47274aa89bfa77fe6556d7818bdcb12992 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 16 Nov 2011 23:57:37 -0500 Subject: fs: reduce the use of module.h wherever possible For files only using THIS_MODULE and/or EXPORT_SYMBOL, map them onto including export.h -- or if the file isn't even using those, then just delete the include. Fix up any implicit include dependencies that were being masked by module.h along the way. 
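To make the mapping concrete (a hypothetical file, not one touched by this series): a source file whose only module-related usage is EXPORT_SYMBOL can drop the heavyweight header and pull in just <linux/export.h>, while files that use more of the module machinery (MODULE_LICENSE(), module_init() and friends) keep <linux/module.h>.

    /* foo.c - hypothetical example; only exports a symbol, so export.h suffices */
    #include <linux/export.h>       /* was: #include <linux/module.h> */
    #include <linux/kernel.h>

    int foo_answer(void)
    {
            return 42;
    }
    EXPORT_SYMBOL(foo_answer);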
Signed-off-by: Paul Gortmaker --- fs/aio.c | 2 +- fs/attr.c | 2 +- fs/bad_inode.c | 2 +- fs/binfmt_flat.c | 2 +- fs/bio.c | 2 +- fs/buffer.c | 2 +- fs/compat.c | 1 - fs/compat_ioctl.c | 1 - fs/dcache.c | 2 +- fs/dcookies.c | 2 +- fs/eventfd.c | 2 +- fs/file.c | 2 +- fs/fs-writeback.c | 2 +- fs/fs_struct.c | 2 +- fs/inode.c | 2 +- fs/ioctl.c | 2 +- fs/libfs.c | 2 +- fs/mpage.c | 2 +- fs/namei.c | 2 +- fs/posix_acl.c | 2 +- fs/read_write.c | 2 +- fs/readdir.c | 2 +- fs/select.c | 2 +- fs/seq_file.c | 2 +- fs/splice.c | 2 +- fs/stack.c | 2 +- fs/stat.c | 2 +- fs/statfs.c | 2 +- fs/super.c | 2 +- fs/sync.c | 2 +- fs/xattr.c | 2 +- fs/xattr_acl.c | 2 +- 32 files changed, 30 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 969beb0e2231..4b5e06390db0 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/attr.c b/fs/attr.c index 95053ad8abcc..73f69a6ce9ed 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -5,7 +5,7 @@ * changes by Thomas Schoebel-Theuer */ -#include +#include #include #include #include diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 22e9a78872ff..37268c5bb98b 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #include #include diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 1bffbe0ed778..3e27232e3574 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -15,7 +15,7 @@ * JAN/99 -- coded full program relocation (gerg@snapgear.com) */ -#include +#include #include #include #include diff --git a/fs/bio.c b/fs/bio.c index b980ecde026a..e453924036e9 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include /* for struct sg_iovec */ diff --git a/fs/buffer.c b/fs/buffer.c index 1a30db77af32..70e2017edd70 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/compat.c b/fs/compat.c index 07880bae28a9..14483a715bbb 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index a26bea10e81b..4d3eec7418e3 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/dcache.c b/fs/dcache.c index fe19ac13f75f..303ebd98bc80 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/dcookies.c b/fs/dcookies.c index dda0dc702d1b..17c779967828 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c @@ -13,7 +13,7 @@ */ #include -#include +#include #include #include #include diff --git a/fs/eventfd.c b/fs/eventfd.c index d9a591773919..dba15fecf23e 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/fs/file.c b/fs/file.c index 4c6992d8f3ba..3c426de7203a 100644 --- a/fs/file.c +++ b/fs/file.c @@ -6,7 +6,7 @@ * Manage the dynamic fd arrays in the process files_struct. 
*/ -#include +#include #include #include #include diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 5b4a9362d5aa..92fcb19e4102 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include #include #include diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 78b519c13536..a2d1db2ea984 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -1,4 +1,4 @@ -#include +#include #include #include #include diff --git a/fs/inode.c b/fs/inode.c index d3ebdbe723d0..cd8cffcb75f3 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/ioctl.c b/fs/ioctl.c index 066836e81848..29167bebe874 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/libfs.c b/fs/libfs.c index 5b2dbb3ba4fc..001e25be4b65 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -3,7 +3,7 @@ * Library for filesystems writers. */ -#include +#include #include #include #include diff --git a/fs/mpage.c b/fs/mpage.c index 643e9f55ef29..0face1c4d4c6 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -13,7 +13,7 @@ */ #include -#include +#include #include #include #include diff --git a/fs/namei.c b/fs/namei.c index a780ea515c47..fa549f27f019 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include #include #include diff --git a/fs/posix_acl.c b/fs/posix_acl.c index cea4623f1ed6..5e325a42e33d 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include diff --git a/fs/read_write.c b/fs/read_write.c index 5ad4248b0cd8..ffc99d22e0a3 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/readdir.c b/fs/readdir.c index 356f71528ad6..cc0a8227cddf 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include #include diff --git a/fs/select.c b/fs/select.c index e782258d0de3..de668aa9a098 100644 --- a/fs/select.c +++ b/fs/select.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include /* for STICKY_TIMEOUTS */ diff --git a/fs/seq_file.c b/fs/seq_file.c index 4023d6be939b..9f73c6b4436d 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include #include diff --git a/fs/splice.c b/fs/splice.c index 1ec0493266b3..66f4ee013bc2 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/stack.c b/fs/stack.c index 9c11519245a6..5b5388250e29 100644 --- a/fs/stack.c +++ b/fs/stack.c @@ -1,4 +1,4 @@ -#include +#include #include #include diff --git a/fs/stat.c b/fs/stat.c index 8806b8997d2e..c9dfa296b0c3 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -4,7 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ -#include +#include #include #include #include diff --git a/fs/statfs.c b/fs/statfs.c index 2aa6a22e0be2..43e6b6fe4e85 100644 --- a/fs/statfs.c +++ b/fs/statfs.c @@ -1,5 +1,5 @@ #include -#include +#include #include #include #include diff --git a/fs/super.c b/fs/super.c index 6277ec6cb60a..52bfd251b75c 100644 --- a/fs/super.c +++ b/fs/super.c @@ -20,7 +20,7 @@ * Heavily rewritten for 'one fs - one tree' dcache architecture. 
AV, Mar 2000 */ -#include +#include #include #include #include diff --git a/fs/sync.c b/fs/sync.c index f3501ef39235..0e8db939d96f 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/xattr.c b/fs/xattr.c index 82f43376c7cd..d6dfd247bb2f 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c index 8d5a506c82eb..69d06b07b169 100644 --- a/fs/xattr_acl.c +++ b/fs/xattr_acl.c @@ -5,7 +5,7 @@ * Copyright (C) 2001 by Andreas Gruenbacher, */ -#include +#include #include #include #include -- cgit From 78437368c8d9c8dc13461c689d13369f64e4522c Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 16 Jan 2012 12:10:56 +0200 Subject: UBIFS: amend recovery debugging message Print LEB and offset as well. Signed-off-by: Artem Bityutskiy --- fs/ubifs/recovery.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index ee4f43f4bb99..2a935b317232 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -679,7 +679,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE || ret == SCANNED_A_CORRUPT_NODE) { - dbg_rcvry("found corruption - %d", ret); + dbg_rcvry("found corruption (%d) at %d:%d", + ret, lnum, offs); break; } else { dbg_err("unexpected return value %d", ret); -- cgit From 16c395ca72a79115653892c0724d4928f72520fe Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 18 Jan 2012 15:46:13 +0200 Subject: UBIFS: increase dumps loglevel Most of the time we use the dumping function to dump something in case of error. We use 'KERN_DEBUG' printk level, and the drawback is that users do not see them in the console, while they see the other error messages in the console. The result is that they send bug reports which does not contain a lot of useful information. This patch changes the printk level of the dump functions to 'KERN_ERR' to correct the situation. I documented it in the MTD web site that people have to send the 'dmesg' output when submitting bug reposts - it did not help. 
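For readers wondering why the level matters: printk() output reaches the console only when the message level is numerically lower than console_loglevel (the first field of /proc/sys/kernel/printk, commonly 7), so KERN_DEBUG (level 7) dumps stay in the log buffer while KERN_ERR (level 3) appears on the console next to the other error messages. A tiny hypothetical fragment, not taken from the diff below, just to show the pattern applied throughout debug.c:

    #include <linux/kernel.h>

    static void __maybe_unused loglevel_demo(void)
    {
            /* level 7: visible via dmesg, normally filtered from the console */
            printk(KERN_DEBUG "old style dump line\n");
            /* level 3: also printed on the console with default settings */
            printk(KERN_ERR "new style dump line\n");
    }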
Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 408 +++++++++++++++++++++++++++---------------------------- 1 file changed, 204 insertions(+), 204 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index f922cbacdb96..305062886f17 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -221,15 +221,15 @@ const char *dbg_jhead(int jhead) static void dump_ch(const struct ubifs_ch *ch) { - printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic)); - printk(KERN_DEBUG "\tcrc %#x\n", le32_to_cpu(ch->crc)); - printk(KERN_DEBUG "\tnode_type %d (%s)\n", ch->node_type, + printk(KERN_ERR "\tmagic %#x\n", le32_to_cpu(ch->magic)); + printk(KERN_ERR "\tcrc %#x\n", le32_to_cpu(ch->crc)); + printk(KERN_ERR "\tnode_type %d (%s)\n", ch->node_type, dbg_ntype(ch->node_type)); - printk(KERN_DEBUG "\tgroup_type %d (%s)\n", ch->group_type, + printk(KERN_ERR "\tgroup_type %d (%s)\n", ch->group_type, dbg_gtype(ch->group_type)); - printk(KERN_DEBUG "\tsqnum %llu\n", + printk(KERN_ERR "\tsqnum %llu\n", (unsigned long long)le64_to_cpu(ch->sqnum)); - printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len)); + printk(KERN_ERR "\tlen %u\n", le32_to_cpu(ch->len)); } void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode) @@ -240,43 +240,43 @@ void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode) struct ubifs_dent_node *dent, *pdent = NULL; int count = 2; - printk(KERN_DEBUG "Dump in-memory inode:"); - printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino); - printk(KERN_DEBUG "\tsize %llu\n", + printk(KERN_ERR "Dump in-memory inode:"); + printk(KERN_ERR "\tinode %lu\n", inode->i_ino); + printk(KERN_ERR "\tsize %llu\n", (unsigned long long)i_size_read(inode)); - printk(KERN_DEBUG "\tnlink %u\n", inode->i_nlink); - printk(KERN_DEBUG "\tuid %u\n", (unsigned int)inode->i_uid); - printk(KERN_DEBUG "\tgid %u\n", (unsigned int)inode->i_gid); - printk(KERN_DEBUG "\tatime %u.%u\n", + printk(KERN_ERR "\tnlink %u\n", inode->i_nlink); + printk(KERN_ERR "\tuid %u\n", (unsigned int)inode->i_uid); + printk(KERN_ERR "\tgid %u\n", (unsigned int)inode->i_gid); + printk(KERN_ERR "\tatime %u.%u\n", (unsigned int)inode->i_atime.tv_sec, (unsigned int)inode->i_atime.tv_nsec); - printk(KERN_DEBUG "\tmtime %u.%u\n", + printk(KERN_ERR "\tmtime %u.%u\n", (unsigned int)inode->i_mtime.tv_sec, (unsigned int)inode->i_mtime.tv_nsec); - printk(KERN_DEBUG "\tctime %u.%u\n", + printk(KERN_ERR "\tctime %u.%u\n", (unsigned int)inode->i_ctime.tv_sec, (unsigned int)inode->i_ctime.tv_nsec); - printk(KERN_DEBUG "\tcreat_sqnum %llu\n", ui->creat_sqnum); - printk(KERN_DEBUG "\txattr_size %u\n", ui->xattr_size); - printk(KERN_DEBUG "\txattr_cnt %u\n", ui->xattr_cnt); - printk(KERN_DEBUG "\txattr_names %u\n", ui->xattr_names); - printk(KERN_DEBUG "\tdirty %u\n", ui->dirty); - printk(KERN_DEBUG "\txattr %u\n", ui->xattr); - printk(KERN_DEBUG "\tbulk_read %u\n", ui->xattr); - printk(KERN_DEBUG "\tsynced_i_size %llu\n", + printk(KERN_ERR "\tcreat_sqnum %llu\n", ui->creat_sqnum); + printk(KERN_ERR "\txattr_size %u\n", ui->xattr_size); + printk(KERN_ERR "\txattr_cnt %u\n", ui->xattr_cnt); + printk(KERN_ERR "\txattr_names %u\n", ui->xattr_names); + printk(KERN_ERR "\tdirty %u\n", ui->dirty); + printk(KERN_ERR "\txattr %u\n", ui->xattr); + printk(KERN_ERR "\tbulk_read %u\n", ui->xattr); + printk(KERN_ERR "\tsynced_i_size %llu\n", (unsigned long long)ui->synced_i_size); - printk(KERN_DEBUG "\tui_size %llu\n", + printk(KERN_ERR "\tui_size %llu\n", (unsigned long long)ui->ui_size); - printk(KERN_DEBUG "\tflags %d\n", 
ui->flags); - printk(KERN_DEBUG "\tcompr_type %d\n", ui->compr_type); - printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read); - printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row); - printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len); + printk(KERN_ERR "\tflags %d\n", ui->flags); + printk(KERN_ERR "\tcompr_type %d\n", ui->compr_type); + printk(KERN_ERR "\tlast_page_read %lu\n", ui->last_page_read); + printk(KERN_ERR "\tread_in_a_row %lu\n", ui->read_in_a_row); + printk(KERN_ERR "\tdata_len %d\n", ui->data_len); if (!S_ISDIR(inode->i_mode)) return; - printk(KERN_DEBUG "List of directory entries:\n"); + printk(KERN_ERR "List of directory entries:\n"); ubifs_assert(!mutex_is_locked(&c->tnc_mutex)); lowest_dent_key(c, &key, inode->i_ino); @@ -284,11 +284,11 @@ void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode) dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { if (PTR_ERR(dent) != -ENOENT) - printk(KERN_DEBUG "error %ld\n", PTR_ERR(dent)); + printk(KERN_ERR "error %ld\n", PTR_ERR(dent)); break; } - printk(KERN_DEBUG "\t%d: %s (%s)\n", + printk(KERN_ERR "\t%d: %s (%s)\n", count++, dent->name, get_dent_type(dent->type)); nm.name = dent->name; @@ -312,8 +312,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) /* If the magic is incorrect, just hexdump the first bytes */ if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) { - printk(KERN_DEBUG "Not a node, first %zu bytes:", UBIFS_CH_SZ); - print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, + printk(KERN_ERR "Not a node, first %zu bytes:", UBIFS_CH_SZ); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 32, 1, (void *)node, UBIFS_CH_SZ, 1); return; } @@ -326,7 +326,7 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) { const struct ubifs_pad_node *pad = node; - printk(KERN_DEBUG "\tpad_len %u\n", + printk(KERN_ERR "\tpad_len %u\n", le32_to_cpu(pad->pad_len)); break; } @@ -335,50 +335,50 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) const struct ubifs_sb_node *sup = node; unsigned int sup_flags = le32_to_cpu(sup->flags); - printk(KERN_DEBUG "\tkey_hash %d (%s)\n", + printk(KERN_ERR "\tkey_hash %d (%s)\n", (int)sup->key_hash, get_key_hash(sup->key_hash)); - printk(KERN_DEBUG "\tkey_fmt %d (%s)\n", + printk(KERN_ERR "\tkey_fmt %d (%s)\n", (int)sup->key_fmt, get_key_fmt(sup->key_fmt)); - printk(KERN_DEBUG "\tflags %#x\n", sup_flags); - printk(KERN_DEBUG "\t big_lpt %u\n", + printk(KERN_ERR "\tflags %#x\n", sup_flags); + printk(KERN_ERR "\t big_lpt %u\n", !!(sup_flags & UBIFS_FLG_BIGLPT)); - printk(KERN_DEBUG "\t space_fixup %u\n", + printk(KERN_ERR "\t space_fixup %u\n", !!(sup_flags & UBIFS_FLG_SPACE_FIXUP)); - printk(KERN_DEBUG "\tmin_io_size %u\n", + printk(KERN_ERR "\tmin_io_size %u\n", le32_to_cpu(sup->min_io_size)); - printk(KERN_DEBUG "\tleb_size %u\n", + printk(KERN_ERR "\tleb_size %u\n", le32_to_cpu(sup->leb_size)); - printk(KERN_DEBUG "\tleb_cnt %u\n", + printk(KERN_ERR "\tleb_cnt %u\n", le32_to_cpu(sup->leb_cnt)); - printk(KERN_DEBUG "\tmax_leb_cnt %u\n", + printk(KERN_ERR "\tmax_leb_cnt %u\n", le32_to_cpu(sup->max_leb_cnt)); - printk(KERN_DEBUG "\tmax_bud_bytes %llu\n", + printk(KERN_ERR "\tmax_bud_bytes %llu\n", (unsigned long long)le64_to_cpu(sup->max_bud_bytes)); - printk(KERN_DEBUG "\tlog_lebs %u\n", + printk(KERN_ERR "\tlog_lebs %u\n", le32_to_cpu(sup->log_lebs)); - printk(KERN_DEBUG "\tlpt_lebs %u\n", + printk(KERN_ERR "\tlpt_lebs %u\n", le32_to_cpu(sup->lpt_lebs)); - printk(KERN_DEBUG "\torph_lebs %u\n", + printk(KERN_ERR 
"\torph_lebs %u\n", le32_to_cpu(sup->orph_lebs)); - printk(KERN_DEBUG "\tjhead_cnt %u\n", + printk(KERN_ERR "\tjhead_cnt %u\n", le32_to_cpu(sup->jhead_cnt)); - printk(KERN_DEBUG "\tfanout %u\n", + printk(KERN_ERR "\tfanout %u\n", le32_to_cpu(sup->fanout)); - printk(KERN_DEBUG "\tlsave_cnt %u\n", + printk(KERN_ERR "\tlsave_cnt %u\n", le32_to_cpu(sup->lsave_cnt)); - printk(KERN_DEBUG "\tdefault_compr %u\n", + printk(KERN_ERR "\tdefault_compr %u\n", (int)le16_to_cpu(sup->default_compr)); - printk(KERN_DEBUG "\trp_size %llu\n", + printk(KERN_ERR "\trp_size %llu\n", (unsigned long long)le64_to_cpu(sup->rp_size)); - printk(KERN_DEBUG "\trp_uid %u\n", + printk(KERN_ERR "\trp_uid %u\n", le32_to_cpu(sup->rp_uid)); - printk(KERN_DEBUG "\trp_gid %u\n", + printk(KERN_ERR "\trp_gid %u\n", le32_to_cpu(sup->rp_gid)); - printk(KERN_DEBUG "\tfmt_version %u\n", + printk(KERN_ERR "\tfmt_version %u\n", le32_to_cpu(sup->fmt_version)); - printk(KERN_DEBUG "\ttime_gran %u\n", + printk(KERN_ERR "\ttime_gran %u\n", le32_to_cpu(sup->time_gran)); - printk(KERN_DEBUG "\tUUID %pUB\n", + printk(KERN_ERR "\tUUID %pUB\n", sup->uuid); break; } @@ -386,61 +386,61 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) { const struct ubifs_mst_node *mst = node; - printk(KERN_DEBUG "\thighest_inum %llu\n", + printk(KERN_ERR "\thighest_inum %llu\n", (unsigned long long)le64_to_cpu(mst->highest_inum)); - printk(KERN_DEBUG "\tcommit number %llu\n", + printk(KERN_ERR "\tcommit number %llu\n", (unsigned long long)le64_to_cpu(mst->cmt_no)); - printk(KERN_DEBUG "\tflags %#x\n", + printk(KERN_ERR "\tflags %#x\n", le32_to_cpu(mst->flags)); - printk(KERN_DEBUG "\tlog_lnum %u\n", + printk(KERN_ERR "\tlog_lnum %u\n", le32_to_cpu(mst->log_lnum)); - printk(KERN_DEBUG "\troot_lnum %u\n", + printk(KERN_ERR "\troot_lnum %u\n", le32_to_cpu(mst->root_lnum)); - printk(KERN_DEBUG "\troot_offs %u\n", + printk(KERN_ERR "\troot_offs %u\n", le32_to_cpu(mst->root_offs)); - printk(KERN_DEBUG "\troot_len %u\n", + printk(KERN_ERR "\troot_len %u\n", le32_to_cpu(mst->root_len)); - printk(KERN_DEBUG "\tgc_lnum %u\n", + printk(KERN_ERR "\tgc_lnum %u\n", le32_to_cpu(mst->gc_lnum)); - printk(KERN_DEBUG "\tihead_lnum %u\n", + printk(KERN_ERR "\tihead_lnum %u\n", le32_to_cpu(mst->ihead_lnum)); - printk(KERN_DEBUG "\tihead_offs %u\n", + printk(KERN_ERR "\tihead_offs %u\n", le32_to_cpu(mst->ihead_offs)); - printk(KERN_DEBUG "\tindex_size %llu\n", + printk(KERN_ERR "\tindex_size %llu\n", (unsigned long long)le64_to_cpu(mst->index_size)); - printk(KERN_DEBUG "\tlpt_lnum %u\n", + printk(KERN_ERR "\tlpt_lnum %u\n", le32_to_cpu(mst->lpt_lnum)); - printk(KERN_DEBUG "\tlpt_offs %u\n", + printk(KERN_ERR "\tlpt_offs %u\n", le32_to_cpu(mst->lpt_offs)); - printk(KERN_DEBUG "\tnhead_lnum %u\n", + printk(KERN_ERR "\tnhead_lnum %u\n", le32_to_cpu(mst->nhead_lnum)); - printk(KERN_DEBUG "\tnhead_offs %u\n", + printk(KERN_ERR "\tnhead_offs %u\n", le32_to_cpu(mst->nhead_offs)); - printk(KERN_DEBUG "\tltab_lnum %u\n", + printk(KERN_ERR "\tltab_lnum %u\n", le32_to_cpu(mst->ltab_lnum)); - printk(KERN_DEBUG "\tltab_offs %u\n", + printk(KERN_ERR "\tltab_offs %u\n", le32_to_cpu(mst->ltab_offs)); - printk(KERN_DEBUG "\tlsave_lnum %u\n", + printk(KERN_ERR "\tlsave_lnum %u\n", le32_to_cpu(mst->lsave_lnum)); - printk(KERN_DEBUG "\tlsave_offs %u\n", + printk(KERN_ERR "\tlsave_offs %u\n", le32_to_cpu(mst->lsave_offs)); - printk(KERN_DEBUG "\tlscan_lnum %u\n", + printk(KERN_ERR "\tlscan_lnum %u\n", le32_to_cpu(mst->lscan_lnum)); - printk(KERN_DEBUG "\tleb_cnt %u\n", + 
printk(KERN_ERR "\tleb_cnt %u\n", le32_to_cpu(mst->leb_cnt)); - printk(KERN_DEBUG "\tempty_lebs %u\n", + printk(KERN_ERR "\tempty_lebs %u\n", le32_to_cpu(mst->empty_lebs)); - printk(KERN_DEBUG "\tidx_lebs %u\n", + printk(KERN_ERR "\tidx_lebs %u\n", le32_to_cpu(mst->idx_lebs)); - printk(KERN_DEBUG "\ttotal_free %llu\n", + printk(KERN_ERR "\ttotal_free %llu\n", (unsigned long long)le64_to_cpu(mst->total_free)); - printk(KERN_DEBUG "\ttotal_dirty %llu\n", + printk(KERN_ERR "\ttotal_dirty %llu\n", (unsigned long long)le64_to_cpu(mst->total_dirty)); - printk(KERN_DEBUG "\ttotal_used %llu\n", + printk(KERN_ERR "\ttotal_used %llu\n", (unsigned long long)le64_to_cpu(mst->total_used)); - printk(KERN_DEBUG "\ttotal_dead %llu\n", + printk(KERN_ERR "\ttotal_dead %llu\n", (unsigned long long)le64_to_cpu(mst->total_dead)); - printk(KERN_DEBUG "\ttotal_dark %llu\n", + printk(KERN_ERR "\ttotal_dark %llu\n", (unsigned long long)le64_to_cpu(mst->total_dark)); break; } @@ -448,11 +448,11 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) { const struct ubifs_ref_node *ref = node; - printk(KERN_DEBUG "\tlnum %u\n", + printk(KERN_ERR "\tlnum %u\n", le32_to_cpu(ref->lnum)); - printk(KERN_DEBUG "\toffs %u\n", + printk(KERN_ERR "\toffs %u\n", le32_to_cpu(ref->offs)); - printk(KERN_DEBUG "\tjhead %u\n", + printk(KERN_ERR "\tjhead %u\n", le32_to_cpu(ref->jhead)); break; } @@ -461,40 +461,40 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) const struct ubifs_ino_node *ino = node; key_read(c, &ino->key, &key); - printk(KERN_DEBUG "\tkey %s\n", + printk(KERN_ERR "\tkey %s\n", dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); - printk(KERN_DEBUG "\tcreat_sqnum %llu\n", + printk(KERN_ERR "\tcreat_sqnum %llu\n", (unsigned long long)le64_to_cpu(ino->creat_sqnum)); - printk(KERN_DEBUG "\tsize %llu\n", + printk(KERN_ERR "\tsize %llu\n", (unsigned long long)le64_to_cpu(ino->size)); - printk(KERN_DEBUG "\tnlink %u\n", + printk(KERN_ERR "\tnlink %u\n", le32_to_cpu(ino->nlink)); - printk(KERN_DEBUG "\tatime %lld.%u\n", + printk(KERN_ERR "\tatime %lld.%u\n", (long long)le64_to_cpu(ino->atime_sec), le32_to_cpu(ino->atime_nsec)); - printk(KERN_DEBUG "\tmtime %lld.%u\n", + printk(KERN_ERR "\tmtime %lld.%u\n", (long long)le64_to_cpu(ino->mtime_sec), le32_to_cpu(ino->mtime_nsec)); - printk(KERN_DEBUG "\tctime %lld.%u\n", + printk(KERN_ERR "\tctime %lld.%u\n", (long long)le64_to_cpu(ino->ctime_sec), le32_to_cpu(ino->ctime_nsec)); - printk(KERN_DEBUG "\tuid %u\n", + printk(KERN_ERR "\tuid %u\n", le32_to_cpu(ino->uid)); - printk(KERN_DEBUG "\tgid %u\n", + printk(KERN_ERR "\tgid %u\n", le32_to_cpu(ino->gid)); - printk(KERN_DEBUG "\tmode %u\n", + printk(KERN_ERR "\tmode %u\n", le32_to_cpu(ino->mode)); - printk(KERN_DEBUG "\tflags %#x\n", + printk(KERN_ERR "\tflags %#x\n", le32_to_cpu(ino->flags)); - printk(KERN_DEBUG "\txattr_cnt %u\n", + printk(KERN_ERR "\txattr_cnt %u\n", le32_to_cpu(ino->xattr_cnt)); - printk(KERN_DEBUG "\txattr_size %u\n", + printk(KERN_ERR "\txattr_size %u\n", le32_to_cpu(ino->xattr_size)); - printk(KERN_DEBUG "\txattr_names %u\n", + printk(KERN_ERR "\txattr_names %u\n", le32_to_cpu(ino->xattr_names)); - printk(KERN_DEBUG "\tcompr_type %#x\n", + printk(KERN_ERR "\tcompr_type %#x\n", (int)le16_to_cpu(ino->compr_type)); - printk(KERN_DEBUG "\tdata len %u\n", + printk(KERN_ERR "\tdata len %u\n", le32_to_cpu(ino->data_len)); break; } @@ -505,16 +505,16 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) int nlen = le16_to_cpu(dent->nlen); key_read(c, &dent->key, 
&key); - printk(KERN_DEBUG "\tkey %s\n", + printk(KERN_ERR "\tkey %s\n", dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); - printk(KERN_DEBUG "\tinum %llu\n", + printk(KERN_ERR "\tinum %llu\n", (unsigned long long)le64_to_cpu(dent->inum)); - printk(KERN_DEBUG "\ttype %d\n", (int)dent->type); - printk(KERN_DEBUG "\tnlen %d\n", nlen); - printk(KERN_DEBUG "\tname "); + printk(KERN_ERR "\ttype %d\n", (int)dent->type); + printk(KERN_ERR "\tnlen %d\n", nlen); + printk(KERN_ERR "\tname "); if (nlen > UBIFS_MAX_NLEN) - printk(KERN_DEBUG "(bad name length, not printing, " + printk(KERN_ERR "(bad name length, not printing, " "bad or corrupted node)"); else { for (i = 0; i < nlen && dent->name[i]; i++) @@ -530,16 +530,16 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ; key_read(c, &dn->key, &key); - printk(KERN_DEBUG "\tkey %s\n", + printk(KERN_ERR "\tkey %s\n", dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); - printk(KERN_DEBUG "\tsize %u\n", + printk(KERN_ERR "\tsize %u\n", le32_to_cpu(dn->size)); - printk(KERN_DEBUG "\tcompr_typ %d\n", + printk(KERN_ERR "\tcompr_typ %d\n", (int)le16_to_cpu(dn->compr_type)); - printk(KERN_DEBUG "\tdata size %d\n", + printk(KERN_ERR "\tdata size %d\n", dlen); - printk(KERN_DEBUG "\tdata:\n"); - print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 32, 1, + printk(KERN_ERR "\tdata:\n"); + print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1, (void *)&dn->data, dlen, 0); break; } @@ -547,11 +547,11 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) { const struct ubifs_trun_node *trun = node; - printk(KERN_DEBUG "\tinum %u\n", + printk(KERN_ERR "\tinum %u\n", le32_to_cpu(trun->inum)); - printk(KERN_DEBUG "\told_size %llu\n", + printk(KERN_ERR "\told_size %llu\n", (unsigned long long)le64_to_cpu(trun->old_size)); - printk(KERN_DEBUG "\tnew_size %llu\n", + printk(KERN_ERR "\tnew_size %llu\n", (unsigned long long)le64_to_cpu(trun->new_size)); break; } @@ -560,17 +560,17 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) const struct ubifs_idx_node *idx = node; n = le16_to_cpu(idx->child_cnt); - printk(KERN_DEBUG "\tchild_cnt %d\n", n); - printk(KERN_DEBUG "\tlevel %d\n", + printk(KERN_ERR "\tchild_cnt %d\n", n); + printk(KERN_ERR "\tlevel %d\n", (int)le16_to_cpu(idx->level)); - printk(KERN_DEBUG "\tBranches:\n"); + printk(KERN_ERR "\tBranches:\n"); for (i = 0; i < n && i < c->fanout - 1; i++) { const struct ubifs_branch *br; br = ubifs_idx_branch(c, idx, i); key_read(c, &br->key, &key); - printk(KERN_DEBUG "\t%d: LEB %d:%d len %d key %s\n", + printk(KERN_ERR "\t%d: LEB %d:%d len %d key %s\n", i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs), le32_to_cpu(br->len), dbg_snprintf_key(c, &key, key_buf, @@ -584,20 +584,20 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) { const struct ubifs_orph_node *orph = node; - printk(KERN_DEBUG "\tcommit number %llu\n", + printk(KERN_ERR "\tcommit number %llu\n", (unsigned long long) le64_to_cpu(orph->cmt_no) & LLONG_MAX); - printk(KERN_DEBUG "\tlast node flag %llu\n", + printk(KERN_ERR "\tlast node flag %llu\n", (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63); n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3; - printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n); + printk(KERN_ERR "\t%d orphan inode numbers:\n", n); for (i = 0; i < n; i++) - printk(KERN_DEBUG "\t ino %llu\n", + printk(KERN_ERR "\t ino %llu\n", (unsigned long long)le64_to_cpu(orph->inos[i])); break; } default: - 
printk(KERN_DEBUG "node type %d was not recognized\n", + printk(KERN_ERR "node type %d was not recognized\n", (int)ch->node_type); } spin_unlock(&dbg_lock); @@ -606,16 +606,16 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) void dbg_dump_budget_req(const struct ubifs_budget_req *req) { spin_lock(&dbg_lock); - printk(KERN_DEBUG "Budgeting request: new_ino %d, dirtied_ino %d\n", + printk(KERN_ERR "Budgeting request: new_ino %d, dirtied_ino %d\n", req->new_ino, req->dirtied_ino); - printk(KERN_DEBUG "\tnew_ino_d %d, dirtied_ino_d %d\n", + printk(KERN_ERR "\tnew_ino_d %d, dirtied_ino_d %d\n", req->new_ino_d, req->dirtied_ino_d); - printk(KERN_DEBUG "\tnew_page %d, dirtied_page %d\n", + printk(KERN_ERR "\tnew_page %d, dirtied_page %d\n", req->new_page, req->dirtied_page); - printk(KERN_DEBUG "\tnew_dent %d, mod_dent %d\n", + printk(KERN_ERR "\tnew_dent %d, mod_dent %d\n", req->new_dent, req->mod_dent); - printk(KERN_DEBUG "\tidx_growth %d\n", req->idx_growth); - printk(KERN_DEBUG "\tdata_growth %d dd_growth %d\n", + printk(KERN_ERR "\tidx_growth %d\n", req->idx_growth); + printk(KERN_ERR "\tdata_growth %d dd_growth %d\n", req->data_growth, req->dd_growth); spin_unlock(&dbg_lock); } @@ -623,12 +623,12 @@ void dbg_dump_budget_req(const struct ubifs_budget_req *req) void dbg_dump_lstats(const struct ubifs_lp_stats *lst) { spin_lock(&dbg_lock); - printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, " + printk(KERN_ERR "(pid %d) Lprops statistics: empty_lebs %d, " "idx_lebs %d\n", current->pid, lst->empty_lebs, lst->idx_lebs); - printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, " + printk(KERN_ERR "\ttaken_empty_lebs %d, total_free %lld, " "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free, lst->total_dirty); - printk(KERN_DEBUG "\ttotal_used %lld, total_dark %lld, " + printk(KERN_ERR "\ttotal_used %lld, total_dark %lld, " "total_dead %lld\n", lst->total_used, lst->total_dark, lst->total_dead); spin_unlock(&dbg_lock); @@ -644,21 +644,21 @@ void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi) spin_lock(&c->space_lock); spin_lock(&dbg_lock); - printk(KERN_DEBUG "(pid %d) Budgeting info: data budget sum %lld, " + printk(KERN_ERR "(pid %d) Budgeting info: data budget sum %lld, " "total budget sum %lld\n", current->pid, bi->data_growth + bi->dd_growth, bi->data_growth + bi->dd_growth + bi->idx_growth); - printk(KERN_DEBUG "\tbudg_data_growth %lld, budg_dd_growth %lld, " + printk(KERN_ERR "\tbudg_data_growth %lld, budg_dd_growth %lld, " "budg_idx_growth %lld\n", bi->data_growth, bi->dd_growth, bi->idx_growth); - printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %llu, " + printk(KERN_ERR "\tmin_idx_lebs %d, old_idx_sz %llu, " "uncommitted_idx %lld\n", bi->min_idx_lebs, bi->old_idx_sz, bi->uncommitted_idx); - printk(KERN_DEBUG "\tpage_budget %d, inode_budget %d, dent_budget %d\n", + printk(KERN_ERR "\tpage_budget %d, inode_budget %d, dent_budget %d\n", bi->page_budget, bi->inode_budget, bi->dent_budget); - printk(KERN_DEBUG "\tnospace %u, nospace_rp %u\n", + printk(KERN_ERR "\tnospace %u, nospace_rp %u\n", bi->nospace, bi->nospace_rp); - printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n", + printk(KERN_ERR "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n", c->dark_wm, c->dead_wm, c->max_idx_node_sz); if (bi != &c->bi) @@ -669,38 +669,38 @@ void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi) */ goto out_unlock; - printk(KERN_DEBUG "\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n", + printk(KERN_ERR 
"\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n", c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt); - printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, " + printk(KERN_ERR "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, " "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt), atomic_long_read(&c->dirty_zn_cnt), atomic_long_read(&c->clean_zn_cnt)); - printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n", + printk(KERN_ERR "\tgc_lnum %d, ihead_lnum %d\n", c->gc_lnum, c->ihead_lnum); /* If we are in R/O mode, journal heads do not exist */ if (c->jheads) for (i = 0; i < c->jhead_cnt; i++) - printk(KERN_DEBUG "\tjhead %s\t LEB %d\n", + printk(KERN_ERR "\tjhead %s\t LEB %d\n", dbg_jhead(c->jheads[i].wbuf.jhead), c->jheads[i].wbuf.lnum); for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { bud = rb_entry(rb, struct ubifs_bud, rb); - printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum); + printk(KERN_ERR "\tbud LEB %d\n", bud->lnum); } list_for_each_entry(bud, &c->old_buds, list) - printk(KERN_DEBUG "\told bud LEB %d\n", bud->lnum); + printk(KERN_ERR "\told bud LEB %d\n", bud->lnum); list_for_each_entry(idx_gc, &c->idx_gc, list) - printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n", + printk(KERN_ERR "\tGC'ed idx LEB %d unmap %d\n", idx_gc->lnum, idx_gc->unmap); - printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state); + printk(KERN_ERR "\tcommit state %d\n", c->cmt_state); /* Print budgeting predictions */ available = ubifs_calc_available(c, c->bi.min_idx_lebs); outstanding = c->bi.data_growth + c->bi.dd_growth; free = ubifs_get_free_space_nolock(c); - printk(KERN_DEBUG "Budgeting predictions:\n"); - printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n", + printk(KERN_ERR "Budgeting predictions:\n"); + printk(KERN_ERR "\tavailable: %lld, outstanding %lld, free %lld\n", available, outstanding, free); out_unlock: spin_unlock(&dbg_lock); @@ -720,11 +720,11 @@ void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp) dark = ubifs_calc_dark(c, spc); if (lp->flags & LPROPS_INDEX) - printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d " + printk(KERN_ERR "LEB %-7d free %-8d dirty %-8d used %-8d " "free + dirty %-8d flags %#x (", lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc, lp->flags); else - printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d " + printk(KERN_ERR "LEB %-7d free %-8d dirty %-8d used %-8d " "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d " "flags %#-4x (", lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc, dark, dead, @@ -807,7 +807,7 @@ void dbg_dump_lprops(struct ubifs_info *c) struct ubifs_lprops lp; struct ubifs_lp_stats lst; - printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n", + printk(KERN_ERR "(pid %d) start dumping LEB properties\n", current->pid); ubifs_get_lp_stats(c, &lst); dbg_dump_lstats(&lst); @@ -819,7 +819,7 @@ void dbg_dump_lprops(struct ubifs_info *c) dbg_dump_lprop(c, &lp); } - printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n", + printk(KERN_ERR "(pid %d) finish dumping LEB properties\n", current->pid); } @@ -828,35 +828,35 @@ void dbg_dump_lpt_info(struct ubifs_info *c) int i; spin_lock(&dbg_lock); - printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid); - printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz); - printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz); - printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz); - printk(KERN_DEBUG "\tltab_sz: %d\n", c->ltab_sz); - printk(KERN_DEBUG "\tlsave_sz: %d\n", c->lsave_sz); - printk(KERN_DEBUG "\tbig_lpt: %d\n", c->big_lpt); - 
printk(KERN_DEBUG "\tlpt_hght: %d\n", c->lpt_hght); - printk(KERN_DEBUG "\tpnode_cnt: %d\n", c->pnode_cnt); - printk(KERN_DEBUG "\tnnode_cnt: %d\n", c->nnode_cnt); - printk(KERN_DEBUG "\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt); - printk(KERN_DEBUG "\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt); - printk(KERN_DEBUG "\tlsave_cnt: %d\n", c->lsave_cnt); - printk(KERN_DEBUG "\tspace_bits: %d\n", c->space_bits); - printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits); - printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits); - printk(KERN_DEBUG "\tlpt_spc_bits: %d\n", c->lpt_spc_bits); - printk(KERN_DEBUG "\tpcnt_bits: %d\n", c->pcnt_bits); - printk(KERN_DEBUG "\tlnum_bits: %d\n", c->lnum_bits); - printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs); - printk(KERN_DEBUG "\tLPT head is at %d:%d\n", + printk(KERN_ERR "(pid %d) dumping LPT information\n", current->pid); + printk(KERN_ERR "\tlpt_sz: %lld\n", c->lpt_sz); + printk(KERN_ERR "\tpnode_sz: %d\n", c->pnode_sz); + printk(KERN_ERR "\tnnode_sz: %d\n", c->nnode_sz); + printk(KERN_ERR "\tltab_sz: %d\n", c->ltab_sz); + printk(KERN_ERR "\tlsave_sz: %d\n", c->lsave_sz); + printk(KERN_ERR "\tbig_lpt: %d\n", c->big_lpt); + printk(KERN_ERR "\tlpt_hght: %d\n", c->lpt_hght); + printk(KERN_ERR "\tpnode_cnt: %d\n", c->pnode_cnt); + printk(KERN_ERR "\tnnode_cnt: %d\n", c->nnode_cnt); + printk(KERN_ERR "\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt); + printk(KERN_ERR "\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt); + printk(KERN_ERR "\tlsave_cnt: %d\n", c->lsave_cnt); + printk(KERN_ERR "\tspace_bits: %d\n", c->space_bits); + printk(KERN_ERR "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits); + printk(KERN_ERR "\tlpt_offs_bits: %d\n", c->lpt_offs_bits); + printk(KERN_ERR "\tlpt_spc_bits: %d\n", c->lpt_spc_bits); + printk(KERN_ERR "\tpcnt_bits: %d\n", c->pcnt_bits); + printk(KERN_ERR "\tlnum_bits: %d\n", c->lnum_bits); + printk(KERN_ERR "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs); + printk(KERN_ERR "\tLPT head is at %d:%d\n", c->nhead_lnum, c->nhead_offs); - printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", + printk(KERN_ERR "\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs); if (c->big_lpt) - printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n", + printk(KERN_ERR "\tLPT lsave is at %d:%d\n", c->lsave_lnum, c->lsave_offs); for (i = 0; i < c->lpt_lebs; i++) - printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d " + printk(KERN_ERR "\tLPT LEB %d free %d dirty %d tgc %d " "cmt %d\n", i + c->lpt_first, c->ltab[i].free, c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt); spin_unlock(&dbg_lock); @@ -867,12 +867,12 @@ void dbg_dump_sleb(const struct ubifs_info *c, { struct ubifs_scan_node *snod; - printk(KERN_DEBUG "(pid %d) start dumping scanned data from LEB %d:%d\n", + printk(KERN_ERR "(pid %d) start dumping scanned data from LEB %d:%d\n", current->pid, sleb->lnum, offs); list_for_each_entry(snod, &sleb->nodes, list) { cond_resched(); - printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", sleb->lnum, + printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", sleb->lnum, snod->offs, snod->len); dbg_dump_node(c, snod->node); } @@ -887,7 +887,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) if (dbg_is_tst_rcvry(c)) return; - printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", + printk(KERN_ERR "(pid %d) start dumping LEB %d\n", current->pid, lnum); buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); @@ -902,17 +902,17 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) goto out; } - printk(KERN_DEBUG "LEB %d has %d nodes ending at 
%d\n", lnum, + printk(KERN_ERR "LEB %d has %d nodes ending at %d\n", lnum, sleb->nodes_cnt, sleb->endpt); list_for_each_entry(snod, &sleb->nodes, list) { cond_resched(); - printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", lnum, + printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", lnum, snod->offs, snod->len); dbg_dump_node(c, snod->node); } - printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n", + printk(KERN_ERR "(pid %d) finish dumping LEB %d\n", current->pid, lnum); ubifs_scan_destroy(sleb); @@ -934,7 +934,7 @@ void dbg_dump_znode(const struct ubifs_info *c, else zbr = &c->zroot; - printk(KERN_DEBUG "znode %p, LEB %d:%d len %d parent %p iip %d level %d" + printk(KERN_ERR "znode %p, LEB %d:%d len %d parent %p iip %d level %d" " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs, zbr->len, znode->parent, znode->iip, znode->level, znode->child_cnt, znode->flags); @@ -944,18 +944,18 @@ void dbg_dump_znode(const struct ubifs_info *c, return; } - printk(KERN_DEBUG "zbranches:\n"); + printk(KERN_ERR "zbranches:\n"); for (n = 0; n < znode->child_cnt; n++) { zbr = &znode->zbranch[n]; if (znode->level > 0) - printk(KERN_DEBUG "\t%d: znode %p LEB %d:%d len %d key " + printk(KERN_ERR "\t%d: znode %p LEB %d:%d len %d key " "%s\n", n, zbr->znode, zbr->lnum, zbr->offs, zbr->len, dbg_snprintf_key(c, &zbr->key, key_buf, DBG_KEY_BUF_LEN)); else - printk(KERN_DEBUG "\t%d: LNC %p LEB %d:%d len %d key " + printk(KERN_ERR "\t%d: LNC %p LEB %d:%d len %d key " "%s\n", n, zbr->znode, zbr->lnum, zbr->offs, zbr->len, dbg_snprintf_key(c, &zbr->key, @@ -969,16 +969,16 @@ void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) { int i; - printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n", + printk(KERN_ERR "(pid %d) start dumping heap cat %d (%d elements)\n", current->pid, cat, heap->cnt); for (i = 0; i < heap->cnt; i++) { struct ubifs_lprops *lprops = heap->arr[i]; - printk(KERN_DEBUG "\t%d. LEB %d hpos %d free %d dirty %d " + printk(KERN_ERR "\t%d. 
LEB %d hpos %d free %d dirty %d " "flags %d\n", i, lprops->lnum, lprops->hpos, lprops->free, lprops->dirty, lprops->flags); } - printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid); + printk(KERN_ERR "(pid %d) finish dumping heap\n", current->pid); } void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, @@ -986,15 +986,15 @@ void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, { int i; - printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid); - printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n", + printk(KERN_ERR "(pid %d) dumping pnode:\n", current->pid); + printk(KERN_ERR "\taddress %zx parent %zx cnext %zx\n", (size_t)pnode, (size_t)parent, (size_t)pnode->cnext); - printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n", + printk(KERN_ERR "\tflags %lu iip %d level %d num %d\n", pnode->flags, iip, pnode->level, pnode->num); for (i = 0; i < UBIFS_LPT_FANOUT; i++) { struct ubifs_lprops *lp = &pnode->lprops[i]; - printk(KERN_DEBUG "\t%d: free %d dirty %d flags %d lnum %d\n", + printk(KERN_ERR "\t%d: free %d dirty %d flags %d lnum %d\n", i, lp->free, lp->dirty, lp->flags, lp->lnum); } } @@ -1004,20 +1004,20 @@ void dbg_dump_tnc(struct ubifs_info *c) struct ubifs_znode *znode; int level; - printk(KERN_DEBUG "\n"); - printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid); + printk(KERN_ERR "\n"); + printk(KERN_ERR "(pid %d) start dumping TNC tree\n", current->pid); znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL); level = znode->level; - printk(KERN_DEBUG "== Level %d ==\n", level); + printk(KERN_ERR "== Level %d ==\n", level); while (znode) { if (level != znode->level) { level = znode->level; - printk(KERN_DEBUG "== Level %d ==\n", level); + printk(KERN_ERR "== Level %d ==\n", level); } dbg_dump_znode(c, znode); znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); } - printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid); + printk(KERN_ERR "(pid %d) finish dumping TNC tree\n", current->pid); } static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode, -- cgit From b06283c7df35b5a49ab141ed38e0280821379096 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 18 Jan 2012 16:06:17 +0200 Subject: UBIFS: make the dbg_lock spinlock static Remove the usage of the 'dbg_lock' spinlock from 'dbg_err()' and make it static. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 2 +- fs/ubifs/debug.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 305062886f17..1934084e2088 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -36,7 +36,7 @@ #ifdef CONFIG_UBIFS_FS_DEBUG -DEFINE_SPINLOCK(dbg_lock); +static DEFINE_SPINLOCK(dbg_lock); static const char *get_key_fmt(int fmt) { diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index ad1a6fee6010..9f717655df18 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -164,9 +164,7 @@ struct ubifs_global_debug_info { #define dbg_dump_stack() dump_stack() #define dbg_err(fmt, ...) do { \ - spin_lock(&dbg_lock); \ ubifs_err(fmt, ##__VA_ARGS__); \ - spin_unlock(&dbg_lock); \ } while (0) #define ubifs_dbg_msg(type, fmt, ...) \ @@ -217,7 +215,6 @@ struct ubifs_global_debug_info { /* Additional recovery messages */ #define dbg_rcvry(fmt, ...) 
ubifs_dbg_msg("rcvry", fmt, ##__VA_ARGS__) -extern spinlock_t dbg_lock; extern struct ubifs_global_debug_info ubifs_dbg; static inline int dbg_is_chk_gen(const struct ubifs_info *c) -- cgit From c43be1085f8480ab36d5c8c76a08e67bdf6d2e18 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 7 Feb 2012 10:58:51 +0200 Subject: UBIFS: do not use inc_link when i_nlink is zero This patch changes the 'i_nlink' counter handling in 'ubifs_unlink()', 'ubifs_rmdir()' and 'ubifs_rename()'. In these function 'i_nlink' may become 0, and if 'ubifs_jnl_update()' failed, we would use 'inc_nlink()' to restore the previous 'i_nlink' value, which is incorrect from the VFS point of view and would cause a 'WARN_ON()' (see 'inc_nlink() implementation). This patches saves the previous 'i_nlink' value in a local variable and uses it at the error path instead of calling 'inc_nlink()'. We do this only for the inodes where 'i_nlink' may potentially become zero. This change has been requested by Al Viro . Signed-off-by: Artem Bityutskiy --- fs/ubifs/dir.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index d6fe1c79f18b..ec9f1870ab7f 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -566,6 +566,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry) int sz_change = CALC_DENT_SIZE(dentry->d_name.len); int err, budgeted = 1; struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 }; + unsigned int saved_nlink = inode->i_nlink; /* * Budget request settings: deletion direntry, deletion inode (+1 for @@ -613,7 +614,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry) out_cancel: dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; - inc_nlink(inode); + set_nlink(inode, saved_nlink); unlock_2_inodes(dir, inode); if (budgeted) ubifs_release_budget(c, &req); @@ -704,8 +705,7 @@ out_cancel: dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; inc_nlink(dir); - inc_nlink(inode); - inc_nlink(inode); + set_nlink(inode, 2); unlock_2_inodes(dir, inode); if (budgeted) ubifs_release_budget(c, &req); @@ -977,6 +977,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, struct ubifs_budget_req ino_req = { .dirtied_ino = 1, .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; struct timespec time; + unsigned int saved_nlink; /* * Budget request settings: deletion direntry, new direntry, removing @@ -1059,13 +1060,14 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, if (unlink) { /* * Directories cannot have hard-links, so if this is a - * directory, decrement its @i_nlink twice because an empty - * directory has @i_nlink 2. + * directory, just clear @i_nlink. 
*/ + saved_nlink = new_inode->i_nlink; if (is_dir) + clear_nlink(new_inode); + else drop_nlink(new_inode); new_inode->i_ctime = time; - drop_nlink(new_inode); } else { new_dir->i_size += new_sz; ubifs_inode(new_dir)->ui_size = new_dir->i_size; @@ -1102,9 +1104,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, out_cancel: if (unlink) { - if (is_dir) - inc_nlink(new_inode); - inc_nlink(new_inode); + set_nlink(new_inode, saved_nlink); } else { new_dir->i_size -= new_sz; ubifs_inode(new_dir)->ui_size = new_dir->i_size; -- cgit From 7ca58bad6904c721678cf2171dc78b97cc03d500 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 29 Feb 2012 18:43:01 +0200 Subject: UBIFS: kill CUR_MAX_KEY_LEN macro It is useless and confusing and may make people believe they may just change it, which is not true, because this will also change the on-flash format. Signed-off-by: Artem Bityutskiy --- fs/ubifs/ubifs.h | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 12e94774aa88..93d59aceaaef 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -84,9 +84,6 @@ #define INUM_WARN_WATERMARK 0xFFF00000 #define INUM_WATERMARK 0xFFFFFF00 -/* Largest key size supported in this implementation */ -#define CUR_MAX_KEY_LEN UBIFS_SK_LEN - /* Maximum number of entries in each LPT (LEB category) heap */ #define LPT_HEAP_SZ 256 @@ -277,10 +274,10 @@ struct ubifs_old_idx { /* The below union makes it easier to deal with keys */ union ubifs_key { - uint8_t u8[CUR_MAX_KEY_LEN]; - uint32_t u32[CUR_MAX_KEY_LEN/4]; - uint64_t u64[CUR_MAX_KEY_LEN/8]; - __le32 j32[CUR_MAX_KEY_LEN/4]; + uint8_t u8[UBIFS_SK_LEN]; + uint32_t u32[UBIFS_SK_LEN/4]; + uint64_t u64[UBIFS_SK_LEN/8]; + __le32 j32[UBIFS_SK_LEN/4]; }; /** -- cgit From 18535a7e019e6fb9cdcefd43007bc72a67bf99ee Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:28:16 +0000 Subject: xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota The is no good reason to have these two separate, and for the next change we would need the full struct xfs_dquot in xfs_qm_export_dquot, so better just fold the code now instead of changing it spuriously. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_qm_syscalls.c | 118 ++++++++++++++++++++--------------------------- 1 file changed, 49 insertions(+), 69 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 711a86e39ff0..2b85641f33c8 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -47,9 +47,6 @@ STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, uint); STATIC uint xfs_qm_export_flags(uint); STATIC uint xfs_qm_export_qtype_flags(uint); -STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *, - fs_disk_quota_t *); - /* * Turn off quota accounting and/or enforcement for all udquots and/or @@ -635,42 +632,6 @@ xfs_qm_scall_setqlim( return error; } -int -xfs_qm_scall_getquota( - xfs_mount_t *mp, - xfs_dqid_t id, - uint type, - fs_disk_quota_t *out) -{ - xfs_dquot_t *dqp; - int error; - - /* - * Try to get the dquot. We don't want it allocated on disk, so - * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't - * exist, we'll get ENOENT back. - */ - if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) { - return (error); - } - - /* - * If everything's NULL, this dquot doesn't quite exist as far as - * our utility programs are concerned. 
- */ - if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { - xfs_qm_dqput(dqp); - return XFS_ERROR(ENOENT); - } - /* - * Convert the disk dquot to the exportable format - */ - xfs_qm_export_dquot(mp, &dqp->q_core, out); - xfs_qm_dqput(dqp); - return (error ? XFS_ERROR(EFAULT) : 0); -} - - STATIC int xfs_qm_log_quotaoff_end( xfs_mount_t *mp, @@ -759,50 +720,66 @@ error0: } -/* - * Translate an internal style on-disk-dquot to the exportable format. - * The main differences are that the counters/limits are all in Basic - * Blocks (BBs) instead of the internal FSBs, and all on-disk data has - * to be converted to the native endianness. - */ -STATIC void -xfs_qm_export_dquot( - xfs_mount_t *mp, - xfs_disk_dquot_t *src, +int +xfs_qm_scall_getquota( + struct xfs_mount *mp, + xfs_dqid_t id, + uint type, struct fs_disk_quota *dst) { + struct xfs_dquot *dqp; + int error; + + /* + * Try to get the dquot. We don't want it allocated on disk, so + * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't + * exist, we'll get ENOENT back. + */ + error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp); + if (error) + return error; + + /* + * If everything's NULL, this dquot doesn't quite exist as far as + * our utility programs are concerned. + */ + if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { + error = XFS_ERROR(ENOENT); + goto out_put; + } + memset(dst, 0, sizeof(*dst)); - dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */ - dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags); - dst->d_id = be32_to_cpu(src->d_id); + dst->d_version = FS_DQUOT_VERSION; + dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); + dst->d_id = be32_to_cpu(dqp->q_core.d_id); dst->d_blk_hardlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit)); + XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); dst->d_blk_softlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit)); - dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit); - dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit); - dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount)); - dst->d_icount = be64_to_cpu(src->d_icount); - dst->d_btimer = be32_to_cpu(src->d_btimer); - dst->d_itimer = be32_to_cpu(src->d_itimer); - dst->d_iwarns = be16_to_cpu(src->d_iwarns); - dst->d_bwarns = be16_to_cpu(src->d_bwarns); + XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); + dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); + dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); + dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_bcount)); + dst->d_icount = be64_to_cpu(dqp->q_core.d_icount); + dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); + dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); + dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); + dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); dst->d_rtb_hardlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit)); + XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); dst->d_rtb_softlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit)); - dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount)); - dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer); - dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns); + XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); + dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtbcount)); + dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); + dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); /* * Internally, we don't reset all the timers when quota 
enforcement * gets turned off. No need to confuse the user level code, * so return zeroes in that case. */ - if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) || + if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) || (!XFS_IS_OQUOTA_ENFORCED(mp) && - (src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { + (dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { dst->d_btimer = 0; dst->d_itimer = 0; dst->d_rtbtimer = 0; @@ -823,6 +800,9 @@ xfs_qm_export_dquot( } } #endif +out_put: + xfs_qm_dqput(dqp); + return error; } STATIC uint -- cgit From 89605011915aec5c6194e53a9f02631d68aea6bc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:28:17 +0000 Subject: xfs: include reservations in quota reporting Report all quota usage including the currently pending reservations. This avoids the need to flush delalloc space before gathering quota information, and matches quota enforcement, which already takes the reservations into account. This fixes xfstests 270. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_qm_bhv.c | 24 ++++++++++++------------ fs/xfs/xfs_qm_syscalls.c | 6 +++--- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index a0a829addca9..e4e37877f867 100644 --- a/fs/xfs/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c @@ -40,28 +40,28 @@ STATIC void xfs_fill_statvfs_from_dquot( struct kstatfs *statp, - xfs_disk_dquot_t *dp) + struct xfs_dquot *dqp) { __uint64_t limit; - limit = dp->d_blk_softlimit ? - be64_to_cpu(dp->d_blk_softlimit) : - be64_to_cpu(dp->d_blk_hardlimit); + limit = dqp->q_core.d_blk_softlimit ? + be64_to_cpu(dqp->q_core.d_blk_softlimit) : + be64_to_cpu(dqp->q_core.d_blk_hardlimit); if (limit && statp->f_blocks > limit) { statp->f_blocks = limit; statp->f_bfree = statp->f_bavail = - (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? - (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; + (statp->f_blocks > dqp->q_res_bcount) ? + (statp->f_blocks - dqp->q_res_bcount) : 0; } - limit = dp->d_ino_softlimit ? - be64_to_cpu(dp->d_ino_softlimit) : - be64_to_cpu(dp->d_ino_hardlimit); + limit = dqp->q_core.d_ino_softlimit ? + be64_to_cpu(dqp->q_core.d_ino_softlimit) : + be64_to_cpu(dqp->q_core.d_ino_hardlimit); if (limit && statp->f_files > limit) { statp->f_files = limit; statp->f_ffree = - (statp->f_files > be64_to_cpu(dp->d_icount)) ? - (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; + (statp->f_files > dqp->q_res_icount) ? 
+ (statp->f_ffree - dqp->q_res_icount) : 0; } } @@ -82,7 +82,7 @@ xfs_qm_statvfs( xfs_dquot_t *dqp; if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) { - xfs_fill_statvfs_from_dquot(statp, &dqp->q_core); + xfs_fill_statvfs_from_dquot(statp, dqp); xfs_qm_dqput(dqp); } } diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 2b85641f33c8..b9ac268a2d7c 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -758,8 +758,8 @@ xfs_qm_scall_getquota( XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); - dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_bcount)); - dst->d_icount = be64_to_cpu(dqp->q_core.d_icount); + dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); + dst->d_icount = dqp->q_res_icount; dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); @@ -768,7 +768,7 @@ xfs_qm_scall_getquota( XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); dst->d_rtb_softlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); - dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtbcount)); + dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); -- cgit From 4b217ed9e30f94b6e8e5e262020ef0ceab6113af Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Feb 2012 02:28:18 +0000 Subject: quota: make Q_XQUOTASYNC a noop Now that XFS takes quota reservations into account there is no need to flush anything before reporting quotas - in addition to being fully transactional all quota information is also 100% coherent with the rest of the filesystem now. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/quota/quota.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 7898cd688a00..7a9bedeb1d57 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -282,10 +282,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, case Q_XGETQUOTA: return quota_getxquota(sb, type, id, addr); case Q_XQUOTASYNC: - /* caller already holds s_umount */ if (sb->s_flags & MS_RDONLY) return -EROFS; - writeback_inodes_sb(sb, WB_REASON_SYNC); + /* XFS quotas are fully coherent now, making this call a noop */ return 0; default: return -EINVAL; -- cgit From a08fd280b58836c910a4af10eee2066e358d16db Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Wed, 29 Feb 2012 15:15:14 -0500 Subject: GFS2: Unlock rindex mutex on glock error This patch fixes an error path in function gfs2_rindex_update that leaves the rindex mutex held.
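The bug has the familiar lock-leak shape: gfs2_rindex_update() takes sd_rindex_mutex and, when acquiring the rindex glock fails, returned straight out without dropping the mutex; the patch below reroutes that failure through an unlock label. As a hedged illustration only (a userspace analogue with invented names, not the real GFS2 code), the single-exit pattern being restored looks like this:

/*
 * Userspace sketch of the single-exit locking pattern: once the mutex is
 * held, every early failure must leave through a label that drops it.
 * Illustrative only -- not the actual gfs2_rindex_update().
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rindex_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for gfs2_glock_nq_init(); fails on demand for the demo. */
static int acquire_glock(int should_fail)
{
	return should_fail ? -EIO : 0;
}

static int rindex_update(int glock_fails)
{
	int error;

	pthread_mutex_lock(&rindex_mutex);
	error = acquire_glock(glock_fails);
	if (error)
		goto out_unlock;	/* a bare "return error" here would leak the mutex */

	/* ... refresh the resource index under the lock ... */

out_unlock:
	pthread_mutex_unlock(&rindex_mutex);
	return error;
}

int main(void)
{
	printf("ok path: %d, error path: %d\n", rindex_update(0), rindex_update(1));
	return 0;
}

Built with "cc -pthread", both calls return with the mutex released, which is the invariant the patch restores.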
Signed-off-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/rgrp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 1446b4e0ac73..e09370eec590 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -699,13 +699,14 @@ int gfs2_rindex_update(struct gfs2_sbd *sdp) if (!gfs2_glock_is_locked_by_me(gl)) { error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); if (error) - return error; + goto out_unlock; unlock_required = 1; } if (!sdp->sd_rindex_uptodate) error = gfs2_ri_update(ip); if (unlock_required) gfs2_glock_dq_uninit(&ri_gh); +out_unlock: mutex_unlock(&sdp->sd_rindex_mutex); } -- cgit From a59c30acfbe701dc991f4f84abce27818120a2ac Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 1 Mar 2012 11:17:47 -0500 Subject: NFSv4.1: Get rid of redundant NFS4CLNT_LAYOUTRECALL tests The NFS4CLNT_LAYOUTRECALL tests in pnfs_layout_process and pnfs_update_layout are redundant. In the case of a bulk layout recall, we're always testing for the NFS_LAYOUT_BULK_RECALL flag anyway. In the case of a file or segment recall, the call to pnfs_set_layout_stateid() updates the layout_header 'barrier' sequence id, which triggers the test in pnfs_layoutgets_blocked() and is less race-prone than NFS4CLNT_LAYOUTRECALL anyway. Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index a53421604bc4..402efc2f5b70 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -966,8 +966,7 @@ pnfs_update_layout(struct inode *ino, } /* Do we even need to bother with this? */ - if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) || - test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { + if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { dprintk("%s matches recall, use MDS\n", __func__); goto out_unlock; } @@ -1032,7 +1031,6 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) struct nfs4_layoutget_res *res = &lgp->res; struct pnfs_layout_segment *lseg; struct inode *ino = lo->plh_inode; - struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; int status = 0; /* Inject layout blob into I/O device driver */ @@ -1048,8 +1046,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) } spin_lock(&ino->i_lock); - if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) || - test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { + if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { dprintk("%s forget reply due to recall\n", __func__); goto out_forget_reply; } -- cgit From 0cb3284b535bd5eacc287632b55150c8e5d9edc7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 1 Mar 2012 11:17:50 -0500 Subject: NFSv4.1: Get rid of NFS4CLNT_LAYOUTRECALL The NFS4CLNT_LAYOUTRECALL bit is a long-term impediment to scalability. It basically stops all other recalls by a given server once any layout recall is requested. If the recall is for a different file, then we don't care. If the recall applies to the same file, then we're in one of two situations: Either we are in the case of a replay of an existing request, in which case the session is supposed to deal with matters, or we are dealing with a completely different request, in which case we should just try to process it.
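What this patch deletes is easy to model: do_callback_layoutrecall() initialized its result to NFS4ERR_DELAY and bailed out whenever test_and_set_bit() found the per-client NFS4CLNT_LAYOUTRECALL bit already set, so one in-flight recall made the client answer DELAY to recalls for every other file on that server. A toy userspace sketch of that bottleneck follows; the helper names and the standalone framing are invented for illustration and are not the pnfs code itself:

#include <stdatomic.h>
#include <stdio.h>

#define NFS4ERR_DELAY 10008	/* protocol status the old code fell back to */

/* One flag for the whole client: the serialization point being removed. */
static atomic_flag client_layoutrecall = ATOMIC_FLAG_INIT;

static int handle_layoutrecall(const char *file)
{
	/* Old behaviour: any recall already in progress blocks this one,
	 * even if it targets a completely unrelated file. */
	if (atomic_flag_test_and_set(&client_layoutrecall))
		return NFS4ERR_DELAY;

	printf("draining layouts for %s\n", file);

	atomic_flag_clear(&client_layoutrecall);
	return 0;
}

int main(void)
{
	/* Run concurrently, the recall for "B" could be bounced with DELAY
	 * purely because the recall for "A" has not finished yet. */
	printf("%d %d\n", handle_layoutrecall("A"), handle_layoutrecall("B"));
	return 0;
}

With the bit gone, serialization falls back to per-layout state (the NFS_LAYOUT_BULK_RECALL flag and the stateid barrier), so only recalls that actually touch the same layout contend.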
Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 6 +----- fs/nfs/nfs4_fs.h | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index f71978d107d0..0e0865e38065 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -233,17 +233,13 @@ static u32 initiate_bulk_draining(struct nfs_client *clp, static u32 do_callback_layoutrecall(struct nfs_client *clp, struct cb_layoutrecallargs *args) { - u32 res = NFS4ERR_DELAY; + u32 res; dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type); - if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state)) - goto out; if (args->cbl_recall_type == RETURN_FILE) res = initiate_file_draining(clp, args); else res = initiate_bulk_draining(clp, args); - clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state); -out: dprintk("%s returning %i\n", __func__, res); return res; diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index b133b50dec9a..19079ec8252c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -20,7 +20,6 @@ enum nfs4_client_state { NFS4CLNT_RECLAIM_REBOOT, NFS4CLNT_RECLAIM_NOGRACE, NFS4CLNT_DELEGRETURN, - NFS4CLNT_LAYOUTRECALL, NFS4CLNT_SESSION_RESET, NFS4CLNT_RECALL_SLOT, NFS4CLNT_LEASE_CONFIRM, -- cgit From 57e62324e469e092ecc6c94a7a86fe4bd6ac5172 Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Fri, 24 Feb 2012 14:14:51 -0500 Subject: NFS: Store the legacy idmapper result in the keyring This patch removes the old hashmap-based caching and instead uses a "request key actor" to place an upcall to the legacy idmapper rather than going through /sbin/request-key. This will only be used as a fallback if /etc/request-key.conf isn't configured to use nfsidmap. Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 554 +++++++++++++++++---------------------------------------- 1 file changed, 166 insertions(+), 388 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index d4db3b6f4b8e..f72c1fc074e1 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -34,42 +34,28 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include -#include -#include -#include +#include +#include #include +#include +#include #include -#include -#include -#include #include +#include #include #include -#include -#include #include - -/* include files needed by legacy idmapper */ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "nfs4_fs.h" + #include "internal.h" #include "netns.h" #define NFS_UINT_MAXLEN 11 -#define IDMAP_HASH_SZ 128 /* Default cache timeout is 10 minutes */ -unsigned int nfs_idmap_cache_timeout = 600 * HZ; +unsigned int nfs_idmap_cache_timeout = 600; const struct cred *id_resolver_cache; +struct key_type key_type_id_resolver_legacy; /** @@ -261,8 +247,10 @@ static ssize_t nfs_idmap_get_desc(const char *name, size_t namelen, return desclen; } -static ssize_t nfs_idmap_request_key(const char *name, size_t namelen, - const char *type, void *data, size_t data_size) +static ssize_t nfs_idmap_request_key(struct key_type *key_type, + const char *name, size_t namelen, + const char *type, void *data, + size_t data_size, struct idmap *idmap) { const struct cred *saved_cred; struct key *rkey; @@ -275,8 +263,12 @@ static ssize_t nfs_idmap_request_key(const char *name, size_t namelen, goto out; saved_cred = override_creds(id_resolver_cache); - rkey = request_key(&key_type_id_resolver, desc, ""); + if (idmap) + rkey = request_key_with_auxdata(key_type, desc, "", 0, idmap); + else + rkey = request_key(&key_type_id_resolver, desc, ""); revert_creds(saved_cred); + kfree(desc); if (IS_ERR(rkey)) { ret = PTR_ERR(rkey); @@ -309,31 +301,46 @@ out: return ret; } +static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, + const char *type, void *data, + size_t data_size, struct idmap *idmap) +{ + ssize_t ret = nfs_idmap_request_key(&key_type_id_resolver, + name, namelen, type, data, + data_size, NULL); + if (ret < 0) { + ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, + name, namelen, type, data, + data_size, idmap); + } + return ret; +} /* ID -> Name */ -static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf, size_t buflen) +static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf, + size_t buflen, struct idmap *idmap) { char id_str[NFS_UINT_MAXLEN]; int id_len; ssize_t ret; id_len = snprintf(id_str, sizeof(id_str), "%u", id); - ret = nfs_idmap_request_key(id_str, id_len, type, buf, buflen); + ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap); if (ret < 0) return -EINVAL; return ret; } /* Name -> ID */ -static int nfs_idmap_lookup_id(const char *name, size_t namelen, - const char *type, __u32 *id) +static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *type, + __u32 *id, struct idmap *idmap) { char id_str[NFS_UINT_MAXLEN]; long id_long; ssize_t data_size; int ret = 0; - data_size = nfs_idmap_request_key(name, namelen, type, id_str, NFS_UINT_MAXLEN); + data_size = nfs_idmap_get_key(name, namelen, type, id_str, NFS_UINT_MAXLEN, idmap); if (data_size <= 0) { ret = -EINVAL; } else { @@ -344,54 +351,47 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, } /* idmap classic begins here */ -static int param_set_idmap_timeout(const char *val, struct kernel_param *kp) -{ - char *endp; - int num = simple_strtol(val, &endp, 0); - int jif = num * HZ; - if (endp == val || *endp || num < 0 || jif < num) - return -EINVAL; - *((int *)kp->arg) = jif; - return 0; -} - -module_param_call(idmap_cache_timeout, param_set_idmap_timeout, param_get_int, - &nfs_idmap_cache_timeout, 
0644); +module_param(nfs_idmap_cache_timeout, int, 0644); -struct idmap_hashent { - unsigned long ih_expires; - __u32 ih_id; - size_t ih_namelen; - const char *ih_name; +struct idmap { + struct rpc_pipe *idmap_pipe; + struct key_construction *idmap_key_cons; }; -struct idmap_hashtable { - __u8 h_type; - struct idmap_hashent *h_entries; +enum { + Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err }; -struct idmap { - struct rpc_pipe *idmap_pipe; - wait_queue_head_t idmap_wq; - struct idmap_msg idmap_im; - struct mutex idmap_lock; /* Serializes upcalls */ - struct mutex idmap_im_lock; /* Protects the hashtable */ - struct idmap_hashtable idmap_user_hash; - struct idmap_hashtable idmap_group_hash; +static const match_table_t nfs_idmap_tokens = { + { Opt_find_uid, "uid:%s" }, + { Opt_find_gid, "gid:%s" }, + { Opt_find_user, "user:%s" }, + { Opt_find_group, "group:%s" }, + { Opt_find_err, NULL } }; +static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); static ssize_t idmap_pipe_downcall(struct file *, const char __user *, size_t); static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *); -static unsigned int fnvhash32(const void *, size_t); - static const struct rpc_pipe_ops idmap_upcall_ops = { .upcall = rpc_pipe_generic_upcall, .downcall = idmap_pipe_downcall, .destroy_msg = idmap_pipe_destroy_msg, }; +struct key_type key_type_id_resolver_legacy = { + .name = "id_resolver", + .instantiate = user_instantiate, + .match = user_match, + .revoke = user_revoke, + .destroy = user_destroy, + .describe = user_describe, + .read = user_read, + .request_key = nfs_idmap_legacy_upcall, +}; + static void __nfs_idmap_unregister(struct rpc_pipe *pipe) { if (pipe->dentry) @@ -468,38 +468,11 @@ nfs_idmap_new(struct nfs_client *clp) return error; } idmap->idmap_pipe = pipe; - mutex_init(&idmap->idmap_lock); - mutex_init(&idmap->idmap_im_lock); - init_waitqueue_head(&idmap->idmap_wq); - idmap->idmap_user_hash.h_type = IDMAP_TYPE_USER; - idmap->idmap_group_hash.h_type = IDMAP_TYPE_GROUP; clp->cl_idmap = idmap; return 0; } -static void -idmap_alloc_hashtable(struct idmap_hashtable *h) -{ - if (h->h_entries != NULL) - return; - h->h_entries = kcalloc(IDMAP_HASH_SZ, - sizeof(*h->h_entries), - GFP_KERNEL); -} - -static void -idmap_free_hashtable(struct idmap_hashtable *h) -{ - int i; - - if (h->h_entries == NULL) - return; - for (i = 0; i < IDMAP_HASH_SZ; i++) - kfree(h->h_entries[i].ih_name); - kfree(h->h_entries); -} - void nfs_idmap_delete(struct nfs_client *clp) { @@ -510,8 +483,6 @@ nfs_idmap_delete(struct nfs_client *clp) nfs_idmap_unregister(clp, idmap->idmap_pipe); rpc_destroy_pipe_data(idmap->idmap_pipe); clp->cl_idmap = NULL; - idmap_free_hashtable(&idmap->idmap_user_hash); - idmap_free_hashtable(&idmap->idmap_group_hash); kfree(idmap); } @@ -617,222 +588,107 @@ void nfs_idmap_quit(void) nfs_idmap_quit_keyring(); } -/* - * Helper routines for manipulating the hashtable - */ -static inline struct idmap_hashent * -idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len) -{ - if (h->h_entries == NULL) - return NULL; - return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ]; -} - -static struct idmap_hashent * -idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len) +static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im, + struct rpc_pipe_msg *msg) { - struct idmap_hashent *he = idmap_name_hash(h, name, len); + substring_t substr; + int token, ret; - if (he == NULL) - return NULL; - if (he->ih_namelen != 
len || memcmp(he->ih_name, name, len) != 0) - return NULL; - if (time_after(jiffies, he->ih_expires)) - return NULL; - return he; -} + memset(im, 0, sizeof(*im)); + memset(msg, 0, sizeof(*msg)); -static inline struct idmap_hashent * -idmap_id_hash(struct idmap_hashtable* h, __u32 id) -{ - if (h->h_entries == NULL) - return NULL; - return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ]; -} + im->im_type = IDMAP_TYPE_GROUP; + token = match_token(desc, nfs_idmap_tokens, &substr); -static struct idmap_hashent * -idmap_lookup_id(struct idmap_hashtable *h, __u32 id) -{ - struct idmap_hashent *he = idmap_id_hash(h, id); + switch (token) { + case Opt_find_uid: + im->im_type = IDMAP_TYPE_USER; + case Opt_find_gid: + im->im_conv = IDMAP_CONV_NAMETOID; + ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ); + break; - if (he == NULL) - return NULL; - if (he->ih_id != id || he->ih_namelen == 0) - return NULL; - if (time_after(jiffies, he->ih_expires)) - return NULL; - return he; -} + case Opt_find_user: + im->im_type = IDMAP_TYPE_USER; + case Opt_find_group: + im->im_conv = IDMAP_CONV_IDTONAME; + ret = match_int(&substr, &im->im_id); + break; -/* - * Routines for allocating new entries in the hashtable. - * For now, we just have 1 entry per bucket, so it's all - * pretty trivial. - */ -static inline struct idmap_hashent * -idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len) -{ - idmap_alloc_hashtable(h); - return idmap_name_hash(h, name, len); -} + default: + ret = -EINVAL; + goto out; + } -static inline struct idmap_hashent * -idmap_alloc_id(struct idmap_hashtable *h, __u32 id) -{ - idmap_alloc_hashtable(h); - return idmap_id_hash(h, id); -} + msg->data = im; + msg->len = sizeof(struct idmap_msg); -static void -idmap_update_entry(struct idmap_hashent *he, const char *name, - size_t namelen, __u32 id) -{ - char *str = kmalloc(namelen + 1, GFP_KERNEL); - if (str == NULL) - return; - kfree(he->ih_name); - he->ih_id = id; - memcpy(str, name, namelen); - str[namelen] = '\0'; - he->ih_name = str; - he->ih_namelen = namelen; - he->ih_expires = jiffies + nfs_idmap_cache_timeout; +out: + return ret; } -/* - * Name -> ID - */ -static int -nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h, - const char *name, size_t namelen, __u32 *id) +static int nfs_idmap_legacy_upcall(struct key_construction *cons, + const char *op, + void *aux) { - struct rpc_pipe_msg msg; + struct rpc_pipe_msg *msg; struct idmap_msg *im; - struct idmap_hashent *he; - DECLARE_WAITQUEUE(wq, current); - int ret = -EIO; - - im = &idmap->idmap_im; - - /* - * String sanity checks - * Note that the userland daemon expects NUL terminated strings - */ - for (;;) { - if (namelen == 0) - return -EINVAL; - if (name[namelen-1] != '\0') - break; - namelen--; - } - if (namelen >= IDMAP_NAMESZ) - return -EINVAL; - - mutex_lock(&idmap->idmap_lock); - mutex_lock(&idmap->idmap_im_lock); + struct idmap *idmap = (struct idmap *)aux; + struct key *key = cons->key; + int ret; - he = idmap_lookup_name(h, name, namelen); - if (he != NULL) { - *id = he->ih_id; - ret = 0; - goto out; + /* msg and im are freed in idmap_pipe_destroy_msg */ + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (IS_ERR(msg)) { + ret = PTR_ERR(msg); + goto out0; } - memset(im, 0, sizeof(*im)); - memcpy(im->im_name, name, namelen); - - im->im_type = h->h_type; - im->im_conv = IDMAP_CONV_NAMETOID; - - memset(&msg, 0, sizeof(msg)); - msg.data = im; - msg.len = sizeof(*im); - - add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(idmap->idmap_pipe, 
&msg) < 0) { - remove_wait_queue(&idmap->idmap_wq, &wq); - goto out; + im = kmalloc(sizeof(*im), GFP_KERNEL); + if (IS_ERR(im)) { + ret = PTR_ERR(im); + goto out1; } - set_current_state(TASK_UNINTERRUPTIBLE); - mutex_unlock(&idmap->idmap_im_lock); - schedule(); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&idmap->idmap_wq, &wq); - mutex_lock(&idmap->idmap_im_lock); + ret = nfs_idmap_prepare_message(key->description, im, msg); + if (ret < 0) + goto out2; - if (im->im_status & IDMAP_STATUS_SUCCESS) { - *id = im->im_id; - ret = 0; - } + idmap->idmap_key_cons = cons; - out: - memset(im, 0, sizeof(*im)); - mutex_unlock(&idmap->idmap_im_lock); - mutex_unlock(&idmap->idmap_lock); + return rpc_queue_upcall(idmap->idmap_pipe, msg); + +out2: + kfree(im); +out1: + kfree(msg); +out0: + complete_request_key(cons, ret); return ret; } -/* - * ID -> Name - */ -static int -nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h, - __u32 id, char *name) +static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data) { - struct rpc_pipe_msg msg; - struct idmap_msg *im; - struct idmap_hashent *he; - DECLARE_WAITQUEUE(wq, current); - int ret = -EIO; - unsigned int len; - - im = &idmap->idmap_im; - - mutex_lock(&idmap->idmap_lock); - mutex_lock(&idmap->idmap_im_lock); - - he = idmap_lookup_id(h, id); - if (he) { - memcpy(name, he->ih_name, he->ih_namelen); - ret = he->ih_namelen; - goto out; - } - - memset(im, 0, sizeof(*im)); - im->im_type = h->h_type; - im->im_conv = IDMAP_CONV_IDTONAME; - im->im_id = id; - - memset(&msg, 0, sizeof(msg)); - msg.data = im; - msg.len = sizeof(*im); - - add_wait_queue(&idmap->idmap_wq, &wq); + return key_instantiate_and_link(key, data, strlen(data) + 1, + id_resolver_cache->thread_keyring, + authkey); +} - if (rpc_queue_upcall(idmap->idmap_pipe, &msg) < 0) { - remove_wait_queue(&idmap->idmap_wq, &wq); - goto out; - } +static int nfs_idmap_read_message(struct idmap_msg *im, struct key *key, struct key *authkey) +{ + char id_str[NFS_UINT_MAXLEN]; + int ret = -EINVAL; - set_current_state(TASK_UNINTERRUPTIBLE); - mutex_unlock(&idmap->idmap_im_lock); - schedule(); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&idmap->idmap_wq, &wq); - mutex_lock(&idmap->idmap_im_lock); - - if (im->im_status & IDMAP_STATUS_SUCCESS) { - if ((len = strnlen(im->im_name, IDMAP_NAMESZ)) == 0) - goto out; - memcpy(name, im->im_name, len); - ret = len; + switch (im->im_conv) { + case IDMAP_CONV_NAMETOID: + sprintf(id_str, "%d", im->im_id); + ret = nfs_idmap_instantiate(key, authkey, id_str); + break; + case IDMAP_CONV_IDTONAME: + ret = nfs_idmap_instantiate(key, authkey, im->im_name); + break; } - out: - memset(im, 0, sizeof(*im)); - mutex_unlock(&idmap->idmap_im_lock); - mutex_unlock(&idmap->idmap_lock); return ret; } @@ -841,141 +697,69 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) { struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode); struct idmap *idmap = (struct idmap *)rpci->private; - struct idmap_msg im_in, *im = &idmap->idmap_im; - struct idmap_hashtable *h; - struct idmap_hashent *he = NULL; + struct key_construction *cons = idmap->idmap_key_cons; + struct idmap_msg im; size_t namelen_in; int ret; - if (mlen != sizeof(im_in)) - return -ENOSPC; - - if (copy_from_user(&im_in, src, mlen) != 0) - return -EFAULT; - - mutex_lock(&idmap->idmap_im_lock); - - ret = mlen; - im->im_status = im_in.im_status; - /* If we got an error, terminate now, and wake up pending upcalls */ - if (!(im_in.im_status & 
IDMAP_STATUS_SUCCESS)) { - wake_up(&idmap->idmap_wq); + if (mlen != sizeof(im)) { + ret = -ENOSPC; goto out; } - /* Sanity checking of strings */ - ret = -EINVAL; - namelen_in = strnlen(im_in.im_name, IDMAP_NAMESZ); - if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) + if (copy_from_user(&im, src, mlen) != 0) { + ret = -EFAULT; goto out; + } - switch (im_in.im_type) { - case IDMAP_TYPE_USER: - h = &idmap->idmap_user_hash; - break; - case IDMAP_TYPE_GROUP: - h = &idmap->idmap_group_hash; - break; - default: - goto out; + if (!(im.im_status & IDMAP_STATUS_SUCCESS)) { + ret = mlen; + complete_request_key(idmap->idmap_key_cons, -ENOKEY); + goto out_incomplete; } - switch (im_in.im_conv) { - case IDMAP_CONV_IDTONAME: - /* Did we match the current upcall? */ - if (im->im_conv == IDMAP_CONV_IDTONAME - && im->im_type == im_in.im_type - && im->im_id == im_in.im_id) { - /* Yes: copy string, including the terminating '\0' */ - memcpy(im->im_name, im_in.im_name, namelen_in); - im->im_name[namelen_in] = '\0'; - wake_up(&idmap->idmap_wq); - } - he = idmap_alloc_id(h, im_in.im_id); - break; - case IDMAP_CONV_NAMETOID: - /* Did we match the current upcall? */ - if (im->im_conv == IDMAP_CONV_NAMETOID - && im->im_type == im_in.im_type - && strnlen(im->im_name, IDMAP_NAMESZ) == namelen_in - && memcmp(im->im_name, im_in.im_name, namelen_in) == 0) { - im->im_id = im_in.im_id; - wake_up(&idmap->idmap_wq); - } - he = idmap_alloc_name(h, im_in.im_name, namelen_in); - break; - default: + namelen_in = strnlen(im.im_name, IDMAP_NAMESZ); + if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) { + ret = -EINVAL; goto out; } - /* If the entry is valid, also copy it to the cache */ - if (he != NULL) - idmap_update_entry(he, im_in.im_name, namelen_in, im_in.im_id); - ret = mlen; + ret = nfs_idmap_read_message(&im, cons->key, cons->authkey); + if (ret >= 0) { + key_set_timeout(cons->key, nfs_idmap_cache_timeout); + ret = mlen; + } + out: - mutex_unlock(&idmap->idmap_im_lock); + complete_request_key(idmap->idmap_key_cons, ret); +out_incomplete: return ret; } static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg) { - struct idmap_msg *im = msg->data; - struct idmap *idmap = container_of(im, struct idmap, idmap_im); - - if (msg->errno >= 0) - return; - mutex_lock(&idmap->idmap_im_lock); - im->im_status = IDMAP_STATUS_LOOKUPFAIL; - wake_up(&idmap->idmap_wq); - mutex_unlock(&idmap->idmap_im_lock); -} - -/* - * Fowler/Noll/Vo hash - * http://www.isthe.com/chongo/tech/comp/fnv/ - */ - -#define FNV_P_32 ((unsigned int)0x01000193) /* 16777619 */ -#define FNV_1_32 ((unsigned int)0x811c9dc5) /* 2166136261 */ - -static unsigned int fnvhash32(const void *buf, size_t buflen) -{ - const unsigned char *p, *end = (const unsigned char *)buf + buflen; - unsigned int hash = FNV_1_32; - - for (p = buf; p < end; p++) { - hash *= FNV_P_32; - hash ^= (unsigned int)*p; - } - - return hash; + /* Free memory allocated in nfs_idmap_legacy_upcall() */ + kfree(msg->data); + kfree(msg); } int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) { struct idmap *idmap = server->nfs_client->cl_idmap; - int ret = -EINVAL; if (nfs_map_string_to_numeric(name, namelen, uid)) return 0; - ret = nfs_idmap_lookup_id(name, namelen, "uid", uid); - if (ret < 0) - ret = nfs_idmap_id(idmap, &idmap->idmap_user_hash, name, namelen, uid); - return ret; + return nfs_idmap_lookup_id(name, namelen, "uid", uid, idmap); } int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *gid) { 
struct idmap *idmap = server->nfs_client->cl_idmap; - int ret = -EINVAL; if (nfs_map_string_to_numeric(name, namelen, gid)) return 0; - ret = nfs_idmap_lookup_id(name, namelen, "gid", gid); - if (ret < 0) - ret = nfs_idmap_id(idmap, &idmap->idmap_group_hash, name, namelen, gid); - return ret; + return nfs_idmap_lookup_id(name, namelen, "gid", gid, idmap); } int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen) @@ -983,11 +767,8 @@ int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, s struct idmap *idmap = server->nfs_client->cl_idmap; int ret = -EINVAL; - if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) { - ret = nfs_idmap_lookup_name(uid, "user", buf, buflen); - if (ret < 0) - ret = nfs_idmap_name(idmap, &idmap->idmap_user_hash, uid, buf); - } + if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) + ret = nfs_idmap_lookup_name(uid, "user", buf, buflen, idmap); if (ret < 0) ret = nfs_map_numeric_to_string(uid, buf, buflen); return ret; @@ -997,11 +778,8 @@ int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, struct idmap *idmap = server->nfs_client->cl_idmap; int ret = -EINVAL; - if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) { - ret = nfs_idmap_lookup_name(gid, "group", buf, buflen); - if (ret < 0) - ret = nfs_idmap_name(idmap, &idmap->idmap_group_hash, gid, buf); - } + if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) + ret = nfs_idmap_lookup_name(gid, "group", buf, buflen, idmap); if (ret < 0) ret = nfs_map_numeric_to_string(gid, buf, buflen); return ret; -- cgit From db8ac8ba871ae7b97118cfb2913b4986867f09a7 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 17 Feb 2012 15:20:24 -0500 Subject: NFSv4: Send implementation id with exchange_id Send the nfs implementation id in EXCHANGE_ID requests unless the module parameter nfs.send_implementation_id is 0. This adds a CONFIG variable for the nii_domain that defaults to "kernel.org". Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/Kconfig | 12 ++++++++++++ fs/nfs/nfs4xdr.c | 41 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 51 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index ee86cfcd6c33..7bce64c7060e 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -99,6 +99,18 @@ config PNFS_OBJLAYOUT depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD default m +config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN + string "NFSv4.1 Implementation ID Domain" + depends on NFS_V4_1 + default "kernel.org" + help + This option defines the domain portion of the implementation ID that + may be sent in the NFS exchange_id operation. The value must be in + the format of a DNS domain name and should be set to the DNS domain + name of the distribution. + If the NFS client is unchanged from the upstream kernel, this + option should be set to the default "kernel.org". 
+ config ROOT_NFS bool "Root file system on NFS" depends on NFS_FS=y && IP_PNP diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ae7834366712..d824aedb1237 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -44,6 +44,8 @@ #include #include #include +#include +#include #include #include #include @@ -271,7 +273,12 @@ static int nfs4_stat_to_errno(int); 1 /* flags */ + \ 1 /* spa_how */ + \ 0 /* SP4_NONE (for now) */ + \ - 1 /* zero implemetation id array */) + 1 /* implementation id array of size 1 */ + \ + 1 /* nii_domain */ + \ + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \ + 1 /* nii_name */ + \ + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \ + 3 /* nii_date */) #define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \ 2 /* eir_clientid */ + \ 1 /* eir_sequenceid */ + \ @@ -838,6 +845,12 @@ const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH + XDR_UNIT); #endif /* CONFIG_NFS_V4_1 */ +static unsigned short send_implementation_id = 1; + +module_param(send_implementation_id, ushort, 0644); +MODULE_PARM_DESC(send_implementation_id, + "Send implementation ID with NFSv4.1 exchange_id"); + static const umode_t nfs_type2fmt[] = { [NF4BAD] = 0, [NF4REG] = S_IFREG, @@ -1766,6 +1779,8 @@ static void encode_exchange_id(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; + char impl_name[NFS4_OPAQUE_LIMIT]; + int len = 0; p = reserve_space(xdr, 4 + sizeof(args->verifier->data)); *p++ = cpu_to_be32(OP_EXCHANGE_ID); @@ -1776,7 +1791,29 @@ static void encode_exchange_id(struct xdr_stream *xdr, p = reserve_space(xdr, 12); *p++ = cpu_to_be32(args->flags); *p++ = cpu_to_be32(0); /* zero length state_protect4_a */ - *p = cpu_to_be32(0); /* zero length implementation id array */ + + if (send_implementation_id && + sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN) > 1 && + sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN) + <= NFS4_OPAQUE_LIMIT + 1) + len = snprintf(impl_name, sizeof(impl_name), "%s %s %s %s", + utsname()->sysname, utsname()->release, + utsname()->version, utsname()->machine); + + if (len > 0) { + *p = cpu_to_be32(1); /* implementation id array length=1 */ + + encode_string(xdr, + sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN) - 1, + CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN); + encode_string(xdr, len, impl_name); + /* just send zeros for nii_date - the date is in nii_name */ + p = reserve_space(xdr, 12); + p = xdr_encode_hyper(p, 0); + *p = cpu_to_be32(0); + } else + *p = cpu_to_be32(0); /* implementation id array length=0 */ + hdr->nops++; hdr->replen += decode_exchange_id_maxsz; } -- cgit From 9edbd953f8aeabf49b89c7c29ff9e31560775b27 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 17 Feb 2012 15:20:25 -0500 Subject: NFSv4: fix server_scope memory leak server_scope would never be freed if nfs4_check_cl_exchange_flags() returned non-zero Signed-off-by: Weston Andros Adamson Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 87c584dd88b1..20c3bb06763a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4945,8 +4945,10 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) clp->cl_rpcclient->cl_auth->au_flavor); res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); - if (unlikely(!res.server_scope)) - return -ENOMEM; + if (unlikely(!res.server_scope)) { + status = -ENOMEM; + goto out; + } status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if 
(!status) @@ -4963,12 +4965,13 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) clp->server_scope = NULL; } - if (!clp->server_scope) + if (!clp->server_scope) { clp->server_scope = res.server_scope; - else - kfree(res.server_scope); + goto out; + } } - + kfree(res.server_scope); +out: dprintk("<-- %s status= %d\n", __func__, status); return status; } -- cgit From 7d2ed9ac22bc6bf0d34e8fd291a5295f373b384e Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 17 Feb 2012 15:20:26 -0500 Subject: NFSv4: parse and display server implementation ids Shows the implementation ids in /proc/self/mountstats. This doesn't break the nfs-utils mountstats tool. Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 1 + fs/nfs/nfs4proc.c | 21 +++++++++++++++++++++ fs/nfs/nfs4xdr.c | 42 +++++++++++++++++++++++++++++++++++++----- fs/nfs/super.c | 8 ++++++++ 4 files changed, 67 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 592b5583aa3a..1506adf4d4ed 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -304,6 +304,7 @@ static void nfs_free_client(struct nfs_client *clp) put_net(clp->net); kfree(clp->cl_hostname); kfree(clp->server_scope); + kfree(clp->impl_id); kfree(clp); dprintk("<-- nfs_free_client()\n"); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 20c3bb06763a..90a17cc3ebc9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4950,10 +4950,23 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) goto out; } + res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL); + if (unlikely(!res.impl_id)) { + status = -ENOMEM; + goto out_server_scope; + } + status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (!status) status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); + if (!status) { + /* use the most recent implementation id */ + kfree(clp->impl_id); + clp->impl_id = res.impl_id; + } else + kfree(res.impl_id); + if (!status) { if (clp->server_scope && !nfs41_same_server_scope(clp->server_scope, @@ -4970,8 +4983,16 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) goto out; } } + +out_server_scope: kfree(res.server_scope); out: + if (clp->impl_id) + dprintk("%s: Server Implementation ID: " + "domain: %s, name: %s, date: %llu,%u\n", + __func__, clp->impl_id->domain, clp->impl_id->name, + clp->impl_id->date.seconds, + clp->impl_id->date.nseconds); dprintk("<-- %s status= %d\n", __func__, status); return status; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index d824aedb1237..b7c04339fdc1 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -291,7 +291,11 @@ static int nfs4_stat_to_errno(int); /* eir_server_scope<> */ \ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \ 1 /* eir_server_impl_id array length */ + \ - 0 /* ignored eir_server_impl_id contents */) + 1 /* nii_domain */ + \ + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \ + 1 /* nii_name */ + \ + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \ + 3 /* nii_date */) #define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */) #define decode_channel_attrs_maxsz (6 + \ 1 /* ca_rdma_ird.len */ + \ @@ -5256,6 +5260,7 @@ static int decode_exchange_id(struct xdr_stream *xdr, char *dummy_str; int status; struct nfs_client *clp = res->client; + uint32_t impl_id_count; status = decode_op_hdr(xdr, OP_EXCHANGE_ID); if (status) @@ -5297,11 +5302,38 @@ static int decode_exchange_id(struct xdr_stream *xdr, memcpy(res->server_scope->server_scope, dummy_str, dummy); 
res->server_scope->server_scope_sz = dummy; - /* Throw away Implementation id array */ - status = decode_opaque_inline(xdr, &dummy, &dummy_str); - if (unlikely(status)) - return status; + /* Implementation Id */ + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + goto out_overflow; + impl_id_count = be32_to_cpup(p++); + if (impl_id_count) { + /* nii_domain */ + status = decode_opaque_inline(xdr, &dummy, &dummy_str); + if (unlikely(status)) + return status; + if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) + return -EIO; + memcpy(res->impl_id->domain, dummy_str, dummy); + + /* nii_name */ + status = decode_opaque_inline(xdr, &dummy, &dummy_str); + if (unlikely(status)) + return status; + if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) + return -EIO; + memcpy(res->impl_id->name, dummy_str, dummy); + + /* nii_date */ + p = xdr_inline_decode(xdr, 12); + if (unlikely(!p)) + goto out_overflow; + p = xdr_decode_hyper(p, &res->impl_id->date.seconds); + res->impl_id->date.nseconds = be32_to_cpup(p); + + /* if there's more than one entry, ignore the rest */ + } return 0; out_overflow: print_overflow_msg(__func__, xdr); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 6708f3044eb0..8154accd1168 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -809,6 +809,14 @@ static int nfs_show_stats(struct seq_file *m, struct dentry *root) seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ); + if (nfss->nfs_client && nfss->nfs_client->impl_id) { + struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id; + seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s'," + "date='%llu,%u'", + impl_id->name, impl_id->domain, + impl_id->date.seconds, impl_id->date.nseconds); + } + seq_printf(m, "\n\tcaps:\t"); seq_printf(m, "caps=0x%x", nfss->caps); seq_printf(m, ",wtmult=%u", nfss->wtmult); -- cgit From fe316bf2d5847bc5dd975668671a7b1067603bc7 Mon Sep 17 00:00:00 2001 From: Jun'ichi Nomura Date: Fri, 2 Mar 2012 10:38:33 +0100 Subject: block: Fix NULL pointer dereference in sd_revalidate_disk Since 2.6.39 (1196f8b), when a driver returns -ENOMEDIUM for open(), __blkdev_get() calls rescan_partitions() to remove in-kernel partition structures and raise KOBJ_CHANGE uevent. However it ends up calling driver's revalidate_disk without open and could cause oops. In the case of SCSI: process A process B ---------------------------------------------- sys_open __blkdev_get sd_open returns -ENOMEDIUM scsi_remove_device rescan_partitions sd_revalidate_disk Oopses are reported here: http://marc.info/?l=linux-scsi&m=132388619710052 This patch separates the partition invalidation from rescan_partitions() and use it for -ENOMEDIUM case. Reported-by: Huajun Li Signed-off-by: Jun'ichi Nomura Acked-by: Tejun Heo Cc: stable@kernel.org Signed-off-by: Jens Axboe --- fs/block_dev.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 0e575d1304b4..5e9f198f7712 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1183,8 +1183,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) * The latter is necessary to prevent ghost * partitions on a removed medium. 
*/ - if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) - rescan_partitions(disk, bdev); + if (bdev->bd_invalidated) { + if (!ret) + rescan_partitions(disk, bdev); + else if (ret == -ENOMEDIUM) + invalidate_partitions(disk, bdev); + } if (ret) goto out_clear; } else { @@ -1214,8 +1218,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (bdev->bd_disk->fops->open) ret = bdev->bd_disk->fops->open(bdev, mode); /* the same as first opener case, read comment there */ - if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) - rescan_partitions(bdev->bd_disk, bdev); + if (bdev->bd_invalidated) { + if (!ret) + rescan_partitions(bdev->bd_disk, bdev); + else if (ret == -ENOMEDIUM) + invalidate_partitions(bdev->bd_disk, bdev); + } if (ret) goto out_unlock_bdev; } -- cgit From 2e5b5b3a1b7768c89fbfeca18e75f8ee377e924c Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Thu, 23 Feb 2012 17:41:27 +0900 Subject: sched: Clean up parameter passing of proc_sched_autogroup_set_nice() Pass nice as a value to proc_sched_autogroup_set_nice(). No side effect is expected, and the variable err will be overwritten with the return value. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/4F45FBB7.5090607@ct.jp.nec.com Signed-off-by: Ingo Molnar --- fs/proc/base.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index d4548dd49b02..965d4bde3a3b 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1310,8 +1310,7 @@ sched_autogroup_write(struct file *file, const char __user *buf, if (!p) return -ESRCH; - err = nice; - err = proc_sched_autogroup_set_nice(p, &err); + err = proc_sched_autogroup_set_nice(p, nice); if (err) count = err; -- cgit From 0d71b058092fc98cfef8e8f6d913180a10a55397 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 2 Mar 2012 13:59:49 -0500 Subject: NFS: Extend the -overs= mount option to allow 4.x minorversions Allow the user to mount an NFSv4.0 or NFSv4.1 partition using a standard syntax of '-overs=4.0', or '-overs=4.1' rather than the more cumbersome '-overs=4,minorversion=1'. See also the earlier patch by Dros Adamson, which added the Linux-specific syntax '-ov4.0', '-ov4.1'. 
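For illustration: with this change, '-o vers=4.1' (and the earlier Linux-specific '-o v4.1' spelling) hands the text "4.1" to a string parser, while the older '-o vers=4,minorversion=1' form sets the same two fields through two separate options. The sketch below is a minimal, hypothetical example of the match_table_t/match_token() pattern from <linux/parser.h> that the new code relies on; it is not the patch's own code, which follows in the diff as nfs_vers_tokens and nfs_parse_version_string().

/* Hypothetical sketch of the <linux/parser.h> pattern; the real table and
 * switch statement live in fs/nfs/super.c. */
#include <linux/parser.h>
#include <linux/errno.h>

enum { Opt_sketch_4_1, Opt_sketch_err };

static match_table_t sketch_vers_tokens = {
	{ Opt_sketch_4_1, "4.1" },	/* the text after "vers=" */
	{ Opt_sketch_err, NULL }
};

static int sketch_parse_vers(char *string, unsigned int *version,
			     unsigned int *minorversion)
{
	substring_t args[MAX_OPT_ARGS];

	switch (match_token(string, sketch_vers_tokens, args)) {
	case Opt_sketch_4_1:
		*version = 4;
		*minorversion = 1;
		return 0;
	default:
		return -EINVAL;
	}
}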
Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 84 +++++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 8154accd1168..ab58bb9b6115 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -98,10 +98,10 @@ enum { Opt_namelen, Opt_mountport, Opt_mountvers, - Opt_nfsvers, Opt_minorversion, /* Mount options that take string arguments */ + Opt_nfsvers, Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost, Opt_addr, Opt_mountaddr, Opt_clientaddr, Opt_lookupcache, @@ -166,9 +166,10 @@ static const match_table_t nfs_mount_option_tokens = { { Opt_namelen, "namlen=%s" }, { Opt_mountport, "mountport=%s" }, { Opt_mountvers, "mountvers=%s" }, + { Opt_minorversion, "minorversion=%s" }, + { Opt_nfsvers, "nfsvers=%s" }, { Opt_nfsvers, "vers=%s" }, - { Opt_minorversion, "minorversion=%s" }, { Opt_sec, "sec=%s" }, { Opt_proto, "proto=%s" }, @@ -262,6 +263,22 @@ static match_table_t nfs_local_lock_tokens = { { Opt_local_lock_err, NULL } }; +enum { + Opt_vers_2, Opt_vers_3, Opt_vers_4, Opt_vers_4_0, + Opt_vers_4_1, + + Opt_vers_err +}; + +static match_table_t nfs_vers_tokens = { + { Opt_vers_2, "2" }, + { Opt_vers_3, "3" }, + { Opt_vers_4, "4" }, + { Opt_vers_4_0, "4.0" }, + { Opt_vers_4_1, "4.1" }, + + { Opt_vers_err, NULL } +}; static void nfs_umount_begin(struct super_block *); static int nfs_statfs(struct dentry *, struct kstatfs *); @@ -1064,6 +1081,40 @@ static int nfs_parse_security_flavors(char *value, return 1; } +static int nfs_parse_version_string(char *string, + struct nfs_parsed_mount_data *mnt, + substring_t *args) +{ + mnt->flags &= ~NFS_MOUNT_VER3; + switch (match_token(string, nfs_vers_tokens, args)) { + case Opt_vers_2: + mnt->version = 2; + break; + case Opt_vers_3: + mnt->flags |= NFS_MOUNT_VER3; + mnt->version = 3; + break; + case Opt_vers_4: + /* Backward compatibility option. In future, + * the mount program should always supply + * a NFSv4 minor version number. + */ + mnt->version = 4; + break; + case Opt_vers_4_0: + mnt->version = 4; + mnt->minorversion = 0; + break; + case Opt_vers_4_1: + mnt->version = 4; + mnt->minorversion = 1; + break; + default: + return 0; + } + return 1; +} + static int nfs_get_option_str(substring_t args[], char **option) { kfree(*option); @@ -1317,26 +1368,6 @@ static int nfs_parse_mount_options(char *raw, goto out_invalid_value; mnt->mount_server.version = option; break; - case Opt_nfsvers: - if (nfs_get_option_ul(args, &option)) - goto out_invalid_value; - switch (option) { - case NFS2_VERSION: - mnt->flags &= ~NFS_MOUNT_VER3; - mnt->version = 2; - break; - case NFS3_VERSION: - mnt->flags |= NFS_MOUNT_VER3; - mnt->version = 3; - break; - case NFS4_VERSION: - mnt->flags &= ~NFS_MOUNT_VER3; - mnt->version = 4; - break; - default: - goto out_invalid_value; - } - break; case Opt_minorversion: if (nfs_get_option_ul(args, &option)) goto out_invalid_value; @@ -1348,6 +1379,15 @@ static int nfs_parse_mount_options(char *raw, /* * options that take text values */ + case Opt_nfsvers: + string = match_strdup(args); + if (string == NULL) + goto out_nomem; + rc = nfs_parse_version_string(string, mnt, args); + kfree(string); + if (!rc) + goto out_invalid_value; + break; case Opt_sec: string = match_strdup(args); if (string == NULL) -- cgit From 7bbceb6f2bdda67054bc66035a9543623e539126 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 2 Mar 2012 14:00:20 -0500 Subject: NFS: Ensure we display the minor version correctly in /proc/mounts etc. 
The 'minorversion' mount option is now deprecated, so we need to display the minor version number in the 'vers=' format. Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ab58bb9b6115..7f0c93f8afe3 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -640,7 +640,6 @@ static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss, struct nfs_client *clp = nfss->nfs_client; seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr); - seq_printf(m, ",minorversion=%u", clp->cl_minorversion); } #else static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss, @@ -649,6 +648,15 @@ static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss, } #endif +static void nfs_show_nfs_version(struct seq_file *m, + unsigned int version, + unsigned int minorversion) +{ + seq_printf(m, ",vers=%u", version); + if (version == 4) + seq_printf(m, ".%u", minorversion); +} + /* * Describe the mount options in force on this server representation */ @@ -676,7 +684,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, u32 version = clp->rpc_ops->version; int local_flock, local_fcntl; - seq_printf(m, ",vers=%u", version); + nfs_show_nfs_version(m, version, clp->cl_minorversion); seq_printf(m, ",rsize=%u", nfss->rsize); seq_printf(m, ",wsize=%u", nfss->wsize); if (nfss->bsize != 0) -- cgit From 3862279a5fcf44d0c68fa54a507a5bcd2ab4f0b7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 2 Mar 2012 14:06:39 -0500 Subject: NFS: Consolidate the parsing of the '-ov4.x' and '-overs=4.x' mount options Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 31 +++---------------------------- 1 file changed, 3 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 7f0c93f8afe3..f4ccdae6a0cf 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -80,7 +80,6 @@ enum { Opt_cto, Opt_nocto, Opt_ac, Opt_noac, Opt_lock, Opt_nolock, - Opt_v2, Opt_v3, Opt_v4, Opt_v4_0, Opt_v4_1, Opt_udp, Opt_tcp, Opt_rdma, Opt_acl, Opt_noacl, Opt_rdirplus, Opt_nordirplus, @@ -133,11 +132,6 @@ static const match_table_t nfs_mount_option_tokens = { { Opt_noac, "noac" }, { Opt_lock, "lock" }, { Opt_nolock, "nolock" }, - { Opt_v2, "v2" }, - { Opt_v3, "v3" }, - { Opt_v4, "v4" }, - { Opt_v4_0, "v4.0" }, - { Opt_v4_1, "v4.1" }, { Opt_udp, "udp" }, { Opt_tcp, "tcp" }, { Opt_rdma, "rdma" }, @@ -183,6 +177,9 @@ static const match_table_t nfs_mount_option_tokens = { { Opt_fscache_uniq, "fsc=%s" }, { Opt_local_lock, "local_lock=%s" }, + /* The following needs to be listed after all other options */ + { Opt_nfsvers, "v%s" }, + { Opt_err, NULL } }; @@ -1228,28 +1225,6 @@ static int nfs_parse_mount_options(char *raw, mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL); break; - case Opt_v2: - mnt->flags &= ~NFS_MOUNT_VER3; - mnt->version = 2; - break; - case Opt_v3: - mnt->flags |= NFS_MOUNT_VER3; - mnt->version = 3; - break; - case Opt_v4: - mnt->flags &= ~NFS_MOUNT_VER3; - mnt->version = 4; - break; - case Opt_v4_0: - mnt->flags &= ~NFS_MOUNT_VER3; - mnt->version = 4; - mnt->minorversion = 0; - break; - case Opt_v4_1: - mnt->flags &= ~NFS_MOUNT_VER3; - mnt->version = 4; - mnt->minorversion = 1; - break; case Opt_udp: mnt->flags &= ~NFS_MOUNT_TCP; mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP; -- cgit From 88b8e133c46792d264c991065c2c395d0b3b5482 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 
2012 17:00:23 -0500 Subject: NFS: Make nfs_cache_array.size a signed integer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Eliminate a number of implicit type casts in comparisons, and these compiler warnings: fs/nfs/dir.c: In function ‘nfs_readdir_clear_array’: fs/nfs/dir.c:264:16: warning: comparison between signed and unsigned integer expressions [-Wsign-compare] fs/nfs/dir.c: In function ‘nfs_readdir_search_for_cookie’: fs/nfs/dir.c:352:16: warning: comparison between signed and unsigned integer expressions [-Wsign-compare] fs/nfs/dir.c: In function ‘nfs_do_filldir’: fs/nfs/dir.c:769:38: warning: comparison between signed and unsigned integer expressions [-Wsign-compare] fs/nfs/dir.c:780:9: warning: comparison between signed and unsigned integer expressions [-Wsign-compare] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index bb132a88f4e8..9952170271b2 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -207,7 +207,7 @@ struct nfs_cache_array_entry { }; struct nfs_cache_array { - unsigned int size; + int size; int eof_index; u64 last_cookie; struct nfs_cache_array_entry array[0]; -- cgit From 02a2976c9180a7dcc43bc46cf69bd3687a9d7ea6 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 2012 17:00:31 -0500 Subject: NFS: Clean up debugging in decode_pathname() I noticed recently that decode_attr_fs_locations() is not generating very pretty debugging output. The pathname components each appear on a separate line of output, though that does not appear to be the intended display behavior. The preferred way to generate continued lines of output on the console is to use pr_cont(). Note that incoming pathname4 components contain a string that is not necessarily NUL-terminated. I did actually see some trailing garbage on the console. In addition to correcting the line continuation problem, add a string precision format specifier to ensure that each component string is displayed properly, and that vsnprintf() does not Oops. Someone pointed out that allowing incoming network data to possibly generate a console line of unbounded length may not be such a good idea. Since this output will rarely be enabled, and there is a hard upper bound (NFS4_PATHNAME_MAXCOMPONENTS) in our implementation, this is probably not a major concern. It might be useful to additionally sanity-check the length of each incoming component, however. RFC 3530bis15 does not suggest a maximum number of UTF-8 characters per component for either the pathname4 or component4 types. However, we could invent one that is appropriate for our implementation. Another possibility is to scrap all of this and print these pathnames in upper layers after a reasonable amount of sanity checking in the XDR layer. This would give us an opportunity to allocate a full buffer so that the whole pathname would be output via a single dprintk. Introduced by commit 7aaa0b3b: "NFSv4: convert fs-locations-components to conform to RFC3530," (June 9, 2006). 
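Two printk details carry this patch: pr_cont() appends to the console line opened by the previous printk instead of starting a new one, and a "%.*s" precision argument bounds how many bytes vsnprintf() reads, which is what makes it safe to print an XDR component4 that is not NUL-terminated. A minimal illustration under those assumptions, not the patch's code:

/* Illustration only: print "pathname4: /a /b" on a single console line from
 * length-delimited, non-NUL-terminated component data. */
#include <linux/printk.h>

static void sketch_print_components(void)
{
	const char raw[] = { 'a', 'X', 'b', 'Y' };	/* no '\0' after 'a' or 'b' */

	pr_info("pathname4:");			/* no newline: the line stays open */
	pr_cont(" /%.*s", 1, &raw[0]);		/* precision 1 stops before 'X' */
	pr_cont(" /%.*s\n", 1, &raw[2]);	/* precision 1 stops before 'Y' */
}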
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index b7c04339fdc1..b5c5212cd184 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3555,16 +3555,17 @@ static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path) n = be32_to_cpup(p); if (n == 0) goto root_path; - dprintk("path "); + dprintk("pathname4: "); path->ncomponents = 0; while (path->ncomponents < n) { struct nfs4_string *component = &path->components[path->ncomponents]; status = decode_opaque_inline(xdr, &component->len, &component->data); if (unlikely(status != 0)) goto out_eio; - if (path->ncomponents != n) - dprintk("/"); - dprintk("%s", component->data); + if (unlikely(nfs_debug & NFSDBG_XDR)) + pr_cont("%s%.*s ", + (path->ncomponents != n ? "/ " : ""), + component->len, component->data); if (path->ncomponents < NFS4_PATHNAME_MAXCOMPONENTS) path->ncomponents++; else { @@ -3573,14 +3574,13 @@ static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path) } } out: - dprintk("\n"); return status; root_path: /* a root pathname is sent as a zero component4 */ path->ncomponents = 1; path->components[0].len=0; path->components[0].data=NULL; - dprintk("path /\n"); + dprintk("pathname4: /\n"); goto out; out_eio: dprintk(" status %d", status); @@ -3606,7 +3606,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st /* Ignore borken servers that return unrequested attrs */ if (unlikely(res == NULL)) goto out; - dprintk("%s: fsroot ", __func__); + dprintk("%s: fsroot:\n", __func__); status = decode_pathname(xdr, &res->fs_path); if (unlikely(status != 0)) goto out; @@ -3627,7 +3627,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st m = be32_to_cpup(p); loc->nservers = 0; - dprintk("%s: servers ", __func__); + dprintk("%s: servers:\n", __func__); while (loc->nservers < m) { struct nfs4_string *server = &loc->servers[loc->nservers]; status = decode_opaque_inline(xdr, &server->len, &server->data); -- cgit From a3ca5651cb5eebe2e56e510bbf5cd60abc301c9f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 2012 17:00:40 -0500 Subject: NFS: Add debugging messages to NFSv4's CLOSE procedure CLOSE is new with NFSv4. Sometimes it's important to know the timing of this operation compared to things like lease renewal. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 90a17cc3ebc9..6c8e170e2e6b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1983,6 +1983,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) struct nfs4_state *state = calldata->state; struct nfs_server *server = NFS_SERVER(calldata->inode); + dprintk("%s: begin!\n", __func__); if (!nfs4_sequence_done(task, &calldata->res.seq_res)) return; /* hmm. 
we are done with the inode, and in the process of freeing @@ -2010,6 +2011,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) } nfs_release_seqid(calldata->arg.seqid); nfs_refresh_inode(calldata->inode, calldata->res.fattr); + dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); } static void nfs4_close_prepare(struct rpc_task *task, void *data) @@ -2018,6 +2020,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) struct nfs4_state *state = calldata->state; int call_close = 0; + dprintk("%s: begin!\n", __func__); if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) return; @@ -2042,7 +2045,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) if (!call_close) { /* Note: exit _without_ calling nfs4_close_done */ task->tk_action = NULL; - return; + goto out; } if (calldata->arg.fmode == 0) { @@ -2051,7 +2054,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) { rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq, task, NULL); - return; + goto out; } } @@ -2061,8 +2064,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) &calldata->arg.seq_args, &calldata->res.seq_res, task)) - return; + goto out; rpc_call_start(task); +out: + dprintk("%s: done!\n", __func__); } static const struct rpc_call_ops nfs4_close_ops = { -- cgit From 2446ab6070861aba2dd9229463ffbc40016a9f33 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 1 Mar 2012 17:00:56 -0500 Subject: SUNRPC: Use RCU to dereference the rpc_clnt.cl_xprt field A migration event will replace the rpc_xprt used by an rpc_clnt. To ensure this can be done safely, all references to cl_xprt must now use a form of rcu_dereference(). Special care is taken with rpc_peeraddr2str(), which returns a pointer to memory whose lifetime is the same as the rpc_xprt. Signed-off-by: Trond Myklebust [ cel: fix lockdep splats and layering violations ] [ cel: forward ported to 3.4 ] [ cel: remove rpc_max_reqs(), add rpc_net_ns() ] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 9 +++++---- fs/nfs/client.c | 16 +++++++++++----- fs/nfs/nfs4namespace.c | 2 +- fs/nfs/nfs4proc.c | 13 ++++++++++--- fs/nfs/nfs4state.c | 25 +++++++++++++++++-------- fs/nfs/super.c | 5 +++++ 6 files changed, 49 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 0e0865e38065..1bb297243624 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "nfs4_fs.h" #include "callback.h" #include "delegation.h" @@ -33,7 +34,7 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, res->bitmap[0] = res->bitmap[1] = 0; res->status = htonl(NFS4ERR_BADHANDLE); - dprintk("NFS: GETATTR callback request from %s\n", + dprintk_rcu("NFS: GETATTR callback request from %s\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); inode = nfs_delegation_find_inode(cps->clp, &args->fh); @@ -73,7 +74,7 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, if (!cps->clp) /* Always set for v4.0. 
Set in cb_sequence for v4.1 */ goto out; - dprintk("NFS: RECALL callback request from %s\n", + dprintk_rcu("NFS: RECALL callback request from %s\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); res = htonl(NFS4ERR_BADHANDLE); @@ -533,7 +534,7 @@ __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy, if (!cps->clp) /* set in cb_sequence */ goto out; - dprintk("NFS: RECALL_ANY callback request from %s\n", + dprintk_rcu("NFS: RECALL_ANY callback request from %s\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); status = cpu_to_be32(NFS4ERR_INVAL); @@ -568,7 +569,7 @@ __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy, if (!cps->clp) /* set in cb_sequence */ goto out; - dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", + dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR), args->crsa_target_max_slots); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 1506adf4d4ed..d038dc5916e5 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1284,16 +1284,18 @@ static int nfs4_init_callback(struct nfs_client *clp) int error; if (clp->rpc_ops->version == 4) { + struct rpc_xprt *xprt; + + xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt); + if (nfs4_has_session(clp)) { - error = xprt_setup_backchannel( - clp->cl_rpcclient->cl_xprt, + error = xprt_setup_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); if (error < 0) return error; } - error = nfs_callback_up(clp->cl_mvops->minor_version, - clp->cl_rpcclient->cl_xprt); + error = nfs_callback_up(clp->cl_mvops->minor_version, xprt); if (error < 0) { dprintk("%s: failed to start callback. Error = %d\n", __func__, error); @@ -1678,7 +1680,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, data->addrlen, parent_client->cl_ipaddr, data->authflavor, - parent_server->client->cl_xprt->prot, + rpc_protocol(parent_server->client), parent_server->client->cl_timeout, parent_client->cl_mvops->minor_version, parent_client->net); @@ -1905,12 +1907,14 @@ static int nfs_server_list_show(struct seq_file *m, void *v) if (clp->cl_cons_state != NFS_CS_READY) return 0; + rcu_read_lock(); seq_printf(m, "v%u %s %s %3d %s\n", clp->rpc_ops->version, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR), rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT), atomic_read(&clp->cl_count), clp->cl_hostname); + rcu_read_unlock(); return 0; } @@ -1993,6 +1997,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); + rcu_read_lock(); seq_printf(m, "v%u %s %s %-7s %-17s %s\n", clp->rpc_ops->version, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR), @@ -2000,6 +2005,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) dev, fsid, nfs_server_fscache_state(server)); + rcu_read_unlock(); return 0; } diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 667ea7406fd3..9c8eca315f43 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -96,8 +96,8 @@ static int nfs4_validate_fspath(struct dentry *dentry, static size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa, size_t salen, struct nfs_server *server) { + struct net *net = rpc_net_ns(server->client); ssize_t ret; - struct net *net = server->client->cl_xprt->xprt_net; ret = rpc_pton(net, string, len, sa, salen); if (ret == 0) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c 
index 6c8e170e2e6b..671510cc14c0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3833,6 +3833,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, *p = htonl((u32)clp->cl_boot_time.tv_nsec); for(;;) { + rcu_read_lock(); setclientid.sc_name_len = scnprintf(setclientid.sc_name, sizeof(setclientid.sc_name), "%s/%s %s %s %u", clp->cl_ipaddr, @@ -3849,6 +3850,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, sizeof(setclientid.sc_uaddr), "%s.%u.%u", clp->cl_ipaddr, port >> 8, port & 255); + rcu_read_unlock(); status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (status != -NFS4ERR_CLID_INUSE) @@ -5244,11 +5246,16 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) void nfs4_destroy_session(struct nfs4_session *session) { + struct rpc_xprt *xprt; + nfs4_proc_destroy_session(session); + + rcu_read_lock(); + xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt); + rcu_read_unlock(); dprintk("%s Destroy backchannel for xprt %p\n", - __func__, session->clp->cl_rpcclient->cl_xprt); - xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt, - NFS41_BC_MIN_CALLBACKS); + __func__, xprt); + xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); nfs4_destroy_slot_tables(session); kfree(session); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c1111a37dc14..bae959e294cd 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1037,19 +1037,28 @@ static void nfs4_clear_state_manager_bit(struct nfs_client *clp) void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; + char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) return; __module_get(THIS_MODULE); atomic_inc(&clp->cl_count); - task = kthread_run(nfs4_run_state_manager, clp, "%s-manager", - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR)); - if (!IS_ERR(task)) - return; - nfs4_clear_state_manager_bit(clp); - nfs_put_client(clp); - module_put(THIS_MODULE); + + /* The rcu_read_lock() is not strictly necessary, as the state + * manager is the only thread that ever changes the rpc_xprt + * after it's initialized. At this point, we're single threaded. 
*/ + rcu_read_lock(); + snprintf(buf, sizeof(buf), "%s-manager", + rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); + rcu_read_unlock(); + task = kthread_run(nfs4_run_state_manager, clp, buf); + if (IS_ERR(task)) { + printk(KERN_ERR "%s: kthread_run: %ld\n", + __func__, PTR_ERR(task)); + nfs4_clear_state_manager_bit(clp); + nfs_put_client(clp); + module_put(THIS_MODULE); + } } /* diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f4ccdae6a0cf..7002be11d99f 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -701,8 +702,10 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, else seq_puts(m, nfs_infop->nostr); } + rcu_read_lock(); seq_printf(m, ",proto=%s", rpc_peeraddr2str(nfss->client, RPC_DISPLAY_NETID)); + rcu_read_unlock(); if (version == 4) { if (nfss->port != NFS_PORT) seq_printf(m, ",port=%u", nfss->port); @@ -751,9 +754,11 @@ static int nfs_show_options(struct seq_file *m, struct dentry *root) nfs_show_mount_options(m, nfss, 0); + rcu_read_lock(); seq_printf(m, ",addr=%s", rpc_peeraddr2str(nfss->nfs_client->cl_rpcclient, RPC_DISPLAY_ADDR)); + rcu_read_unlock(); return 0; } -- cgit From 4e0038b6b246e4145fc4a53dca61a556d17bc52c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 1 Mar 2012 17:01:05 -0500 Subject: SUNRPC: Move clnt->cl_server into struct rpc_xprt When the cl_xprt field is updated, the cl_server field will also have to change. Since the contents of cl_server follow the remote endpoint of cl_xprt, just move that field to the rpc_xprt. Signed-off-by: Trond Myklebust [ cel: simplify check_gss_callback_principal(), whitespace changes ] [ cel: forward ported to 3.4 ] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 3 +-- fs/nfs/nfs4proc.c | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 4a122ae71762..2afe23349c7b 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -332,7 +332,6 @@ void nfs_callback_down(int minorversion) int check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) { - struct rpc_clnt *r = clp->cl_rpcclient; char *p = svc_gss_principal(rqstp); if (rqstp->rq_authop->flavour != RPC_AUTH_GSS) @@ -353,7 +352,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) if (memcmp(p, "nfs@", 4) != 0) return 0; p += 4; - if (strcmp(p, r->cl_server) != 0) + if (strcmp(p, clp->cl_hostname) != 0) return 0; return 1; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 671510cc14c0..54767dd66cf9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1100,6 +1100,7 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data if (state == NULL) goto err_put_inode; if (data->o_res.delegation_type != 0) { + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; int delegation_flags = 0; rcu_read_lock(); @@ -1111,7 +1112,7 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data pr_err_ratelimited("NFS: Broken NFSv4 server %s is " "returning a delegation for " "OPEN(CLAIM_DELEGATE_CUR)\n", - NFS_CLIENT(inode)->cl_server); + clp->cl_hostname); } else if ((delegation_flags & 1UL<inode, data->owner->so_cred, -- cgit From 31b8e2aec099f22d40277c424d8c24b2a4c95fce Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 2012 17:01:23 -0500 Subject: NFS: Make clientaddr= optional For NFSv4 mounts, the clientaddr= mount option has always been 
required. Now we have rpc_localaddr() in the kernel, which was modeled after the same logic in the mount.nfs command that constructs the clientaddr= mount option. If user space doesn't provide a clientaddr= mount option, the kernel can now construct its own. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 15 +++++++++++++++ fs/nfs/super.c | 6 ------ 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d038dc5916e5..d30dcbfb6b20 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1346,6 +1346,7 @@ int nfs4_init_client(struct nfs_client *clp, rpc_authflavor_t authflavour, int noresvport) { + char buf[INET6_ADDRSTRLEN + 1]; int error; if (clp->cl_cons_state == NFS_CS_READY) { @@ -1361,6 +1362,20 @@ int nfs4_init_client(struct nfs_client *clp, 1, noresvport); if (error < 0) goto error; + + /* If no clientaddr= option was specified, find a usable cb address */ + if (ip_addr == NULL) { + struct sockaddr_storage cb_addr; + struct sockaddr *sap = (struct sockaddr *)&cb_addr; + + error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr)); + if (error < 0) + goto error; + error = rpc_ntop(sap, buf, sizeof(buf)); + if (error < 0) + goto error; + ip_addr = (const char *)buf; + } strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); error = nfs_idmap_new(clp); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 7002be11d99f..3935a371f5a0 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2557,12 +2557,6 @@ static int nfs4_validate_text_mount_data(void *options, return -EINVAL; } - if (args->client_address == NULL) { - dfprintk(MOUNT, - "NFS4: mount program didn't pass callback address\n"); - return -EINVAL; - } - return nfs_parse_devname(dev_name, &args->nfs_server.hostname, NFS4_MAXNAMLEN, -- cgit From 20d27e929fb4790a339a4ddcc9a27f14db06055b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 2012 17:01:31 -0500 Subject: NFS: Add a client-side function to display NFS file handles For debugging, introduce a simplistic function to print NFS file handles on the system console. The main function is hooked into the dprintk debugging facility, but you can directly call the helper, _nfs_display_fhandle(), if you want to print a handle unconditionally. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 6c662598f885..99a4f52c14b2 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1045,6 +1045,51 @@ struct nfs_fh *nfs_alloc_fhandle(void) return fh; } +/** + * _nfs_display_fhandle - display an NFS file handle on the console + * + * @fh: file handle to display + * @caption: display caption + * + * For debugging only. 
+ */ +#ifdef RPC_DEBUG +void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption) +{ + unsigned short i; + + if (fh->size == 0 || fh == NULL) { + printk(KERN_DEFAULT "%s at %p is empty\n", caption, fh); + return; + } + + printk(KERN_DEFAULT "%s at %p is %u bytes:\n", caption, fh, fh->size); + for (i = 0; i < fh->size; i += 16) { + __be32 *pos = (__be32 *)&fh->data[i]; + + switch ((fh->size - i - 1) >> 2) { + case 0: + printk(KERN_DEFAULT " %08x\n", + be32_to_cpup(pos)); + break; + case 1: + printk(KERN_DEFAULT " %08x %08x\n", + be32_to_cpup(pos), be32_to_cpup(pos + 1)); + break; + case 2: + printk(KERN_DEFAULT " %08x %08x %08x\n", + be32_to_cpup(pos), be32_to_cpup(pos + 1), + be32_to_cpup(pos + 2)); + break; + default: + printk(KERN_DEFAULT " %08x %08x %08x %08x\n", + be32_to_cpup(pos), be32_to_cpup(pos + 1), + be32_to_cpup(pos + 2), be32_to_cpup(pos + 3)); + } + } +} +#endif + /** * nfs_inode_attrs_need_update - check if the inode attributes need updating * @inode - pointer to inode -- cgit From bb4dae5e5b5a92f0ffbcc6ac10c5e8afcd87934d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 2012 17:01:48 -0500 Subject: NFS: Simplify arguments of encode_renew() Clean up: pass just the clientid4 to encode_renew(). This enables it to be used by callers who might not have an full nfs_client. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index b5c5212cd184..48f539314f25 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1637,13 +1637,14 @@ static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, co hdr->replen += decode_rename_maxsz; } -static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr) +static void encode_renew(struct xdr_stream *xdr, clientid4 clid, + struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_RENEW); - xdr_encode_hyper(p, client_stateid->cl_clientid); + xdr_encode_hyper(p, clid); hdr->nops++; hdr->replen += decode_renew_maxsz; } @@ -2692,7 +2693,7 @@ static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr, }; encode_compound_hdr(xdr, req, &hdr); - encode_renew(xdr, clp, &hdr); + encode_renew(xdr, clp->cl_clientid, &hdr); encode_nops(&hdr); } -- cgit From 81934ddb8eb62a85b8015c0f2b824a88510965a2 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 2012 17:01:57 -0500 Subject: NFS: Introduce NFS_ATTR_FATTR_V4_LOCATIONS The Linux NFS client must distinguish between referral events (which it currently supports) and migration events (which it does not yet support). In both types of events, an fs_locations array is returned. But upper layers, not the XDR layer, should make the distinction between a referral and a migration. There really isn't a way for an XDR decoder function to distinguish the two, in general. Slightly adjust the FATTR flags returned by decode_fs_locations() to set NFS_ATTR_FATTR_V4_LOCATIONS only if a non-empty locations array was returned from the server. Then have logic in nfs4proc.c distinguish whether the locations array is for a referral or something else. 
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 6 +++--- fs/nfs/nfs4xdr.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 54767dd66cf9..281c2def2b19 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -79,6 +79,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data); static int _nfs4_recover_proc_open(struct nfs4_opendata *data); static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *); +static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, @@ -2340,7 +2341,6 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, return nfs4_map_errors(status); } -static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); /* * Get locations and (maybe) other attributes of a referral. * Note that we'll actually follow the referral later when @@ -4797,11 +4797,11 @@ static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || (fattr->valid & NFS_ATTR_FATTR_FILEID)) && (fattr->valid & NFS_ATTR_FATTR_FSID) && - (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) + (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) return; fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | - NFS_ATTR_FATTR_NLINK; + NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; fattr->nlink = 2; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 48f539314f25..a6fb55da874c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3660,7 +3660,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st res->nlocations++; } if (res->nlocations != 0) - status = NFS_ATTR_FATTR_V4_REFERRAL; + status = NFS_ATTR_FATTR_V4_LOCATIONS; out: dprintk("%s: fs_locations done, error = %d\n", __func__, status); return status; -- cgit From 264e6351c59d22303582c45d79f0a5735f51d8d1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 1 Mar 2012 17:02:05 -0500 Subject: NFS: Request fh_expire_type attribute in "server caps" operation The fh_expire_type file attribute is a filesystem wide attribute that consists of flags that indicate what characteristics file handles on this FSID have. Our client doesn't support volatile file handles. It should find out early (say, at mount time) whether the server is going to play shenanighans with file handles during a migration. 
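For reference, fh_expire_type is a bit mask; the values below follow RFC 3530's fh_expire_type attribute (the kernel carries its own constants for these bits). A hedged sketch of the kind of early check the commit message argues for, not code from this patch:

/* Sketch only: bit values per RFC 3530; server->fh_expire_type is the field
 * this patch stores. */
#include <linux/types.h>

#define SKETCH_FH4_PERSISTENT		0x00000000
#define SKETCH_FH4_NOEXPIRE_WITH_OPEN	0x00000001
#define SKETCH_FH4_VOLATILE_ANY		0x00000002
#define SKETCH_FH4_VOL_MIGRATION	0x00000004
#define SKETCH_FH4_VOL_RENAME		0x00000008

/* True if the server may expire file handles in a way this client does not
 * handle, i.e. anything beyond fully persistent handles. */
static bool sketch_fh_may_expire(u32 fh_expire_type)
{
	return (fh_expire_type & (SKETCH_FH4_VOLATILE_ANY |
				  SKETCH_FH4_VOL_MIGRATION |
				  SKETCH_FH4_VOL_RENAME)) != 0;
}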
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 1 + fs/nfs/nfs4xdr.c | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 281c2def2b19..87b9b91f76cf 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2220,6 +2220,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; server->acl_bitmask = res.acl_bitmask; + server->fh_expire_type = res.fh_expire_type; } return status; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index a6fb55da874c..3e0fe9f92e7c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -2676,6 +2676,7 @@ static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req, encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fhandle, &hdr); encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS| + FATTR4_WORD0_FH_EXPIRE_TYPE| FATTR4_WORD0_LINK_SUPPORT| FATTR4_WORD0_SYMLINK_SUPPORT| FATTR4_WORD0_ACLSUPPORT, &hdr); @@ -3223,6 +3224,28 @@ out_overflow: return -EIO; } +static int decode_attr_fh_expire_type(struct xdr_stream *xdr, + uint32_t *bitmap, uint32_t *type) +{ + __be32 *p; + + *type = 0; + if (unlikely(bitmap[0] & (FATTR4_WORD0_FH_EXPIRE_TYPE - 1U))) + return -EIO; + if (likely(bitmap[0] & FATTR4_WORD0_FH_EXPIRE_TYPE)) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + goto out_overflow; + *type = be32_to_cpup(p); + bitmap[0] &= ~FATTR4_WORD0_FH_EXPIRE_TYPE; + } + dprintk("%s: expire type=0x%x\n", __func__, *type); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change) { __be32 *p; @@ -4271,6 +4294,9 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re goto xdr_error; if ((status = decode_attr_supported(xdr, bitmap, res->attr_bitmask)) != 0) goto xdr_error; + if ((status = decode_attr_fh_expire_type(xdr, bitmap, + &res->fh_expire_type)) != 0) + goto xdr_error; if ((status = decode_attr_link_support(xdr, bitmap, &res->has_links)) != 0) goto xdr_error; if ((status = decode_attr_symlink_support(xdr, bitmap, &res->has_symlinks)) != 0) -- cgit From 54b50af089552bae368502e35dead67e81129b8d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 2 Mar 2012 16:58:56 -0500 Subject: NFS: Reduce debugging noise from encode_compound_hdr Get rid of encode_compound: tag= when XDR debugging is enabled. The current Linux client never sets compound tags. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 3e0fe9f92e7c..7d3ba1ff787c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -906,7 +906,6 @@ static void encode_compound_hdr(struct xdr_stream *xdr, * but this is not required as a MUST for the server to do so. 
*/ hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen; - dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag); BUG_ON(hdr->taglen > NFS4_MAXTAGLEN); p = reserve_space(xdr, 4 + hdr->taglen + 8); p = xdr_encode_opaque(p, hdr->tag, hdr->taglen); -- cgit From 4b32da2bcf1de2b7a196a0e48389d231b4472c36 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sun, 4 Mar 2012 12:56:55 +0000 Subject: ppp: Replace uses of <linux/if_ppp.h> with <linux/ppp-ioctl.h> Since all that include/linux/if_ppp.h does is #include <linux/ppp-ioctl.h>, this replaces the occurrences of #include <linux/if_ppp.h> with #include <linux/ppp-ioctl.h>. It also corrects an error in Documentation/networking/l2tp.txt, where it referenced include/linux/if_ppp.h as the source of some definitions that are actually now defined in include/linux/if_pppol2tp.h. Signed-off-by: Paul Mackerras Signed-off-by: David S. Miller --- fs/compat_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index a26bea10e81b..10d8cd90ca6f 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -34,7 +34,7 @@ #include #include #include -#include <linux/if_ppp.h> +#include <linux/ppp-ioctl.h> #include #include #include -- cgit From 6aad1c3d3eba3db38b3a1200e2b02ff3af501c5a Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 5 Mar 2012 09:20:59 -0500 Subject: GFS2: Eliminate sd_rindex_mutex Over time, we've slowly eliminated the use of sd_rindex_mutex. Up to this point, it was only used in two places: function gfs2_ri_total (which totals the file system size by reading and parsing the rindex file) and function gfs2_rindex_update which updates the rgrps in memory. Both of these functions have the rindex glock to protect them, so the mutex is unnecessary. Since gfs2_grow writes to the rindex via the meta_fs, the mutex is in the wrong order according to the normal rules. This patch eliminates the mutex entirely to avoid the problem.
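The "wrong order" the message refers to is the usual AB-BA inversion: one code path acquires two locks in one order while another path can end up needing them in the opposite order, and the two can then block on each other indefinitely. A deliberately generic sketch of that shape; it does not model GFS2's actual call chains or its glock type:

/* Generic AB-BA lock-order inversion, for illustration only. */
#include <linux/mutex.h>

static DEFINE_MUTEX(lock_a);
static DEFINE_MUTEX(lock_b);

static void path_one(void)
{
	mutex_lock(&lock_a);
	mutex_lock(&lock_b);		/* order: A then B */
	mutex_unlock(&lock_b);
	mutex_unlock(&lock_a);
}

static void path_two(void)
{
	mutex_lock(&lock_b);
	mutex_lock(&lock_a);		/* order: B then A; can deadlock with path_one() */
	mutex_unlock(&lock_a);
	mutex_unlock(&lock_b);
}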
Signed-off-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/incore.h | 1 - fs/gfs2/ops_fstype.c | 1 - fs/gfs2/rgrp.c | 22 ++++++++++------------ 3 files changed, 10 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 4d546df58ac9..47d0bda5ac2b 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -644,7 +644,6 @@ struct gfs2_sbd { int sd_rindex_uptodate; spinlock_t sd_rindex_spin; - struct mutex sd_rindex_mutex; struct rb_root sd_rindex_tree; unsigned int sd_rgrps; unsigned int sd_max_rg_data; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index a55baa7f3239..ae5e0a40c9b3 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -83,7 +83,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) spin_lock_init(&sdp->sd_statfs_spin); spin_lock_init(&sdp->sd_rindex_spin); - mutex_init(&sdp->sd_rindex_mutex); sdp->sd_rindex_tree.rb_node = NULL; INIT_LIST_HEAD(&sdp->sd_jindex_list); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index e09370eec590..6ff9f17f9ac2 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -540,7 +540,6 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp) struct file_ra_state ra_state; int error, rgrps; - mutex_lock(&sdp->sd_rindex_mutex); file_ra_state_init(&ra_state, inode->i_mapping); for (rgrps = 0;; rgrps++) { loff_t pos = rgrps * sizeof(struct gfs2_rindex); @@ -553,11 +552,10 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp) break; total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data); } - mutex_unlock(&sdp->sd_rindex_mutex); return total_data; } -static void rgd_insert(struct gfs2_rgrpd *rgd) +static int rgd_insert(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL; @@ -573,11 +571,13 @@ static void rgd_insert(struct gfs2_rgrpd *rgd) else if (rgd->rd_addr > cur->rd_addr) newn = &((*newn)->rb_right); else - return; + return -EEXIST; } rb_link_node(&rgd->rd_node, parent, newn); rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree); + sdp->sd_rgrps++; + return 0; } /** @@ -631,10 +631,12 @@ static int read_rindex_entry(struct gfs2_inode *ip, if (rgd->rd_data > sdp->sd_max_rg_data) sdp->sd_max_rg_data = rgd->rd_data; spin_lock(&sdp->sd_rindex_spin); - rgd_insert(rgd); - sdp->sd_rgrps++; + error = rgd_insert(rgd); spin_unlock(&sdp->sd_rindex_spin); - return error; + if (!error) + return 0; + + error = 0; /* someone else read in the rgrp; free it and ignore it */ fail: kfree(rgd->rd_bits); @@ -695,22 +697,18 @@ int gfs2_rindex_update(struct gfs2_sbd *sdp) /* Read new copy from disk if we don't have the latest */ if (!sdp->sd_rindex_uptodate) { - mutex_lock(&sdp->sd_rindex_mutex); if (!gfs2_glock_is_locked_by_me(gl)) { error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); if (error) - goto out_unlock; + return error; unlock_required = 1; } if (!sdp->sd_rindex_uptodate) error = gfs2_ri_update(ip); if (unlock_required) gfs2_glock_dq_uninit(&ri_gh); -out_unlock: - mutex_unlock(&sdp->sd_rindex_mutex); } - return error; } -- cgit From 58884c4df005ee5ee854cfcd0385d5a6bf25aa30 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 5 Mar 2012 10:19:35 -0500 Subject: GFS2: make sure rgrps are up to date in func gfs2_blk2rgrpd This patch adds a call to gfs2_rindex_update from function gfs2_blk2rgrpd and removes calls to it that are made redundant by it. The problem is that a gfs2_grow can add rgrps to the rindex, then put those rgrps into use, thus rendering the rindex we read in at mount time incomplete. 
Signed-off-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/rgrp.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 6ff9f17f9ac2..19bde40b4864 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -332,6 +332,9 @@ struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact) struct rb_node *n, *next; struct gfs2_rgrpd *cur; + if (gfs2_rindex_update(sdp)) + return NULL; + spin_lock(&sdp->sd_rindex_spin); n = sdp->sd_rindex_tree.rb_node; while (n) { @@ -917,10 +920,6 @@ int gfs2_fitrim(struct file *filp, void __user *argp) if (!blk_queue_discard(q)) return -EOPNOTSUPP; - ret = gfs2_rindex_update(sdp); - if (ret) - return ret; - if (argp == NULL) { r.start = 0; r.len = ULLONG_MAX; @@ -1671,13 +1670,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) { struct gfs2_rgrpd *rgd; struct gfs2_holder rgd_gh; - int error; - - error = gfs2_rindex_update(sdp); - if (error) - return error; + int error = -EINVAL; - error = -EINVAL; rgd = gfs2_blk2rgrpd(sdp, no_addr, 1); if (!rgd) goto fail; -- cgit From aa6bf01d391935a8929333bc2e243084ea0c58db Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 29 Feb 2012 09:53:48 +0000 Subject: xfs: use per-filesystem I/O completion workqueues The new concurrency managed workqueues are cheap enough that we can create per-filesystem instead of global workqueues. This allows us to remove the trylock or defer scheme on the ilock, which is not helpful once we have outstanding log reservations until finishing a size update. Also allow the default concurrency on this workqueues so that I/O completions blocking on the ilock for one inode do not block process for another inode. Reviewed-by: Dave Chinner Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_aops.c | 39 ++++++++++----------------------------- fs/xfs/xfs_aops.h | 2 -- fs/xfs/xfs_buf.c | 17 ----------------- fs/xfs/xfs_mount.h | 3 +++ fs/xfs/xfs_super.c | 39 ++++++++++++++++++++++++++++++++++++++- 5 files changed, 51 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 74b9baf36ac3..540a01742c6d 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -126,21 +126,15 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend) /* * Update on-disk file size now that data has been written to disk. - * - * This function does not block as blocking on the inode lock in IO completion - * can lead to IO completion order dependency deadlocks.. If it can't get the - * inode ilock it will return EAGAIN. Callers must handle this. 
*/ -STATIC int +STATIC void xfs_setfilesize( - xfs_ioend_t *ioend) + struct xfs_ioend *ioend) { - xfs_inode_t *ip = XFS_I(ioend->io_inode); + struct xfs_inode *ip = XFS_I(ioend->io_inode); xfs_fsize_t isize; - if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) - return EAGAIN; - + xfs_ilock(ip, XFS_ILOCK_EXCL); isize = xfs_ioend_new_eof(ioend); if (isize) { trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); @@ -149,7 +143,6 @@ xfs_setfilesize( } xfs_iunlock(ip, XFS_ILOCK_EXCL); - return 0; } /* @@ -163,10 +156,12 @@ xfs_finish_ioend( struct xfs_ioend *ioend) { if (atomic_dec_and_test(&ioend->io_remaining)) { + struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount; + if (ioend->io_type == IO_UNWRITTEN) - queue_work(xfsconvertd_workqueue, &ioend->io_work); + queue_work(mp->m_unwritten_workqueue, &ioend->io_work); else if (xfs_ioend_is_append(ioend)) - queue_work(xfsdatad_workqueue, &ioend->io_work); + queue_work(mp->m_data_workqueue, &ioend->io_work); else xfs_destroy_ioend(ioend); } @@ -207,23 +202,9 @@ xfs_end_io( * We might have to update the on-disk file size after extending * writes. */ - error = xfs_setfilesize(ioend); - ASSERT(!error || error == EAGAIN); - + xfs_setfilesize(ioend); done: - /* - * If we didn't complete processing of the ioend, requeue it to the - * tail of the workqueue for another attempt later. Otherwise destroy - * it. - */ - if (error == EAGAIN) { - atomic_inc(&ioend->io_remaining); - xfs_finish_ioend(ioend); - /* ensure we don't spin on blocked ioends */ - delay(1); - } else { - xfs_destroy_ioend(ioend); - } + xfs_destroy_ioend(ioend); } /* diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h index 116dd5c37034..06e4caf38203 100644 --- a/fs/xfs/xfs_aops.h +++ b/fs/xfs/xfs_aops.h @@ -18,8 +18,6 @@ #ifndef __XFS_AOPS_H__ #define __XFS_AOPS_H__ -extern struct workqueue_struct *xfsdatad_workqueue; -extern struct workqueue_struct *xfsconvertd_workqueue; extern mempool_t *xfs_ioend_pool; /* diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 4dff85c7d7eb..6819b5163e33 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -45,8 +45,6 @@ static kmem_zone_t *xfs_buf_zone; STATIC int xfsbufd(void *); static struct workqueue_struct *xfslogd_workqueue; -struct workqueue_struct *xfsdatad_workqueue; -struct workqueue_struct *xfsconvertd_workqueue; #ifdef XFS_BUF_LOCK_TRACKING # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) @@ -1793,21 +1791,8 @@ xfs_buf_init(void) if (!xfslogd_workqueue) goto out_free_buf_zone; - xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1); - if (!xfsdatad_workqueue) - goto out_destroy_xfslogd_workqueue; - - xfsconvertd_workqueue = alloc_workqueue("xfsconvertd", - WQ_MEM_RECLAIM, 1); - if (!xfsconvertd_workqueue) - goto out_destroy_xfsdatad_workqueue; - return 0; - out_destroy_xfsdatad_workqueue: - destroy_workqueue(xfsdatad_workqueue); - out_destroy_xfslogd_workqueue: - destroy_workqueue(xfslogd_workqueue); out_free_buf_zone: kmem_zone_destroy(xfs_buf_zone); out: @@ -1817,8 +1802,6 @@ xfs_buf_init(void) void xfs_buf_terminate(void) { - destroy_workqueue(xfsconvertd_workqueue); - destroy_workqueue(xfsdatad_workqueue); destroy_workqueue(xfslogd_workqueue); kmem_zone_destroy(xfs_buf_zone); } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index c082e44dad2d..9eba73887829 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -211,6 +211,9 @@ typedef struct xfs_mount { struct shrinker m_inode_shrink; /* inode reclaim shrinker */ int64_t m_low_space[XFS_LOWSP_MAX]; /* low free space thresholds */ + + struct 
workqueue_struct *m_data_workqueue; + struct workqueue_struct *m_unwritten_workqueue; } xfs_mount_t; /* diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 5e0d43f231a4..c7f7bc2855a4 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -759,6 +759,36 @@ xfs_setup_devices( return 0; } +STATIC int +xfs_init_mount_workqueues( + struct xfs_mount *mp) +{ + mp->m_data_workqueue = alloc_workqueue("xfs-data/%s", + WQ_MEM_RECLAIM, 0, mp->m_fsname); + if (!mp->m_data_workqueue) + goto out; + + mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s", + WQ_MEM_RECLAIM, 0, mp->m_fsname); + if (!mp->m_unwritten_workqueue) + goto out_destroy_data_iodone_queue; + + return 0; + +out_destroy_data_iodone_queue: + destroy_workqueue(mp->m_data_workqueue); +out: + return -ENOMEM; +} + +STATIC void +xfs_destroy_mount_workqueues( + struct xfs_mount *mp) +{ + destroy_workqueue(mp->m_data_workqueue); + destroy_workqueue(mp->m_unwritten_workqueue); +} + /* Catch misguided souls that try to use this interface on XFS */ STATIC struct inode * xfs_fs_alloc_inode( @@ -982,6 +1012,7 @@ xfs_fs_put_super( xfs_unmountfs(mp); xfs_freesb(mp); xfs_icsb_destroy_counters(mp); + xfs_destroy_mount_workqueues(mp); xfs_close_devices(mp); xfs_free_fsname(mp); kfree(mp); @@ -1308,10 +1339,14 @@ xfs_fs_fill_super( if (error) goto out_free_fsname; - error = xfs_icsb_init_counters(mp); + error = xfs_init_mount_workqueues(mp); if (error) goto out_close_devices; + error = xfs_icsb_init_counters(mp); + if (error) + goto out_destroy_workqueues; + error = xfs_readsb(mp, flags); if (error) goto out_destroy_counters; @@ -1374,6 +1409,8 @@ xfs_fs_fill_super( xfs_freesb(mp); out_destroy_counters: xfs_icsb_destroy_counters(mp); +out_destroy_workqueues: + xfs_destroy_mount_workqueues(mp); out_close_devices: xfs_close_devices(mp); out_free_fsname: -- cgit From 6923e686f19cb7017fc9777a10e06c2e2b2a2936 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 29 Feb 2012 09:53:49 +0000 Subject: xfs: do not require an ioend for new EOF calculation Replace xfs_ioend_new_eof with a new inline xfs_new_eof helper that doesn't require and ioend, and is available also outside of xfs_aops.c. Also make the code a bit more clear by using a normal if statement instead of a slightly misleading MIN(). Reviewed-by: Dave Chinner Reviewed-by: Mark Tinguely Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_aops.c | 24 ++++-------------------- fs/xfs/xfs_inode.h | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 540a01742c6d..745492b6c666 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -98,23 +98,6 @@ xfs_destroy_ioend( mempool_free(ioend, xfs_ioend_pool); } -/* - * If the end of the current ioend is beyond the current EOF, - * return the new EOF value, otherwise zero. - */ -STATIC xfs_fsize_t -xfs_ioend_new_eof( - xfs_ioend_t *ioend) -{ - xfs_inode_t *ip = XFS_I(ioend->io_inode); - xfs_fsize_t isize; - xfs_fsize_t bsize; - - bsize = ioend->io_offset + ioend->io_size; - isize = MIN(i_size_read(VFS_I(ip)), bsize); - return isize > ip->i_d.di_size ? isize : 0; -} - /* * Fast and loose check if this write could update the on-disk inode size. 
*/ @@ -135,7 +118,7 @@ xfs_setfilesize( xfs_fsize_t isize; xfs_ilock(ip, XFS_ILOCK_EXCL); - isize = xfs_ioend_new_eof(ioend); + isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size); if (isize) { trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); ip->i_d.di_size = isize; @@ -357,6 +340,7 @@ xfs_submit_ioend_bio( xfs_ioend_t *ioend, struct bio *bio) { + struct xfs_inode *ip = XFS_I(ioend->io_inode); atomic_inc(&ioend->io_remaining); bio->bi_private = ioend; bio->bi_end_io = xfs_end_bio; @@ -365,8 +349,8 @@ xfs_submit_ioend_bio( * If the I/O is beyond EOF we mark the inode dirty immediately * but don't update the inode size until I/O completion. */ - if (xfs_ioend_new_eof(ioend)) - xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); + if (xfs_new_eof(ip, ioend->io_offset + ioend->io_size)) + xfs_mark_inode_dirty(ip); submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio); } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index eda493780395..7f90469141d7 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -274,6 +274,20 @@ static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip) return ip->i_d.di_size; } +/* + * If this I/O goes past the on-disk inode size update it unless it would + * be past the current in-core inode size. + */ +static inline xfs_fsize_t +xfs_new_eof(struct xfs_inode *ip, xfs_fsize_t new_size) +{ + xfs_fsize_t i_size = i_size_read(VFS_I(ip)); + + if (new_size > i_size) + new_size = i_size; + return new_size > ip->i_d.di_size ? new_size : 0; +} + /* * i_flags helper functions */ -- cgit From 84803fb78237014cbbc86c0f012b273a199f4691 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 29 Feb 2012 09:53:50 +0000 Subject: xfs: log file size updates as part of unwritten extent conversion If we convert and unwritten extent past the current i_size log the size update as part of the extent manipulation transactions instead of doing an unlogged metadata update later. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_aops.c | 11 ++++++----- fs/xfs/xfs_iomap.c | 19 ++++++++++++++++++- 2 files changed, 24 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 745492b6c666..8e11b07bb281 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -179,13 +179,14 @@ xfs_end_io( ioend->io_error = -error; goto done; } + } else { + /* + * We might have to update the on-disk file size after + * extending writes. + */ + xfs_setfilesize(ioend); } - /* - * We might have to update the on-disk file size after extending - * writes. - */ - xfs_setfilesize(ioend); done: xfs_destroy_ioend(ioend); } diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 246c7d57c6f9..71a464503c43 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -31,6 +31,7 @@ #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" +#include "xfs_inode_item.h" #include "xfs_btree.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" @@ -645,6 +646,7 @@ xfs_iomap_write_unwritten( xfs_trans_t *tp; xfs_bmbt_irec_t imap; xfs_bmap_free_t free_list; + xfs_fsize_t i_size; uint resblks; int committed; int error; @@ -705,7 +707,22 @@ xfs_iomap_write_unwritten( if (error) goto error_on_bmapi_transaction; - error = xfs_bmap_finish(&(tp), &(free_list), &committed); + /* + * Log the updated inode size as we go. We have to be careful + * to only log it up to the actual write offset if it is + * halfway into a block. 
+ */ + i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb); + if (i_size > offset + count) + i_size = offset + count; + + i_size = xfs_new_eof(ip, i_size); + if (i_size) { + ip->i_d.di_size = i_size; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + } + + error = xfs_bmap_finish(&tp, &free_list, &committed); if (error) goto error_on_bmapi_transaction; -- cgit From 7e03b7cc0736eefe7471782c344112ad6eba951e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:12:57 -0500 Subject: NFS: Fix a compile issue when !CONFIG_NFS_V4_1 The attempt to display the implementation ID needs to be conditional on whether or not CONFIG_NFS_V4_1 is defined Reported-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 3935a371f5a0..aac403085be5 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -775,7 +775,6 @@ static void show_sessions(struct seq_file *m, struct nfs_server *server) {} #endif #endif -#ifdef CONFIG_NFS_V4 #ifdef CONFIG_NFS_V4_1 static void show_pnfs(struct seq_file *m, struct nfs_server *server) { @@ -785,9 +784,26 @@ static void show_pnfs(struct seq_file *m, struct nfs_server *server) else seq_printf(m, "not configured"); } + +static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss) +{ + if (nfss->nfs_client && nfss->nfs_client->impl_id) { + struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id; + seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s'," + "date='%llu,%u'", + impl_id->name, impl_id->domain, + impl_id->date.seconds, impl_id->date.nseconds); + } +} #else -static void show_pnfs(struct seq_file *m, struct nfs_server *server) {} +#ifdef CONFIG_NFS_V4 +static void show_pnfs(struct seq_file *m, struct nfs_server *server) +{ +} #endif +static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss) +{ +} #endif static int nfs_show_devname(struct seq_file *m, struct dentry *root) @@ -836,13 +852,7 @@ static int nfs_show_stats(struct seq_file *m, struct dentry *root) seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ); - if (nfss->nfs_client && nfss->nfs_client->impl_id) { - struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id; - seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s'," - "date='%llu,%u'", - impl_id->name, impl_id->domain, - impl_id->date.seconds, impl_id->date.nseconds); - } + show_implementation_id(m, nfss); seq_printf(m, "\n\tcaps:\t"); seq_printf(m, "caps=0x%x", nfss->caps); -- cgit From fa68a1ba1de349f0d1fcc54171b95236efe24148 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 6 Mar 2012 10:14:35 -0500 Subject: NFS: Fix a typo in _nfs_display_fhandle The check for 'fh == NULL' needs to come _before_ we dereference fh. 
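As a quick illustration of why the ordering matters (a sketch, not part of the patch): C's || operator short-circuits left to right, so the NULL test must be the first operand, otherwise the dereference can fault before the guard ever runs.

	if (fh == NULL || fh->size == 0)	/* safe: fh is tested before it is dereferenced */
		return;

	if (fh->size == 0 || fh == NULL)	/* broken: fh->size may fault when fh is NULL */
		return;
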
Reported-by: Dan Carpenter Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 99a4f52c14b2..ba03b7908149 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1058,7 +1058,7 @@ void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption) { unsigned short i; - if (fh->size == 0 || fh == NULL) { + if (fh == NULL || fh->size == 0) { printk(KERN_DEFAULT "%s at %p is empty\n", caption, fh); return; } -- cgit From a1d0b5eebc4fd6e0edb02688b35f17f67f42aea5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 5 Mar 2012 19:56:44 -0500 Subject: NFS: Properly handle the case where the delegation is revoked If we know that the delegation stateid is bad or revoked, we need to remove that delegation as soon as possible, and then mark all the stateids that relied on that delegation for recovery. We cannot use the delegation as part of the recovery process. Also note that NFSv4.1 uses a different error code (NFS4ERR_DELEG_REVOKED) to indicate that the delegation was revoked. Finally, ensure that setlk() and setattr() can both recover safely from a revoked delegation. Signed-off-by: Trond Myklebust Cc: stable@vger.kernel.org --- fs/nfs/delegation.c | 11 +++++++++++ fs/nfs/delegation.h | 1 + fs/nfs/nfs4_fs.h | 2 ++ fs/nfs/nfs4proc.c | 18 ++++++++++++++++-- fs/nfs/nfs4state.c | 29 +++++++++++++++++++++++++++-- 5 files changed, 57 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 7f2654069806..ac889af8ccf5 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -466,6 +466,17 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } +void nfs_remove_bad_delegation(struct inode *inode) +{ + struct nfs_delegation *delegation; + + delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode)); + if (delegation) { + nfs_inode_find_state_and_recover(inode, &delegation->stateid); + nfs_free_delegation(delegation); + } +} + /** * nfs_expire_all_delegation_types * @clp: client to process diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index d9322e490c56..691a79609184 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -45,6 +45,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp); void nfs_handle_cb_pathdown(struct nfs_client *clp); int nfs_client_return_marked_delegations(struct nfs_client *clp); int nfs_delegations_present(struct nfs_client *clp); +void nfs_remove_bad_delegation(struct inode *inode); void nfs_delegation_mark_reclaim(struct nfs_client *clp); void nfs_delegation_reap_unclaimed(struct nfs_client *clp); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 19079ec8252c..7ddad3fa4074 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -317,6 +317,8 @@ extern void nfs4_put_open_state(struct nfs4_state *); extern void nfs4_close_state(struct nfs4_state *, fmode_t); extern void nfs4_close_sync(struct nfs4_state *, fmode_t); extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); +extern void nfs_inode_find_state_and_recover(struct inode *inode, + const nfs4_stateid *stateid); extern void nfs4_schedule_lease_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ea7adfc868c2..f31fcea1af7e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c 
@@ -268,8 +268,11 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc switch(errorcode) { case 0: return 0; + case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: + if (state != NULL) + nfs_remove_bad_delegation(state->inode); case -NFS4ERR_OPENMODE: if (state == NULL) break; @@ -1331,8 +1334,11 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state * The show must go on: exit, but mark the * stateid as needing recovery. */ + case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: + nfs_inode_find_state_and_recover(state->inode, + stateid); nfs4_schedule_stateid_recovery(server, state); case -EKEYEXPIRED: /* @@ -1931,7 +1937,9 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = { + .state = state, + }; int err; do { err = nfs4_handle_exception(server, @@ -3760,8 +3768,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, if (task->tk_status >= 0) return 0; switch(task->tk_status) { + case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: + if (state != NULL) + nfs_remove_bad_delegation(state->inode); case -NFS4ERR_OPENMODE: if (state == NULL) break; @@ -4604,7 +4615,9 @@ out: static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = { + .state = state, + }; int err; do { @@ -4697,6 +4710,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) * The show must go on: exit, but mark the * stateid as needing recovery. 
*/ + case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OPENMODE: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 2f760604246f..d60e7ad2690e 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1106,12 +1106,37 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4 { struct nfs_client *clp = server->nfs_client; - if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags)) - nfs_async_inode_return_delegation(state->inode, &state->stateid); nfs4_state_mark_reclaim_nograce(clp, state); nfs4_schedule_state_manager(clp); } +void nfs_inode_find_state_and_recover(struct inode *inode, + const nfs4_stateid *stateid) +{ + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_open_context *ctx; + struct nfs4_state *state; + bool found = false; + + spin_lock(&inode->i_lock); + list_for_each_entry(ctx, &nfsi->open_files, list) { + state = ctx->state; + if (state == NULL) + continue; + if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) + continue; + if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0) + continue; + nfs4_state_mark_reclaim_nograce(clp, state); + found = true; + } + spin_unlock(&inode->i_lock); + if (found) + nfs4_schedule_state_manager(clp); +} + + static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; -- cgit From 8e663f0e5fabf57065aed1cfdaff5b13057dce23 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:56 -0500 Subject: NFSv4.1: Fix matching of the stateids when returning a delegation nfs41_validate_delegation_stateid is broken if we supply a stateid with a non-zero sequence id. Instead of trying to match the sequence id, the function assumes that we always want to error. While this is true for a delegation callback, it is not true in general. Also fix a typo in nfs4_callback_recall. 
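For illustration, a sketch of the corrected matching rule, mirroring the logic that ends up in the nfs41_match_stateid helper later in this series (not the patch itself; the function name here is invented): the opaque 'other' field must match, and a sequence id of zero on either side acts as a wildcard rather than forcing a mismatch.

	/* Sketch only; uses the union-style nfs4_stateid layout in force at this point. */
	static bool delegation_stateid_matches(const nfs4_stateid *a, const nfs4_stateid *b)
	{
		if (memcmp(a->stateid.other, b->stateid.other,
			   NFS4_STATEID_OTHER_SIZE) != 0)
			return false;
		/* seqid 0 is a wildcard; otherwise the sequence ids must agree */
		return a->stateid.seqid == 0 || b->stateid.seqid == 0 ||
		       a->stateid.seqid == b->stateid.seqid;
	}
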
Reported-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 10 +++++----- fs/nfs/delegation.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 1bb297243624..ea8321923f28 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -87,8 +87,7 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, res = 0; break; case -ENOENT: - if (res != 0) - res = htonl(NFS4ERR_BAD_STATEID); + res = htonl(NFS4ERR_BAD_STATEID); break; default: res = htonl(NFS4ERR_RESOURCE); @@ -325,10 +324,11 @@ int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const n if (delegation == NULL) return 0; - if (stateid->stateid.seqid != 0) + if (stateid->stateid.seqid != 0 && + stateid->stateid.seqid != delegation->stateid.stateid.seqid) return 0; - if (memcmp(&delegation->stateid.stateid.other, - &stateid->stateid.other, + if (memcmp(delegation->stateid.stateid.other, + stateid->stateid.other, NFS4_STATEID_OTHER_SIZE)) return 0; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index ac889af8ccf5..c14512cea798 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -542,7 +542,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp) /** * nfs_async_inode_return_delegation - asynchronously return a delegation * @inode: inode to process - * @stateid: state ID information from CB_RECALL arguments + * @stateid: state ID information * * Returns zero on success, or a negative errno value. */ -- cgit From 36281caa839f4441c793c81d2e3cc5ea44ad5aa2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:56 -0500 Subject: NFSv4: Further clean-ups of delegation stateid validation Change the name to reflect what we're really doing: testing two stateids for whether or not they match according the the rules in RFC3530 and RFC5661. Move the code from callback_proc.c to nfs4proc.c Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 24 ------------------------ fs/nfs/delegation.c | 2 +- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4proc.c | 27 +++++++++++++++++++++++++-- 4 files changed, 27 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index ea8321923f28..1b5d809a105e 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -98,14 +98,6 @@ out: return res; } -int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid) -{ - if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data, - sizeof(delegation->stateid.data)) != 0) - return 0; - return 1; -} - #if defined(CONFIG_NFS_V4_1) /* @@ -319,22 +311,6 @@ out: return res; } -int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid) -{ - if (delegation == NULL) - return 0; - - if (stateid->stateid.seqid != 0 && - stateid->stateid.seqid != delegation->stateid.stateid.seqid) - return 0; - if (memcmp(delegation->stateid.stateid.other, - stateid->stateid.other, - NFS4_STATEID_OTHER_SIZE)) - return 0; - - return 1; -} - /* * Validate the sequenceID sent by the server. 
* Return success if the sequenceID is one more than what we last saw on diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index c14512cea798..c7249e26e2e9 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -556,7 +556,7 @@ int nfs_async_inode_return_delegation(struct inode *inode, rcu_read_lock(); delegation = rcu_dereference(NFS_I(inode)->delegation); - if (!clp->cl_mvops->validate_stateid(delegation, stateid)) { + if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) { rcu_read_unlock(); return -ENOENT; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 7ddad3fa4074..624d4becf017 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -43,7 +43,7 @@ struct nfs4_minor_version_ops { struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply); - int (*validate_stateid)(struct nfs_delegation *, + bool (*match_stateid)(const nfs4_stateid *, const nfs4_stateid *); int (*find_root_sec)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f31fcea1af7e..b0647b387403 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6271,8 +6271,31 @@ static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) } while (exception.retry); return err; } + +static bool nfs41_match_stateid(const nfs4_stateid *s1, + const nfs4_stateid *s2) +{ + if (memcmp(s1->stateid.other, s2->stateid.other, + sizeof(s1->stateid.other)) != 0) + return false; + + if (s1->stateid.seqid == s2->stateid.seqid) + return true; + if (s1->stateid.seqid == 0 || s2->stateid.seqid == 0) + return true; + + return false; +} + #endif /* CONFIG_NFS_V4_1 */ +static bool nfs4_match_stateid(const nfs4_stateid *s1, + const nfs4_stateid *s2) +{ + return memcmp(s1->data, s2->data, sizeof(s1->data)) == 0; +} + + struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, @@ -6331,7 +6354,7 @@ struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { .minor_version = 0, .call_sync = _nfs4_call_sync, - .validate_stateid = nfs4_validate_delegation_stateid, + .match_stateid = nfs4_match_stateid, .find_root_sec = nfs4_find_root_sec, .reboot_recovery_ops = &nfs40_reboot_recovery_ops, .nograce_recovery_ops = &nfs40_nograce_recovery_ops, @@ -6342,7 +6365,7 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .call_sync = _nfs4_call_sync_session, - .validate_stateid = nfs41_validate_delegation_stateid, + .match_stateid = nfs41_match_stateid, .find_root_sec = nfs41_find_root_sec, .reboot_recovery_ops = &nfs41_reboot_recovery_ops, .nograce_recovery_ops = &nfs41_nograce_recovery_ops, -- cgit From d0b496d2fc08cc51000fcdd9739235d1cab890cd Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:56 -0500 Subject: NFSv4: Rename encode_stateid() to encode_open_stateid() The current version of encode_stateid really only applies to open stateids. You can't use it for locks, delegations or layouts. 
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index bca8c77e5fe0..af11e8b5d367 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1528,7 +1528,7 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) hdr->replen += decode_putrootfh_maxsz; } -static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid) +static void encode_open_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid) { nfs4_stateid stateid; __be32 *p; @@ -1550,7 +1550,7 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_READ); - encode_stateid(xdr, args->context, args->lock_context, + encode_open_stateid(xdr, args->context, args->lock_context, hdr->minorversion); p = reserve_space(xdr, 12); @@ -1739,7 +1739,7 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_WRITE); - encode_stateid(xdr, args->context, args->lock_context, + encode_open_stateid(xdr, args->context, args->lock_context, hdr->minorversion); p = reserve_space(xdr, 16); -- cgit From cb17e556f6202c200d38a2e0c05a5bd29060389f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:56 -0500 Subject: NFSv4: Add a helper for encoding opaque data Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index af11e8b5d367..de4cb5cfc318 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -885,6 +885,14 @@ static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes) return p; } +static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, len); + xdr_encode_opaque_fixed(p, buf, len); +} + static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) { __be32 *p; @@ -922,11 +930,7 @@ static void encode_nops(struct compound_hdr *hdr) static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf) { - __be32 *p; - - p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE); - BUG_ON(p == NULL); - xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE); + encode_opaque_fixed(xdr, verf->data, NFS4_VERIFIER_SIZE); } static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server) -- cgit From ea9d23f51041036b5d5d062dae2fafe0f670449c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:56 -0500 Subject: NFSv4: Add a helper for encoding stateids Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 113 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 62 insertions(+), 51 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index de4cb5cfc318..c03ba77679ad 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -928,6 +928,11 @@ static void encode_nops(struct compound_hdr *hdr) *hdr->nops_p = htonl(hdr->nops); } +static void encode_nfs4_stateid(struct xdr_stream *xdr, const nfs4_stateid *stateid) +{ + encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE); +} + static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf) { 
encode_opaque_fixed(xdr, verf->data, NFS4_VERIFIER_SIZE); @@ -1070,10 +1075,10 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg { __be32 *p; - p = reserve_space(xdr, 8+NFS4_STATEID_SIZE); + p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_CLOSE); - *p++ = cpu_to_be32(arg->seqid->sequence->counter); - xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); + *p = cpu_to_be32(arg->seqid->sequence->counter); + encode_nfs4_stateid(xdr, arg->stateid); hdr->nops++; hdr->replen += decode_close_maxsz; } @@ -1260,15 +1265,16 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); *p = cpu_to_be32(args->new_lock_owner); if (args->new_lock_owner){ - p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); - *p++ = cpu_to_be32(args->open_seqid->sequence->counter); - p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE); - *p++ = cpu_to_be32(args->lock_seqid->sequence->counter); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(args->open_seqid->sequence->counter); + encode_nfs4_stateid(xdr, args->open_stateid); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(args->lock_seqid->sequence->counter); encode_lockowner(xdr, &args->lock_owner); } else { - p = reserve_space(xdr, NFS4_STATEID_SIZE+4); - p = xdr_encode_opaque_fixed(p, args->lock_stateid->data, NFS4_STATEID_SIZE); + encode_nfs4_stateid(xdr, args->lock_stateid); + p = reserve_space(xdr, 4); *p = cpu_to_be32(args->lock_seqid->sequence->counter); } hdr->nops++; @@ -1293,11 +1299,12 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar { __be32 *p; - p = reserve_space(xdr, 12+NFS4_STATEID_SIZE+16); + p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_LOCKU); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); - *p++ = cpu_to_be32(args->seqid->sequence->counter); - p = xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE); + *p = cpu_to_be32(args->seqid->sequence->counter); + encode_nfs4_stateid(xdr, args->stateid); + p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, args->fl->fl_start); xdr_encode_hyper(p, nfs4_lock_length(args->fl)); hdr->nops++; @@ -1457,9 +1464,9 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc { __be32 *p; - p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); - *p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR); - xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR); + encode_nfs4_stateid(xdr, stateid); encode_string(xdr, name->len, name->name); } @@ -1488,9 +1495,10 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co { __be32 *p; - p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); - *p++ = cpu_to_be32(OP_OPEN_CONFIRM); - p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_OPEN_CONFIRM); + encode_nfs4_stateid(xdr, arg->stateid); + p = reserve_space(xdr, 4); *p = cpu_to_be32(arg->seqid->sequence->counter); hdr->nops++; hdr->replen += decode_open_confirm_maxsz; @@ -1500,9 +1508,10 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close { __be32 *p; - p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); - *p++ = cpu_to_be32(OP_OPEN_DOWNGRADE); - p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_OPEN_DOWNGRADE); + encode_nfs4_stateid(xdr, arg->stateid); + p = 
reserve_space(xdr, 4); *p = cpu_to_be32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); hdr->nops++; @@ -1535,16 +1544,14 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) static void encode_open_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid) { nfs4_stateid stateid; - __be32 *p; - p = reserve_space(xdr, NFS4_STATEID_SIZE); if (ctx->state != NULL) { nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid); if (zero_seqid) stateid.stateid.seqid = 0; - xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE); + encode_nfs4_stateid(xdr, &stateid); } else - xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE); + encode_nfs4_stateid(xdr, &zero_stateid); } static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr) @@ -1668,9 +1675,9 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun { __be32 *p; - p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); - *p++ = cpu_to_be32(OP_SETATTR); - xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_SETATTR); + encode_nfs4_stateid(xdr, &zero_stateid); p = reserve_space(xdr, 2*4); *p++ = cpu_to_be32(1); *p = cpu_to_be32(FATTR4_WORD0_ACL); @@ -1697,9 +1704,9 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs { __be32 *p; - p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); - *p++ = cpu_to_be32(OP_SETATTR); - xdr_encode_opaque_fixed(p, arg->stateid.data, NFS4_STATEID_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_SETATTR); + encode_nfs4_stateid(xdr, &arg->stateid); hdr->nops++; hdr->replen += decode_setattr_maxsz; encode_attrs(xdr, arg->iap, server); @@ -1760,10 +1767,9 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state { __be32 *p; - p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); - - *p++ = cpu_to_be32(OP_DELEGRETURN); - xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_DELEGRETURN); + encode_nfs4_stateid(xdr, stateid); hdr->nops++; hdr->replen += decode_delegreturn_maxsz; } @@ -1999,7 +2005,7 @@ encode_layoutget(struct xdr_stream *xdr, { __be32 *p; - p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); + p = reserve_space(xdr, 40); *p++ = cpu_to_be32(OP_LAYOUTGET); *p++ = cpu_to_be32(0); /* Signal layout available */ *p++ = cpu_to_be32(args->type); @@ -2007,7 +2013,8 @@ encode_layoutget(struct xdr_stream *xdr, p = xdr_encode_hyper(p, args->range.offset); p = xdr_encode_hyper(p, args->range.length); p = xdr_encode_hyper(p, args->minlength); - p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE); + encode_nfs4_stateid(xdr, &args->stateid); + p = reserve_space(xdr, 4); *p = cpu_to_be32(args->maxcount); dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n", @@ -2032,13 +2039,14 @@ encode_layoutcommit(struct xdr_stream *xdr, dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten, NFS_SERVER(args->inode)->pnfs_curr_ld->id); - p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); + p = reserve_space(xdr, 24); *p++ = cpu_to_be32(OP_LAYOUTCOMMIT); /* Only whole file layouts */ p = xdr_encode_hyper(p, 0); /* offset */ p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */ - *p++ = cpu_to_be32(0); /* reclaim */ - p = xdr_encode_opaque_fixed(p, args->stateid.data, NFS4_STATEID_SIZE); + *p = cpu_to_be32(0); /* reclaim 
*/ + encode_nfs4_stateid(xdr, &args->stateid); + p = reserve_space(xdr, 20); *p++ = cpu_to_be32(1); /* newoffset = TRUE */ p = xdr_encode_hyper(p, args->lastbytewritten); *p++ = cpu_to_be32(0); /* Never send time_modify_changed */ @@ -2070,11 +2078,11 @@ encode_layoutreturn(struct xdr_stream *xdr, *p++ = cpu_to_be32(args->layout_type); *p++ = cpu_to_be32(IOMODE_ANY); *p = cpu_to_be32(RETURN_FILE); - p = reserve_space(xdr, 16 + NFS4_STATEID_SIZE); + p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, 0); p = xdr_encode_hyper(p, NFS4_MAX_UINT64); spin_lock(&args->inode->i_lock); - xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE); + encode_nfs4_stateid(xdr, &args->stateid); spin_unlock(&args->inode->i_lock); if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) { NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn( @@ -2107,10 +2115,10 @@ static void encode_test_stateid(struct xdr_stream *xdr, { __be32 *p; - p = reserve_space(xdr, 8 + NFS4_STATEID_SIZE); + p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_TEST_STATEID); - *p++ = cpu_to_be32(1); - xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE); + *p = cpu_to_be32(1); + encode_nfs4_stateid(xdr, args->stateid); hdr->nops++; hdr->replen += decode_test_stateid_maxsz; } @@ -2120,9 +2128,9 @@ static void encode_free_stateid(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; - p = reserve_space(xdr, 4 + NFS4_STATEID_SIZE); - *p++ = cpu_to_be32(OP_FREE_STATEID); - xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_FREE_STATEID); + encode_nfs4_stateid(xdr, args->stateid); hdr->nops++; hdr->replen += decode_free_stateid_maxsz; } @@ -5640,11 +5648,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, status = decode_op_hdr(xdr, OP_LAYOUTGET); if (status) return status; - p = xdr_inline_decode(xdr, 8 + NFS4_STATEID_SIZE); + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + goto out_overflow; + res->return_on_close = be32_to_cpup(p); + decode_stateid(xdr, &res->stateid); + p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; - res->return_on_close = be32_to_cpup(p++); - p = xdr_decode_opaque_fixed(p, res->stateid.data, NFS4_STATEID_SIZE); layout_count = be32_to_cpup(p); if (!layout_count) { dprintk("%s: server responded with empty layout array\n", -- cgit From 1e3987c3052a48fbfc8f5d30214c825eff41192d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:56 -0500 Subject: NFSv4: Rename nfs4_copy_stateid() It is really a function for selecting the correct stateid to use in a read or write situation. 
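A usage sketch of the selection order a read/write-style caller follows, condensed (as an assumption) from the _nfs4_do_setattr hunks elsewhere in this series: prefer a delegation stateid, fall back to the per-owner lock/open stateid via nfs4_select_rw_stateid(), and finally to the all-zeros stateid.

	nfs4_stateid stateid;

	/* 1. delegation stateid if we hold one for this inode */
	if (!nfs4_copy_delegation_stateid(&stateid, inode)) {
		if (state != NULL)
			/* 2. byte-range-lock aware open/lock stateid */
			nfs4_select_rw_stateid(&stateid, state,
					       current->files, current->tgid);
		else
			/* 3. no open state: use the all-zeros stateid */
			memcpy(&stateid, &zero_stateid, sizeof(stateid));
	}
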
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4proc.c | 2 +- fs/nfs/nfs4state.c | 2 +- fs/nfs/nfs4xdr.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 624d4becf017..308d2f999c3d 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -329,7 +329,7 @@ extern void nfs41_handle_server_scope(struct nfs_client *, struct server_scope **); extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); -extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t); +extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t); extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask); extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b0647b387403..f181c70ea933 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1922,7 +1922,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) { /* Use that stateid */ } else if (state != NULL) { - nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid); + nfs4_select_rw_stateid(&arg.stateid, state, current->files, current->tgid); } else memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid)); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index d60e7ad2690e..6ba82271c868 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -888,7 +888,7 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) * Byte-range lock aware utility to initialize the stateid of read/write * requests. 
*/ -void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid) +void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid) { struct nfs4_lock_state *lsp; int seq; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c03ba77679ad..38736dca1b18 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1546,7 +1546,7 @@ static void encode_open_stateid(struct xdr_stream *xdr, const struct nfs_open_co nfs4_stateid stateid; if (ctx->state != NULL) { - nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid); + nfs4_select_rw_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid); if (zero_seqid) stateid.stateid.seqid = 0; encode_nfs4_stateid(xdr, &stateid); -- cgit From f597c53790f662662281b82b7692a22d2a4d4afa Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:56 -0500 Subject: NFSv4: Add helpers for basic copying of stateids Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 14 ++++++-------- fs/nfs/nfs4_fs.h | 10 ++++++++++ fs/nfs/nfs4proc.c | 34 +++++++++++++++------------------- fs/nfs/nfs4state.c | 6 +++--- fs/nfs/pnfs.c | 10 ++++------ 5 files changed, 38 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index c7249e26e2e9..87f7544f3dce 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -105,7 +105,7 @@ again: continue; if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) continue; - if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0) + if (!nfs4_stateid_match(&state->stateid, stateid)) continue; get_nfs_open_context(ctx); spin_unlock(&inode->i_lock); @@ -139,8 +139,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, if (delegation != NULL) { spin_lock(&delegation->lock); if (delegation->inode != NULL) { - memcpy(delegation->stateid.data, res->delegation.data, - sizeof(delegation->stateid.data)); + nfs4_stateid_copy(&delegation->stateid, &res->delegation); delegation->type = res->delegation_type; delegation->maxsize = res->maxsize; oldcred = delegation->cred; @@ -236,8 +235,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct delegation = kmalloc(sizeof(*delegation), GFP_NOFS); if (delegation == NULL) return -ENOMEM; - memcpy(delegation->stateid.data, res->delegation.data, - sizeof(delegation->stateid.data)); + nfs4_stateid_copy(&delegation->stateid, &res->delegation); delegation->type = res->delegation_type; delegation->maxsize = res->maxsize; delegation->change_attr = inode->i_version; @@ -250,8 +248,8 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct old_delegation = rcu_dereference_protected(nfsi->delegation, lockdep_is_held(&clp->cl_lock)); if (old_delegation != NULL) { - if (memcmp(&delegation->stateid, &old_delegation->stateid, - sizeof(old_delegation->stateid)) == 0 && + if (nfs4_stateid_match(&delegation->stateid, + &old_delegation->stateid) && delegation->type == old_delegation->type) { goto out; } @@ -708,7 +706,7 @@ int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode) rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); if (delegation != NULL) { - memcpy(dst->data, delegation->stateid.data, sizeof(dst->data)); + nfs4_stateid_copy(dst, &delegation->stateid); ret = 1; } rcu_read_unlock(); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 308d2f999c3d..1c54ef3146d4 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ 
-349,6 +349,16 @@ struct nfs4_mount_data; extern struct svc_version nfs4_callback_version1; extern struct svc_version nfs4_callback_version4; +static inline void nfs4_stateid_copy(nfs4_stateid *dst, const nfs4_stateid *src) +{ + memcpy(dst->data, src->data, sizeof(dst->data)); +} + +static inline bool nfs4_stateid_match(const nfs4_stateid *dst, const nfs4_stateid *src) +{ + return memcmp(dst->data, src->data, sizeof(dst->data)) == 0; +} + #else #define nfs4_close_state(a, b) do { } while (0) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f181c70ea933..ce0ad81dd466 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -941,8 +941,8 @@ static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) - memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data)); - memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data)); + nfs4_stateid_copy(&state->stateid, stateid); + nfs4_stateid_copy(&state->open_stateid, stateid); switch (fmode) { case FMODE_READ: set_bit(NFS_O_RDONLY_STATE, &state->flags); @@ -970,7 +970,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s */ write_seqlock(&state->seqlock); if (deleg_stateid != NULL) { - memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data)); + nfs4_stateid_copy(&state->stateid, deleg_stateid); set_bit(NFS_DELEGATED_STATE, &state->flags); } if (open_stateid != NULL) @@ -1001,7 +1001,7 @@ static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stat if (delegation == NULL) delegation = &deleg_cur->stateid; - else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0) + else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) goto no_delegation_unlock; nfs_mark_delegation_referenced(deleg_cur); @@ -1062,7 +1062,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) break; } /* Save the delegation */ - memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data)); + nfs4_stateid_copy(&stateid, &delegation->stateid); rcu_read_unlock(); ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); if (ret != 0) @@ -1225,10 +1225,10 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * * Check if we need to update the current stateid. 
*/ if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && - memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) { + !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { write_seqlock(&state->seqlock); if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) - memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)); + nfs4_stateid_copy(&state->stateid, &state->open_stateid); write_sequnlock(&state->seqlock); } return 0; @@ -1297,8 +1297,7 @@ static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs if (IS_ERR(opendata)) return PTR_ERR(opendata); opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; - memcpy(opendata->o_arg.u.delegation.data, stateid->data, - sizeof(opendata->o_arg.u.delegation.data)); + nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); ret = nfs4_open_recover(opendata, state); nfs4_opendata_put(opendata); return ret; @@ -1363,8 +1362,7 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) data->rpc_status = task->tk_status; if (data->rpc_status == 0) { - memcpy(data->o_res.stateid.data, data->c_res.stateid.data, - sizeof(data->o_res.stateid.data)); + nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); nfs_confirm_seqid(&data->owner->so_seqid, 0); renew_lease(data->o_res.server, data->timestamp); data->rpc_done = 1; @@ -1924,7 +1922,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, } else if (state != NULL) { nfs4_select_rw_stateid(&arg.stateid, state, current->files, current->tgid); } else - memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid)); + nfs4_stateid_copy(&arg.stateid, &zero_stateid); status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); if (status == 0 && state != NULL) @@ -3989,7 +3987,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co data->args.stateid = &data->stateid; data->args.bitmask = server->attr_bitmask; nfs_copy_fh(&data->fh, NFS_FH(inode)); - memcpy(&data->stateid, stateid, sizeof(data->stateid)); + nfs4_stateid_copy(&data->stateid, stateid); data->res.fattr = &data->fattr; data->res.server = server; nfs_fattr_init(data->res.fattr); @@ -4172,9 +4170,8 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) return; switch (task->tk_status) { case 0: - memcpy(calldata->lsp->ls_stateid.data, - calldata->res.stateid.data, - sizeof(calldata->lsp->ls_stateid.data)); + nfs4_stateid_copy(&calldata->lsp->ls_stateid, + &calldata->res.stateid); renew_lease(calldata->server, calldata->timestamp); break; case -NFS4ERR_BAD_STATEID: @@ -4387,8 +4384,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) goto out; } if (data->rpc_status == 0) { - memcpy(data->lsp->ls_stateid.data, data->res.stateid.data, - sizeof(data->lsp->ls_stateid.data)); + nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); } @@ -6292,7 +6288,7 @@ static bool nfs41_match_stateid(const nfs4_stateid *s1, static bool nfs4_match_stateid(const nfs4_stateid *s1, const nfs4_stateid *s2) { - return memcmp(s1->data, s2->data, sizeof(s1->data)) == 0; + return nfs4_stateid_match(s1, s2); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6ba82271c868..55c8a81cd6fb 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -895,7 +895,7 @@ void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, 
fl_owne do { seq = read_seqbegin(&state->seqlock); - memcpy(dst, &state->stateid, sizeof(*dst)); + nfs4_stateid_copy(dst, &state->stateid); } while (read_seqretry(&state->seqlock, seq)); if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) return; @@ -903,7 +903,7 @@ void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owne spin_lock(&state->state_lock); lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE); if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) - memcpy(dst, &lsp->ls_stateid, sizeof(*dst)); + nfs4_stateid_copy(dst, &lsp->ls_stateid); spin_unlock(&state->state_lock); nfs4_put_lock_state(lsp); } @@ -1126,7 +1126,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode, continue; if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) continue; - if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0) + if (!nfs4_stateid_match(&state->stateid, stateid)) continue; nfs4_state_mark_reclaim_nograce(clp, state); found = true; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 402efc2f5b70..c190e9c2e3d2 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -499,7 +499,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid); newseq = be32_to_cpu(new->stateid.seqid); if ((int)(newseq - oldseq) > 0) { - memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid)); + nfs4_stateid_copy(&lo->plh_stateid, new); if (update_barrier) { u32 new_barrier = be32_to_cpu(new->stateid.seqid); @@ -549,11 +549,10 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, do { seq = read_seqbegin(&open_state->seqlock); - memcpy(dst->data, open_state->stateid.data, - sizeof(open_state->stateid.data)); + nfs4_stateid_copy(dst, &open_state->stateid); } while (read_seqretry(&open_state->seqlock, seq)); } else - memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data)); + nfs4_stateid_copy(dst, &lo->plh_stateid); spin_unlock(&lo->plh_inode->i_lock); dprintk("<-- %s\n", __func__); return status; @@ -1527,8 +1526,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) end_pos = nfsi->layout->plh_lwb; nfsi->layout->plh_lwb = 0; - memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data, - sizeof(nfsi->layout->plh_stateid.data)); + nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid); spin_unlock(&inode->i_lock); data->args.inode = inode; -- cgit From 2d2f24add1ff903ff8e0ce61c5c05635cc636985 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:57 -0500 Subject: NFSv4: Simplify the struct nfs4_stateid Replace the union with the common struct stateid4 as defined in both RFC3530 and RFC5661. This makes it easier to access the sequence id, which will again make implementing support for parallel OPEN calls easier. 
Signed-off-by: Trond Myklebust --- fs/nfs/callback_xdr.c | 4 ++-- fs/nfs/nfs4_fs.h | 4 ++-- fs/nfs/nfs4proc.c | 7 +++---- fs/nfs/nfs4state.c | 4 ++-- fs/nfs/nfs4xdr.c | 6 +++--- fs/nfs/pnfs.c | 10 +++++----- 6 files changed, 17 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 5466829c7e77..fd6cfdb917da 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -138,10 +138,10 @@ static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) { __be32 *p; - p = read_buf(xdr, 16); + p = read_buf(xdr, NFS4_STATEID_SIZE); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); - memcpy(stateid->data, p, 16); + memcpy(stateid, p, NFS4_STATEID_SIZE); return 0; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 1c54ef3146d4..16373df96f90 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -351,12 +351,12 @@ extern struct svc_version nfs4_callback_version4; static inline void nfs4_stateid_copy(nfs4_stateid *dst, const nfs4_stateid *src) { - memcpy(dst->data, src->data, sizeof(dst->data)); + memcpy(dst, src, sizeof(*dst)); } static inline bool nfs4_stateid_match(const nfs4_stateid *dst, const nfs4_stateid *src) { - return memcmp(dst->data, src->data, sizeof(dst->data)) == 0; + return memcmp(dst, src, sizeof(*dst)) == 0; } #else diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ce0ad81dd466..e0e35288361c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6271,13 +6271,12 @@ static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) static bool nfs41_match_stateid(const nfs4_stateid *s1, const nfs4_stateid *s2) { - if (memcmp(s1->stateid.other, s2->stateid.other, - sizeof(s1->stateid.other)) != 0) + if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) return false; - if (s1->stateid.seqid == s2->stateid.seqid) + if (s1->seqid == s2->seqid) return true; - if (s1->stateid.seqid == 0 || s2->stateid.seqid == 0) + if (s1->seqid == 0 || s2->seqid == 0) return true; return false; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 55c8a81cd6fb..1dad5c53c7fa 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1240,8 +1240,8 @@ restart: * Open state on this file cannot be recovered * All we can do is revert to using the zero stateid. 
*/ - memset(state->stateid.data, 0, - sizeof(state->stateid.data)); + memset(&state->stateid, 0, + sizeof(state->stateid)); /* Mark the file as being 'closed' */ state->state = 0; break; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 38736dca1b18..76ef98632839 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -930,7 +930,7 @@ static void encode_nops(struct compound_hdr *hdr) static void encode_nfs4_stateid(struct xdr_stream *xdr, const nfs4_stateid *stateid) { - encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE); + encode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE); } static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf) @@ -1548,7 +1548,7 @@ static void encode_open_stateid(struct xdr_stream *xdr, const struct nfs_open_co if (ctx->state != NULL) { nfs4_select_rw_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid); if (zero_seqid) - stateid.stateid.seqid = 0; + stateid.seqid = 0; encode_nfs4_stateid(xdr, &stateid); } else encode_nfs4_stateid(xdr, &zero_stateid); @@ -4237,7 +4237,7 @@ static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len) static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) { - return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE); + return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE); } static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c190e9c2e3d2..6f1c1e3d12bc 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -496,12 +496,12 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, { u32 oldseq, newseq; - oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid); - newseq = be32_to_cpu(new->stateid.seqid); + oldseq = be32_to_cpu(lo->plh_stateid.seqid); + newseq = be32_to_cpu(new->seqid); if ((int)(newseq - oldseq) > 0) { nfs4_stateid_copy(&lo->plh_stateid, new); if (update_barrier) { - u32 new_barrier = be32_to_cpu(new->stateid.seqid); + u32 new_barrier = be32_to_cpu(new->seqid); if ((int)(new_barrier - lo->plh_barrier)) lo->plh_barrier = new_barrier; @@ -525,7 +525,7 @@ pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid, int lget) { if ((stateid) && - (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0) + (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0) return true; return lo->plh_block_lgets || test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) || @@ -759,7 +759,7 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier) } if (!found) { struct pnfs_layout_hdr *lo = nfsi->layout; - u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid); + u32 current_seqid = be32_to_cpu(lo->plh_stateid.seqid); /* Since close does not return a layout stateid for use as * a barrier, we choose the worst-case barrier. 
-- cgit From 6fdfb0bc2a43f5deb612b7f79d9c7750708e0184 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:57 -0500 Subject: NFSv4: Minor clean ups for encode_string() Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 76ef98632839..d6e8306d02a7 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -897,8 +897,7 @@ static void encode_string(struct xdr_stream *xdr, unsigned int len, const char * { __be32 *p; - p = xdr_reserve_space(xdr, 4 + len); - BUG_ON(p == NULL); + p = reserve_space(xdr, 4 + len); xdr_encode_opaque(p, str, len); } @@ -915,8 +914,8 @@ static void encode_compound_hdr(struct xdr_stream *xdr, hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen; BUG_ON(hdr->taglen > NFS4_MAXTAGLEN); - p = reserve_space(xdr, 4 + hdr->taglen + 8); - p = xdr_encode_opaque(p, hdr->tag, hdr->taglen); + encode_string(xdr, hdr->taglen, hdr->tag); + p = reserve_space(xdr, 8); *p++ = cpu_to_be32(hdr->minorversion); hdr->nops_p = p; *p = cpu_to_be32(hdr->nops); @@ -1216,9 +1215,9 @@ static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct { __be32 *p; - p = reserve_space(xdr, 8 + name->len); - *p++ = cpu_to_be32(OP_LINK); - xdr_encode_opaque(p, name->name, name->len); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_LINK); + encode_string(xdr, name->len, name->name); hdr->nops++; hdr->replen += decode_link_maxsz; } @@ -1324,12 +1323,11 @@ static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lo static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { - int len = name->len; __be32 *p; - p = reserve_space(xdr, 8 + len); - *p++ = cpu_to_be32(OP_LOOKUP); - xdr_encode_opaque(p, name->name, len); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_LOOKUP); + encode_string(xdr, name->len, name->name); hdr->nops++; hdr->replen += decode_lookup_maxsz; } @@ -1521,12 +1519,11 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close static void encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr) { - int len = fh->size; __be32 *p; - p = reserve_space(xdr, 8 + len); - *p++ = cpu_to_be32(OP_PUTFH); - xdr_encode_opaque(p, fh->data, len); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_PUTFH); + encode_string(xdr, fh->size, fh->data); hdr->nops++; hdr->replen += decode_putfh_maxsz; } @@ -1628,9 +1625,9 @@ static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struc { __be32 *p; - p = reserve_space(xdr, 8 + name->len); - *p++ = cpu_to_be32(OP_REMOVE); - xdr_encode_opaque(p, name->name, name->len); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_REMOVE); + encode_string(xdr, name->len, name->name); hdr->nops++; hdr->replen += decode_remove_maxsz; } @@ -1776,12 +1773,11 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { - int len = name->len; __be32 *p; - p = reserve_space(xdr, 8 + len); - *p++ = cpu_to_be32(OP_SECINFO); - xdr_encode_opaque(p, name->name, len); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_SECINFO); + encode_string(xdr, name->len, name->name); hdr->nops++; hdr->replen += decode_secinfo_maxsz; } -- cgit From 4ade9821602ada8f56f3a3eb444dedbe42f1730e Mon Sep 17 00:00:00 2001 From: 
Trond Myklebust Date: Sun, 4 Mar 2012 18:13:57 -0500 Subject: NFSv4: Add a helper for encoding NFSv4 sequence ids Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 47 ++++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index d6e8306d02a7..3b38ca5bafe8 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -901,6 +901,20 @@ static void encode_string(struct xdr_stream *xdr, unsigned int len, const char * xdr_encode_opaque(p, str, len); } +static void encode_uint32(struct xdr_stream *xdr, u32 n) +{ + __be32 *p; + + p = reserve_space(xdr, 4); + *p = cpu_to_be32(n); +} + +static void encode_nfs4_seqid(struct xdr_stream *xdr, + const struct nfs_seqid *seqid) +{ + encode_uint32(xdr, seqid->sequence->counter); +} + static void encode_compound_hdr(struct xdr_stream *xdr, struct rpc_rqst *req, struct compound_hdr *hdr) @@ -1074,9 +1088,9 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg { __be32 *p; - p = reserve_space(xdr, 8); - *p++ = cpu_to_be32(OP_CLOSE); - *p = cpu_to_be32(arg->seqid->sequence->counter); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_CLOSE); + encode_nfs4_seqid(xdr, arg->seqid); encode_nfs4_stateid(xdr, arg->stateid); hdr->nops++; hdr->replen += decode_close_maxsz; @@ -1264,17 +1278,14 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); *p = cpu_to_be32(args->new_lock_owner); if (args->new_lock_owner){ - p = reserve_space(xdr, 4); - *p = cpu_to_be32(args->open_seqid->sequence->counter); + encode_nfs4_seqid(xdr, args->open_seqid); encode_nfs4_stateid(xdr, args->open_stateid); - p = reserve_space(xdr, 4); - *p = cpu_to_be32(args->lock_seqid->sequence->counter); + encode_nfs4_seqid(xdr, args->lock_seqid); encode_lockowner(xdr, &args->lock_owner); } else { encode_nfs4_stateid(xdr, args->lock_stateid); - p = reserve_space(xdr, 4); - *p = cpu_to_be32(args->lock_seqid->sequence->counter); + encode_nfs4_seqid(xdr, args->lock_seqid); } hdr->nops++; hdr->replen += decode_lock_maxsz; @@ -1298,10 +1309,10 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar { __be32 *p; - p = reserve_space(xdr, 12); + p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_LOCKU); - *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); - *p = cpu_to_be32(args->seqid->sequence->counter); + *p = cpu_to_be32(nfs4_lock_type(args->fl, 0)); + encode_nfs4_seqid(xdr, args->seqid); encode_nfs4_stateid(xdr, args->stateid); p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, args->fl->fl_start); @@ -1360,9 +1371,9 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, * owner 4 = 32 */ - p = reserve_space(xdr, 8); - *p++ = cpu_to_be32(OP_OPEN); - *p = cpu_to_be32(arg->seqid->sequence->counter); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_OPEN); + encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->fmode); p = reserve_space(xdr, 32); p = xdr_encode_hyper(p, arg->clientid); @@ -1496,8 +1507,7 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_OPEN_CONFIRM); encode_nfs4_stateid(xdr, arg->stateid); - p = reserve_space(xdr, 4); - *p = cpu_to_be32(arg->seqid->sequence->counter); + encode_nfs4_seqid(xdr, arg->seqid); hdr->nops++; hdr->replen += 
decode_open_confirm_maxsz; } @@ -1509,8 +1519,7 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_OPEN_DOWNGRADE); encode_nfs4_stateid(xdr, arg->stateid); - p = reserve_space(xdr, 4); - *p = cpu_to_be32(arg->seqid->sequence->counter); + encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->fmode); hdr->nops++; hdr->replen += decode_open_downgrade_maxsz; -- cgit From ab19b4813fdbdef8f9c8732d1f7a2a69ae78d00b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 18:13:57 -0500 Subject: NFSv4: Add a encode op helper Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 161 +++++++++++-------------------------------------------- 1 file changed, 32 insertions(+), 129 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 3b38ca5bafe8..e9d4ac06b5d9 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -935,6 +935,15 @@ static void encode_compound_hdr(struct xdr_stream *xdr, *p = cpu_to_be32(hdr->nops); } +static void encode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 op, + uint32_t replen, + struct compound_hdr *hdr) +{ + encode_uint32(xdr, op); + hdr->nops++; + hdr->replen += replen; +} + static void encode_nops(struct compound_hdr *hdr) { BUG_ON(hdr->nops > NFS4_MAX_OPS); @@ -1086,14 +1095,9 @@ static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hd static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_CLOSE); + encode_op_hdr(xdr, OP_CLOSE, decode_close_maxsz, hdr); encode_nfs4_seqid(xdr, arg->seqid); encode_nfs4_stateid(xdr, arg->stateid); - hdr->nops++; - hdr->replen += decode_close_maxsz; } static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) @@ -1172,8 +1176,7 @@ encode_getattr_three(struct xdr_stream *xdr, { __be32 *p; - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_GETATTR); + encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr); if (bm2) { p = reserve_space(xdr, 16); *p++ = cpu_to_be32(3); @@ -1190,8 +1193,6 @@ encode_getattr_three(struct xdr_stream *xdr, *p++ = cpu_to_be32(1); *p = cpu_to_be32(bm0); } - hdr->nops++; - hdr->replen += decode_getattr_maxsz; } static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) @@ -1217,23 +1218,13 @@ static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, stru static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_GETFH); - hdr->nops++; - hdr->replen += decode_getfh_maxsz; + encode_op_hdr(xdr, OP_GETFH, decode_getfh_maxsz, hdr); } static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_LINK); + encode_op_hdr(xdr, OP_LINK, decode_link_maxsz, hdr); encode_string(xdr, name->len, name->name); - hdr->nops++; - hdr->replen += decode_link_maxsz; } static inline int nfs4_lock_type(struct file_lock *fl, int block) @@ -1323,24 +1314,14 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_RELEASE_LOCKOWNER); + encode_op_hdr(xdr, 
OP_RELEASE_LOCKOWNER, decode_release_lockowner_maxsz, hdr); encode_lockowner(xdr, lowner); - hdr->nops++; - hdr->replen += decode_release_lockowner_maxsz; } static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_LOOKUP); + encode_op_hdr(xdr, OP_LOOKUP, decode_lookup_maxsz, hdr); encode_string(xdr, name->len, name->name); - hdr->nops++; - hdr->replen += decode_lookup_maxsz; } static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode) @@ -1371,8 +1352,6 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, * owner 4 = 32 */ - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_OPEN); encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->fmode); p = reserve_space(xdr, 32); @@ -1481,6 +1460,7 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr) { + encode_op_hdr(xdr, OP_OPEN, decode_open_maxsz, hdr); encode_openhdr(xdr, arg); encode_opentype(xdr, arg); switch (arg->claim) { @@ -1496,55 +1476,33 @@ static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, default: BUG(); } - hdr->nops++; - hdr->replen += decode_open_maxsz; } static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_OPEN_CONFIRM); + encode_op_hdr(xdr, OP_OPEN_CONFIRM, decode_open_confirm_maxsz, hdr); encode_nfs4_stateid(xdr, arg->stateid); encode_nfs4_seqid(xdr, arg->seqid); - hdr->nops++; - hdr->replen += decode_open_confirm_maxsz; } static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_OPEN_DOWNGRADE); + encode_op_hdr(xdr, OP_OPEN_DOWNGRADE, decode_open_downgrade_maxsz, hdr); encode_nfs4_stateid(xdr, arg->stateid); encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->fmode); - hdr->nops++; - hdr->replen += decode_open_downgrade_maxsz; } static void encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_PUTFH); + encode_op_hdr(xdr, OP_PUTFH, decode_putfh_maxsz, hdr); encode_string(xdr, fh->size, fh->data); - hdr->nops++; - hdr->replen += decode_putfh_maxsz; } static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_PUTROOTFH); - hdr->nops++; - hdr->replen += decode_putrootfh_maxsz; + encode_op_hdr(xdr, OP_PUTROOTFH, decode_putrootfh_maxsz, hdr); } static void encode_open_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid) @@ -1564,17 +1522,13 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, { __be32 *p; - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_READ); - + encode_op_hdr(xdr, OP_READ, decode_read_maxsz, hdr); encode_open_stateid(xdr, args->context, args->lock_context, hdr->minorversion); p = reserve_space(xdr, 12); p = xdr_encode_hyper(p, args->offset); *p = cpu_to_be32(args->count); - hdr->nops++; - hdr->replen += decode_read_maxsz; } static void encode_readdir(struct 
xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) @@ -1622,35 +1576,20 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_READLINK); - hdr->nops++; - hdr->replen += decode_readlink_maxsz; + encode_op_hdr(xdr, OP_READLINK, decode_readlink_maxsz, hdr); } static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_REMOVE); + encode_op_hdr(xdr, OP_REMOVE, decode_remove_maxsz, hdr); encode_string(xdr, name->len, name->name); - hdr->nops++; - hdr->replen += decode_remove_maxsz; } static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_RENAME); + encode_op_hdr(xdr, OP_RENAME, decode_rename_maxsz, hdr); encode_string(xdr, oldname->len, oldname->name); encode_string(xdr, newname->len, newname->name); - hdr->nops++; - hdr->replen += decode_rename_maxsz; } static void encode_renew(struct xdr_stream *xdr, clientid4 clid, @@ -1668,12 +1607,7 @@ static void encode_renew(struct xdr_stream *xdr, clientid4 clid, static void encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_RESTOREFH); - hdr->nops++; - hdr->replen += decode_restorefh_maxsz; + encode_op_hdr(xdr, OP_RESTOREFH, decode_restorefh_maxsz, hdr); } static void @@ -1681,8 +1615,7 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun { __be32 *p; - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_SETATTR); + encode_op_hdr(xdr, OP_SETATTR, decode_setacl_maxsz, hdr); encode_nfs4_stateid(xdr, &zero_stateid); p = reserve_space(xdr, 2*4); *p++ = cpu_to_be32(1); @@ -1691,30 +1624,18 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun p = reserve_space(xdr, 4); *p = cpu_to_be32(arg->acl_len); xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); - hdr->nops++; - hdr->replen += decode_setacl_maxsz; } static void encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_SAVEFH); - hdr->nops++; - hdr->replen += decode_savefh_maxsz; + encode_op_hdr(xdr, OP_SAVEFH, decode_savefh_maxsz, hdr); } static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_SETATTR); + encode_op_hdr(xdr, OP_SETATTR, decode_setattr_maxsz, hdr); encode_nfs4_stateid(xdr, &arg->stateid); - hdr->nops++; - hdr->replen += decode_setattr_maxsz; encode_attrs(xdr, arg->iap, server); } @@ -1753,9 +1674,7 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg { __be32 *p; - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_WRITE); - + encode_op_hdr(xdr, OP_WRITE, decode_write_maxsz, hdr); encode_open_stateid(xdr, args->context, args->lock_context, hdr->minorversion); @@ -1765,30 +1684,18 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg *p = cpu_to_be32(args->count); xdr_write_pages(xdr, args->pages, args->pgbase, 
args->count); - hdr->nops++; - hdr->replen += decode_write_maxsz; } static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_DELEGRETURN); + encode_op_hdr(xdr, OP_DELEGRETURN, decode_delegreturn_maxsz, hdr); encode_nfs4_stateid(xdr, stateid); - hdr->nops++; - hdr->replen += decode_delegreturn_maxsz; } static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_SECINFO); + encode_op_hdr(xdr, OP_SECINFO, decode_secinfo_maxsz, hdr); encode_string(xdr, name->len, name->name); - hdr->nops++; - hdr->replen += decode_secinfo_maxsz; } #if defined(CONFIG_NFS_V4_1) @@ -2132,12 +2039,8 @@ static void encode_free_stateid(struct xdr_stream *xdr, struct nfs41_free_stateid_args *args, struct compound_hdr *hdr) { - __be32 *p; - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_FREE_STATEID); + encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr); encode_nfs4_stateid(xdr, args->stateid); - hdr->nops++; - hdr->replen += decode_free_stateid_maxsz; } #endif /* CONFIG_NFS_V4_1 */ -- cgit From cd93710e8d290711ba2e08e1d1a380013aad667d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 2 Mar 2012 17:14:31 -0500 Subject: NFS: Fix nfs4_verifier memory alignment Clean up due to code review. The nfs4_verifier's data field is not guaranteed to be u32-aligned. Casting an array of chars to a u32 * is considered generally hazardous. Fix this by using a __be32 array to generate a verifier's contents, and then byte-copy the contents into the verifier field. The contents of a verifier, for all intents and purposes, are opaque bytes. Only local code that generates a verifier need know the actual content and format. Everyone else compares the full byte array for exact equality. Also, sizeof(nfs4_verifer) is the size of the in-core verifier data structure, but NFS4_VERIFIER_SIZE is the number of octets in an XDR'd verifier. The two are not interchangeable, even if they happen to have the same value. 
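
A small userspace sketch of the alignment-safe pattern this commit message describes, using a stand-in struct (demo_verifier) and helper name (demo_construct_boot_verifier) rather than the kernel's nfs4_verifier and nfs4_construct_boot_verifier: the verifier contents are generated in an aligned 32-bit scratch array and then byte-copied into the opaque char[] field, so no pointer to the possibly unaligned field is ever dereferenced as a u32.

/*
 * Sketch only: build the verifier contents in an aligned scratch array,
 * then byte-copy them into the opaque field.  htonl() stands in for the
 * kernel's byte-order helpers; the struct is not the kernel's.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <arpa/inet.h>

#define DEMO_VERIFIER_SIZE 8

struct demo_verifier {
	char data[DEMO_VERIFIER_SIZE];   /* opaque bytes, no alignment guarantee */
};

static void demo_construct_boot_verifier(struct demo_verifier *bootverf,
					 time_t boot_sec, long boot_nsec)
{
	uint32_t verf[2];                /* properly aligned scratch buffer */

	verf[0] = htonl((uint32_t)boot_sec);
	verf[1] = htonl((uint32_t)boot_nsec);
	/* Byte-copy instead of casting bootverf->data to a u32 pointer. */
	memcpy(bootverf->data, verf, sizeof(bootverf->data));
}

int main(void)
{
	struct demo_verifier v;

	demo_construct_boot_verifier(&v, time(NULL), 0);
	for (size_t i = 0; i < sizeof(v.data); i++)
		printf("%02x", (unsigned int)(unsigned char)v.data[i]);
	printf("\n");
	return 0;
}

The same reasoning applies on the consumer side: the opaque bytes are only ever compared for exact equality, so nothing outside the generator needs to reinterpret them as integers.
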
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 32 +++++++++++++++++++------------- fs/nfs/nfs4xdr.c | 40 ++++++++++++++++++++-------------------- 2 files changed, 39 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e0e35288361c..1ec05222ccbc 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -836,13 +836,15 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, p->o_arg.dir_bitmask = server->cache_consistency_bitmask; p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; if (attrs != NULL && attrs->ia_valid != 0) { - u32 *s; + __be32 verf[2]; p->o_arg.u.attrs = &p->attrs; memcpy(&p->attrs, attrs, sizeof(p->attrs)); - s = (u32 *) p->o_arg.u.verifier.data; - s[0] = jiffies; - s[1] = current->pid; + + verf[0] = jiffies; + verf[1] = current->pid; + memcpy(p->o_arg.u.verifier.data, verf, + sizeof(p->o_arg.u.verifier.data)); } p->c_arg.fh = &p->o_res.fh; p->c_arg.stateid = &p->o_res.stateid; @@ -3819,6 +3821,16 @@ wait_on_recovery: return -EAGAIN; } +static void nfs4_construct_boot_verifier(struct nfs_client *clp, + nfs4_verifier *bootverf) +{ + __be32 verf[2]; + + verf[0] = htonl((u32)clp->cl_boot_time.tv_sec); + verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec); + memcpy(bootverf->data, verf, sizeof(bootverf->data)); +} + int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred, struct nfs4_setclientid_res *res) @@ -3835,13 +3847,10 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, .rpc_resp = res, .rpc_cred = cred, }; - __be32 *p; int loop = 0; int status; - p = (__be32*)sc_verifier.data; - *p++ = htonl((u32)clp->cl_boot_time.tv_sec); - *p = htonl((u32)clp->cl_boot_time.tv_nsec); + nfs4_construct_boot_verifier(clp, &sc_verifier); for(;;) { rcu_read_lock(); @@ -4933,6 +4942,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) { nfs4_verifier verifier; struct nfs41_exchange_id_args args = { + .verifier = &verifier, .client = clp, .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, }; @@ -4946,15 +4956,11 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) .rpc_resp = &res, .rpc_cred = cred, }; - __be32 *p; dprintk("--> %s\n", __func__); BUG_ON(clp == NULL); - p = (u32 *)verifier.data; - *p++ = htonl((u32)clp->cl_boot_time.tv_sec); - *p = htonl((u32)clp->cl_boot_time.tv_nsec); - args.verifier = &verifier; + nfs4_construct_boot_verifier(clp, &verifier); args.id_len = scnprintf(args.id, sizeof(args.id), "%s/%s.%s/%u", diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e9d4ac06b5d9..62effaf579c4 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1538,7 +1538,7 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg FATTR4_WORD1_MOUNTED_ON_FILEID, }; uint32_t dircount = readdir->count >> 1; - __be32 *p; + __be32 *p, verf[2]; if (readdir->plus) { attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE| @@ -1553,10 +1553,11 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) attrs[0] |= FATTR4_WORD0_FILEID; - p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20); + p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_READDIR); p = xdr_encode_hyper(p, readdir->cookie); - p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE); + encode_nfs4_verifier(xdr, &readdir->verifier); + p = reserve_space(xdr, 20); *p++ = cpu_to_be32(dircount); *p++ = 
cpu_to_be32(readdir->count); *p++ = cpu_to_be32(2); @@ -1565,11 +1566,11 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]); hdr->nops++; hdr->replen += decode_readdir_maxsz; + memcpy(verf, readdir->verifier.data, sizeof(verf)); dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n", __func__, (unsigned long long)readdir->cookie, - ((u32 *)readdir->verifier.data)[0], - ((u32 *)readdir->verifier.data)[1], + verf[0], verf[1], attrs[0] & readdir->bitmask[0], attrs[1] & readdir->bitmask[1]); } @@ -1643,9 +1644,9 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie { __be32 *p; - p = reserve_space(xdr, 4 + NFS4_VERIFIER_SIZE); - *p++ = cpu_to_be32(OP_SETCLIENTID); - xdr_encode_opaque_fixed(p, setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_SETCLIENTID); + encode_nfs4_verifier(xdr, setclientid->sc_verifier); encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); p = reserve_space(xdr, 4); @@ -1662,10 +1663,10 @@ static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4 { __be32 *p; - p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE); + p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM); p = xdr_encode_hyper(p, arg->clientid); - xdr_encode_opaque_fixed(p, arg->confirm.data, NFS4_VERIFIER_SIZE); + encode_nfs4_verifier(xdr, &arg->confirm); hdr->nops++; hdr->replen += decode_setclientid_confirm_maxsz; } @@ -1708,9 +1709,9 @@ static void encode_exchange_id(struct xdr_stream *xdr, char impl_name[NFS4_OPAQUE_LIMIT]; int len = 0; - p = reserve_space(xdr, 4 + sizeof(args->verifier->data)); - *p++ = cpu_to_be32(OP_EXCHANGE_ID); - xdr_encode_opaque_fixed(p, args->verifier->data, sizeof(args->verifier->data)); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(OP_EXCHANGE_ID); + encode_nfs4_verifier(xdr, args->verifier); encode_string(xdr, args->id_len, args->id); @@ -4162,7 +4163,7 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res) static int decode_verifier(struct xdr_stream *xdr, void *verifier) { - return decode_opaque_fixed(xdr, verifier, 8); + return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE); } static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res) @@ -4854,17 +4855,16 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n size_t hdrlen; u32 recvd, pglen = rcvbuf->page_len; int status; + __be32 verf[2]; status = decode_op_hdr(xdr, OP_READDIR); if (!status) status = decode_verifier(xdr, readdir->verifier.data); if (unlikely(status)) return status; + memcpy(verf, readdir->verifier.data, sizeof(verf)); dprintk("%s: verifier = %08x:%08x\n", - __func__, - ((u32 *)readdir->verifier.data)[0], - ((u32 *)readdir->verifier.data)[1]); - + __func__, verf[0], verf[1]); hdrlen = (char *) xdr->p - (char *) iov->iov_base; recvd = rcvbuf->len - hdrlen; @@ -5111,7 +5111,7 @@ static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res) goto out_overflow; res->count = be32_to_cpup(p++); res->verf->committed = be32_to_cpup(p++); - memcpy(res->verf->verifier, p, 8); + memcpy(res->verf->verifier, p, NFS4_VERIFIER_SIZE); return 0; out_overflow: print_overflow_msg(__func__, xdr); @@ -5455,7 +5455,7 @@ static int decode_getdevicelist(struct xdr_stream *xdr, p += 2; /* Read verifier */ - p = xdr_decode_opaque_fixed(p, verftemp.verifier, 8); + p = xdr_decode_opaque_fixed(p, verftemp.verifier, 
NFS4_VERIFIER_SIZE); res->num_devs = be32_to_cpup(p); -- cgit From 700195142185c05757cfd27f8070ae0e9e07710b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 4 Mar 2012 20:49:32 -0500 Subject: NFSv4: Cleanup - convert more functions to use encode_op_hdr Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 62effaf579c4..f958849cb304 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1644,8 +1644,7 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie { __be32 *p; - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_SETCLIENTID); + encode_op_hdr(xdr, OP_SETCLIENTID, decode_setclientid_maxsz, hdr); encode_nfs4_verifier(xdr, setclientid->sc_verifier); encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); @@ -1655,8 +1654,6 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr); p = reserve_space(xdr, 4); *p = cpu_to_be32(setclientid->sc_cb_ident); - hdr->nops++; - hdr->replen += decode_setclientid_maxsz; } static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr) @@ -1709,8 +1706,7 @@ static void encode_exchange_id(struct xdr_stream *xdr, char impl_name[NFS4_OPAQUE_LIMIT]; int len = 0; - p = reserve_space(xdr, 4); - *p = cpu_to_be32(OP_EXCHANGE_ID); + encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr); encode_nfs4_verifier(xdr, args->verifier); encode_string(xdr, args->id_len, args->id); @@ -1740,9 +1736,6 @@ static void encode_exchange_id(struct xdr_stream *xdr, *p = cpu_to_be32(0); } else *p = cpu_to_be32(0); /* implementation id array length=0 */ - - hdr->nops++; - hdr->replen += decode_exchange_id_maxsz; } static void encode_create_session(struct xdr_stream *xdr, -- cgit From 475d4ba02c3748b69cc71fa5c11c4b281cac5928 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 5 Mar 2012 11:27:16 -0500 Subject: NFSv4: More xdr cleanups Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 164 +++++++++++++++++-------------------------------------- 1 file changed, 50 insertions(+), 114 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f958849cb304..4ea9f50a32f2 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1084,13 +1084,8 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 8); - *p++ = cpu_to_be32(OP_ACCESS); - *p = cpu_to_be32(access); - hdr->nops++; - hdr->replen += decode_access_maxsz; + encode_op_hdr(xdr, OP_ACCESS, decode_access_maxsz, hdr); + encode_uint32(xdr, access); } static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) @@ -1104,21 +1099,18 @@ static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *ar { __be32 *p; - p = reserve_space(xdr, 16); - *p++ = cpu_to_be32(OP_COMMIT); + encode_op_hdr(xdr, OP_COMMIT, decode_commit_maxsz, hdr); + p = reserve_space(xdr, 12); p = xdr_encode_hyper(p, args->offset); *p = cpu_to_be32(args->count); - hdr->nops++; - hdr->replen += decode_commit_maxsz; } static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr) { __be32 *p; - p = reserve_space(xdr, 
8); - *p++ = cpu_to_be32(OP_CREATE); - *p = cpu_to_be32(create->ftype); + encode_op_hdr(xdr, OP_CREATE, decode_create_maxsz, hdr); + encode_uint32(xdr, create->ftype); switch (create->ftype) { case NF4LNK: @@ -1138,9 +1130,6 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg * } encode_string(xdr, create->name->len, create->name->name); - hdr->nops++; - hdr->replen += decode_create_maxsz; - encode_attrs(xdr, create->attrs, create->server); } @@ -1148,25 +1137,21 @@ static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct c { __be32 *p; - p = reserve_space(xdr, 12); - *p++ = cpu_to_be32(OP_GETATTR); + encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr); + p = reserve_space(xdr, 8); *p++ = cpu_to_be32(1); *p = cpu_to_be32(bitmap); - hdr->nops++; - hdr->replen += decode_getattr_maxsz; } static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr) { __be32 *p; - p = reserve_space(xdr, 16); - *p++ = cpu_to_be32(OP_GETATTR); + encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr); + p = reserve_space(xdr, 12); *p++ = cpu_to_be32(2); *p++ = cpu_to_be32(bm0); *p = cpu_to_be32(bm1); - hdr->nops++; - hdr->replen += decode_getattr_maxsz; } static void @@ -1261,8 +1246,8 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args { __be32 *p; - p = reserve_space(xdr, 32); - *p++ = cpu_to_be32(OP_LOCK); + encode_op_hdr(xdr, OP_LOCK, decode_lock_maxsz, hdr); + p = reserve_space(xdr, 28); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block)); *p++ = cpu_to_be32(args->reclaim); p = xdr_encode_hyper(p, args->fl->fl_start); @@ -1278,38 +1263,31 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args encode_nfs4_stateid(xdr, args->lock_stateid); encode_nfs4_seqid(xdr, args->lock_seqid); } - hdr->nops++; - hdr->replen += decode_lock_maxsz; } static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr) { __be32 *p; - p = reserve_space(xdr, 24); - *p++ = cpu_to_be32(OP_LOCKT); + encode_op_hdr(xdr, OP_LOCKT, decode_lockt_maxsz, hdr); + p = reserve_space(xdr, 20); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); p = xdr_encode_hyper(p, args->fl->fl_start); p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); encode_lockowner(xdr, &args->lock_owner); - hdr->nops++; - hdr->replen += decode_lockt_maxsz; } static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr) { __be32 *p; - p = reserve_space(xdr, 8); - *p++ = cpu_to_be32(OP_LOCKU); - *p = cpu_to_be32(nfs4_lock_type(args->fl, 0)); + encode_op_hdr(xdr, OP_LOCKU, decode_locku_maxsz, hdr); + encode_uint32(xdr, nfs4_lock_type(args->fl, 0)); encode_nfs4_seqid(xdr, args->seqid); encode_nfs4_stateid(xdr, args->stateid); p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, args->fl->fl_start); xdr_encode_hyper(p, nfs4_lock_length(args->fl)); - hdr->nops++; - hdr->replen += decode_locku_maxsz; } static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr) @@ -1553,8 +1531,8 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) attrs[0] |= FATTR4_WORD0_FILEID; - p = reserve_space(xdr, 12); - *p++ = cpu_to_be32(OP_READDIR); + encode_op_hdr(xdr, OP_READDIR, decode_readdir_maxsz, hdr); + p = reserve_space(xdr, 8); p = xdr_encode_hyper(p, readdir->cookie); 
encode_nfs4_verifier(xdr, &readdir->verifier); p = reserve_space(xdr, 20); @@ -1564,8 +1542,6 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]); *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]); - hdr->nops++; - hdr->replen += decode_readdir_maxsz; memcpy(verf, readdir->verifier.data, sizeof(verf)); dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n", __func__, @@ -1598,11 +1574,9 @@ static void encode_renew(struct xdr_stream *xdr, clientid4 clid, { __be32 *p; - p = reserve_space(xdr, 12); - *p++ = cpu_to_be32(OP_RENEW); + encode_op_hdr(xdr, OP_RENEW, decode_renew_maxsz, hdr); + p = reserve_space(xdr, 8); xdr_encode_hyper(p, clid); - hdr->nops++; - hdr->replen += decode_renew_maxsz; } static void @@ -1660,12 +1634,11 @@ static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4 { __be32 *p; - p = reserve_space(xdr, 12); - *p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM); + encode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM, + decode_setclientid_confirm_maxsz, hdr); + p = reserve_space(xdr, 8); p = xdr_encode_hyper(p, arg->clientid); encode_nfs4_verifier(xdr, &arg->confirm); - hdr->nops++; - hdr->replen += decode_setclientid_confirm_maxsz; } static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) @@ -1758,8 +1731,8 @@ static void encode_create_session(struct xdr_stream *xdr, len = scnprintf(machine_name, sizeof(machine_name), "%s", clp->cl_ipaddr); - p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); - *p++ = cpu_to_be32(OP_CREATE_SESSION); + encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); + p = reserve_space(xdr, 16 + 2*28 + 20 + len + 12); p = xdr_encode_hyper(p, clp->cl_clientid); *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ *p++ = cpu_to_be32(args->flags); /*flags */ @@ -1792,33 +1765,22 @@ static void encode_create_session(struct xdr_stream *xdr, *p++ = cpu_to_be32(0); /* UID */ *p++ = cpu_to_be32(0); /* GID */ *p = cpu_to_be32(0); /* No more gids */ - hdr->nops++; - hdr->replen += decode_create_session_maxsz; } static void encode_destroy_session(struct xdr_stream *xdr, struct nfs4_session *session, struct compound_hdr *hdr) { - __be32 *p; - p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN); - *p++ = cpu_to_be32(OP_DESTROY_SESSION); - xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); - hdr->nops++; - hdr->replen += decode_destroy_session_maxsz; + encode_op_hdr(xdr, OP_DESTROY_SESSION, decode_destroy_session_maxsz, hdr); + encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); } static void encode_reclaim_complete(struct xdr_stream *xdr, struct nfs41_reclaim_complete_args *args, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 8); - *p++ = cpu_to_be32(OP_RECLAIM_COMPLETE); - *p++ = cpu_to_be32(args->one_fs); - hdr->nops++; - hdr->replen += decode_reclaim_complete_maxsz; + encode_op_hdr(xdr, OP_RECLAIM_COMPLETE, decode_reclaim_complete_maxsz, hdr); + encode_uint32(xdr, args->one_fs); } #endif /* CONFIG_NFS_V4_1 */ @@ -1840,8 +1802,7 @@ static void encode_sequence(struct xdr_stream *xdr, WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE); slot = tp->slots + args->sa_slotid; - p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN + 16); - *p++ = cpu_to_be32(OP_SEQUENCE); + encode_op_hdr(xdr, OP_SEQUENCE, decode_sequence_maxsz, hdr); /* * Sessionid + seqid + slotid + max slotid + cache_this @@ -1855,13 +1816,12 @@ static void 
encode_sequence(struct xdr_stream *xdr, ((u32 *)session->sess_id.data)[3], slot->seq_nr, args->sa_slotid, tp->highest_used_slotid, args->sa_cache_this); + p = reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 16); p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); *p++ = cpu_to_be32(slot->seq_nr); *p++ = cpu_to_be32(args->sa_slotid); *p++ = cpu_to_be32(tp->highest_used_slotid); *p = cpu_to_be32(args->sa_cache_this); - hdr->nops++; - hdr->replen += decode_sequence_maxsz; #endif /* CONFIG_NFS_V4_1 */ } @@ -1876,14 +1836,12 @@ encode_getdevicelist(struct xdr_stream *xdr, .data = "dummmmmy", }; - p = reserve_space(xdr, 20); - *p++ = cpu_to_be32(OP_GETDEVICELIST); + encode_op_hdr(xdr, OP_GETDEVICELIST, decode_getdevicelist_maxsz, hdr); + p = reserve_space(xdr, 16); *p++ = cpu_to_be32(args->layoutclass); *p++ = cpu_to_be32(NFS4_PNFS_GETDEVLIST_MAXNUM); xdr_encode_hyper(p, 0ULL); /* cookie */ encode_nfs4_verifier(xdr, &dummy); - hdr->nops++; - hdr->replen += decode_getdevicelist_maxsz; } static void @@ -1893,15 +1851,13 @@ encode_getdeviceinfo(struct xdr_stream *xdr, { __be32 *p; - p = reserve_space(xdr, 16 + NFS4_DEVICEID4_SIZE); - *p++ = cpu_to_be32(OP_GETDEVICEINFO); + encode_op_hdr(xdr, OP_GETDEVICEINFO, decode_getdeviceinfo_maxsz, hdr); + p = reserve_space(xdr, 12 + NFS4_DEVICEID4_SIZE); p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data, NFS4_DEVICEID4_SIZE); *p++ = cpu_to_be32(args->pdev->layout_type); *p++ = cpu_to_be32(args->pdev->pglen); /* gdia_maxcount */ *p++ = cpu_to_be32(0); /* bitmap length 0 */ - hdr->nops++; - hdr->replen += decode_getdeviceinfo_maxsz; } static void @@ -1911,8 +1867,8 @@ encode_layoutget(struct xdr_stream *xdr, { __be32 *p; - p = reserve_space(xdr, 40); - *p++ = cpu_to_be32(OP_LAYOUTGET); + encode_op_hdr(xdr, OP_LAYOUTGET, decode_layoutget_maxsz, hdr); + p = reserve_space(xdr, 36); *p++ = cpu_to_be32(0); /* Signal layout available */ *p++ = cpu_to_be32(args->type); *p++ = cpu_to_be32(args->range.iomode); @@ -1920,8 +1876,7 @@ encode_layoutget(struct xdr_stream *xdr, p = xdr_encode_hyper(p, args->range.length); p = xdr_encode_hyper(p, args->minlength); encode_nfs4_stateid(xdr, &args->stateid); - p = reserve_space(xdr, 4); - *p = cpu_to_be32(args->maxcount); + encode_uint32(xdr, args->maxcount); dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n", __func__, @@ -1930,8 +1885,6 @@ encode_layoutget(struct xdr_stream *xdr, (unsigned long)args->range.offset, (unsigned long)args->range.length, args->maxcount); - hdr->nops++; - hdr->replen += decode_layoutget_maxsz; } static int @@ -1945,8 +1898,8 @@ encode_layoutcommit(struct xdr_stream *xdr, dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten, NFS_SERVER(args->inode)->pnfs_curr_ld->id); - p = reserve_space(xdr, 24); - *p++ = cpu_to_be32(OP_LAYOUTCOMMIT); + encode_op_hdr(xdr, OP_LAYOUTCOMMIT, decode_layoutcommit_maxsz, hdr); + p = reserve_space(xdr, 20); /* Only whole file layouts */ p = xdr_encode_hyper(p, 0); /* offset */ p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */ @@ -1961,13 +1914,9 @@ encode_layoutcommit(struct xdr_stream *xdr, if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit) NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit( NFS_I(inode)->layout, xdr, args); - else { - p = reserve_space(xdr, 4); - *p = cpu_to_be32(0); /* no layout-type payload */ - } + else + encode_uint32(xdr, 0); /* no layout-type payload */ - hdr->nops++; - hdr->replen += decode_layoutcommit_maxsz; return 0; } @@ -1978,8 +1927,8 @@ encode_layoutreturn(struct 
xdr_stream *xdr, { __be32 *p; - p = reserve_space(xdr, 20); - *p++ = cpu_to_be32(OP_LAYOUTRETURN); + encode_op_hdr(xdr, OP_LAYOUTRETURN, decode_layoutreturn_maxsz, hdr); + p = reserve_space(xdr, 16); *p++ = cpu_to_be32(0); /* reclaim. always 0 for now */ *p++ = cpu_to_be32(args->layout_type); *p++ = cpu_to_be32(IOMODE_ANY); @@ -1993,12 +1942,8 @@ encode_layoutreturn(struct xdr_stream *xdr, if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) { NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn( NFS_I(args->inode)->layout, xdr, args); - } else { - p = reserve_space(xdr, 4); - *p = cpu_to_be32(0); - } - hdr->nops++; - hdr->replen += decode_layoutreturn_maxsz; + } else + encode_uint32(xdr, 0); } static int @@ -2006,12 +1951,8 @@ encode_secinfo_no_name(struct xdr_stream *xdr, const struct nfs41_secinfo_no_name_args *args, struct compound_hdr *hdr) { - __be32 *p; - p = reserve_space(xdr, 8); - *p++ = cpu_to_be32(OP_SECINFO_NO_NAME); - *p++ = cpu_to_be32(args->style); - hdr->nops++; - hdr->replen += decode_secinfo_no_name_maxsz; + encode_op_hdr(xdr, OP_SECINFO_NO_NAME, decode_secinfo_no_name_maxsz, hdr); + encode_uint32(xdr, args->style); return 0; } @@ -2019,14 +1960,9 @@ static void encode_test_stateid(struct xdr_stream *xdr, struct nfs41_test_stateid_args *args, struct compound_hdr *hdr) { - __be32 *p; - - p = reserve_space(xdr, 8); - *p++ = cpu_to_be32(OP_TEST_STATEID); - *p = cpu_to_be32(1); + encode_op_hdr(xdr, OP_TEST_STATEID, decode_test_stateid_maxsz, hdr); + encode_uint32(xdr, 1); encode_nfs4_stateid(xdr, args->stateid); - hdr->nops++; - hdr->replen += decode_test_stateid_maxsz; } static void encode_free_stateid(struct xdr_stream *xdr, -- cgit From ff2eb6818d0d5b2691c112f51c539a817fcc59fc Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 5 Mar 2012 11:40:12 -0500 Subject: NFSv4: Add a helper encode_uint64 Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4ea9f50a32f2..e4bb8e6409a7 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -909,6 +909,14 @@ static void encode_uint32(struct xdr_stream *xdr, u32 n) *p = cpu_to_be32(n); } +static void encode_uint64(struct xdr_stream *xdr, u64 n) +{ + __be32 *p; + + p = reserve_space(xdr, 8); + xdr_encode_hyper(p, n); +} + static void encode_nfs4_seqid(struct xdr_stream *xdr, const struct nfs_seqid *seqid) { @@ -1532,8 +1540,7 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg attrs[0] |= FATTR4_WORD0_FILEID; encode_op_hdr(xdr, OP_READDIR, decode_readdir_maxsz, hdr); - p = reserve_space(xdr, 8); - p = xdr_encode_hyper(p, readdir->cookie); + encode_uint64(xdr, readdir->cookie); encode_nfs4_verifier(xdr, &readdir->verifier); p = reserve_space(xdr, 20); *p++ = cpu_to_be32(dircount); @@ -1572,11 +1579,8 @@ static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, co static void encode_renew(struct xdr_stream *xdr, clientid4 clid, struct compound_hdr *hdr) { - __be32 *p; - encode_op_hdr(xdr, OP_RENEW, decode_renew_maxsz, hdr); - p = reserve_space(xdr, 8); - xdr_encode_hyper(p, clid); + encode_uint64(xdr, clid); } static void @@ -1632,12 +1636,9 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr) { - __be32 *p; - encode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM, 
decode_setclientid_confirm_maxsz, hdr); - p = reserve_space(xdr, 8); - p = xdr_encode_hyper(p, arg->clientid); + encode_uint64(xdr, arg->clientid); encode_nfs4_verifier(xdr, &arg->confirm); } -- cgit From b0f8ef202ec7f07ba9bd93150d54ef4327851422 Mon Sep 17 00:00:00 2001 From: Santosh Nayak Date: Fri, 2 Mar 2012 11:47:26 +0530 Subject: cifs: possible memory leak in xattr. Memory is allocated irrespective of whether CIFS_ACL is configured or not. But free is happenning only if CIFS_ACL is set. This is a possible memory leak scenario. Fix is: Allocate and free memory only if CIFS_ACL is configured. Signed-off-by: Santosh Nayak Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/xattr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 45f07c46f3ed..10d92cf57ab6 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -105,7 +105,6 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, struct cifs_tcon *pTcon; struct super_block *sb; char *full_path; - struct cifs_ntsd *pacl; if (direntry == NULL) return -EIO; @@ -164,23 +163,24 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL, strlen(CIFS_XATTR_CIFS_ACL)) == 0) { +#ifdef CONFIG_CIFS_ACL + struct cifs_ntsd *pacl; pacl = kmalloc(value_size, GFP_KERNEL); if (!pacl) { cFYI(1, "%s: Can't allocate memory for ACL", __func__); rc = -ENOMEM; } else { -#ifdef CONFIG_CIFS_ACL memcpy(pacl, ea_value, value_size); rc = set_cifs_acl(pacl, value_size, direntry->d_inode, full_path, CIFS_ACL_DACL); if (rc == 0) /* force revalidate of the inode */ CIFS_I(direntry->d_inode)->time = 0; kfree(pacl); + } #else cFYI(1, "Set CIFS ACL not supported yet"); #endif /* CONFIG_CIFS_ACL */ - } } else { int temp; temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS, -- cgit From d5751469f210d2149cc2159ffff66cbeef6da3f2 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 5 Mar 2012 09:39:20 +0300 Subject: CIFS: Do not kmalloc under the flocks spinlock Reorganize the code to make the memory already allocated before spinlock'ed loop. 
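
A minimal sketch of the two-pass "count under the lock, allocate with the lock dropped, then fill" pattern this patch applies, with a pthread mutex standing in for lock_flocks() and a toy array in place of the inode's lock list; every name here is illustrative. The real code also relies on cinode->lock_mutex to guarantee the count cannot grow between the two passes and only counts FL_POSIX locks; the sketch glosses over both details.

/*
 * Sketch only: no memory allocation while the "flocks" lock is held.
 * Compile with -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NITEMS 4

static pthread_mutex_t flocks_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[NITEMS] = { 10, 20, 30, 40 };   /* stand-in for the lock list */

struct to_push {
	int value;
	struct to_push *next;
};

int main(void)
{
	struct to_push *head = NULL, *node;
	unsigned int count = 0, i;
	int rc = 0;

	/* Pass 1: only count entries while holding the lock; allocate nothing. */
	pthread_mutex_lock(&flocks_lock);
	for (i = 0; i < NITEMS; i++)
		count++;
	pthread_mutex_unlock(&flocks_lock);

	/* Allocate everything that will be needed with the lock dropped. */
	for (i = 0; i < count; i++) {
		node = malloc(sizeof(*node));
		if (!node) {
			rc = 1;
			goto out_free;
		}
		node->next = head;
		head = node;
	}

	/* Pass 2: re-take the lock and only fill in the preallocated nodes. */
	pthread_mutex_lock(&flocks_lock);
	for (i = 0, node = head; i < NITEMS && node; i++, node = node->next)
		node->value = pending[i];
	pthread_mutex_unlock(&flocks_lock);

	for (node = head; node != NULL; node = node->next)
		printf("pushing lock with value %d\n", node->value);

out_free:
	while (head) {
		node = head->next;
		free(head);
		head = node;
	}
	return rc;
}

If an allocation fails, everything allocated so far is freed and the caller sees the error before the second locked pass ever runs, mirroring the err_out path in the patch.
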
Cc: stable@vger.kernel.org Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/file.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 56 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4dd9283885e7..5e64748a2917 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) for (lockp = &inode->i_flock; *lockp != NULL; \ lockp = &(*lockp)->fl_next) +struct lock_to_push { + struct list_head llist; + __u64 offset; + __u64 length; + __u32 pid; + __u16 netfid; + __u8 type; +}; + static int cifs_push_posix_locks(struct cifsFileInfo *cfile) { struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct file_lock *flock, **before; - struct cifsLockInfo *lck, *tmp; + unsigned int count = 0, i = 0; int rc = 0, xid, type; + struct list_head locks_to_send, *el; + struct lock_to_push *lck, *tmp; __u64 length; - struct list_head locks_to_send; xid = GetXid(); @@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) return rc; } + lock_flocks(); + cifs_for_each_lock(cfile->dentry->d_inode, before) { + if ((*before)->fl_flags & FL_POSIX) + count++; + } + unlock_flocks(); + INIT_LIST_HEAD(&locks_to_send); + /* + * Allocating count locks is enough because no locks can be added to + * the list while we are holding cinode->lock_mutex that protects + * locking operations of this inode. + */ + for (; i < count; i++) { + lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); + if (!lck) { + rc = -ENOMEM; + goto err_out; + } + list_add_tail(&lck->llist, &locks_to_send); + } + + i = 0; + el = locks_to_send.next; lock_flocks(); cifs_for_each_lock(cfile->dentry->d_inode, before) { + if (el == &locks_to_send) { + /* something is really wrong */ + cERROR(1, "Can't push all brlocks!"); + break; + } flock = *before; + if ((flock->fl_flags & FL_POSIX) == 0) + continue; length = 1 + flock->fl_end - flock->fl_start; if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) type = CIFS_RDLCK; else type = CIFS_WRLCK; - - lck = cifs_lock_init(flock->fl_start, length, type, - cfile->netfid); - if (!lck) { - rc = -ENOMEM; - goto send_locks; - } + lck = list_entry(el, struct lock_to_push, llist); lck->pid = flock->fl_pid; - - list_add_tail(&lck->llist, &locks_to_send); + lck->netfid = cfile->netfid; + lck->length = length; + lck->type = type; + lck->offset = flock->fl_start; + i++; + el = el->next; } - -send_locks: unlock_flocks(); list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { @@ -979,11 +1015,18 @@ send_locks: kfree(lck); } +out: cinode->can_cache_brlcks = false; mutex_unlock(&cinode->lock_mutex); FreeXid(xid); return rc; +err_out: + list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { + list_del(&lck->llist); + kfree(lck); + } + goto out; } static int -- cgit From 35e478f42271673f79066a1ed008c6604621c6fe Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 7 Mar 2012 10:43:02 +0000 Subject: GFS2: Flush pending glock work when evicting an inode This ensures that we will not try to access the inode thats being flushed via the glock after it has been freed. 
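
A userspace sketch of the ordering this fix enforces: asynchronous work that dereferences an object must be flushed before the object is freed. A plain pthread stands in for the glock's delayed work and pthread_join() for flush_delayed_work_sync(); the demo_glock structure and its fields are invented for illustration and are not GFS2's types.

/* Sketch only; compile with -pthread and C11 for <stdatomic.h>. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_glock {
	atomic_int object_valid;     /* models gl_object being set or cleared */
	pthread_t work;              /* models the glock's delayed work item */
};

static void *glock_work_fn(void *arg)
{
	struct demo_glock *gl = arg;

	/* May run before or after eviction clears the flag, but never after
	 * the structure is freed, because eviction joins this thread first. */
	printf("delayed work sees object_valid=%d\n",
	       atomic_load(&gl->object_valid));
	return NULL;
}

int main(void)
{
	struct demo_glock *gl = calloc(1, sizeof(*gl));

	if (!gl)
		return 1;
	atomic_store(&gl->object_valid, 1);
	if (pthread_create(&gl->work, NULL, glock_work_fn, gl) != 0) {
		free(gl);
		return 1;
	}

	/* Eviction path, same order as the patch: drop the back-pointer,
	 * flush the pending work (pthread_join() plays the role of
	 * flush_delayed_work_sync()), and only then free the object. */
	atomic_store(&gl->object_valid, 0);
	pthread_join(gl->work, NULL);
	free(gl);
	return 0;
}

The worker may observe object_valid as either 1 or 0 depending on timing, but it can never touch freed memory, which is the guarantee the GFS2 change restores.
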
Signed-off-by: Steven Whitehouse --- fs/gfs2/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index f3faf72fa7ae..6172fa77ad59 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1557,6 +1557,7 @@ out: end_writeback(inode); gfs2_dir_hash_inval(ip); ip->i_gl->gl_object = NULL; + flush_delayed_work_sync(&ip->i_gl->gl_work); gfs2_glock_add_to_lru(ip->i_gl); gfs2_glock_put(ip->i_gl); ip->i_gl = NULL; -- cgit From c097b2ca5140249abc3fb5ae9a545c35125ae8d0 Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Mon, 5 Mar 2012 15:08:06 -0800 Subject: writeback: fix fn name in writeback_inodes_sb_nr_if_idle() comment header Signed-off-by: Fengguang Wu Signed-off-by: Jiri Kosina --- fs/fs-writeback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f855916657ba..82e959da686c 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1284,7 +1284,7 @@ int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason) EXPORT_SYMBOL(writeback_inodes_sb_if_idle); /** - * writeback_inodes_sb_if_idle - start writeback if none underway + * writeback_inodes_sb_nr_if_idle - start writeback if none underway * @sb: the superblock * @nr: the number of pages to write * @reason: reason why some writeback work was initiated -- cgit From 5a1f36c90c9b21a7aa31c29a1926b376dd6a11cf Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 7 Mar 2012 16:29:45 +0200 Subject: UBIFS: improve error messages Ricard complaints that the following error message is odd: "UBIFS error (pid 1578): validate_sb: bad superblock, error 8" and he is right. This patch improves the error messages a bit and makes them more user-friendly. Reported-by: Ricard Wanderlof Signed-off-by: Artem Bityutskiy --- fs/ubifs/sb.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 6094c5a5d7a8..771f7fb6ce92 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -410,13 +410,23 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) } if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) { - err = 7; + ubifs_err("too few main LEBs count %d, must be at least %d", + c->main_lebs, UBIFS_MIN_MAIN_LEBS); goto failed; } - if (c->max_bud_bytes < (long long)c->leb_size * UBIFS_MIN_BUD_LEBS || - c->max_bud_bytes > (long long)c->leb_size * c->main_lebs) { - err = 8; + max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS; + if (c->max_bud_bytes < max_bytes) { + ubifs_err("too small journal (%lld bytes), must be at least " + "%lld bytes", c->max_bud_bytes, max_bytes); + goto failed; + } + + max_bytes = (long long)c->leb_size * c->main_lebs; + if (c->max_bud_bytes > max_bytes) { + ubifs_err("too large journal size (%lld bytes), only %lld bytes" + "available in the main area", + c->max_bud_bytes, max_bytes); goto failed; } @@ -450,7 +460,6 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) goto failed; } - max_bytes = c->main_lebs * (long long)c->leb_size; if (c->rp_size < 0 || max_bytes < c->rp_size) { err = 14; goto failed; -- cgit From d8e0539ebdff5ff27fa7eb019715d9dfb5171a3b Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Tue, 6 Mar 2012 20:46:43 -0500 Subject: NFS: add filehandle crc for debug display Match wireshark's CRC-32 hash for easier debugging Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 23 ++++++++++++++++++++--- 1 file changed, 20 
insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index ba03b7908149..0337554f1561 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -1045,7 +1046,23 @@ struct nfs_fh *nfs_alloc_fhandle(void) return fh; } -/** +#ifdef RPC_DEBUG +/* + * _nfs_display_fhandle_hash - calculate the crc32 hash for the filehandle + * in the same way that wireshark does + * + * @fh: file handle + * + * For debugging only. + */ +u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh) +{ + /* wireshark uses 32-bit AUTODIN crc and does a bitwise + * not on the result */ + return ~crc32(0xFFFFFFFF, &fh->data[0], fh->size); +} + +/* * _nfs_display_fhandle - display an NFS file handle on the console * * @fh: file handle to display @@ -1053,7 +1070,6 @@ struct nfs_fh *nfs_alloc_fhandle(void) * * For debugging only. */ -#ifdef RPC_DEBUG void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption) { unsigned short i; @@ -1063,7 +1079,8 @@ void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption) return; } - printk(KERN_DEFAULT "%s at %p is %u bytes:\n", caption, fh, fh->size); + printk(KERN_DEFAULT "%s at %p is %u bytes, crc: 0x%08x:\n", + caption, fh, fh->size, _nfs_display_fhandle_hash(fh)); for (i = 0; i < fh->size; i += 16) { __be32 *pos = (__be32 *)&fh->data[i]; -- cgit From 4f1abd226d80ef763c50e3930b369b63dffbb312 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Tue, 6 Mar 2012 21:58:20 -0500 Subject: NFS: add fh_crc to debug output Print the filehandle crc in two debug messages Signed-off-by: Weston Andros Adamson Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0337554f1561..70e25c9c5670 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -390,9 +390,10 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) unlock_new_inode(inode); } else nfs_refresh_inode(inode, fattr); - dprintk("NFS: nfs_fhget(%s/%Ld ct=%d)\n", + dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n", inode->i_sb->s_id, (long long)NFS_FILEID(inode), + nfs_display_fhandle_hash(fh), atomic_read(&inode->i_count)); out: @@ -1274,8 +1275,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) unsigned long now = jiffies; unsigned long save_cache_validity; - dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n", + dfprintk(VFS, "NFS: %s(%s/%ld fh_crc=0x%08x ct=%d info=0x%x)\n", __func__, inode->i_sb->s_id, inode->i_ino, + nfs_display_fhandle_hash(NFS_FH(inode)), atomic_read(&inode->i_count), fattr->valid); if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) -- cgit From 9cb8196839ab4ec87710526e9c43ac7f5dba69d3 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Wed, 7 Mar 2012 10:49:41 -0500 Subject: NFSv4.1 handle DS stateid errors Handle DS READ and WRITE stateid errors by recovering the stateid on the MDS. NFS4ERR_OLD_STATEID is ignored as the client always sends a state sequenceid of zero for DS READ and WRITE stateids. 
Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 1 + fs/nfs/nfs4filelayout.c | 29 ++++++++++++++++++++++++++++- fs/nfs/nfs4state.c | 2 ++ 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 87f7544f3dce..97d53574bf53 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -474,6 +474,7 @@ void nfs_remove_bad_delegation(struct inode *inode) nfs_free_delegation(delegation); } } +EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation); /** * nfs_expire_all_delegation_types diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 47e8f3435d38..b2d3bb5971bb 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -36,6 +36,7 @@ #include #include "internal.h" +#include "delegation.h" #include "nfs4filelayout.h" #define NFSDBG_FACILITY NFSDBG_PNFS_LD @@ -86,12 +87,31 @@ static int filelayout_async_handle_error(struct rpc_task *task, struct nfs_client *clp, int *reset) { + struct nfs_server *mds_server = NFS_SERVER(state->inode); + struct nfs_client *mds_client = mds_server->nfs_client; + if (task->tk_status >= 0) return 0; - *reset = 0; switch (task->tk_status) { + /* MDS state errors */ + case -NFS4ERR_DELEG_REVOKED: + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + if (state != NULL) + nfs_remove_bad_delegation(state->inode); + case -NFS4ERR_OPENMODE: + if (state == NULL) + break; + nfs4_schedule_stateid_recovery(mds_server, state); + goto wait_on_recovery; + case -NFS4ERR_EXPIRED: + if (state != NULL) + nfs4_schedule_stateid_recovery(mds_server, state); + nfs4_schedule_lease_recovery(mds_client); + goto wait_on_recovery; + /* DS session errors */ case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: @@ -117,8 +137,15 @@ static int filelayout_async_handle_error(struct rpc_task *task, *reset = 1; break; } +out: task->tk_status = 0; return -EAGAIN; +wait_on_recovery: + rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL); + if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0) + rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task); + goto out; + } /* NFS_PROTO call done callback routines */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1dad5c53c7fa..a58d02a0c27d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1072,6 +1072,7 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp) set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); nfs4_schedule_state_manager(clp); } +EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery); void nfs4_schedule_path_down_recovery(struct nfs_client *clp) { @@ -1109,6 +1110,7 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4 nfs4_state_mark_reclaim_nograce(clp, state); nfs4_schedule_state_manager(clp); } +EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); void nfs_inode_find_state_and_recover(struct inode *inode, const nfs4_stateid *stateid) -- cgit From cf470c3e004efe16d73dc8ba9b29bdc9a5327cda Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 7 Mar 2012 13:49:12 -0500 Subject: NFSv4: Don't free the nfs4_lock_state until after the release_lockowner Otherwise we can end up with sequence id problems if the client reuses the owner_id before the server has processed the release_lockowner Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 4 +++- fs/nfs/nfs4proc.c | 31 ++++++++++++++++++++----------- fs/nfs/nfs4state.c | 8 +++++--- 3 files changed, 28 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h 
b/fs/nfs/nfs4_fs.h index 16373df96f90..026878cb2698 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -213,7 +213,7 @@ extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, boo extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, struct nfs4_fs_locations *fs_locations, struct page *page); -extern void nfs4_release_lockowner(const struct nfs4_lock_state *); +extern int nfs4_release_lockowner(struct nfs4_lock_state *); extern const struct xattr_handler *nfs4_xattr_handlers[]; #if defined(CONFIG_NFS_V4_1) @@ -338,6 +338,8 @@ extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); extern void nfs_release_seqid(struct nfs_seqid *seqid); extern void nfs_free_seqid(struct nfs_seqid *seqid); +extern void nfs4_free_lock_state(struct nfs4_lock_state *lsp); + extern const nfs4_stateid zero_stateid; /* nfs4xdr.c */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1ec05222ccbc..32e0d08a9771 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4745,8 +4745,15 @@ out: return err; } +struct nfs_release_lockowner_data { + struct nfs4_lock_state *lsp; + struct nfs_release_lockowner_args args; +}; + static void nfs4_release_lockowner_release(void *calldata) { + struct nfs_release_lockowner_data *data = calldata; + nfs4_free_lock_state(data->lsp); kfree(calldata); } @@ -4754,24 +4761,26 @@ const struct rpc_call_ops nfs4_release_lockowner_ops = { .rpc_release = nfs4_release_lockowner_release, }; -void nfs4_release_lockowner(const struct nfs4_lock_state *lsp) +int nfs4_release_lockowner(struct nfs4_lock_state *lsp) { struct nfs_server *server = lsp->ls_state->owner->so_server; - struct nfs_release_lockowner_args *args; + struct nfs_release_lockowner_data *data; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], }; if (server->nfs_client->cl_mvops->minor_version != 0) - return; - args = kmalloc(sizeof(*args), GFP_NOFS); - if (!args) - return; - args->lock_owner.clientid = server->nfs_client->cl_clientid; - args->lock_owner.id = lsp->ls_seqid.owner_id; - args->lock_owner.s_dev = server->s_dev; - msg.rpc_argp = args; - rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args); + return -EINVAL; + data = kmalloc(sizeof(*data), GFP_NOFS); + if (!data) + return -ENOMEM; + data->lsp = lsp; + data->args.lock_owner.clientid = server->nfs_client->cl_clientid; + data->args.lock_owner.id = lsp->ls_seqid.owner_id; + data->args.lock_owner.s_dev = server->s_dev; + msg.rpc_argp = &data->args; + rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); + return 0; } #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a58d02a0c27d..7adc46b4c7f8 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -784,7 +784,7 @@ out_free: return NULL; } -static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) +void nfs4_free_lock_state(struct nfs4_lock_state *lsp) { struct nfs_server *server = lsp->ls_state->owner->so_server; @@ -842,8 +842,10 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp) if (list_empty(&state->lock_states)) clear_bit(LK_STATE_IN_USE, &state->flags); spin_unlock(&state->state_lock); - if (lsp->ls_flags & NFS_LOCK_INITIALIZED) - nfs4_release_lockowner(lsp); + if (lsp->ls_flags & NFS_LOCK_INITIALIZED) { + if (nfs4_release_lockowner(lsp) == 0) + return; + } nfs4_free_lock_state(lsp); } -- cgit From 
3114ea7a24d3264c090556a2444fc6d2c06176d4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 7 Mar 2012 16:39:06 -0500 Subject: NFSv4: Return the delegation if the server returns NFS4ERR_OPENMODE If a setattr() fails because of an NFS4ERR_OPENMODE error, it is probably due to us holding a read delegation. Ensure that the recovery routines return that delegation in this case. Reported-by: Miklos Szeredi Signed-off-by: Trond Myklebust Cc: stable@vger.kernel.org --- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4proc.c | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 026878cb2698..d1989e3f23c3 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -182,6 +182,7 @@ struct nfs4_exception { long timeout; int retry; struct nfs4_state *state; + struct inode *inode; }; struct nfs4_state_recovery_ops { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 32e0d08a9771..a8dd04db764f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -262,18 +262,28 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc { struct nfs_client *clp = server->nfs_client; struct nfs4_state *state = exception->state; + struct inode *inode = exception->inode; int ret = errorcode; exception->retry = 0; switch(errorcode) { case 0: return 0; + case -NFS4ERR_OPENMODE: + if (nfs_have_delegation(inode, FMODE_READ)) { + nfs_inode_return_delegation(inode); + exception->retry = 1; + return 0; + } + if (state == NULL) + break; + nfs4_schedule_stateid_recovery(server, state); + goto wait_on_recovery; case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: if (state != NULL) nfs_remove_bad_delegation(state->inode); - case -NFS4ERR_OPENMODE: if (state == NULL) break; nfs4_schedule_stateid_recovery(server, state); @@ -1939,6 +1949,7 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_server *server = NFS_SERVER(inode); struct nfs4_exception exception = { .state = state, + .inode = inode, }; int err; do { -- cgit From 75ca61c101601a7071d93571920be9697b3fda9b Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 8 Mar 2012 12:10:23 +0000 Subject: GFS2: Remove a __GFP_NOFAIL allocation In order to ensure that we've got enough buffer heads for flushing the journal, the orignal code used __GFP_NOFAIL when performing this allocation. Here we dispense with that in favour of using a mempool. This should improve efficiency in low memory conditions since flushing the journal is a good way to get memory back, we don't want to be spinning, waiting on memory allocations. The buffers which are allocated via this mempool are fairly short lived, so that we'll recycle them pretty quickly. Although there are other memory allocations which occur during the journal flush process, this is the one which can potentially require the most memory, so the most important one to fix. The amount of memory reserved is a fixed amount, and we should not need to scale it when there are a greater number of filesystems in use. 
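The mempool idea is easiest to see in a userspace analogue: keep a small private reserve of objects so an allocation can still succeed when the general allocator refuses, instead of looping with a never-fail flag. Everything below (the pool type, the reserve size and the helper names) is an illustrative sketch, not the kernel's mempool API.

#include <stdlib.h>

struct pool {
	void   *reserve[16];	/* preallocated emergency objects */
	size_t  nfree;
	size_t  objsize;
};

static struct pool *pool_create(size_t objsize)
{
	struct pool *p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->objsize = objsize;
	while (p->nfree < 16) {
		void *obj = malloc(objsize);
		if (!obj)
			break;
		p->reserve[p->nfree++] = obj;
	}
	return p;
}

static void *pool_alloc(struct pool *p)
{
	void *obj = malloc(p->objsize);
	if (!obj && p->nfree)		/* normal path failed: dip into the reserve */
		obj = p->reserve[--p->nfree];
	return obj;
}

static void pool_free(struct pool *p, void *obj)
{
	if (!obj)
		return;
	if (p->nfree < 16)		/* top the reserve back up first */
		p->reserve[p->nfree++] = obj;
	else
		free(obj);
}

int main(void)
{
	struct pool *p = pool_create(64);
	if (!p)
		return 1;
	void *obj = pool_alloc(p);
	pool_free(p, obj);
	return 0;
}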
Signed-off-by: Steven Whitehouse --- fs/gfs2/lops.c | 5 +++-- fs/gfs2/main.c | 18 ++++++++++++++++++ fs/gfs2/util.c | 1 + fs/gfs2/util.h | 3 +++ 4 files changed, 25 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index fe369bd9e10c..87e6e0d66bb7 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -199,7 +200,7 @@ static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate) struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd; end_buffer_write_sync(bh, uptodate); - free_buffer_head(bh); + mempool_free(bh, gfs2_bh_pool); unlock_buffer(real_bh); brelse(real_bh); if (atomic_dec_and_test(&sdp->sd_log_in_flight)) @@ -220,7 +221,7 @@ static struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp, u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); struct buffer_head *bh; - bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL); + bh = mempool_alloc(gfs2_bh_pool, GFP_NOFS); atomic_set(&bh->b_count, 1); bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock); set_bh_page(bh, real->b_page, bh_offset(real)); diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index a8d9bcd0e19c..754426b1e52c 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "gfs2.h" #include "incore.h" @@ -69,6 +70,16 @@ static void gfs2_init_gl_aspace_once(void *foo) address_space_init_once(mapping); } +static void *gfs2_bh_alloc(gfp_t mask, void *data) +{ + return alloc_buffer_head(mask); +} + +static void gfs2_bh_free(void *ptr, void *data) +{ + return free_buffer_head(ptr); +} + /** * init_gfs2_fs - Register GFS2 as a filesystem * @@ -151,6 +162,10 @@ static int __init init_gfs2_fs(void) gfs2_control_wq = alloc_workqueue("gfs2_control", WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0); if (!gfs2_control_wq) + goto fail_recovery; + + gfs2_bh_pool = mempool_create(1024, gfs2_bh_alloc, gfs2_bh_free, NULL); + if (!gfs2_bh_pool) goto fail_control; gfs2_register_debugfs(); @@ -160,6 +175,8 @@ static int __init init_gfs2_fs(void) return 0; fail_control: + destroy_workqueue(gfs2_control_wq); +fail_recovery: destroy_workqueue(gfs_recovery_wq); fail_wq: unregister_filesystem(&gfs2meta_fs_type); @@ -208,6 +225,7 @@ static void __exit exit_gfs2_fs(void) rcu_barrier(); + mempool_destroy(gfs2_bh_pool); kmem_cache_destroy(gfs2_quotad_cachep); kmem_cache_destroy(gfs2_rgrpd_cachep); kmem_cache_destroy(gfs2_bufdata_cachep); diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index 53511291fe36..9e7765e8e7b0 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -25,6 +25,7 @@ struct kmem_cache *gfs2_inode_cachep __read_mostly; struct kmem_cache *gfs2_bufdata_cachep __read_mostly; struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; struct kmem_cache *gfs2_quotad_cachep __read_mostly; +mempool_t *gfs2_bh_pool __read_mostly; void gfs2_assert_i(struct gfs2_sbd *sdp) { diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h index b432e04600de..a4ce76c67dbb 100644 --- a/fs/gfs2/util.h +++ b/fs/gfs2/util.h @@ -10,6 +10,8 @@ #ifndef __UTIL_DOT_H__ #define __UTIL_DOT_H__ +#include + #include "incore.h" #define fs_printk(level, fs, fmt, arg...) 
\ @@ -150,6 +152,7 @@ extern struct kmem_cache *gfs2_inode_cachep; extern struct kmem_cache *gfs2_bufdata_cachep; extern struct kmem_cache *gfs2_rgrpd_cachep; extern struct kmem_cache *gfs2_quotad_cachep; +extern mempool_t *gfs2_bh_pool; static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, unsigned int *p) -- cgit From 2dc317565b6fd264929b41aaa9674431d75178ef Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 8 Mar 2012 11:03:53 -0500 Subject: NFSv4.1 cleanup DS stateid error handling The error handler nfs4_state parameter is never NULL in the pNFS case as the open_context must carry an nfs_state. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index b2d3bb5971bb..768f6f86c9f0 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -99,16 +99,12 @@ static int filelayout_async_handle_error(struct rpc_task *task, case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: - if (state != NULL) - nfs_remove_bad_delegation(state->inode); + nfs_remove_bad_delegation(state->inode); case -NFS4ERR_OPENMODE: - if (state == NULL) - break; nfs4_schedule_stateid_recovery(mds_server, state); goto wait_on_recovery; case -NFS4ERR_EXPIRED: - if (state != NULL) - nfs4_schedule_stateid_recovery(mds_server, state); + nfs4_schedule_stateid_recovery(mds_server, state); nfs4_schedule_lease_recovery(mds_client); goto wait_on_recovery; /* DS session errors */ @@ -145,7 +141,6 @@ wait_on_recovery: if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0) rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task); goto out; - } /* NFS_PROTO call done callback routines */ -- cgit From 7210cb7a72a22303cdb225bd1aea28697a17bbae Mon Sep 17 00:00:00 2001 From: David Teigland Date: Thu, 8 Mar 2012 12:37:12 -0600 Subject: dlm: fix slow rsb search in dir recovery The function used to find an rsb during directory recovery was searching the single linear list of rsb's. This wasted a lot of time compared to using the standard hash table to find the rsb. 
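The speed-up comes from indexing by a hash of the resource name, so a lookup touches one short bucket chain instead of every resource in the lockspace. The sketch below shows that bucket-selection pattern in plain userspace C; the hash function and structures are stand-ins (the kernel code uses jhash() and the ls_rsbtbl table).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NBUCKETS 64			/* power of two, so the mask works */

struct res {
	struct res *next;
	char        name[32];
};

static struct res *buckets[NBUCKETS];

static uint32_t hash_name(const char *name)
{
	uint32_t h = 2166136261u;	/* FNV-1a as a stand-in hash */
	while (*name)
		h = (h ^ (unsigned char)*name++) * 16777619u;
	return h;
}

static void insert(struct res *r)
{
	uint32_t b = hash_name(r->name) & (NBUCKETS - 1);
	r->next = buckets[b];
	buckets[b] = r;
}

static struct res *lookup(const char *name)
{
	uint32_t b = hash_name(name) & (NBUCKETS - 1);
	struct res *r;

	for (r = buckets[b]; r; r = r->next)	/* short chain, not the whole list */
		if (!strcmp(r->name, name))
			return r;
	return NULL;		/* caller may fall back to a slower full search */
}

int main(void)
{
	struct res r = { NULL, "resource-1" };

	insert(&r);
	printf("%s\n", lookup("resource-1") ? "found" : "missing");
	return 0;
}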
Signed-off-by: David Teigland --- fs/dlm/dir.c | 17 +++++++++++++++++ fs/dlm/lock.c | 8 ++++---- fs/dlm/lock.h | 3 +++ 3 files changed, 24 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 83641574b016..dc5eb598b81f 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -351,11 +351,28 @@ int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen, static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len) { struct dlm_rsb *r; + uint32_t hash, bucket; + int rv; + + hash = jhash(name, len, 0); + bucket = hash & (ls->ls_rsbtbl_size - 1); + + spin_lock(&ls->ls_rsbtbl[bucket].lock); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, 0, &r); + if (rv) + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, + name, len, 0, &r); + spin_unlock(&ls->ls_rsbtbl[bucket].lock); + + if (!rv) + return r; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (len == r->res_length && !memcmp(name, r->res_name, len)) { up_read(&ls->ls_root_sem); + log_error(ls, "find_rsb_root revert to root_list %s", + r->res_name); return r; } } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index d47183043c59..fa5c07d51dcc 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -411,8 +411,8 @@ static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen) return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN); } -static int search_rsb_tree(struct rb_root *tree, char *name, int len, - unsigned int flags, struct dlm_rsb **r_ret) +int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len, + unsigned int flags, struct dlm_rsb **r_ret) { struct rb_node *node = tree->rb_node; struct dlm_rsb *r; @@ -474,12 +474,12 @@ static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b, struct dlm_rsb *r; int error; - error = search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, flags, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, flags, &r); if (!error) { kref_get(&r->res_ref); goto out; } - error = search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, flags, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, flags, &r); if (error) goto out; diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 265017a7c3e7..1a255307f6ff 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -28,6 +28,9 @@ void dlm_scan_waiters(struct dlm_ls *ls); void dlm_scan_timeout(struct dlm_ls *ls); void dlm_adjust_timeouts(struct dlm_ls *ls); +int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len, + unsigned int flags, struct dlm_rsb **r_ret); + int dlm_purge_locks(struct dlm_ls *ls); void dlm_purge_mstcpy_locks(struct dlm_rsb *r); void dlm_grant_after_purge(struct dlm_ls *ls); -- cgit From 54d20f006ceff1f2f1e69d0e54049b6c0765c039 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 8 Mar 2012 13:03:10 -0800 Subject: Revert "sysfs: Kill nlink counting." This reverts commit 524b6c5b39b931311dfe5a2f5abae2f5c9731676. It has shown to break userspace tools, which is not acceptable. Reported-by: Jiri Slaby Cc: Eric W. 
Biederman Cc: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/dir.c | 6 ++++++ fs/sysfs/inode.c | 3 +++ fs/sysfs/sysfs.h | 1 + 3 files changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index dd3779cf3a3b..2a7a3f5d1ca6 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -91,6 +91,9 @@ static int sysfs_link_sibling(struct sysfs_dirent *sd) struct rb_node **node = &sd->s_parent->s_dir.children.rb_node; struct rb_node *parent = NULL; + if (sysfs_type(sd) == SYSFS_DIR) + sd->s_parent->s_dir.subdirs++; + while (*node) { struct sysfs_dirent *pos; int result; @@ -123,6 +126,9 @@ static int sysfs_link_sibling(struct sysfs_dirent *sd) */ static void sysfs_unlink_sibling(struct sysfs_dirent *sd) { + if (sysfs_type(sd) == SYSFS_DIR) + sd->s_parent->s_dir.subdirs--; + rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children); } diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index cc7ea5de2fdd..feb2d69396cf 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -217,6 +217,9 @@ static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode) iattrs->ia_secdata, iattrs->ia_secdata_len); } + + if (sysfs_type(sd) == SYSFS_DIR) + set_nlink(inode, sd->s_dir.subdirs + 2); } int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 6289a00287db..661a9639570b 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -19,6 +19,7 @@ struct sysfs_open_dirent; struct sysfs_elem_dir { struct kobject *kobj; + unsigned long subdirs; /* children rbtree starts here and goes through sd->s_rb */ struct rb_root children; }; -- cgit From 2f2d76cc3e938389feee671b46252dde6880b3b7 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Thu, 8 Mar 2012 05:55:59 +0000 Subject: dlm: Do not allocate a fd for peeloff avoids allocating a fd that a) propagates to every kernel thread and usermodehelper b) is not properly released. References: http://article.gmane.org/gmane.linux.network.drbd/22529 Signed-off-by: Benjamin Poirier Signed-off-by: David S. Miller --- fs/dlm/lowcomms.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 0b3109ee4257..ca0c59a4246c 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -474,9 +475,6 @@ static void process_sctp_notification(struct connection *con, int prim_len, ret; int addr_len; struct connection *new_con; - sctp_peeloff_arg_t parg; - int parglen = sizeof(parg); - int err; /* * We get this before any data for an association. 
@@ -525,23 +523,19 @@ static void process_sctp_notification(struct connection *con, return; /* Peel off a new sock */ - parg.associd = sn->sn_assoc_change.sac_assoc_id; - ret = kernel_getsockopt(con->sock, IPPROTO_SCTP, - SCTP_SOCKOPT_PEELOFF, - (void *)&parg, &parglen); + sctp_lock_sock(con->sock->sk); + ret = sctp_do_peeloff(con->sock->sk, + sn->sn_assoc_change.sac_assoc_id, + &new_con->sock); + sctp_release_sock(con->sock->sk); if (ret < 0) { log_print("Can't peel off a socket for " "connection %d to node %d: err=%d", - parg.associd, nodeid, ret); - return; - } - new_con->sock = sockfd_lookup(parg.sd, &err); - if (!new_con->sock) { - log_print("sockfd_lookup error %d", err); + (int)sn->sn_assoc_change.sac_assoc_id, + nodeid, ret); return; } add_sock(new_con->sock, new_con); - sockfd_put(new_con->sock); log_print("connecting to %d sctp association %d", nodeid, (int)sn->sn_assoc_change.sac_assoc_id); -- cgit From bfcfaa77bdf0f775263e906015982a608df01c76 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 6 Mar 2012 11:16:17 -0800 Subject: vfs: use 'unsigned long' accesses for dcache name comparison and hashing Ok, this is hacky, and only works on little-endian machines with goo unaligned handling. And even then only with CONFIG_DEBUG_PAGEALLOC disabled, since it can access up to 7 bytes after the pathname. But it runs like a bat out of hell. Signed-off-by: Linus Torvalds --- fs/Kconfig | 4 ++ fs/dcache.c | 23 ++++++++++++ fs/namei.c | 122 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+) (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index d621f02a3f9e..aa195265362f 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -4,6 +4,10 @@ menu "File systems" +# Use unaligned word dcache accesses +config DCACHE_WORD_ACCESS + bool + if BLOCK source "fs/ext2/Kconfig" diff --git a/fs/dcache.c b/fs/dcache.c index bcbdb33fcc20..ffd47a16d870 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -144,6 +144,28 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, static inline int dentry_cmp(const unsigned char *cs, size_t scount, const unsigned char *ct, size_t tcount) { +#ifdef CONFIG_DCACHE_WORD_ACCESS + unsigned long a,b,mask; + + if (unlikely(scount != tcount)) + return 1; + + for (;;) { + a = *(unsigned long *)cs; + b = *(unsigned long *)ct; + if (tcount < sizeof(unsigned long)) + break; + if (unlikely(a != b)) + return 1; + cs += sizeof(unsigned long); + ct += sizeof(unsigned long); + tcount -= sizeof(unsigned long); + if (!tcount) + return 0; + } + mask = ~(~0ul << tcount*8); + return unlikely(!!((a ^ b) & mask)); +#else if (scount != tcount) return 1; @@ -155,6 +177,7 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount, tcount--; } while (tcount); return 0; +#endif } static void __d_free(struct rcu_head *head) diff --git a/fs/namei.c b/fs/namei.c index e2ba62820a0f..378497a744b4 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1374,6 +1374,126 @@ static inline int can_lookup(struct inode *inode) return 1; } +/* + * We can do the critical dentry name comparison and hashing + * operations one word at a time, but we are limited to: + * + * - Architectures with fast unaligned word accesses. We could + * do a "get_unaligned()" if this helps and is sufficiently + * fast. + * + * - Little-endian machines (so that we can generate the mask + * of low bytes efficiently). Again, we *could* do a byte + * swapping load on big-endian architectures if that is not + * expensive enough to make the optimization worthless. 
+ * + * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we + * do not trap on the (extremely unlikely) case of a page + * crossing operation. + * + * - Furthermore, we need an efficient 64-bit compile for the + * 64-bit case in order to generate the "number of bytes in + * the final mask". Again, that could be replaced with a + * efficient population count instruction or similar. + */ +#ifdef CONFIG_DCACHE_WORD_ACCESS + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. + */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608 >> 56; +} + +static inline unsigned int fold_hash(unsigned long hash) +{ + hash += hash >> (8*sizeof(int)); + return hash; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#define fold_hash(x) (x) + +#endif + +unsigned int full_name_hash(const unsigned char *name, unsigned int len) +{ + unsigned long a, mask; + unsigned long hash = 0; + + for (;;) { + a = *(unsigned long *)name; + hash *= 9; + if (len < sizeof(unsigned long)) + break; + hash += a; + name += sizeof(unsigned long); + len -= sizeof(unsigned long); + if (!len) + goto done; + } + mask = ~(~0ul << len*8); + hash += mask & a; +done: + return fold_hash(hash); +} +EXPORT_SYMBOL(full_name_hash); + +#define ONEBYTES 0x0101010101010101ul +#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful +#define HIGHBITS 0x8080808080808080ul + +/* Return the high bit set in the first byte that is a zero */ +static inline unsigned long has_zero(unsigned long a) +{ + return ((a - ONEBYTES) & ~a) & HIGHBITS; +} + +/* + * Calculate the length and hash of the path component, and + * return the length of the component; + */ +static inline unsigned long hash_name(const char *name, unsigned int *hashp) +{ + unsigned long a, mask, hash, len; + + hash = a = 0; + len = -sizeof(unsigned long); + do { + hash = (hash + a) * 9; + len += sizeof(unsigned long); + a = *(unsigned long *)(name+len); + /* Do we have any NUL or '/' bytes in this word? */ + mask = has_zero(a) | has_zero(a ^ SLASHBYTES); + } while (!mask); + + /* The mask *below* the first high bit set */ + mask = (mask - 1) & ~mask; + mask >>= 7; + hash += a & mask; + *hashp = fold_hash(hash); + + return len + count_masked_bytes(mask); +} + +#else + unsigned int full_name_hash(const unsigned char *name, unsigned int len) { unsigned long hash = init_name_hash(); @@ -1402,6 +1522,8 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp) return len; } +#endif + /* * Name resolution. * This is the basic name resolution function, turning a pathname into -- cgit From 0032a7a749a49b2c044092a1d0af5cfd0077f35d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 8 Mar 2012 17:16:12 -0500 Subject: NFS: Don't copy read delegation stateids in setattr The server will just return an NFS4ERR_OPENMODE anyway. 
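The check added here is a simple mode-coverage test: a cached delegation may only be used when it grants every access mode the caller needs. A minimal sketch of that test, with stand-in constants rather than the kernel's FMODE_* flags:

#include <stdbool.h>
#include <stdio.h>

#define MODE_READ  0x1		/* stand-ins for the real mode flags */
#define MODE_WRITE 0x2

static bool delegation_covers(unsigned int held, unsigned int wanted)
{
	wanted &= MODE_READ | MODE_WRITE;
	return (held & wanted) == wanted;
}

int main(void)
{
	/* a read delegation must not be used for a write such as setattr */
	printf("%d\n", delegation_covers(MODE_READ, MODE_WRITE));		/* 0 */
	printf("%d\n", delegation_covers(MODE_READ | MODE_WRITE, MODE_WRITE));	/* 1 */
	return 0;
}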
Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 16 ++++++++++------ fs/nfs/delegation.h | 2 +- fs/nfs/nfs4proc.c | 2 +- 3 files changed, 12 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 97d53574bf53..e27c0972f94e 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -694,21 +694,25 @@ int nfs_delegations_present(struct nfs_client *clp) * nfs4_copy_delegation_stateid - Copy inode's state ID information * @dst: stateid data structure to fill in * @inode: inode to check + * @flags: delegation type requirement * - * Returns one and fills in "dst->data" * if inode had a delegation, - * otherwise zero is returned. + * Returns "true" and fills in "dst->data" * if inode had a delegation, + * otherwise "false" is returned. */ -int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode) +bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, + fmode_t flags) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_delegation *delegation; - int ret = 0; + bool ret; + flags &= FMODE_READ|FMODE_WRITE; rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); - if (delegation != NULL) { + ret = (delegation != NULL && (delegation->type & flags) == flags); + if (ret) { nfs4_stateid_copy(dst, &delegation->stateid); - ret = 1; + nfs_mark_delegation_referenced(delegation); } rcu_read_unlock(); return ret; diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 691a79609184..e193012123e7 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -54,7 +54,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp); int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync); int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid); int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl); -int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode); +bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags); void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); int nfs_have_delegation(struct inode *inode, fmode_t flags); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a8dd04db764f..3578ad36a5b8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1929,7 +1929,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, nfs_fattr_init(fattr); - if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) { + if (nfs4_copy_delegation_stateid(&arg.stateid, inode, FMODE_WRITE)) { /* Use that stateid */ } else if (state != NULL) { nfs4_select_rw_stateid(&arg.stateid, state, current->files, current->tgid); -- cgit From 4fc8796d23819da814ec25b7793bde8f104f1a2a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 8 Mar 2012 17:42:01 -0500 Subject: NFSv4: Clean up nfs4_select_rw_stateid() Ensure that we select delegation stateids first, then lock stateids and then open stateids. 
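The ordering is a priority chain: try the delegation stateid, then any byte-range lock stateid, and only then the open stateid. A compact sketch of that shape follows; the helpers are trivial stand-ins, not the NFS client's real copy routines.

#include <stdbool.h>
#include <string.h>

struct stateid { unsigned char data[16]; };

/* stand-in sources: pretend neither a delegation nor a lock stateid applies */
static bool copy_delegation_stateid(struct stateid *dst) { (void)dst; return false; }
static bool copy_lock_stateid(struct stateid *dst)       { (void)dst; return false; }
static void copy_open_stateid(struct stateid *dst)       { memset(dst, 0, sizeof(*dst)); }

static void select_rw_stateid(struct stateid *dst)
{
	if (copy_delegation_stateid(dst))	/* highest priority */
		return;
	if (copy_lock_stateid(dst))
		return;
	copy_open_stateid(dst);			/* always available as the fallback */
}

int main(void)
{
	struct stateid sid;

	select_rw_stateid(&sid);
	return sid.data[0];
}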
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 3 ++- fs/nfs/nfs4proc.c | 8 +++++--- fs/nfs/nfs4state.c | 45 +++++++++++++++++++++++++++++++++------------ fs/nfs/nfs4xdr.c | 13 +++++++++---- 4 files changed, 49 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index d1989e3f23c3..b47bdb9c1612 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -330,7 +330,8 @@ extern void nfs41_handle_server_scope(struct nfs_client *, struct server_scope **); extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); -extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t); +extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *, + fmode_t, fl_owner_t, pid_t); extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask); extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 3578ad36a5b8..3bf5593741ee 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1929,10 +1929,12 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, nfs_fattr_init(fattr); - if (nfs4_copy_delegation_stateid(&arg.stateid, inode, FMODE_WRITE)) { + if (state != NULL) { + nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, + current->files, current->tgid); + } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, + FMODE_WRITE)) { /* Use that stateid */ - } else if (state != NULL) { - nfs4_select_rw_stateid(&arg.stateid, state, current->files, current->tgid); } else nfs4_stateid_copy(&arg.stateid, &zero_stateid); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 7adc46b4c7f8..de44804d9864 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -886,28 +886,49 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) return 0; } -/* - * Byte-range lock aware utility to initialize the stateid of read/write - * requests. - */ -void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid) +static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state, + fl_owner_t fl_owner, pid_t fl_pid) { struct nfs4_lock_state *lsp; - int seq; + bool ret = false; - do { - seq = read_seqbegin(&state->seqlock); - nfs4_stateid_copy(dst, &state->stateid); - } while (read_seqretry(&state->seqlock, seq)); if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) - return; + goto out; spin_lock(&state->state_lock); lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE); - if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) + if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) { nfs4_stateid_copy(dst, &lsp->ls_stateid); + ret = true; + } spin_unlock(&state->state_lock); nfs4_put_lock_state(lsp); +out: + return ret; +} + +static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) +{ + int seq; + + do { + seq = read_seqbegin(&state->seqlock); + nfs4_stateid_copy(dst, &state->stateid); + } while (read_seqretry(&state->seqlock, seq)); +} + +/* + * Byte-range lock aware utility to initialize the stateid of read/write + * requests. 
+ */ +void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, + fmode_t fmode, fl_owner_t fl_owner, pid_t fl_pid) +{ + if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) + return; + if (nfs4_copy_lock_stateid(dst, state, fl_owner, fl_pid)) + return; + nfs4_copy_open_stateid(dst, state); } struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask) diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e4bb8e6409a7..f7e064d997f6 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1491,12 +1491,17 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) encode_op_hdr(xdr, OP_PUTROOTFH, decode_putrootfh_maxsz, hdr); } -static void encode_open_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid) +static void encode_open_stateid(struct xdr_stream *xdr, + const struct nfs_open_context *ctx, + const struct nfs_lock_context *l_ctx, + fmode_t fmode, + int zero_seqid) { nfs4_stateid stateid; if (ctx->state != NULL) { - nfs4_select_rw_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid); + nfs4_select_rw_stateid(&stateid, ctx->state, + fmode, l_ctx->lockowner, l_ctx->pid); if (zero_seqid) stateid.seqid = 0; encode_nfs4_stateid(xdr, &stateid); @@ -1510,7 +1515,7 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, encode_op_hdr(xdr, OP_READ, decode_read_maxsz, hdr); encode_open_stateid(xdr, args->context, args->lock_context, - hdr->minorversion); + FMODE_READ, hdr->minorversion); p = reserve_space(xdr, 12); p = xdr_encode_hyper(p, args->offset); @@ -1648,7 +1653,7 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg encode_op_hdr(xdr, OP_WRITE, decode_write_maxsz, hdr); encode_open_stateid(xdr, args->context, args->lock_context, - hdr->minorversion); + FMODE_WRITE, hdr->minorversion); p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, args->offset); -- cgit From 34cc1781c2ae921107e89f6633cfab7436e355ba Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 9 Mar 2012 10:45:56 +0000 Subject: GFS2: Clean up log flush header writing We already send both a pre and post flush to the block device when writing a journal header. There is no need to wait for the previous I/O specifically when we do this, unless we've turned "barriers" off. As a side effect, this also cleans up the code path for flushing the journal and makes it more readable. 
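The ordering trade-off reads roughly as below: when the device honours FLUSH/FUA the header write itself carries the ordering, and only the no-barriers case needs the explicit waits first. This is a userspace-style sketch with stub helpers, not the GFS2 functions themselves.

#include <stdio.h>

static void wait_for_ordered_data(void) { printf("wait for ordered data writes\n"); }
static void wait_for_log_io(void)       { printf("wait for in-flight log I/O\n"); }
static void submit_header(int flush_fua)
{
	printf("submit header%s\n", flush_fua ? " with FLUSH/FUA" : "");
}

static void write_log_header(int barriers_enabled)
{
	if (!barriers_enabled) {
		wait_for_ordered_data();	/* explicit ordering when barriers are off */
		wait_for_log_io();
		submit_header(0);
	} else {
		submit_header(1);		/* the FLUSH/FUA request orders itself */
	}
}

int main(void)
{
	write_log_header(1);
	write_log_header(0);
	return 0;
}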
Signed-off-by: Steven Whitehouse --- fs/gfs2/log.c | 131 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 66 insertions(+), 65 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 2b9f0d9b1b28..4752eadc7f6e 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -491,66 +491,8 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) sdp->sd_log_tail = new_tail; } -/** - * log_write_header - Get and initialize a journal header buffer - * @sdp: The GFS2 superblock - * - * Returns: the initialized log buffer descriptor - */ -static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) -{ - u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); - struct buffer_head *bh; - struct gfs2_log_header *lh; - unsigned int tail; - u32 hash; - - bh = sb_getblk(sdp->sd_vfs, blkno); - lock_buffer(bh); - memset(bh->b_data, 0, bh->b_size); - set_buffer_uptodate(bh); - clear_buffer_dirty(bh); - - gfs2_ail1_empty(sdp); - tail = current_tail(sdp); - - lh = (struct gfs2_log_header *)bh->b_data; - memset(lh, 0, sizeof(struct gfs2_log_header)); - lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); - lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); - lh->lh_header.__pad0 = cpu_to_be64(0); - lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); - lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); - lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); - lh->lh_flags = cpu_to_be32(flags); - lh->lh_tail = cpu_to_be32(tail); - lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head); - hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header)); - lh->lh_hash = cpu_to_be32(hash); - - bh->b_end_io = end_buffer_write_sync; - get_bh(bh); - if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) - submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh); - else - submit_bh(WRITE_FLUSH_FUA | REQ_META, bh); - wait_on_buffer(bh); - - if (!buffer_uptodate(bh)) - gfs2_io_error_bh(sdp, bh); - brelse(bh); - - if (sdp->sd_log_tail != tail) - log_pull_tail(sdp, tail); - else - gfs2_assert_withdraw(sdp, !pull); - - sdp->sd_log_idle = (tail == sdp->sd_log_flush_head); - gfs2_log_incr_head(sdp); -} - -static void log_flush_commit(struct gfs2_sbd *sdp) +static void log_flush_wait(struct gfs2_sbd *sdp) { DEFINE_WAIT(wait); @@ -563,8 +505,6 @@ static void log_flush_commit(struct gfs2_sbd *sdp) } while(atomic_read(&sdp->sd_log_in_flight)); finish_wait(&sdp->sd_log_flush_wait, &wait); } - - log_write_header(sdp, 0, 0); } static int bd_cmp(void *priv, struct list_head *a, struct list_head *b) @@ -633,6 +573,68 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp) gfs2_log_unlock(sdp); } +/** + * log_write_header - Get and initialize a journal header buffer + * @sdp: The GFS2 superblock + * + * Returns: the initialized log buffer descriptor + */ + +static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) +{ + u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); + struct buffer_head *bh; + struct gfs2_log_header *lh; + unsigned int tail; + u32 hash; + + bh = sb_getblk(sdp->sd_vfs, blkno); + lock_buffer(bh); + memset(bh->b_data, 0, bh->b_size); + set_buffer_uptodate(bh); + clear_buffer_dirty(bh); + + gfs2_ail1_empty(sdp); + tail = current_tail(sdp); + + lh = (struct gfs2_log_header *)bh->b_data; + memset(lh, 0, sizeof(struct gfs2_log_header)); + lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); + lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); + lh->lh_header.__pad0 = cpu_to_be64(0); + lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); + 
lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); + lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); + lh->lh_flags = cpu_to_be32(flags); + lh->lh_tail = cpu_to_be32(tail); + lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head); + hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header)); + lh->lh_hash = cpu_to_be32(hash); + + bh->b_end_io = end_buffer_write_sync; + get_bh(bh); + if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) { + gfs2_ordered_wait(sdp); + log_flush_wait(sdp); + submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh); + } else { + submit_bh(WRITE_FLUSH_FUA | REQ_META, bh); + } + wait_on_buffer(bh); + + if (!buffer_uptodate(bh)) + gfs2_io_error_bh(sdp, bh); + brelse(bh); + + if (sdp->sd_log_tail != tail) + log_pull_tail(sdp, tail); + else + gfs2_assert_withdraw(sdp, !pull); + + sdp->sd_log_idle = (tail == sdp->sd_log_flush_head); + gfs2_log_incr_head(sdp); +} + /** * gfs2_log_flush - flush incore transaction(s) * @sdp: the filesystem @@ -676,11 +678,10 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl) gfs2_ordered_write(sdp); lops_before_commit(sdp); - gfs2_ordered_wait(sdp); - if (sdp->sd_log_head != sdp->sd_log_flush_head) - log_flush_commit(sdp); - else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ + if (sdp->sd_log_head != sdp->sd_log_flush_head) { + log_write_header(sdp, 0, 0); + } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ gfs2_log_lock(sdp); atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ trace_gfs2_log_blocks(sdp, -1); -- cgit From 58a7d5fb8e31279b992db4027e44b053a84b7344 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 8 Mar 2012 13:16:32 -0600 Subject: GFS2: call gfs2_write_alloc_required for each chunk gfs2_fallocate was calling gfs2_write_alloc_required() once at the start of the function. This caused problems since gfs2_write_alloc_required used a long unsigned int for the len, but gfs2_fallocate could allocate a much larger amount. This patch will move the call into the loop where the chunks are actually allocated and zeroed out. This will keep the allocation size under the limit, and also allow gfs2_fallocate to quickly skip over sections of the file that are already completely allocated. fallcate_chunk was also not correctly setting the file size. It was using the len veriable to find the last block written to, but by the time it was setting the size, the len variable had already been decremented to 0. 
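The fixed loop can be pictured as below: the range is walked in bounded chunks, chunks that need no allocation are skipped cheaply, and the per-chunk size is what gets passed to the "is allocation required" check, so a very large len can no longer overflow it. The sketch is illustrative only; the chunk size and the hole test are made up.

#include <stdio.h>

#define CHUNK (1LL << 20)		/* illustrative 1 MiB chunk */

static int chunk_needs_alloc(long long offset, long long bytes)
{
	(void)bytes;
	return (offset / CHUNK) % 2;	/* pretend odd chunks are unallocated */
}

static void fallocate_range(long long offset, long long len)
{
	long long bytes = CHUNK;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!chunk_needs_alloc(offset, bytes)) {
			offset += bytes;	/* already allocated: skip ahead */
			len -= bytes;
			continue;
		}
		printf("allocate %lld bytes at offset %lld\n", bytes, offset);
		offset += bytes;
		len -= bytes;
	}
}

int main(void)
{
	fallocate_range(0, 5 * CHUNK);
	return 0;
}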
Signed-off-by: Benjamin Marzinski Signed-off-by: Steven Whitehouse --- fs/gfs2/file.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 310f2fb6f7ea..76834587a8a4 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -676,6 +676,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, struct gfs2_inode *ip = GFS2_I(inode); struct buffer_head *dibh; int error; + loff_t size = len; unsigned int nr_blks; sector_t lblock = offset >> inode->i_blkbits; @@ -709,8 +710,8 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, goto out; } } - if (offset + len > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE)) - i_size_write(inode, offset + len); + if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE)) + i_size_write(inode, offset + size); mark_inode_dirty(inode); @@ -779,12 +780,14 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, if (unlikely(error)) goto out_uninit; - if (!gfs2_write_alloc_required(ip, offset, len)) - goto out_unlock; - while (len > 0) { if (len < bytes) bytes = len; + if (!gfs2_write_alloc_required(ip, offset, bytes)) { + len -= bytes; + offset += bytes; + continue; + } qa = gfs2_qadata_get(ip); if (!qa) { error = -ENOMEM; -- cgit From ad1e3968292e3af1c49ccbd0fb7d2674010f8efc Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 10 Mar 2012 11:23:15 -0500 Subject: NFSv4.0: Re-establish the callback channel on NFS4ERR_CB_PATHDOWN When the NFSv4.0 server tells us that it can no-longer talk to us on the callback channel, we should attempt a new SETCLIENTID in order to re-transmit the callback channel information. Note that as long as we do not change the boot verifier, this is a safe procedure; the server is required to keep our state. Also move the function nfs_handle_cb_pathdown to fs/nfs/nfs4state.c, and change the name in order to mark it as being specific to NFSv4.0. 
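The recovery trigger is deliberately small: the error path just sets state bits and wakes the state manager, which later re-sends SETCLIENTID (carrying fresh callback information) and returns the delegations. The sketch below shows only that flag-and-kick pattern; the bit names and actions are stand-ins.

#include <stdio.h>

#define CLNT_LEASE_EXPIRED (1u << 0)	/* stand-in state bits */
#define CLNT_DELEGRETURN   (1u << 1)

static unsigned int cl_state;

static void handle_cb_path_down(void)
{
	cl_state |= CLNT_LEASE_EXPIRED;	/* forces a new SETCLIENTID */
	cl_state |= CLNT_DELEGRETURN;	/* give existing delegations back */
}

static void state_manager(void)
{
	if (cl_state & CLNT_LEASE_EXPIRED)
		printf("resend SETCLIENTID and re-register the callback channel\n");
	if (cl_state & CLNT_DELEGRETURN)
		printf("return outstanding delegations\n");
	cl_state = 0;
}

int main(void)
{
	handle_cb_path_down();
	state_manager();
	return 0;
}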
Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 17 ----------------- fs/nfs/delegation.h | 1 - fs/nfs/nfs4state.c | 18 ++++++++++++++++-- 3 files changed, 16 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index e27c0972f94e..12de88353eeb 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -453,11 +453,6 @@ static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, rcu_read_unlock(); } -static void nfs_client_mark_return_all_delegations(struct nfs_client *clp) -{ - nfs_client_mark_return_all_delegation_types(clp, FMODE_READ|FMODE_WRITE); -} - static void nfs_delegation_run_state_manager(struct nfs_client *clp) { if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) @@ -498,18 +493,6 @@ void nfs_expire_all_delegations(struct nfs_client *clp) nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE); } -/** - * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN - * @clp: client to process - * - */ -void nfs_handle_cb_pathdown(struct nfs_client *clp) -{ - if (clp == NULL) - return; - nfs_client_mark_return_all_delegations(clp); -} - static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server) { struct nfs_delegation *delegation; diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index e193012123e7..cd6a7a8dadae 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -42,7 +42,6 @@ void nfs_super_return_all_delegations(struct super_block *sb); void nfs_expire_all_delegations(struct nfs_client *clp); void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags); void nfs_expire_unreferenced_delegations(struct nfs_client *clp); -void nfs_handle_cb_pathdown(struct nfs_client *clp); int nfs_client_return_marked_delegations(struct nfs_client *clp); int nfs_delegations_present(struct nfs_client *clp); void nfs_remove_bad_delegation(struct inode *inode); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index de44804d9864..5fa43cd9bfc5 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1097,9 +1097,23 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp) } EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery); +/* + * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN + * @clp: client to process + * + * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a + * resend of the SETCLIENTID and hence re-establish the + * callback channel. Then return all existing delegations. + */ +static void nfs40_handle_cb_pathdown(struct nfs_client *clp) +{ + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + nfs_expire_all_delegations(clp); +} + void nfs4_schedule_path_down_recovery(struct nfs_client *clp) { - nfs_handle_cb_pathdown(clp); + nfs40_handle_cb_pathdown(clp); nfs4_schedule_state_manager(clp); } @@ -1444,7 +1458,7 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) case 0: break; case -NFS4ERR_CB_PATH_DOWN: - nfs_handle_cb_pathdown(clp); + nfs40_handle_cb_pathdown(clp); break; case -NFS4ERR_NO_GRACE: nfs4_state_end_reclaim_reboot(clp); -- cgit From 978d6d8c4574098050b22281b9ed06818c0b23ca Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Mon, 12 Dec 2011 10:02:30 -0600 Subject: vfs: Correctly set the dir i_mutex lockdep class 9a7aa12f3911853a introduced additional logic around setting the i_mutex lockdep class for directory inodes. 
The idea was that some filesystems may want their own special lockdep class for different directory inodes and calling unlock_new_inode() should not clobber one of those special classes. I believe that the added conditional, around the *negated* return value of lockdep_match_class(), caused directory inodes to be placed in the wrong lockdep class. inode_init_always() sets the i_mutex lockdep class with i_mutex_key for all inodes. If the filesystem did not change the class during inode initialization, then the conditional mentioned above was false and the directory inode was incorrectly left in the non-directory lockdep class. If the filesystem did set a special lockdep class, then the conditional mentioned above was true and that class was clobbered with i_mutex_dir_key. This patch removes the negation from the conditional so that the i_mutex lockdep class is properly set for directory inodes. Special classes are preserved and directory inodes with unmodified classes are set with i_mutex_dir_key. Signed-off-by: Tyler Hicks Reviewed-by: Jan Kara Signed-off-by: Al Viro --- fs/inode.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index d3ebdbe723d0..8affbc9c211b 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -938,8 +938,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode) struct file_system_type *type = inode->i_sb->s_type; /* Set new key only if filesystem hasn't already changed it */ - if (!lockdep_match_class(&inode->i_mutex, - &type->i_mutex_key)) { + if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) { /* * ensure nobody is actually holding i_mutex */ -- cgit From f6940fe9092e796119af691c7f722c252f4fc524 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 20 Feb 2012 17:54:00 +0100 Subject: udf: Fix deadlock in udf_release_file() udf_release_file() can be called from munmap() path with mmap_sem held. Thus we cannot take i_mutex there because that ranks above mmap_sem. Luckily, i_mutex is not needed in udf_release_file() anymore since protection by i_data_sem is enough to protect from races with write and truncate. Reported-by: Al Viro Reviewed-by: Namjae Jeon Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/udf/file.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/udf/file.c b/fs/udf/file.c index dca0c3881e82..d567b8448dfc 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -201,12 +201,10 @@ out: static int udf_release_file(struct inode *inode, struct file *filp) { if (filp->f_mode & FMODE_WRITE) { - mutex_lock(&inode->i_mutex); down_write(&UDF_I(inode)->i_data_sem); udf_discard_prealloc(inode); udf_truncate_tail_extent(inode); up_write(&UDF_I(inode)->i_data_sem); - mutex_unlock(&inode->i_mutex); } return 0; } -- cgit From 097b180ca09b581ef0dc24fbcfc1b227de3875df Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 6 Mar 2012 13:56:33 +0100 Subject: vfs: fix double put after complete_walk() complete_walk() already puts nd->path, no need to do it again at cleanup time. This would result in Oopses if triggered, apparently the codepath is not too well exercised. Signed-off-by: Miklos Szeredi CC: stable@vger.kernel.org Signed-off-by: Al Viro --- fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index e2ba62820a0f..f79aef16320b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2261,7 +2261,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, /* Why this, you ask? 
_Now_ we might have grown LOOKUP_JUMPED... */ error = complete_walk(nd); if (error) - goto exit; + return ERR_PTR(error); error = -EISDIR; if (S_ISDIR(nd->inode->i_mode)) goto exit; -- cgit From 7f6c7e62fcc123e6bd9206da99a2163fe3facc31 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 6 Mar 2012 13:56:34 +0100 Subject: vfs: fix return value from do_last() complete_walk() returns either ECHILD or ESTALE. do_last() turns this into ECHILD unconditionally. If not in RCU mode, this error will reach userspace which is complete nonsense. Signed-off-by: Miklos Szeredi CC: stable@vger.kernel.org Signed-off-by: Al Viro --- fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index f79aef16320b..46ea9cc16647 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2162,7 +2162,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, /* sayonara */ error = complete_walk(nd); if (error) - return ERR_PTR(-ECHILD); + return ERR_PTR(error); error = -ENOTDIR; if (nd->flags & LOOKUP_DIRECTORY) { -- cgit From 310fa7a36722017088af123043ebd231cd6bc559 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 10 Mar 2012 17:07:28 -0500 Subject: restore smp_mb() in unlock_new_inode() wait_on_inode() doesn't have ->i_lock Signed-off-by: Al Viro --- fs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 8affbc9c211b..83ab215baab1 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -965,6 +965,7 @@ void unlock_new_inode(struct inode *inode) spin_lock(&inode->i_lock); WARN_ON(!(inode->i_state & I_NEW)); inode->i_state &= ~I_NEW; + smp_mb(); wake_up_bit(&inode->i_state, __I_NEW); spin_unlock(&inode->i_lock); } -- cgit From 9994b62b5621f88828d442fcd03fe3ce4c43344b Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 8 Mar 2012 17:29:34 -0500 Subject: NFS: remove NFS_PAGE_TAG_LOCKED The last real use of this tag was removed by commit 7f2f12d963 NFS: Simplify nfs_wb_page() Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 32 +------------------------------- fs/nfs/write.c | 20 +++++++++----------- 2 files changed, 10 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 77a184e2fe47..fc5b54b84f8f 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -107,36 +107,6 @@ void nfs_unlock_request(struct nfs_page *req) nfs_release_request(req); } -/** - * nfs_set_page_tag_locked - Tag a request as locked - * @req: - */ -int nfs_set_page_tag_locked(struct nfs_page *req) -{ - if (!nfs_lock_request_dontget(req)) - return 0; - if (test_bit(PG_MAPPED, &req->wb_flags)) - radix_tree_tag_set(&NFS_I(req->wb_context->dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); - return 1; -} - -/** - * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers - */ -void nfs_clear_page_tag_locked(struct nfs_page *req) -{ - if (test_bit(PG_MAPPED, &req->wb_flags)) { - struct inode *inode = req->wb_context->dentry->d_inode; - struct nfs_inode *nfsi = NFS_I(inode); - - spin_lock(&inode->i_lock); - radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); - nfs_unlock_request(req); - spin_unlock(&inode->i_lock); - } else - nfs_unlock_request(req); -} - /* * nfs_clear_request - Free up all resources allocated to the request * @req: @@ -469,7 +439,7 @@ int nfs_scan_list(struct nfs_inode *nfsi, if (req->wb_index > idx_end) goto out; idx_start = req->wb_index + 1; - if (nfs_set_page_tag_locked(req)) { + 
if (nfs_lock_request_dontget(req)) { kref_get(&req->wb_kref); radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, tag); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 0b1831d95849..fd8a4f07bc0c 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -236,10 +236,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo req = nfs_page_find_request_locked(page); if (req == NULL) break; - if (nfs_set_page_tag_locked(req)) + if (nfs_lock_request_dontget(req)) break; /* Note: If we hold the page lock, as is the case in nfs_writepage, - * then the call to nfs_set_page_tag_locked() will always + * then the call to nfs_lock_request_dontget() will always * succeed provided that someone hasn't already marked the * request as dirty (in which case we don't care). */ @@ -397,8 +397,6 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) set_page_private(req->wb_page, (unsigned long)req); nfsi->npages++; kref_get(&req->wb_kref); - radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, - NFS_PAGE_TAG_LOCKED); spin_unlock(&inode->i_lock); radix_tree_preload_end(); out: @@ -604,7 +602,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, || end < req->wb_offset) goto out_flushme; - if (nfs_set_page_tag_locked(req)) + if (nfs_lock_request_dontget(req)) break; /* The request is locked, so wait and then retry */ @@ -684,7 +682,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, nfs_grow_file(page, offset, count); nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); nfs_mark_request_dirty(req); - nfs_clear_page_tag_locked(req); + nfs_unlock_request(req); return 0; } @@ -777,7 +775,7 @@ static void nfs_writepage_release(struct nfs_page *req, if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data)) nfs_inode_remove_request(req); - nfs_clear_page_tag_locked(req); + nfs_unlock_request(req); nfs_end_page_writeback(page); } @@ -925,7 +923,7 @@ static void nfs_redirty_request(struct nfs_page *req) struct page *page = req->wb_page; nfs_mark_request_dirty(req); - nfs_clear_page_tag_locked(req); + nfs_unlock_request(req); nfs_end_page_writeback(page); } @@ -1199,7 +1197,7 @@ static void nfs_writeback_release_full(void *calldata) remove_request: nfs_inode_remove_request(req); next: - nfs_clear_page_tag_locked(req); + nfs_unlock_request(req); nfs_end_page_writeback(page); } nfs_writedata_release(calldata); @@ -1411,7 +1409,7 @@ void nfs_retry_commit(struct list_head *page_list, dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); dec_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); - nfs_clear_page_tag_locked(req); + nfs_unlock_request(req); } } EXPORT_SYMBOL_GPL(nfs_retry_commit); @@ -1486,7 +1484,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data) dprintk(" mismatch\n"); nfs_mark_request_dirty(req); next: - nfs_clear_page_tag_locked(req); + nfs_unlock_request(req); } } EXPORT_SYMBOL_GPL(nfs_commit_release_pages); -- cgit From d6d6dc7cdfda7c8f49a89a7b7261846f319da6d1 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 8 Mar 2012 17:29:35 -0500 Subject: NFS: remove nfs_inode radix tree The radix tree is only being used to compile lists of reqs needing commit. It is simpler to just put the reqs directly into a list. 
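A plain intrusive list is enough here because each request only ever needs to sit on one commit list and be walked in order; there is nothing to look up by index. The minimal userspace version below mirrors that shape (a sketch in the style of list_head, not the kernel's implementation):

#include <stddef.h>
#include <stdio.h>

struct list { struct list *prev, *next; };

#define LIST_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list *item, struct list *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

struct req {
	int         index;
	struct list node;	/* linkage lives inside the request itself */
};

int main(void)
{
	struct list commit_list = LIST_INIT(commit_list);
	struct req reqs[3] = { { .index = 10 }, { .index = 11 }, { .index = 12 } };
	struct list *pos;
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&reqs[i].node, &commit_list);

	for (pos = commit_list.next; pos != &commit_list; pos = pos->next) {
		struct req *r = (struct req *)((char *)pos - offsetof(struct req, node));
		printf("commit req %d\n", r->index);
	}
	return 0;
}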
Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 2 +- fs/nfs/internal.h | 2 + fs/nfs/nfs4filelayout.c | 109 ++++++++++++++++++++++++++++++++++--------- fs/nfs/nfs4filelayout.h | 7 ++- fs/nfs/pagelist.c | 61 ------------------------ fs/nfs/pnfs.h | 82 +++++++++++++++++---------------- fs/nfs/write.c | 120 +++++++++++++++++++++++++++--------------------- 7 files changed, 206 insertions(+), 177 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 70e25c9c5670..1a19f8d30c14 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1560,7 +1560,7 @@ static void init_once(void *foo) INIT_LIST_HEAD(&nfsi->open_files); INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); - INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); + INIT_LIST_HEAD(&nfsi->commit_list); nfsi->npages = 0; nfsi->ncommit = 0; atomic_set(&nfsi->silly_count, 1); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 0c3648a947d1..04a914704e7b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -308,6 +308,8 @@ extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); extern void nfs_readdata_release(struct nfs_read_data *rdata); /* write.c */ +extern int nfs_scan_commit_list(struct list_head *src, struct list_head *dst, + int max); extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head); extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio, diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 768f6f86c9f0..716fac6bc082 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -682,14 +682,16 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, int size = (fl->stripe_type == STRIPE_SPARSE) ? fl->dsaddr->ds_num : fl->dsaddr->stripe_count; - fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags); + fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags); if (!fl->commit_buckets) { filelayout_free_lseg(&fl->generic_hdr); return NULL; } fl->number_of_buckets = size; - for (i = 0; i < size; i++) - INIT_LIST_HEAD(&fl->commit_buckets[i]); + for (i = 0; i < size; i++) { + INIT_LIST_HEAD(&fl->commit_buckets[i].written); + INIT_LIST_HEAD(&fl->commit_buckets[i].committing); + } } return &fl->generic_hdr; } @@ -767,11 +769,6 @@ static const struct nfs_pageio_ops filelayout_pg_write_ops = { .pg_doio = pnfs_generic_pg_writepages, }; -static bool filelayout_mark_pnfs_commit(struct pnfs_layout_segment *lseg) -{ - return !FILELAYOUT_LSEG(lseg)->commit_through_mds; -} - static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) { if (fl->stripe_type == STRIPE_SPARSE) @@ -780,13 +777,39 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) return j; } -struct list_head *filelayout_choose_commit_list(struct nfs_page *req) +/* The generic layer is about to remove the req from the commit list. + * If this will make the bucket empty, it will need to put the lseg reference. + * Note inode lock is held, so we can't do the put here. + */ +static struct pnfs_layout_segment * +filelayout_remove_commit_req(struct nfs_page *req) +{ + if (list_is_singular(&req->wb_list)) { + struct inode *inode = req->wb_context->dentry->d_inode; + struct pnfs_layout_segment *lseg; + + /* From here we can find the bucket, but for the moment, + * since there is only one relevant lseg... 
+ */ + list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { + if (lseg->pls_range.iomode == IOMODE_RW) + return lseg; + } + } + return NULL; +} + +static struct list_head * +filelayout_choose_commit_list(struct nfs_page *req, + struct pnfs_layout_segment *lseg) { - struct pnfs_layout_segment *lseg = req->wb_commit_lseg; struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); u32 i, j; struct list_head *list; + if (fl->commit_through_mds) + return &NFS_I(req->wb_context->dentry->d_inode)->commit_list; + /* Note that we are calling nfs4_fl_calc_j_index on each page * that ends up being committed to a data server. An attractive * alternative is to add a field to nfs_write_data and nfs_page @@ -796,9 +819,14 @@ struct list_head *filelayout_choose_commit_list(struct nfs_page *req) j = nfs4_fl_calc_j_index(lseg, (loff_t)req->wb_index << PAGE_CACHE_SHIFT); i = select_bucket_index(fl, j); - list = &fl->commit_buckets[i]; + list = &fl->commit_buckets[i].written; if (list_empty(list)) { - /* Non-empty buckets hold a reference on the lseg */ + /* Non-empty buckets hold a reference on the lseg. That ref + * is normally transferred to the COMMIT call and released + * there. It could also be released if the last req is pulled + * off due to a rewrite, in which case it will be done in + * filelayout_remove_commit_req + */ get_lseg(lseg); } return list; @@ -860,18 +888,56 @@ static int filelayout_initiate_commit(struct nfs_write_data *data, int how) /* * This is only useful while we are using whole file layouts. */ -static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode) +static struct pnfs_layout_segment * +find_only_write_lseg_locked(struct inode *inode) { - struct pnfs_layout_segment *lseg, *rv = NULL; + struct pnfs_layout_segment *lseg; - spin_lock(&inode->i_lock); list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) if (lseg->pls_range.iomode == IOMODE_RW) - rv = get_lseg(lseg); + return get_lseg(lseg); + return NULL; +} + +static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode) +{ + struct pnfs_layout_segment *rv; + + spin_lock(&inode->i_lock); + rv = find_only_write_lseg_locked(inode); spin_unlock(&inode->i_lock); return rv; } +/* Move reqs from written to committing lists, returning count of number moved. + * Note called with i_lock held. 
+ */ +static int filelayout_scan_commit_lists(struct inode *inode, int max) +{ + struct pnfs_layout_segment *lseg; + struct nfs4_filelayout_segment *fl; + int i, rv = 0, cnt; + + lseg = find_only_write_lseg_locked(inode); + if (!lseg) + return 0; + fl = FILELAYOUT_LSEG(lseg); + if (fl->commit_through_mds) + goto out_put; + for (i = 0; i < fl->number_of_buckets; i++) { + if (list_empty(&fl->commit_buckets[i].written)) + continue; + cnt = nfs_scan_commit_list(&fl->commit_buckets[i].written, + &fl->commit_buckets[i].committing, + max); + max -= cnt; + rv += cnt; + } +out_put: + put_lseg(lseg); + return rv; +} + static int alloc_ds_commits(struct inode *inode, struct list_head *list) { struct pnfs_layout_segment *lseg; @@ -886,7 +952,7 @@ static int alloc_ds_commits(struct inode *inode, struct list_head *list) return 0; fl = FILELAYOUT_LSEG(lseg); for (i = 0; i < fl->number_of_buckets; i++) { - if (list_empty(&fl->commit_buckets[i])) + if (list_empty(&fl->commit_buckets[i].committing)) continue; data = nfs_commitdata_alloc(); if (!data) @@ -900,9 +966,9 @@ static int alloc_ds_commits(struct inode *inode, struct list_head *list) out_bad: for (j = i; j < fl->number_of_buckets; j++) { - if (list_empty(&fl->commit_buckets[i])) + if (list_empty(&fl->commit_buckets[i].committing)) continue; - nfs_retry_commit(&fl->commit_buckets[i], lseg); + nfs_retry_commit(&fl->commit_buckets[i].committing, lseg); put_lseg(lseg); /* associated with emptying bucket */ } put_lseg(lseg); @@ -937,7 +1003,7 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how); } else { - nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index], data->lseg); + nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg); filelayout_initiate_commit(data, how); } } @@ -967,8 +1033,9 @@ static struct pnfs_layoutdriver_type filelayout_type = { .free_lseg = filelayout_free_lseg, .pg_read_ops = &filelayout_pg_read_ops, .pg_write_ops = &filelayout_pg_write_ops, - .mark_pnfs_commit = filelayout_mark_pnfs_commit, .choose_commit_list = filelayout_choose_commit_list, + .remove_commit_req = filelayout_remove_commit_req, + .scan_commit_lists = filelayout_scan_commit_lists, .commit_pagelist = filelayout_commit_pagelist, .read_pagelist = filelayout_read_pagelist, .write_pagelist = filelayout_write_pagelist, diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h index 2e42284253fa..21190bb1f5e3 100644 --- a/fs/nfs/nfs4filelayout.h +++ b/fs/nfs/nfs4filelayout.h @@ -74,6 +74,11 @@ struct nfs4_file_layout_dsaddr { struct nfs4_pnfs_ds *ds_list[1]; }; +struct nfs4_fl_commit_bucket { + struct list_head written; + struct list_head committing; +}; + struct nfs4_filelayout_segment { struct pnfs_layout_segment generic_hdr; u32 stripe_type; @@ -84,7 +89,7 @@ struct nfs4_filelayout_segment { struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ unsigned int num_fh; struct nfs_fh **fh_array; - struct list_head *commit_buckets; /* Sort commits to ds */ + struct nfs4_fl_commit_bucket *commit_buckets; /* Sort commits to ds */ int number_of_buckets; }; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index fc5b54b84f8f..d21fceaa9f62 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -396,67 +396,6 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) } } -#define NFS_SCAN_MAXENTRIES 16 -/** - * nfs_scan_list - Scan a list for 
matching requests - * @nfsi: NFS inode - * @dst: Destination list - * @idx_start: lower bound of page->index to scan - * @npages: idx_start + npages sets the upper bound to scan. - * @tag: tag to scan for - * - * Moves elements from one of the inode request lists. - * If the number of requests is set to 0, the entire address_space - * starting at index idx_start, is scanned. - * The requests are *not* checked to ensure that they form a contiguous set. - * You must be holding the inode's i_lock when calling this function - */ -int nfs_scan_list(struct nfs_inode *nfsi, - struct list_head *dst, pgoff_t idx_start, - unsigned int npages, int tag) -{ - struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; - struct nfs_page *req; - pgoff_t idx_end; - int found, i; - int res; - struct list_head *list; - - res = 0; - if (npages == 0) - idx_end = ~0; - else - idx_end = idx_start + npages - 1; - - for (;;) { - found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, - (void **)&pgvec[0], idx_start, - NFS_SCAN_MAXENTRIES, tag); - if (found <= 0) - break; - for (i = 0; i < found; i++) { - req = pgvec[i]; - if (req->wb_index > idx_end) - goto out; - idx_start = req->wb_index + 1; - if (nfs_lock_request_dontget(req)) { - kref_get(&req->wb_kref); - radix_tree_tag_clear(&nfsi->nfs_page_tree, - req->wb_index, tag); - list = pnfs_choose_commit_list(req, dst); - nfs_list_add_request(req, list); - res++; - if (res == INT_MAX) - goto out; - } - } - /* for latency reduction */ - cond_resched_lock(&nfsi->vfs_inode.i_lock); - } -out: - return res; -} - int __init nfs_init_nfspagecache(void) { nfs_page_cachep = kmem_cache_create("nfs_page", diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 8088d51f495e..ef92f676cf1e 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -94,11 +94,10 @@ struct pnfs_layoutdriver_type { const struct nfs_pageio_ops *pg_read_ops; const struct nfs_pageio_ops *pg_write_ops; - /* Returns true if layoutdriver wants to divert this request to - * driver's commit routine. 
- */ - bool (*mark_pnfs_commit)(struct pnfs_layout_segment *lseg); - struct list_head * (*choose_commit_list) (struct nfs_page *req); + struct list_head * (*choose_commit_list) (struct nfs_page *req, + struct pnfs_layout_segment *lseg); + struct pnfs_layout_segment *(*remove_commit_req) (struct nfs_page *req); + int (*scan_commit_lists) (struct inode *inode, int max); int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how); /* @@ -262,20 +261,6 @@ static inline int pnfs_enabled_sb(struct nfs_server *nfss) return nfss->pnfs_curr_ld != NULL; } -static inline void -pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) -{ - if (lseg) { - struct pnfs_layoutdriver_type *ld; - - ld = NFS_SERVER(req->wb_page->mapping->host)->pnfs_curr_ld; - if (ld->mark_pnfs_commit && ld->mark_pnfs_commit(lseg)) { - set_bit(PG_PNFS_COMMIT, &req->wb_flags); - req->wb_commit_lseg = get_lseg(lseg); - } - } -} - static inline int pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) { @@ -285,26 +270,38 @@ pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) } static inline struct list_head * -pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds) +pnfs_choose_commit_list(struct nfs_page *req, struct pnfs_layout_segment *lseg) { + struct inode *inode = req->wb_context->dentry->d_inode; struct list_head *rv; - if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags)) { - struct inode *inode = req->wb_commit_lseg->pls_layout->plh_inode; - - set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags); - rv = NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list(req); - /* matched by ref taken when PG_PNFS_COMMIT is set */ - put_lseg(req->wb_commit_lseg); - } else - rv = mds; + if (lseg && NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list) + rv = NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list(req, lseg); + else + rv = &NFS_I(inode)->commit_list; return rv; } -static inline void pnfs_clear_request_commit(struct nfs_page *req) +static inline struct pnfs_layout_segment * +pnfs_clear_request_commit(struct nfs_page *req) { - if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags)) - put_lseg(req->wb_commit_lseg); + struct inode *inode = req->wb_context->dentry->d_inode; + + if (NFS_SERVER(inode)->pnfs_curr_ld && + NFS_SERVER(inode)->pnfs_curr_ld->remove_commit_req) + return NFS_SERVER(inode)->pnfs_curr_ld->remove_commit_req(req); + else + return NULL; +} + +static inline int +pnfs_scan_commit_lists(struct inode *inode, int max) +{ + if (NFS_SERVER(inode)->pnfs_curr_ld && + NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists) + return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(inode, max); + else + return 0; } /* Should the pNFS client commit and return the layout upon a setattr */ @@ -400,11 +397,6 @@ static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, st return false; } -static inline void -pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) -{ -} - static inline int pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) { @@ -412,13 +404,23 @@ pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) } static inline struct list_head * -pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds) +pnfs_choose_commit_list(struct nfs_page *req, struct pnfs_layout_segment *lseg) { - return mds; + struct inode *inode = req->wb_context->dentry->d_inode; + + return &NFS_I(inode)->commit_list; } -static inline void 
pnfs_clear_request_commit(struct nfs_page *req) +static inline struct pnfs_layout_segment * +pnfs_clear_request_commit(struct nfs_page *req) { + return NULL; +} + +static inline int +pnfs_scan_commit_lists(struct inode *inode, int max) +{ + return 0; } static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index fd8a4f07bc0c..a630ad65d64c 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -375,21 +375,14 @@ out_err: /* * Insert a write request into an inode */ -static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) +static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) { struct nfs_inode *nfsi = NFS_I(inode); - int error; - - error = radix_tree_preload(GFP_NOFS); - if (error != 0) - goto out; /* Lock the request! */ nfs_lock_request_dontget(req); spin_lock(&inode->i_lock); - error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req); - BUG_ON(error); if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE)) inode->i_version++; set_bit(PG_MAPPED, &req->wb_flags); @@ -398,11 +391,10 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) nfsi->npages++; kref_get(&req->wb_kref); spin_unlock(&inode->i_lock); - radix_tree_preload_end(); -out: - return error; } +static struct pnfs_layout_segment *nfs_clear_request_commit(struct nfs_page *req); + /* * Remove a write request from an inode */ @@ -410,16 +402,18 @@ static void nfs_inode_remove_request(struct nfs_page *req) { struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); + struct pnfs_layout_segment *lseg; BUG_ON (!NFS_WBACK_BUSY(req)); spin_lock(&inode->i_lock); + lseg = nfs_clear_request_commit(req); set_page_private(req->wb_page, 0); ClearPagePrivate(req->wb_page); clear_bit(PG_MAPPED, &req->wb_flags); - radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); nfsi->npages--; spin_unlock(&inode->i_lock); + put_lseg(lseg); nfs_release_request(req); } @@ -438,31 +432,38 @@ nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) { struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); + struct list_head *clist; + clist = pnfs_choose_commit_list(req, lseg); spin_lock(&inode->i_lock); set_bit(PG_CLEAN, &(req)->wb_flags); - radix_tree_tag_set(&nfsi->nfs_page_tree, - req->wb_index, - NFS_PAGE_TAG_COMMIT); + nfs_list_add_request(req, clist); nfsi->ncommit++; spin_unlock(&inode->i_lock); - pnfs_mark_request_commit(req, lseg); inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); __mark_inode_dirty(inode, I_DIRTY_DATASYNC); } -static int +static void +nfs_clear_page_commit(struct page *page) +{ + dec_zone_page_state(page, NR_UNSTABLE_NFS); + dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE); +} + +static struct pnfs_layout_segment * nfs_clear_request_commit(struct nfs_page *req) { - struct page *page = req->wb_page; + struct pnfs_layout_segment *lseg = NULL; if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) { - dec_zone_page_state(page, NR_UNSTABLE_NFS); - dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE); - return 1; + nfs_clear_page_commit(req->wb_page); + lseg = pnfs_clear_request_commit(req); + NFS_I(req->wb_context->dentry->d_inode)->ncommit--; + list_del(&req->wb_list); } - return 0; + return lseg; } static inline @@ -494,10 +495,10 @@ nfs_mark_request_commit(struct nfs_page *req, struct 
pnfs_layout_segment *lseg) { } -static inline int +static inline struct pnfs_layout_segment * nfs_clear_request_commit(struct nfs_page *req) { - return 0; + return NULL; } static inline @@ -518,46 +519,67 @@ int nfs_reschedule_unstable_write(struct nfs_page *req, static int nfs_need_commit(struct nfs_inode *nfsi) { - return radix_tree_tagged(&nfsi->nfs_page_tree, NFS_PAGE_TAG_COMMIT); + return nfsi->ncommit > 0; } +/* i_lock held by caller */ +int +nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max) +{ + struct nfs_page *req, *tmp; + int ret = 0; + + list_for_each_entry_safe(req, tmp, src, wb_list) { + if (nfs_lock_request_dontget(req)) { + kref_get(&req->wb_kref); + list_move_tail(&req->wb_list, dst); + clear_bit(PG_CLEAN, &(req)->wb_flags); + ret++; + if (ret == max) + break; + } + } + return ret; +} +EXPORT_SYMBOL_GPL(nfs_scan_commit_list); + /* * nfs_scan_commit - Scan an inode for commit requests * @inode: NFS inode to scan * @dst: destination list - * @idx_start: lower bound of page->index to scan. - * @npages: idx_start + npages sets the upper bound to scan. * * Moves requests from the inode's 'commit' request list. * The requests are *not* checked to ensure that they form a contiguous set. */ static int -nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) +nfs_scan_commit(struct inode *inode, struct list_head *dst) { struct nfs_inode *nfsi = NFS_I(inode); - int ret; - - if (!nfs_need_commit(nfsi)) - return 0; + int ret = 0; spin_lock(&inode->i_lock); - ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); - if (ret > 0) + if (nfsi->ncommit > 0) { + int pnfs_ret; + + ret = nfs_scan_commit_list(&nfsi->commit_list, dst, INT_MAX); + pnfs_ret = pnfs_scan_commit_lists(inode, INT_MAX - ret); + if (pnfs_ret) { + ret += pnfs_ret; + set_bit(NFS_INO_PNFS_COMMIT, &nfsi->flags); + } nfsi->ncommit -= ret; + } spin_unlock(&inode->i_lock); - - if (nfs_need_commit(NFS_I(inode))) - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); - return ret; } + #else static inline int nfs_need_commit(struct nfs_inode *nfsi) { return 0; } -static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) +static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst) { return 0; } @@ -579,6 +601,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, unsigned int rqend; unsigned int end; int error; + struct pnfs_layout_segment *lseg = NULL; if (!PagePrivate(page)) return NULL; @@ -614,12 +637,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, spin_lock(&inode->i_lock); } - if (nfs_clear_request_commit(req) && - radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, - req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) { - NFS_I(inode)->ncommit--; - pnfs_clear_request_commit(req); - } + lseg = nfs_clear_request_commit(req); /* Okay, the request matches. 
Update the region */ if (offset < req->wb_offset) { @@ -632,6 +650,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, req->wb_bytes = rqend - req->wb_offset; out_unlock: spin_unlock(&inode->i_lock); + put_lseg(lseg); return req; out_flushme: spin_unlock(&inode->i_lock); @@ -653,7 +672,6 @@ static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx, { struct inode *inode = page->mapping->host; struct nfs_page *req; - int error; req = nfs_try_to_update_request(inode, page, offset, bytes); if (req != NULL) @@ -661,11 +679,7 @@ static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx, req = nfs_create_request(ctx, inode, page, offset, bytes); if (IS_ERR(req)) goto out; - error = nfs_inode_add_request(inode, req); - if (error != 0) { - nfs_release_request(req); - req = ERR_PTR(error); - } + nfs_inode_add_request(inode, req); out: return req; } @@ -1458,7 +1472,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data) while (!list_empty(&data->pages)) { req = nfs_list_entry(data->pages.next); nfs_list_remove_request(req); - nfs_clear_request_commit(req); + nfs_clear_page_commit(req->wb_page); dprintk("NFS: commit (%s/%lld %d@%lld)", req->wb_context->dentry->d_sb->s_id, @@ -1515,7 +1529,7 @@ int nfs_commit_inode(struct inode *inode, int how) res = nfs_commit_set_lock(NFS_I(inode), may_wait); if (res <= 0) goto out_mark_dirty; - res = nfs_scan_commit(inode, &head, 0, 0); + res = nfs_scan_commit(inode, &head); if (res) { int error; -- cgit From cb9c1c4a880bc734c2848f8647be2cfa336ee346 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Sun, 11 Mar 2012 18:20:23 +0400 Subject: NFS: replace global bl_mount_reply with per-net one This global variable is used for blocklayout downcall and thus can be corrupted if case of existence of multiple networks namespaces. 
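The fix relies on the kernel's per-network-namespace (per-net) storage. The following is only a rough sketch of that pattern with invented demo_* names; the real code keeps its state in struct nfs_net and fetches it with net_generic(net, nfs_net_id), as the diff below shows.

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int demo_net_id;

struct demo_net {
        int some_state;                 /* one instance per namespace */
};

static __net_init int demo_net_init(struct net *net)
{
        struct demo_net *dn = net_generic(net, demo_net_id);

        dn->some_state = 0;             /* storage preallocated via .size */
        return 0;
}

static struct pernet_operations demo_net_ops = {
        .init   = demo_net_init,
        .id     = &demo_net_id,
        .size   = sizeof(struct demo_net),
};

/* register_pernet_subsys(&demo_net_ops) at module init time then gives
 * every network namespace its own struct demo_net, so a reply such as
 * bl_mount_reply can no longer be clobbered from another namespace.
 */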
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.h | 5 ----- fs/nfs/blocklayout/blocklayoutdev.c | 9 +++++---- fs/nfs/netns.h | 6 ++++++ 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index 0966b39bbcfb..58ac8614c4c4 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -153,11 +153,6 @@ BLK_LSEG2EXT(struct pnfs_layout_segment *lseg) return BLK_LO2EXT(lseg->pls_layout); } -struct bl_dev_msg { - int32_t status; - uint32_t major, minor; -}; - struct bl_msg_hdr { u8 type; u16 totallen; /* length of entire message, including hdr itself */ diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index b48f782a94ad..1d58642b1530 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -79,15 +79,16 @@ int nfs4_blkdev_put(struct block_device *bdev) return blkdev_put(bdev, FMODE_READ); } -static struct bl_dev_msg bl_mount_reply; - ssize_t bl_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) { + struct nfs_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info, + nfs_net_id); + if (mlen != sizeof (struct bl_dev_msg)) return -EINVAL; - if (copy_from_user(&bl_mount_reply, src, mlen) != 0) + if (copy_from_user(&nn->bl_mount_reply, src, mlen) != 0) return -EFAULT; wake_up(&bl_wq); @@ -118,10 +119,10 @@ nfs4_blk_decode_device(struct nfs_server *server, }; uint8_t *dataptr; DECLARE_WAITQUEUE(wq, current); - struct bl_dev_msg *reply = &bl_mount_reply; int offset, len, i, rc; struct net *net = server->nfs_client->net; struct nfs_net *nn = net_generic(net, nfs_net_id); + struct bl_dev_msg *reply = &nn->bl_mount_reply; dprintk("%s CREATING PIPEFS MESSAGE\n", __func__); dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data, diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index 7baad89ae60e..73425f555cde 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -4,9 +4,15 @@ #include #include +struct bl_dev_msg { + int32_t status; + uint32_t major, minor; +}; + struct nfs_net { struct cache_detail *nfs_dns_resolve; struct rpc_pipe *bl_device_pipe; + struct bl_dev_msg bl_mount_reply; struct list_head nfs_client_list; struct list_head nfs_volume_list; #ifdef CONFIG_NFS_V4 -- cgit From 5ffaf8554163d9f3873988ce2f9977f6c6f408d2 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Sun, 11 Mar 2012 18:20:31 +0400 Subject: NFS: replace global bl_wq with per-net one This queue is used for sleeping in kernel and it have to be per-net since we don't want to wake any other waiters except in out network nemespace. BTW, move wq to per-net data is easy. But some way to handle upcall timeouts have to be provided. On message destroy in case of timeout, tasks, waiting for message to be delivered, should be awakened. Thus, some data required to located the right wait queue. Chosen solution replaces rpc_pipe_msg object with new introduced bl_pipe_msg object, containing rpc_pipe_msg and proper wq. 
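The key trick is embedding the generic rpc_pipe_msg inside a wrapper that also records which wait queue to wake, then recovering the wrapper with container_of() in the destroy callback. A minimal sketch of that embed-and-recover idiom, with invented demo_* names:

#include <linux/kernel.h>
#include <linux/wait.h>

struct demo_inner {
        int status;
};

struct demo_wrapper {
        struct demo_inner       inner;  /* embedded, not a pointer */
        wait_queue_head_t       *wq;    /* the per-net queue to wake */
};

static void demo_destroy(struct demo_inner *msg)
{
        struct demo_wrapper *w = container_of(msg, struct demo_wrapper, inner);

        wake_up(w->wq);         /* wakes only waiters in the right namespace */
}

This is exactly what bl_pipe_destroy_msg() does below with container_of(msg, struct bl_pipe_msg, msg).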
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 4 +--- fs/nfs/blocklayout/blocklayout.h | 7 +++++-- fs/nfs/blocklayout/blocklayoutdev.c | 32 ++++++++++++++++++-------------- fs/nfs/blocklayout/blocklayoutdm.c | 26 ++++++++++++++------------ fs/nfs/netns.h | 1 + 5 files changed, 39 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 783ebd51bd5f..61501346324e 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -46,8 +46,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andy Adamson "); MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); -wait_queue_head_t bl_wq; - static void print_page(struct page *page) { dprintk("PRINTPAGE page %p\n", page); @@ -1117,6 +1115,7 @@ static int nfs4blocklayout_net_init(struct net *net) struct nfs_net *nn = net_generic(net, nfs_net_id); struct dentry *dentry; + init_waitqueue_head(&nn->bl_wq); nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); if (IS_ERR(nn->bl_device_pipe)) return PTR_ERR(nn->bl_device_pipe); @@ -1153,7 +1152,6 @@ static int __init nfs4blocklayout_init(void) if (ret) goto out; - init_waitqueue_head(&bl_wq); ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block); if (ret) goto out_remove; diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index 58ac8614c4c4..03350690118e 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -153,13 +153,16 @@ BLK_LSEG2EXT(struct pnfs_layout_segment *lseg) return BLK_LO2EXT(lseg->pls_layout); } +struct bl_pipe_msg { + struct rpc_pipe_msg msg; + wait_queue_head_t *bl_wq; +}; + struct bl_msg_hdr { u8 type; u16 totallen; /* length of entire message, including hdr itself */ }; -extern wait_queue_head_t bl_wq; - #define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */ #define BL_DEVICE_MOUNT 0x1 /* Mount--create devices*/ #define BL_DEVICE_REQUEST_INIT 0x0 /* Start request */ diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index 1d58642b1530..a5c88a554d92 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -91,16 +91,18 @@ ssize_t bl_pipe_downcall(struct file *filp, const char __user *src, if (copy_from_user(&nn->bl_mount_reply, src, mlen) != 0) return -EFAULT; - wake_up(&bl_wq); + wake_up(&nn->bl_wq); return mlen; } void bl_pipe_destroy_msg(struct rpc_pipe_msg *msg) { + struct bl_pipe_msg *bl_pipe_msg = container_of(msg, struct bl_pipe_msg, msg); + if (msg->errno >= 0) return; - wake_up(&bl_wq); + wake_up(bl_pipe_msg->bl_wq); } /* @@ -112,7 +114,8 @@ nfs4_blk_decode_device(struct nfs_server *server, { struct pnfs_block_dev *rv; struct block_device *bd = NULL; - struct rpc_pipe_msg msg; + struct bl_pipe_msg bl_pipe_msg; + struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; struct bl_msg_hdr bl_msg = { .type = BL_DEVICE_MOUNT, .totallen = dev->mincount, @@ -128,15 +131,16 @@ nfs4_blk_decode_device(struct nfs_server *server, dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data, dev->mincount); - memset(&msg, 0, sizeof(msg)); - msg.data = kzalloc(sizeof(bl_msg) + dev->mincount, GFP_NOFS); - if (!msg.data) { + bl_pipe_msg.bl_wq = &nn->bl_wq; + memset(msg, 0, sizeof(*msg)); + msg->data = kzalloc(sizeof(bl_msg) + dev->mincount, GFP_NOFS); + if (!msg->data) { rv = ERR_PTR(-ENOMEM); goto out; } - memcpy(msg.data, &bl_msg, sizeof(bl_msg)); - dataptr = (uint8_t *) msg.data; + 
memcpy(msg->data, &bl_msg, sizeof(bl_msg)); + dataptr = (uint8_t *) msg->data; len = dev->mincount; offset = sizeof(bl_msg); for (i = 0; len > 0; i++) { @@ -145,13 +149,13 @@ nfs4_blk_decode_device(struct nfs_server *server, len -= PAGE_CACHE_SIZE; offset += PAGE_CACHE_SIZE; } - msg.len = sizeof(bl_msg) + dev->mincount; + msg->len = sizeof(bl_msg) + dev->mincount; dprintk("%s CALLING USERSPACE DAEMON\n", __func__); - add_wait_queue(&bl_wq, &wq); - rc = rpc_queue_upcall(nn->bl_device_pipe, &msg); + add_wait_queue(&nn->bl_wq, &wq); + rc = rpc_queue_upcall(nn->bl_device_pipe, msg); if (rc < 0) { - remove_wait_queue(&bl_wq, &wq); + remove_wait_queue(&nn->bl_wq, &wq); rv = ERR_PTR(rc); goto out; } @@ -159,7 +163,7 @@ nfs4_blk_decode_device(struct nfs_server *server, set_current_state(TASK_UNINTERRUPTIBLE); schedule(); __set_current_state(TASK_RUNNING); - remove_wait_queue(&bl_wq, &wq); + remove_wait_queue(&nn->bl_wq, &wq); if (reply->status != BL_DEVICE_REQUEST_PROC) { dprintk("%s failed to open device: %d\n", @@ -191,7 +195,7 @@ nfs4_blk_decode_device(struct nfs_server *server, bd->bd_block_size); out: - kfree(msg.data); + kfree(msg->data); return rv; } diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index a0f588fa49c1..30fc22af7bbb 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -40,7 +40,8 @@ static void dev_remove(struct net *net, dev_t dev) { - struct rpc_pipe_msg msg; + struct bl_pipe_msg bl_pipe_msg; + struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; struct bl_dev_msg bl_umount_request; struct bl_msg_hdr bl_msg = { .type = BL_DEVICE_UMOUNT, @@ -52,33 +53,34 @@ static void dev_remove(struct net *net, dev_t dev) dprintk("Entering %s\n", __func__); - memset(&msg, 0, sizeof(msg)); - msg.data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); - if (!msg.data) + bl_pipe_msg.bl_wq = &nn->bl_wq; + memset(&msg, 0, sizeof(*msg)); + msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); + if (!msg->data) goto out; memset(&bl_umount_request, 0, sizeof(bl_umount_request)); bl_umount_request.major = MAJOR(dev); bl_umount_request.minor = MINOR(dev); - memcpy(msg.data, &bl_msg, sizeof(bl_msg)); - dataptr = (uint8_t *) msg.data; + memcpy(msg->data, &bl_msg, sizeof(bl_msg)); + dataptr = (uint8_t *) msg->data; memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); - msg.len = sizeof(bl_msg) + bl_msg.totallen; + msg->len = sizeof(bl_msg) + bl_msg.totallen; - add_wait_queue(&bl_wq, &wq); - if (rpc_queue_upcall(nn->bl_device_pipe, &msg) < 0) { - remove_wait_queue(&bl_wq, &wq); + add_wait_queue(&nn->bl_wq, &wq); + if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { + remove_wait_queue(&nn->bl_wq, &wq); goto out; } set_current_state(TASK_UNINTERRUPTIBLE); schedule(); __set_current_state(TASK_RUNNING); - remove_wait_queue(&bl_wq, &wq); + remove_wait_queue(&nn->bl_wq, &wq); out: - kfree(msg.data); + kfree(msg->data); } /* diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index 73425f555cde..aa14ec303e94 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -13,6 +13,7 @@ struct nfs_net { struct cache_detail *nfs_dns_resolve; struct rpc_pipe *bl_device_pipe; struct bl_dev_msg bl_mount_reply; + wait_queue_head_t bl_wq; struct list_head nfs_client_list; struct list_head nfs_volume_list; #ifdef CONFIG_NFS_V4 -- cgit From 17280175c587469b34757263c7cfc608f0ea2334 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 11 Mar 2012 13:11:00 -0400 Subject: NFS: Fix a number of sparse warnings Fix a number of 
"warning: symbol 'foo' was not declared. Should it be static?" conditions. Fix 2 cases of "warning: Using plain integer as NULL pointer" fs/nfs/delegation.c:263:31: warning: restricted fmode_t degrades to integer - We want to allow upgrades to a WRITE delegation, but should otherwise consider servers that hand out duplicate delegations to be borken. Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 4 ++-- fs/nfs/client.c | 5 ++--- fs/nfs/delegation.c | 5 ++++- fs/nfs/dns_resolve.c | 1 + fs/nfs/idmap.c | 8 ++++---- fs/nfs/nfs3acl.c | 2 +- fs/nfs/nfs4filelayout.c | 10 +++++----- fs/nfs/nfs4filelayoutdev.c | 4 ++-- fs/nfs/nfs4proc.c | 23 ++++++++++++----------- fs/nfs/nfs4state.c | 3 ++- fs/nfs/objlayout/objlayout.c | 2 +- fs/nfs/pnfs_dev.c | 2 +- fs/nfs/unlink.c | 2 +- 13 files changed, 38 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 2afe23349c7b..eb95f5091c1a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -101,7 +101,7 @@ nfs4_callback_svc(void *vrqstp) /* * Prepare to bring up the NFSv4 callback service */ -struct svc_rqst * +static struct svc_rqst * nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) { int ret; @@ -172,7 +172,7 @@ nfs41_callback_svc(void *vrqstp) /* * Bring up the NFSv4.1 callback service */ -struct svc_rqst * +static struct svc_rqst * nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) { struct svc_rqst *rqstp; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d30dcbfb6b20..f1f047c376d9 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -443,9 +443,8 @@ static int nfs_sockaddr_cmp(const struct sockaddr *sa1, } /* Common match routine for v4.0 and v4.1 callback services */ -bool -nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp, - u32 minorversion) +static bool nfs4_cb_match_client(const struct sockaddr *addr, + struct nfs_client *clp, u32 minorversion) { struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 12de88353eeb..89af1d269274 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -256,11 +256,14 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct /* * Deal with broken servers that hand out two * delegations for the same file. + * Allow for upgrades to a WRITE delegation, but + * nothing else. 
*/ dfprintk(FILE, "%s: server %s handed out " "a duplicate delegation!\n", __func__, clp->cl_hostname); - if (delegation->type <= old_delegation->type) { + if (delegation->type == old_delegation->type || + !(delegation->type & FMODE_WRITE)) { freeme = delegation; delegation = NULL; goto out; diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index fcd8f1d7430f..b3924b8a6000 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -10,6 +10,7 @@ #include #include +#include "dns_resolve.h" ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, struct sockaddr *sa, size_t salen) diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index f72c1fc074e1..f9f89fc83ee0 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -54,8 +54,8 @@ /* Default cache timeout is 10 minutes */ unsigned int nfs_idmap_cache_timeout = 600; -const struct cred *id_resolver_cache; -struct key_type key_type_id_resolver_legacy; +static const struct cred *id_resolver_cache; +static struct key_type key_type_id_resolver_legacy; /** @@ -160,7 +160,7 @@ static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen) return snprintf(buf, buflen, "%u", id); } -struct key_type key_type_id_resolver = { +static struct key_type key_type_id_resolver = { .name = "id_resolver", .instantiate = user_instantiate, .match = user_match, @@ -381,7 +381,7 @@ static const struct rpc_pipe_ops idmap_upcall_ops = { .destroy_msg = idmap_pipe_destroy_msg, }; -struct key_type key_type_id_resolver_legacy = { +static struct key_type key_type_id_resolver_legacy = { .name = "id_resolver", .instantiate = user_instantiate, .match = user_match, diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 7ef23979896d..e4498dc351a8 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -192,7 +192,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type) .pages = pages, }; struct nfs3_getaclres res = { - 0 + NULL, }; struct rpc_message msg = { .rpc_argp = &args, diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 716fac6bc082..379a085f8f25 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -323,21 +323,21 @@ static void filelayout_commit_release(void *data) nfs_commitdata_release(wdata); } -struct rpc_call_ops filelayout_read_call_ops = { +static const struct rpc_call_ops filelayout_read_call_ops = { .rpc_call_prepare = filelayout_read_prepare, .rpc_call_done = filelayout_read_call_done, .rpc_count_stats = filelayout_read_count_stats, .rpc_release = filelayout_read_release, }; -struct rpc_call_ops filelayout_write_call_ops = { +static const struct rpc_call_ops filelayout_write_call_ops = { .rpc_call_prepare = filelayout_write_prepare, .rpc_call_done = filelayout_write_call_done, .rpc_count_stats = filelayout_write_count_stats, .rpc_release = filelayout_write_release, }; -struct rpc_call_ops filelayout_commit_call_ops = { +static const struct rpc_call_ops filelayout_commit_call_ops = { .rpc_call_prepare = filelayout_write_prepare, .rpc_call_done = filelayout_write_call_done, .rpc_count_stats = filelayout_write_count_stats, @@ -723,7 +723,7 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, return (p_stripe == r_stripe); } -void +static void filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { @@ -740,7 +740,7 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, nfs_pageio_reset_read_mds(pgio); } -void +static void filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { diff --git 
a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index 41677f0bf792..a866bbd2890a 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -45,7 +45,7 @@ * - incremented when a device id maps a data server already in the cache. * - decremented when deviceid is removed from the cache. */ -DEFINE_SPINLOCK(nfs4_ds_cache_lock); +static DEFINE_SPINLOCK(nfs4_ds_cache_lock); static LIST_HEAD(nfs4_data_server_cache); /* Debug routines */ @@ -108,7 +108,7 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) return false; } -bool +static bool _same_data_server_addrs_locked(const struct list_head *dsaddrs1, const struct list_head *dsaddrs2) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 3bf5593741ee..36a7cda03445 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -677,12 +677,12 @@ static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) nfs41_sequence_done(task, data->seq_res); } -struct rpc_call_ops nfs41_call_sync_ops = { +static const struct rpc_call_ops nfs41_call_sync_ops = { .rpc_call_prepare = nfs41_call_sync_prepare, .rpc_call_done = nfs41_call_sync_done, }; -struct rpc_call_ops nfs41_call_priv_sync_ops = { +static const struct rpc_call_ops nfs41_call_priv_sync_ops = { .rpc_call_prepare = nfs41_call_priv_sync_prepare, .rpc_call_done = nfs41_call_sync_done, }; @@ -4770,7 +4770,7 @@ static void nfs4_release_lockowner_release(void *calldata) kfree(calldata); } -const struct rpc_call_ops nfs4_release_lockowner_ops = { +static const struct rpc_call_ops nfs4_release_lockowner_ops = { .rpc_release = nfs4_release_lockowner_release, }; @@ -4910,7 +4910,8 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct return status; } -int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) +static int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, + struct nfs4_secinfo_flavors *flavors) { struct nfs4_exception exception = { }; int err; @@ -5096,7 +5097,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) dprintk("<-- %s\n", __func__); } -struct rpc_call_ops nfs4_get_lease_time_ops = { +static const struct rpc_call_ops nfs4_get_lease_time_ops = { .rpc_call_prepare = nfs4_get_lease_time_prepare, .rpc_call_done = nfs4_get_lease_time_done, }; @@ -6319,7 +6320,7 @@ static bool nfs4_match_stateid(const nfs4_stateid *s1, } -struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { +static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, @@ -6329,7 +6330,7 @@ struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { }; #if defined(CONFIG_NFS_V4_1) -struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { +static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, @@ -6340,7 +6341,7 @@ struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { }; #endif /* CONFIG_NFS_V4_1 */ -struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { +static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs4_open_expired, @@ -6350,7 +6351,7 @@ struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { }; #if 
defined(CONFIG_NFS_V4_1) -struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { +static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs41_open_expired, @@ -6360,14 +6361,14 @@ struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { }; #endif /* CONFIG_NFS_V4_1 */ -struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { +static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { .sched_state_renewal = nfs4_proc_async_renew, .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, .renew_lease = nfs4_proc_renew, }; #if defined(CONFIG_NFS_V4_1) -struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { +static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { .sched_state_renewal = nfs41_proc_async_sequence, .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, .renew_lease = nfs4_proc_sequence, diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5fa43cd9bfc5..7c586070d028 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -876,7 +876,8 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) if (fl->fl_flags & FL_POSIX) lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE); else if (fl->fl_flags & FL_FLOCK) - lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE); + lsp = nfs4_get_lock_state(state, NULL, fl->fl_pid, + NFS4_FLOCK_LOCK_TYPE); else return -EINVAL; if (lsp == NULL) diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index 2bd185277adb..157c47e277e0 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c @@ -156,7 +156,7 @@ last_byte_offset(u64 start, u64 len) return end > start ? end - 1 : NFS4_MAX_UINT64; } -void _fix_verify_io_params(struct pnfs_layout_segment *lseg, +static void _fix_verify_io_params(struct pnfs_layout_segment *lseg, struct page ***p_pages, unsigned *p_pgbase, u64 offset, unsigned long count) { diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c index 4f359d2a26eb..6b4cd3849306 100644 --- a/fs/nfs/pnfs_dev.c +++ b/fs/nfs/pnfs_dev.c @@ -92,7 +92,7 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld, * @clp nfs_client associated with deviceid * @id deviceid to look up */ -struct nfs4_deviceid_node * +static struct nfs4_deviceid_node * _find_get_deviceid(const struct pnfs_layoutdriver_type *ld, const struct nfs_client *clp, const struct nfs4_deviceid *id, long hash) diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 490613b709b6..fae71c9f5050 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -108,7 +108,7 @@ static void nfs_async_unlink_release(void *calldata) } #if defined(CONFIG_NFS_V4_1) -void nfs_unlink_prepare(struct rpc_task *task, void *calldata) +static void nfs_unlink_prepare(struct rpc_task *task, void *calldata) { struct nfs_unlinkdata *data = calldata; struct nfs_server *server = NFS_SERVER(data->dir); -- cgit From 4b7c8dd205d6df1629ccde9f6dcf6a85d34c37ff Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Mon, 12 Mar 2012 11:28:24 -0400 Subject: NFS: Only define some function when v4.1 is enabled Now that the nfs4_cb_match_client() function is static, gcc notices that it is only used when CONFIG_NFS_V4_1 is enabled. 
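A minimal illustration of why the guard is needed (names invented): once a function is static, configurations that never call it trip gcc's unused-function warning, so the definition is compiled only alongside its sole caller.

#include <linux/types.h>
#include <linux/errno.h>

#if defined(CONFIG_NFS_V4_1)
static bool demo_match(int a, int b)
{
        return a == b;
}

int demo_v41_only_user(int a, int b)
{
        return demo_match(a, b) ? 0 : -EINVAL;
}
#endif /* CONFIG_NFS_V4_1 */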
Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f1f047c376d9..2f378487ccde 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -404,6 +404,7 @@ static int nfs_sockaddr_cmp_ip4(const struct sockaddr *sa1, (sin1->sin_port == sin2->sin_port); } +#if defined(CONFIG_NFS_V4_1) /* * Test if two socket addresses represent the same actual socket, * by comparing (only) relevant fields, excluding the port number. @@ -422,6 +423,7 @@ static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1, } return 0; } +#endif /* CONFIG_NFS_V4_1 */ /* * Test if two socket addresses represent the same actual socket, @@ -442,6 +444,7 @@ static int nfs_sockaddr_cmp(const struct sockaddr *sa1, return 0; } +#if defined(CONFIG_NFS_V4_1) /* Common match routine for v4.0 and v4.1 callback services */ static bool nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp, u32 minorversion) @@ -464,6 +467,7 @@ static bool nfs4_cb_match_client(const struct sockaddr *addr, return true; } +#endif /* CONFIG_NFS_V4_1 */ /* * Find an nfs_client on the list that matches the initialisation data -- cgit From 11588f493a2441f09ceb2088d07cc012b53cbf75 Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Mon, 12 Mar 2012 11:33:00 -0400 Subject: NFS: Check return value from rpc_queue_upcall() This function could fail to queue the upcall if rpc.idmapd is not running, causing a warning message to be printed. Instead, I want to check the return value and revoke the key if the upcall can't be run. Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index f9f89fc83ee0..a701a83047d3 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -656,14 +656,19 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, idmap->idmap_key_cons = cons; - return rpc_queue_upcall(idmap->idmap_pipe, msg); + ret = rpc_queue_upcall(idmap->idmap_pipe, msg); + if (ret < 0) + goto out2; + + return ret; out2: kfree(im); out1: kfree(msg); out0: - complete_request_key(cons, ret); + key_revoke(cons->key); + key_revoke(cons->authkey); return ret; } -- cgit From 9a3ba432330e504ac61ff0043dbdaba7cea0e35a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 12 Mar 2012 18:01:48 -0400 Subject: NFSv4: Rate limit the state manager warning messages Prevent the state manager from filling up system logs when recovery fails on the server. 
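The change is mechanical: printk(KERN_WARNING ...) becomes pr_warn_ratelimited(...), which throttles repeats through the shared printk ratelimit state. A sketch with an invented message and helper name:

#include <linux/printk.h>
#include <linux/ratelimit.h>

static void demo_report(const char *host, int status)
{
        /*
         * printk(KERN_WARNING ...) here would emit one line per failure
         * and can flood the log when recovery keeps failing; the
         * rate-limited variant drops the excess and periodically notes
         * how many callbacks were suppressed.
         */
        pr_warn_ratelimited("NFS: demo: recovery on %s failed with error %d\n",
                            host, status);
}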
Signed-off-by: Trond Myklebust Cc: stable@vger.kernel.org --- fs/nfs/callback_xdr.c | 4 +++- fs/nfs/nfs4proc.c | 2 +- fs/nfs/nfs4state.c | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index fd6cfdb917da..95bfc243992c 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include #include "nfs4_fs.h" @@ -167,7 +169,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound if (hdr->minorversion <= 1) { hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */ } else { - printk(KERN_WARNING "NFS: %s: NFSv4 server callback with " + pr_warn_ratelimited("NFS: %s: NFSv4 server callback with " "illegal minor version %u!\n", __func__, hdr->minorversion); return htonl(NFS4ERR_MINOR_VERS_MISMATCH); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 36a7cda03445..5e0961acfef4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1876,7 +1876,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, * the user though... */ if (status == -NFS4ERR_BAD_SEQID) { - printk(KERN_WARNING "NFS: v4 server %s " + pr_warn_ratelimited("NFS: v4 server %s " " returned a bad sequence-id error!\n", NFS_SERVER(dir)->nfs_client->cl_hostname); exception.retry = 1; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 7c586070d028..cb708b20a775 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -984,7 +984,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) case -NFS4ERR_BAD_SEQID: if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) return; - printk(KERN_WARNING "NFS: v4 server returned a bad" + pr_warn_ratelimited("NFS: v4 server returned a bad" " sequence-id error on an" " unconfirmed sequence %p!\n", seqid->sequence); @@ -1840,7 +1840,7 @@ static void nfs4_state_manager(struct nfs_client *clp) } while (atomic_read(&clp->cl_count) > 1); return; out_error: - printk(KERN_WARNING "NFS: state manager failed on NFSv4 server %s" + pr_warn_ratelimited("NFS: state manager failed on NFSv4 server %s" " with error %d\n", clp->cl_hostname, -status); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); -- cgit From e138ead73f872559778bb0c326e795206f96d3ce Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 13 Mar 2012 20:18:48 +0300 Subject: NFS: null dereference in dev_remove() In commit 5ffaf85541 "NFS: replace global bl_wq with per-net one" we made "msg" a pointer instead of a struct stored in stack memory. But we forgot to change the memset() here so we're still clearing stack memory instead clearing the struct like we intended. It will lead to a kernel crash. 
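The bug class is worth spelling out with invented demo names: once a local struct becomes a pointer, a leftover & takes the address of the pointer variable itself, so the memset() clears sizeof(*msg) bytes of stack rather than the message.

#include <linux/types.h>
#include <linux/string.h>

struct demo_msg {
        void    *data;
        size_t  len;
};

void demo_broken(struct demo_msg *msg)
{
        /* &msg is the address of the pointer on the stack: this wipes
         * sizeof(*msg) bytes of local stack and nulls the pointer,
         * while the object msg points to is never cleared.
         */
        memset(&msg, 0, sizeof(*msg));
}

void demo_fixed(struct demo_msg *msg)
{
        memset(msg, 0, sizeof(*msg));   /* clears the pointed-to struct */
}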
Signed-off-by: Dan Carpenter Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayoutdm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 30fc22af7bbb..737d839bc17b 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -54,7 +54,7 @@ static void dev_remove(struct net *net, dev_t dev) dprintk("Entering %s\n", __func__); bl_pipe_msg.bl_wq = &nn->bl_wq; - memset(&msg, 0, sizeof(*msg)); + memset(msg, 0, sizeof(*msg)); msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); if (!msg->data) goto out; -- cgit From 281627df3eb55e1b729b9bb06fff5ff112929646 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 2012 08:41:05 +0000 Subject: xfs: log file size updates at I/O completion time Do not use unlogged metadata updates and the VFS dirty bit for updating the file size after writeback. In addition to causing various problems with updates getting delayed for far too long this also drags in the unscalable VFS dirty tracking, and is one of the few remaining unlogged metadata updates. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_aops.c | 133 ++++++++++++++++++++++++++++++++++++++++++++---------- fs/xfs/xfs_aops.h | 2 + 2 files changed, 111 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 8e11b07bb281..0dbb9e70fe21 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -26,6 +26,7 @@ #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" +#include "xfs_inode_item.h" #include "xfs_alloc.h" #include "xfs_error.h" #include "xfs_rw.h" @@ -107,25 +108,65 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend) XFS_I(ioend->io_inode)->i_d.di_size; } +STATIC int +xfs_setfilesize_trans_alloc( + struct xfs_ioend *ioend) +{ + struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount; + struct xfs_trans *tp; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + ioend->io_append_trans = tp; + + /* + * We hand off the transaction to the completion thread now, so + * clear the flag here. + */ + current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); + return 0; +} + /* * Update on-disk file size now that data has been written to disk. */ -STATIC void +STATIC int xfs_setfilesize( struct xfs_ioend *ioend) { struct xfs_inode *ip = XFS_I(ioend->io_inode); + struct xfs_trans *tp = ioend->io_append_trans; xfs_fsize_t isize; + /* + * The transaction was allocated in the I/O submission thread, + * thus we need to mark ourselves as beeing in a transaction + * manually. 
+ */ + current_set_flags_nested(&tp->t_pflags, PF_FSTRANS); + xfs_ilock(ip, XFS_ILOCK_EXCL); isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size); - if (isize) { - trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); - ip->i_d.di_size = isize; - xfs_mark_inode_dirty(ip); + if (!isize) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_trans_cancel(tp, 0); + return 0; } - xfs_iunlock(ip, XFS_ILOCK_EXCL); + trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); + + ip->i_d.di_size = isize; + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + return xfs_trans_commit(tp, 0); } /* @@ -143,7 +184,7 @@ xfs_finish_ioend( if (ioend->io_type == IO_UNWRITTEN) queue_work(mp->m_unwritten_workqueue, &ioend->io_work); - else if (xfs_ioend_is_append(ioend)) + else if (ioend->io_append_trans) queue_work(mp->m_data_workqueue, &ioend->io_work); else xfs_destroy_ioend(ioend); @@ -173,18 +214,32 @@ xfs_end_io( * range to normal written extens after the data I/O has finished. */ if (ioend->io_type == IO_UNWRITTEN) { + /* + * For buffered I/O we never preallocate a transaction when + * doing the unwritten extent conversion, but for direct I/O + * we do not know if we are converting an unwritten extent + * or not at the point where we preallocate the transaction. + */ + if (ioend->io_append_trans) { + ASSERT(ioend->io_isdirect); + + current_set_flags_nested( + &ioend->io_append_trans->t_pflags, PF_FSTRANS); + xfs_trans_cancel(ioend->io_append_trans, 0); + } + error = xfs_iomap_write_unwritten(ip, ioend->io_offset, ioend->io_size); if (error) { ioend->io_error = -error; goto done; } + } else if (ioend->io_append_trans) { + error = xfs_setfilesize(ioend); + if (error) + ioend->io_error = -error; } else { - /* - * We might have to update the on-disk file size after - * extending writes. - */ - xfs_setfilesize(ioend); + ASSERT(!xfs_ioend_is_append(ioend)); } done: @@ -224,6 +279,7 @@ xfs_alloc_ioend( */ atomic_set(&ioend->io_remaining, 1); ioend->io_isasync = 0; + ioend->io_isdirect = 0; ioend->io_error = 0; ioend->io_list = NULL; ioend->io_type = type; @@ -234,6 +290,7 @@ xfs_alloc_ioend( ioend->io_size = 0; ioend->io_iocb = NULL; ioend->io_result = 0; + ioend->io_append_trans = NULL; INIT_WORK(&ioend->io_work, xfs_end_io); return ioend; @@ -341,18 +398,9 @@ xfs_submit_ioend_bio( xfs_ioend_t *ioend, struct bio *bio) { - struct xfs_inode *ip = XFS_I(ioend->io_inode); atomic_inc(&ioend->io_remaining); bio->bi_private = ioend; bio->bi_end_io = xfs_end_bio; - - /* - * If the I/O is beyond EOF we mark the inode dirty immediately - * but don't update the inode size until I/O completion. - */ - if (xfs_new_eof(ip, ioend->io_offset + ioend->io_size)) - xfs_mark_inode_dirty(ip); - submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio); } @@ -999,8 +1047,20 @@ xfs_vm_writepage( wbc, end_index); } - if (iohead) + if (iohead) { + /* + * Reserve log space if we might write beyond the on-disk + * inode size. 
+ */ + if (ioend->io_type != IO_UNWRITTEN && + xfs_ioend_is_append(ioend)) { + err = xfs_setfilesize_trans_alloc(ioend); + if (err) + goto error; + } + xfs_submit_ioend(wbc, iohead); + } return 0; @@ -1280,17 +1340,32 @@ xfs_vm_direct_IO( { struct inode *inode = iocb->ki_filp->f_mapping->host; struct block_device *bdev = xfs_find_bdev_for_inode(inode); + struct xfs_ioend *ioend = NULL; ssize_t ret; if (rw & WRITE) { - iocb->private = xfs_alloc_ioend(inode, IO_DIRECT); + size_t size = iov_length(iov, nr_segs); + + /* + * We need to preallocate a transaction for a size update + * here. In the case that this write both updates the size + * and converts at least on unwritten extent we will cancel + * the still clean transaction after the I/O has finished. + */ + iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT); + if (offset + size > XFS_I(inode)->i_d.di_size) { + ret = xfs_setfilesize_trans_alloc(ioend); + if (ret) + goto out_destroy_ioend; + ioend->io_isdirect = 1; + } ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, xfs_get_blocks_direct, xfs_end_io_direct_write, NULL, 0); if (ret != -EIOCBQUEUED && iocb->private) - xfs_destroy_ioend(iocb->private); + goto out_trans_cancel; } else { ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, @@ -1299,6 +1374,16 @@ xfs_vm_direct_IO( } return ret; + +out_trans_cancel: + if (ioend->io_append_trans) { + current_set_flags_nested(&ioend->io_append_trans->t_pflags, + PF_FSTRANS); + xfs_trans_cancel(ioend->io_append_trans, 0); + } +out_destroy_ioend: + xfs_destroy_ioend(ioend); + return ret; } STATIC void diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h index 06e4caf38203..84eafbcb0d9d 100644 --- a/fs/xfs/xfs_aops.h +++ b/fs/xfs/xfs_aops.h @@ -46,12 +46,14 @@ typedef struct xfs_ioend { int io_error; /* I/O error code */ atomic_t io_remaining; /* hold count */ unsigned int io_isasync : 1; /* needs aio_complete */ + unsigned int io_isdirect : 1;/* direct I/O */ struct inode *io_inode; /* file being written to */ struct buffer_head *io_buffer_head;/* buffer linked list head */ struct buffer_head *io_buffer_tail;/* buffer linked list tail */ size_t io_size; /* size of the extent */ xfs_off_t io_offset; /* offset in the file */ struct work_struct io_work; /* xfsdatad work queue */ + struct xfs_trans *io_append_trans;/* xact. for size update */ struct kiocb *io_iocb; int io_result; } xfs_ioend_t; -- cgit From 35c80422afc8394d1ecbab3c0b17fcd539e4e5c2 Mon Sep 17 00:00:00 2001 From: Nigel Cunningham Date: Fri, 3 Feb 2012 19:59:41 +1100 Subject: PM / Sleep: JBD and JBD2 missing set_freezable() With the latest and greatest changes to the freezer, I started seeing panics that were caused by jbd2 running post-process freezing and hitting the canary BUG_ON for non-TuxOnIce I/O submission. I've traced this back to a lack of set_freezable calls in both jbd and jbd2. Since they're clearly meant to be frozen (there are tests for freezing()), I submit the following patch to add the missing calls. Signed-off-by: Nigel Cunningham Acked-by: Jan Kara Signed-off-by: Rafael J. 
Wysocki --- fs/jbd/journal.c | 2 ++ fs/jbd2/journal.c | 2 ++ 2 files changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 59c09f9541b5..89cd985aee06 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -129,6 +129,8 @@ static int kjournald(void *arg) setup_timer(&journal->j_commit_timer, commit_timeout, (unsigned long)current); + set_freezable(); + /* Record that the journal thread is running */ journal->j_task = current; wake_up(&journal->j_wait_done_commit); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c0a5f9f1b127..663e47cbaa78 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -139,6 +139,8 @@ static int kjournald2(void *arg) setup_timer(&journal->j_commit_timer, commit_timeout, (unsigned long)current); + set_freezable(); + /* Record that the journal thread is running */ journal->j_task = current; wake_up(&journal->j_wait_done_commit); -- cgit From 8a9c9980f24f6d86e0ec0150ed35fba45d0c9f88 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 29 Feb 2012 09:53:52 +0000 Subject: xfs: log timestamp updates Timestamps on regular files are the last metadata that XFS does not update transactionally. Now that we use the delaylog mode exclusively and made the log scode scale extremly well there is no need to bypass that code for timestamp updates. Logging all updates allows to drop a lot of code, and will allow for further performance improvements later on. Note that this patch drops optimized handling of fdatasync - it will be added back in a separate commit. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_file.c | 83 ++++++------------------------------ fs/xfs/xfs_iget.c | 1 - fs/xfs/xfs_inode.c | 25 +---------- fs/xfs/xfs_inode.h | 5 --- fs/xfs/xfs_inode_item.c | 36 ---------------- fs/xfs/xfs_inode_item.h | 5 +-- fs/xfs/xfs_iops.c | 58 ------------------------- fs/xfs/xfs_itable.c | 21 +++------ fs/xfs/xfs_super.c | 108 ++++++++++++++++------------------------------- fs/xfs/xfs_sync.c | 36 ---------------- fs/xfs/xfs_sync.h | 2 - fs/xfs/xfs_trace.h | 2 +- fs/xfs/xfs_trans_inode.c | 4 ++ 13 files changed, 65 insertions(+), 321 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 7e5bc872f2b4..78d8b0299592 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -163,7 +163,6 @@ xfs_file_fsync( struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; int error = 0; int log_flushed = 0; xfs_lsn_t lsn = 0; @@ -194,75 +193,15 @@ xfs_file_fsync( } /* - * We always need to make sure that the required inode state is safe on - * disk. The inode might be clean but we still might need to force the - * log because of committed transactions that haven't hit the disk yet. - * Likewise, there could be unflushed non-transactional changes to the - * inode core that have to go to disk and this requires us to issue - * a synchronous transaction to capture these changes correctly. - * - * This code relies on the assumption that if the i_update_core field - * of the inode is clear and the inode is unpinned then it is clean - * and no action is required. + * All metadata updates are logged, which means that we just have + * to flush the log up to the latest LSN that touched the inode. */ xfs_ilock(ip, XFS_ILOCK_SHARED); - - /* - * First check if the VFS inode is marked dirty. 
All the dirtying - * of non-transactional updates do not go through mark_inode_dirty*, - * which allows us to distinguish between pure timestamp updates - * and i_size updates which need to be caught for fdatasync. - * After that also check for the dirty state in the XFS inode, which - * might gets cleared when the inode gets written out via the AIL - * or xfs_iflush_cluster. - */ - if (((inode->i_state & I_DIRTY_DATASYNC) || - ((inode->i_state & I_DIRTY_SYNC) && !datasync)) && - ip->i_update_core) { - /* - * Kick off a transaction to log the inode core to get the - * updates. The sync transaction will also force the log. - */ - xfs_iunlock(ip, XFS_ILOCK_SHARED); - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, - XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return -error; - } - xfs_ilock(ip, XFS_ILOCK_EXCL); - - /* - * Note - it's possible that we might have pushed ourselves out - * of the way during trans_reserve which would flush the inode. - * But there's no guarantee that the inode buffer has actually - * gone out yet (it's delwri). Plus the buffer could be pinned - * anyway if it's part of an inode in another recent - * transaction. So we play it safe and fire off the - * transaction anyway. - */ - xfs_trans_ijoin(tp, ip, 0); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - error = xfs_trans_commit(tp, 0); - + if (xfs_ipincount(ip)) lsn = ip->i_itemp->ili_last_lsn; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } else { - /* - * Timestamps/size haven't changed since last inode flush or - * inode transaction commit. That means either nothing got - * written or a transaction committed which caught the updates. - * If the latter happened and the transaction hasn't hit the - * disk yet, the inode will be still be pinned. If it is, - * force the log. - */ - if (xfs_ipincount(ip)) - lsn = ip->i_itemp->ili_last_lsn; - xfs_iunlock(ip, XFS_ILOCK_SHARED); - } + xfs_iunlock(ip, XFS_ILOCK_SHARED); - if (!error && lsn) + if (lsn) error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed); /* @@ -659,9 +598,6 @@ restart: return error; } - if (likely(!(file->f_mode & FMODE_NOCMTIME))) - file_update_time(file); - /* * If the offset is beyond the size of the file, we need to zero any * blocks that fall between the existing EOF and the start of this @@ -684,6 +620,15 @@ restart: if (error) return error; + /* + * Updating the timestamps will grab the ilock again from + * xfs_fs_dirty_inode, so we have to call it after dropping the + * lock above. Eventually we should look into a way to avoid + * the pointless lock roundtrip. + */ + if (likely(!(file->f_mode & FMODE_NOCMTIME))) + file_update_time(file); + /* * If we're writing the file then make sure to clear the setuid and * setgid bits if the process is not being run by root. 
This keeps diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 37f22dad5f59..af3f30a3d9c2 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -91,7 +91,6 @@ xfs_inode_alloc( ip->i_afp = NULL; memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); ip->i_flags = 0; - ip->i_update_core = 0; ip->i_delayed_blks = 0; memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b21022499c2e..7ce9ccbf17c4 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1656,7 +1656,6 @@ retry: iip = ip->i_itemp; if (!iip || xfs_inode_clean(ip)) { ASSERT(ip != free_ip); - ip->i_update_core = 0; xfs_ifunlock(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); continue; @@ -2451,7 +2450,6 @@ xfs_iflush( * to disk, because the log record didn't make it to disk! */ if (XFS_FORCED_SHUTDOWN(mp)) { - ip->i_update_core = 0; if (iip) iip->ili_format.ilf_fields = 0; xfs_ifunlock(ip); @@ -2533,26 +2531,6 @@ xfs_iflush_int( /* set *dip = inode's place in the buffer */ dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); - /* - * Clear i_update_core before copying out the data. - * This is for coordination with our timestamp updates - * that don't hold the inode lock. They will always - * update the timestamps BEFORE setting i_update_core, - * so if we clear i_update_core after they set it we - * are guaranteed to see their updates to the timestamps. - * I believe that this depends on strongly ordered memory - * semantics, but we have that. We use the SYNCHRONIZE - * macro to make sure that the compiler does not reorder - * the i_update_core access below the data copy below. - */ - ip->i_update_core = 0; - SYNCHRONIZE(); - - /* - * Make sure to get the latest timestamps from the Linux inode. - */ - xfs_synchronize_times(ip); - if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, @@ -2711,8 +2689,7 @@ xfs_iflush_int( } else { /* * We're flushing an inode which is not in the AIL and has - * not been logged but has i_update_core set. For this - * case we can use a B_DELWRI flush and immediately drop + * not been logged. For this case we can immediately drop * the inode flush lock because we can avoid the whole * AIL state thing. It's OK to drop the flush lock now, * because we've already locked the buffer and to do anything diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 7f90469141d7..f123dbe6d42a 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -241,7 +241,6 @@ typedef struct xfs_inode { spinlock_t i_flags_lock; /* inode i_flags lock */ /* Miscellaneous state. 
*/ unsigned long i_flags; /* see defined flags below */ - unsigned char i_update_core; /* timestamps/size is dirty */ unsigned int i_delayed_blks; /* count of delay alloc blks */ xfs_icdinode_t i_d; /* most of ondisk inode */ @@ -534,10 +533,6 @@ void xfs_promote_inode(struct xfs_inode *); void xfs_lock_inodes(xfs_inode_t **, int, uint); void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); -void xfs_synchronize_times(xfs_inode_t *); -void xfs_mark_inode_dirty(xfs_inode_t *); -void xfs_mark_inode_dirty_sync(xfs_inode_t *); - #define IHOLD(ip) \ do { \ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index adc8a261b5d0..7a60da64f31d 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -254,42 +254,6 @@ xfs_inode_item_format( vecp++; nvecs = 1; - /* - * Clear i_update_core if the timestamps (or any other - * non-transactional modification) need flushing/logging - * and we're about to log them with the rest of the core. - * - * This is the same logic as xfs_iflush() but this code can't - * run at the same time as xfs_iflush because we're in commit - * processing here and so we have the inode lock held in - * exclusive mode. Although it doesn't really matter - * for the timestamps if both routines were to grab the - * timestamps or not. That would be ok. - * - * We clear i_update_core before copying out the data. - * This is for coordination with our timestamp updates - * that don't hold the inode lock. They will always - * update the timestamps BEFORE setting i_update_core, - * so if we clear i_update_core after they set it we - * are guaranteed to see their updates to the timestamps - * either here. Likewise, if they set it after we clear it - * here, we'll see it either on the next commit of this - * inode or the next time the inode gets flushed via - * xfs_iflush(). This depends on strongly ordered memory - * semantics, but we have that. We use the SYNCHRONIZE - * macro to make sure that the compiler does not reorder - * the i_update_core access below the data copy below. - */ - if (ip->i_update_core) { - ip->i_update_core = 0; - SYNCHRONIZE(); - } - - /* - * Make sure to get the latest timestamps from the Linux inode. - */ - xfs_synchronize_times(ip); - vecp->i_addr = &ip->i_d; vecp->i_len = sizeof(struct xfs_icdinode); vecp->i_type = XLOG_REG_TYPE_ICORE; diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index d3dee61e6d91..25784b066568 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -148,9 +148,8 @@ typedef struct xfs_inode_log_item { static inline int xfs_inode_clean(xfs_inode_t *ip) { - return (!ip->i_itemp || - !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) && - !ip->i_update_core; + return !ip->i_itemp || + !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL); } extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ab302539e5b9..7c01cda16727 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -50,59 +50,6 @@ #include #include -/* - * Bring the timestamps in the XFS inode uptodate. - * - * Used before writing the inode to disk. 
- */ -void -xfs_synchronize_times( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; - ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec; - ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec; - ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec; - ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec; - ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec; -} - -/* - * If the linux inode is valid, mark it dirty, else mark the dirty state - * in the XFS inode to make sure we pick it up when reclaiming the inode. - */ -void -xfs_mark_inode_dirty_sync( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) - mark_inode_dirty_sync(inode); - else { - barrier(); - ip->i_update_core = 1; - } -} - -void -xfs_mark_inode_dirty( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) - mark_inode_dirty(inode); - else { - barrier(); - ip->i_update_core = 1; - } - -} - - int xfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { @@ -678,19 +625,16 @@ xfs_setattr_nonsize( inode->i_atime = iattr->ia_atime; ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; - ip->i_update_core = 1; } if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; - ip->i_update_core = 1; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; - ip->i_update_core = 1; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); @@ -918,13 +862,11 @@ xfs_setattr_size( inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; - ip->i_update_core = 1; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; - ip->i_update_core = 1; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 751e94fe1f77..9720c54bbed0 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -62,7 +62,6 @@ xfs_bulkstat_one_int( { struct xfs_icdinode *dic; /* dinode core info pointer */ struct xfs_inode *ip; /* incore inode pointer */ - struct inode *inode; struct xfs_bstat *buf; /* return buffer */ int error = 0; /* error value */ @@ -86,7 +85,6 @@ xfs_bulkstat_one_int( ASSERT(ip->i_imap.im_blkno != 0); dic = &ip->i_d; - inode = VFS_I(ip); /* xfs_iget returns the following without needing * further change. @@ -99,19 +97,12 @@ xfs_bulkstat_one_int( buf->bs_uid = dic->di_uid; buf->bs_gid = dic->di_gid; buf->bs_size = dic->di_size; - - /* - * We need to read the timestamps from the Linux inode because - * the VFS keeps writing directly into the inode structure instead - * of telling us about the updates. 
- */ - buf->bs_atime.tv_sec = inode->i_atime.tv_sec; - buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec; - buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec; - buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec; - buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec; - buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec; - + buf->bs_atime.tv_sec = dic->di_atime.t_sec; + buf->bs_atime.tv_nsec = dic->di_atime.t_nsec; + buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; + buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; + buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; + buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec; buf->bs_xflags = xfs_ip2xflags(ip); buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog; buf->bs_extents = dic->di_nextents; diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index c7f7bc2855a4..e602c8c67c5c 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -863,91 +863,58 @@ xfs_fs_inode_init_once( } /* - * Dirty the XFS inode when mark_inode_dirty_sync() is called so that - * we catch unlogged VFS level updates to the inode. + * This is called by the VFS when dirtying inode metadata. This can happen + * for a few reasons, but we only care about timestamp updates, given that + * we handled the rest ourselves. In theory no other calls should happen, + * but for example generic_write_end() keeps dirtying the inode after + * updating i_size. Thus we check that the flags are exactly I_DIRTY_SYNC, + * and skip this call otherwise. * - * We need the barrier() to maintain correct ordering between unlogged - * updates and the transaction commit code that clears the i_update_core - * field. This requires all updates to be completed before marking the - * inode dirty. + * We'll hopefull get a different method just for updating timestamps soon, + * at which point this hack can go away, and maybe we'll also get real + * error handling here. */ STATIC void xfs_fs_dirty_inode( - struct inode *inode, - int flags) -{ - barrier(); - XFS_I(inode)->i_update_core = 1; -} - -STATIC int -xfs_fs_write_inode( struct inode *inode, - struct writeback_control *wbc) + int flags) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - int error = EAGAIN; - - trace_xfs_write_inode(ip); - - if (XFS_FORCED_SHUTDOWN(mp)) - return -XFS_ERROR(EIO); - - if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) { - /* - * Make sure the inode has made it it into the log. Instead - * of forcing it all the way to stable storage using a - * synchronous transaction we let the log force inside the - * ->sync_fs call do that for thus, which reduces the number - * of synchronous log forces dramatically. - */ - error = xfs_log_dirty_inode(ip, NULL, 0); - if (error) - goto out; - return 0; - } else { - if (!ip->i_update_core) - return 0; + struct xfs_trans *tp; + int error; - /* - * We make this non-blocking if the inode is contended, return - * EAGAIN to indicate to the caller that they did not succeed. - * This prevents the flush path from blocking on inodes inside - * another operation right now, they get caught later by - * xfs_sync. - */ - if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) - goto out; + if (flags != I_DIRTY_SYNC) + return; - if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) - goto out_unlock; + trace_xfs_dirty_inode(ip); - /* - * Now we have the flush lock and the inode is not pinned, we - * can check if the inode is really clean as we know that - * there are no pending transaction completions, it is not - * waiting on the delayed write queue and there is no IO in - * progress. 
- */ - if (xfs_inode_clean(ip)) { - xfs_ifunlock(ip); - error = 0; - goto out_unlock; - } - error = xfs_iflush(ip, SYNC_TRYLOCK); + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + goto trouble; } - - out_unlock: - xfs_iunlock(ip, XFS_ILOCK_SHARED); - out: + xfs_ilock(ip, XFS_ILOCK_EXCL); /* - * if we failed to write out the inode then mark - * it dirty again so we'll try again later. + * Grab all the latest timestamps from the Linux inode. */ + ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; + ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec; + ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec; + ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec; + ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec; + ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec; + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + error = xfs_trans_commit(tp, 0); if (error) - xfs_mark_inode_dirty_sync(ip); - return -error; + goto trouble; + return; + +trouble: + xfs_warn(mp, "failed to update timestamps for inode 0x%llx", ip->i_ino); } STATIC void @@ -1466,7 +1433,6 @@ static const struct super_operations xfs_super_operations = { .alloc_inode = xfs_fs_alloc_inode, .destroy_inode = xfs_fs_destroy_inode, .dirty_inode = xfs_fs_dirty_inode, - .write_inode = xfs_fs_write_inode, .evict_inode = xfs_fs_evict_inode, .put_super = xfs_fs_put_super, .sync_fs = xfs_fs_sync_fs, diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index 71bf846b7280..205ebcb34d9e 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c @@ -336,32 +336,6 @@ xfs_sync_fsdata( return error; } -int -xfs_log_dirty_inode( - struct xfs_inode *ip, - struct xfs_perag *pag, - int flags) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - int error; - - if (!ip->i_update_core) - return 0; - - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return error; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - return xfs_trans_commit(tp, 0); -} - /* * When remounting a filesystem read-only or freezing the filesystem, we have * two phases to execute. This first phase is syncing the data before we @@ -385,16 +359,6 @@ xfs_quiesce_data( { int error, error2 = 0; - /* - * Log all pending size and timestamp updates. The vfs writeback - * code is supposed to do this, but due to its overagressive - * livelock detection it will skip inodes where appending writes - * were written out in the first non-blocking sync phase if their - * completion took long enough that it happened after taking the - * timestamp for the cut-off in the blocking phase. 
- */ - xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0); - /* force out the log */ xfs_log_force(mp, XFS_LOG_SYNC); diff --git a/fs/xfs/xfs_sync.h b/fs/xfs/xfs_sync.h index fa965479d788..941202e7ac6e 100644 --- a/fs/xfs/xfs_sync.h +++ b/fs/xfs/xfs_sync.h @@ -34,8 +34,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); -int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags); - int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); int xfs_reclaim_inodes_count(struct xfs_mount *mp); void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 3b369c1277f0..ceaf6fe67e41 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -580,7 +580,7 @@ DEFINE_INODE_EVENT(xfs_ioctl_setattr); DEFINE_INODE_EVENT(xfs_dir_fsync); DEFINE_INODE_EVENT(xfs_file_fsync); DEFINE_INODE_EVENT(xfs_destroy_inode); -DEFINE_INODE_EVENT(xfs_write_inode); +DEFINE_INODE_EVENT(xfs_dirty_inode); DEFINE_INODE_EVENT(xfs_evict_inode); DEFINE_INODE_EVENT(xfs_dquot_dqalloc); diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 32f0288ae10f..892763effdf1 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -95,10 +95,14 @@ xfs_trans_ichgtime( if ((flags & XFS_ICHGTIME_MOD) && !timespec_equal(&inode->i_mtime, &tv)) { inode->i_mtime = tv; + ip->i_d.di_mtime.t_sec = tv.tv_sec; + ip->i_d.di_mtime.t_nsec = tv.tv_nsec; } if ((flags & XFS_ICHGTIME_CHG) && !timespec_equal(&inode->i_ctime, &tv)) { inode->i_ctime = tv; + ip->i_d.di_ctime.t_sec = tv.tv_sec; + ip->i_d.di_ctime.t_nsec = tv.tv_nsec; } } -- cgit From 339a5f5dd9d3ac3d68a594d81507e1eab77ed223 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 29 Feb 2012 09:53:53 +0000 Subject: xfs: make xfs_inode_item_size idempotent Move all code messing with the inode log item flags into xfs_inode_item_format to make sure xfs_inode_item_size really only calculates the the number of vectors, but doesn't modify any state of the inode item. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_inode_item.c | 215 +++++++++++++++++++----------------------------- 1 file changed, 83 insertions(+), 132 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 7a60da64f31d..965d3d083625 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -57,77 +57,28 @@ xfs_inode_item_size( struct xfs_inode *ip = iip->ili_inode; uint nvecs = 2; - /* - * Only log the data/extents/b-tree root if there is something - * left to log. 
- */ - iip->ili_format.ilf_fields |= XFS_ILOG_CORE; - switch (ip->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | - XFS_ILOG_DEV | XFS_ILOG_UUID); if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) && - (ip->i_d.di_nextents > 0) && - (ip->i_df.if_bytes > 0)) { - ASSERT(ip->i_df.if_u1.if_extents != NULL); + ip->i_d.di_nextents > 0 && + ip->i_df.if_bytes > 0) nvecs++; - } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_DEXT; - } break; case XFS_DINODE_FMT_BTREE: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | - XFS_ILOG_DEV | XFS_ILOG_UUID); if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) && - (ip->i_df.if_broot_bytes > 0)) { - ASSERT(ip->i_df.if_broot != NULL); + ip->i_df.if_broot_bytes > 0) nvecs++; - } else { - ASSERT(!(iip->ili_format.ilf_fields & - XFS_ILOG_DBROOT)); -#ifdef XFS_TRANS_DEBUG - if (iip->ili_root_size > 0) { - ASSERT(iip->ili_root_size == - ip->i_df.if_broot_bytes); - ASSERT(memcmp(iip->ili_orig_root, - ip->i_df.if_broot, - iip->ili_root_size) == 0); - } else { - ASSERT(ip->i_df.if_broot_bytes == 0); - } -#endif - iip->ili_format.ilf_fields &= ~XFS_ILOG_DBROOT; - } break; case XFS_DINODE_FMT_LOCAL: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | - XFS_ILOG_DEV | XFS_ILOG_UUID); if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) && - (ip->i_df.if_bytes > 0)) { - ASSERT(ip->i_df.if_u1.if_data != NULL); - ASSERT(ip->i_d.di_size > 0); + ip->i_df.if_bytes > 0) nvecs++; - } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_DDATA; - } break; case XFS_DINODE_FMT_DEV: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | - XFS_ILOG_DEXT | XFS_ILOG_UUID); - break; - case XFS_DINODE_FMT_UUID: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | - XFS_ILOG_DEXT | XFS_ILOG_DEV); break; default: @@ -135,56 +86,31 @@ xfs_inode_item_size( break; } - /* - * If there are no attributes associated with this file, - * then there cannot be anything more to log. - * Clear all attribute-related log flags. - */ - if (!XFS_IFORK_Q(ip)) { - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); + if (!XFS_IFORK_Q(ip)) return nvecs; - } + /* * Log any necessary attribute data. 
*/ switch (ip->i_d.di_aformat) { case XFS_DINODE_FMT_EXTENTS: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT); if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) && - (ip->i_d.di_anextents > 0) && - (ip->i_afp->if_bytes > 0)) { - ASSERT(ip->i_afp->if_u1.if_extents != NULL); + ip->i_d.di_anextents > 0 && + ip->i_afp->if_bytes > 0) nvecs++; - } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_AEXT; - } break; case XFS_DINODE_FMT_BTREE: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) && - (ip->i_afp->if_broot_bytes > 0)) { - ASSERT(ip->i_afp->if_broot != NULL); + ip->i_afp->if_broot_bytes > 0) nvecs++; - } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_ABROOT; - } break; case XFS_DINODE_FMT_LOCAL: - iip->ili_format.ilf_fields &= - ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) && - (ip->i_afp->if_bytes > 0)) { - ASSERT(ip->i_afp->if_u1.if_data != NULL); + ip->i_afp->if_bytes > 0) nvecs++; - } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_ADATA; - } break; default: @@ -292,16 +218,17 @@ xfs_inode_item_format( switch (ip->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_DDATA | XFS_ILOG_DBROOT | - XFS_ILOG_DEV | XFS_ILOG_UUID))); - if (iip->ili_format.ilf_fields & XFS_ILOG_DEXT) { - ASSERT(ip->i_df.if_bytes > 0); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + + if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) && + ip->i_d.di_nextents > 0 && + ip->i_df.if_bytes > 0) { ASSERT(ip->i_df.if_u1.if_extents != NULL); - ASSERT(ip->i_d.di_nextents > 0); + ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0); ASSERT(iip->ili_extents_buf == NULL); - ASSERT((ip->i_df.if_bytes / - (uint)sizeof(xfs_bmbt_rec_t)) > 0); + #ifdef XFS_NATIVE_HOST if (ip->i_d.di_nextents == ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { @@ -323,15 +250,18 @@ xfs_inode_item_format( iip->ili_format.ilf_dsize = vecp->i_len; vecp++; nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_DEXT; } break; case XFS_DINODE_FMT_BTREE: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_DDATA | XFS_ILOG_DEXT | - XFS_ILOG_DEV | XFS_ILOG_UUID))); - if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) { - ASSERT(ip->i_df.if_broot_bytes > 0); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + + if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) && + ip->i_df.if_broot_bytes > 0) { ASSERT(ip->i_df.if_broot != NULL); vecp->i_addr = ip->i_df.if_broot; vecp->i_len = ip->i_df.if_broot_bytes; @@ -339,15 +269,30 @@ xfs_inode_item_format( vecp++; nvecs++; iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; + } else { + ASSERT(!(iip->ili_format.ilf_fields & + XFS_ILOG_DBROOT)); +#ifdef XFS_TRANS_DEBUG + if (iip->ili_root_size > 0) { + ASSERT(iip->ili_root_size == + ip->i_df.if_broot_bytes); + ASSERT(memcmp(iip->ili_orig_root, + ip->i_df.if_broot, + iip->ili_root_size) == 0); + } else { + ASSERT(ip->i_df.if_broot_bytes == 0); + } +#endif + iip->ili_format.ilf_fields &= ~XFS_ILOG_DBROOT; } break; case XFS_DINODE_FMT_LOCAL: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_DBROOT | XFS_ILOG_DEXT | - XFS_ILOG_DEV | XFS_ILOG_UUID))); - if (iip->ili_format.ilf_fields & XFS_ILOG_DDATA) { - ASSERT(ip->i_df.if_bytes > 0); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + if ((iip->ili_format.ilf_fields & 
XFS_ILOG_DDATA) && + ip->i_df.if_bytes > 0) { ASSERT(ip->i_df.if_u1.if_data != NULL); ASSERT(ip->i_d.di_size > 0); @@ -365,13 +310,15 @@ xfs_inode_item_format( vecp++; nvecs++; iip->ili_format.ilf_dsize = (unsigned)data_bytes; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_DDATA; } break; case XFS_DINODE_FMT_DEV: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_DBROOT | XFS_ILOG_DEXT | - XFS_ILOG_DDATA | XFS_ILOG_UUID))); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEXT | XFS_ILOG_UUID); if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { iip->ili_format.ilf_u.ilfu_rdev = ip->i_df.if_u2.if_rdev; @@ -379,9 +326,9 @@ xfs_inode_item_format( break; case XFS_DINODE_FMT_UUID: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_DBROOT | XFS_ILOG_DEXT | - XFS_ILOG_DDATA | XFS_ILOG_DEV))); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEXT | XFS_ILOG_DEV); if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { iip->ili_format.ilf_u.ilfu_uuid = ip->i_df.if_u2.if_uuid; @@ -394,31 +341,26 @@ xfs_inode_item_format( } /* - * If there are no attributes associated with the file, - * then we're done. - * Assert that no attribute-related log flags are set. + * If there are no attributes associated with the file, then we're done. */ if (!XFS_IFORK_Q(ip)) { iip->ili_format.ilf_size = nvecs; - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT))); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); return; } switch (ip->i_d.di_aformat) { case XFS_DINODE_FMT_EXTENTS: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_ADATA | XFS_ILOG_ABROOT))); - if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) { -#ifdef DEBUG - int nrecs = ip->i_afp->if_bytes / - (uint)sizeof(xfs_bmbt_rec_t); - ASSERT(nrecs > 0); - ASSERT(nrecs == ip->i_d.di_anextents); - ASSERT(ip->i_afp->if_bytes > 0); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT); + + if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) && + ip->i_d.di_anextents > 0 && + ip->i_afp->if_bytes > 0) { + ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) == + ip->i_d.di_anextents); ASSERT(ip->i_afp->if_u1.if_extents != NULL); - ASSERT(ip->i_d.di_anextents > 0); -#endif #ifdef XFS_NATIVE_HOST /* * There are not delayed allocation extents @@ -435,29 +377,36 @@ xfs_inode_item_format( iip->ili_format.ilf_asize = vecp->i_len; vecp++; nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_AEXT; } break; case XFS_DINODE_FMT_BTREE: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_ADATA | XFS_ILOG_AEXT))); - if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) { - ASSERT(ip->i_afp->if_broot_bytes > 0); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); + + if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) && + ip->i_afp->if_broot_bytes > 0) { ASSERT(ip->i_afp->if_broot != NULL); + vecp->i_addr = ip->i_afp->if_broot; vecp->i_len = ip->i_afp->if_broot_bytes; vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT; vecp++; nvecs++; iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_ABROOT; } break; case XFS_DINODE_FMT_LOCAL: - ASSERT(!(iip->ili_format.ilf_fields & - (XFS_ILOG_ABROOT | XFS_ILOG_AEXT))); - if (iip->ili_format.ilf_fields & XFS_ILOG_ADATA) { - ASSERT(ip->i_afp->if_bytes > 0); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); + + if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) && + ip->i_afp->if_bytes > 0) { 
ASSERT(ip->i_afp->if_u1.if_data != NULL); vecp->i_addr = ip->i_afp->if_u1.if_data; @@ -474,6 +423,8 @@ xfs_inode_item_format( vecp++; nvecs++; iip->ili_format.ilf_asize = (unsigned)data_bytes; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_ADATA; } break; -- cgit From f5d8d5c4bf29c9f7754d9cbe5e27c785106ba872 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 29 Feb 2012 09:53:54 +0000 Subject: xfs: split in-core and on-disk inode log item fields Add a new ili_fields member to the inode log item to isolate the in-memory flags from the ones that actually go to the log. This will allow tracking timestamp-only updates for fdatasync and O_DSYNC in the next patch and prepares for divorcing the on-disk log format from the in-memory log item a little further down the road. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_dfrag.c | 24 +++++++------- fs/xfs/xfs_inode.c | 69 +++++++++++++++++++--------------------- fs/xfs/xfs_inode_item.c | 83 +++++++++++++++++++++++++----------------------- fs/xfs/xfs_inode_item.h | 4 +-- fs/xfs/xfs_trans_inode.c | 4 +-- 5 files changed, 92 insertions(+), 92 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index dd974a55c77d..1137bbc5eccb 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -215,7 +215,7 @@ xfs_swap_extents( xfs_trans_t *tp; xfs_bstat_t *sbp = &sxp->sx_stat; xfs_ifork_t *tempifp, *ifp, *tifp; - int ilf_fields, tilf_fields; + int src_log_flags, target_log_flags; int error = 0; int aforkblks = 0; int taforkblks = 0; @@ -385,9 +385,8 @@ xfs_swap_extents( tip->i_delayed_blks = ip->i_delayed_blks; ip->i_delayed_blks = 0; - ilf_fields = XFS_ILOG_CORE; - - switch(ip->i_d.di_format) { + src_log_flags = XFS_ILOG_CORE; + switch (ip->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: /* If the extents fit in the inode, fix the * pointer. Otherwise it's already NULL or @@ -397,16 +396,15 @@ xfs_swap_extents( ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; } - ilf_fields |= XFS_ILOG_DEXT; + src_log_flags |= XFS_ILOG_DEXT; break; case XFS_DINODE_FMT_BTREE: - ilf_fields |= XFS_ILOG_DBROOT; + src_log_flags |= XFS_ILOG_DBROOT; break; } - tilf_fields = XFS_ILOG_CORE; - - switch(tip->i_d.di_format) { + target_log_flags = XFS_ILOG_CORE; + switch (tip->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: /* If the extents fit in the inode, fix the * pointer. 
Otherwise it's already NULL or @@ -416,10 +414,10 @@ xfs_swap_extents( tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext; } - tilf_fields |= XFS_ILOG_DEXT; + target_log_flags |= XFS_ILOG_DEXT; break; case XFS_DINODE_FMT_BTREE: - tilf_fields |= XFS_ILOG_DBROOT; + target_log_flags |= XFS_ILOG_DBROOT; break; } @@ -427,8 +425,8 @@ xfs_swap_extents( xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); - xfs_trans_log_inode(tp, ip, ilf_fields); - xfs_trans_log_inode(tp, tip, tilf_fields); + xfs_trans_log_inode(tp, ip, src_log_flags); + xfs_trans_log_inode(tp, tip, target_log_flags); /* * If this is a synchronous mount, make sure that the diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 7ce9ccbf17c4..bc46c0a133d3 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1661,8 +1661,8 @@ retry: continue; } - iip->ili_last_fields = iip->ili_format.ilf_fields; - iip->ili_format.ilf_fields = 0; + iip->ili_last_fields = iip->ili_fields; + iip->ili_fields = 0; iip->ili_logged = 1; xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, &iip->ili_item.li_lsn); @@ -2176,7 +2176,7 @@ xfs_iflush_fork( mp = ip->i_mount; switch (XFS_IFORK_FORMAT(ip, whichfork)) { case XFS_DINODE_FMT_LOCAL: - if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && + if ((iip->ili_fields & dataflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(ifp->if_u1.if_data != NULL); ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); @@ -2186,8 +2186,8 @@ xfs_iflush_fork( case XFS_DINODE_FMT_EXTENTS: ASSERT((ifp->if_flags & XFS_IFEXTENTS) || - !(iip->ili_format.ilf_fields & extflag[whichfork])); - if ((iip->ili_format.ilf_fields & extflag[whichfork]) && + !(iip->ili_fields & extflag[whichfork])); + if ((iip->ili_fields & extflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(xfs_iext_get_ext(ifp, 0)); ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); @@ -2197,7 +2197,7 @@ xfs_iflush_fork( break; case XFS_DINODE_FMT_BTREE: - if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && + if ((iip->ili_fields & brootflag[whichfork]) && (ifp->if_broot_bytes > 0)) { ASSERT(ifp->if_broot != NULL); ASSERT(ifp->if_broot_bytes <= @@ -2210,14 +2210,14 @@ xfs_iflush_fork( break; case XFS_DINODE_FMT_DEV: - if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { + if (iip->ili_fields & XFS_ILOG_DEV) { ASSERT(whichfork == XFS_DATA_FORK); xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); } break; case XFS_DINODE_FMT_UUID: - if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { + if (iip->ili_fields & XFS_ILOG_UUID) { ASSERT(whichfork == XFS_DATA_FORK); memcpy(XFS_DFORK_DPTR(dip), &ip->i_df.if_u2.if_uuid, @@ -2451,7 +2451,7 @@ xfs_iflush( */ if (XFS_FORCED_SHUTDOWN(mp)) { if (iip) - iip->ili_format.ilf_fields = 0; + iip->ili_fields = 0; xfs_ifunlock(ip); return XFS_ERROR(EIO); } @@ -2641,36 +2641,33 @@ xfs_iflush_int( xfs_inobp_check(mp, bp); /* - * We've recorded everything logged in the inode, so we'd - * like to clear the ilf_fields bits so we don't log and - * flush things unnecessarily. However, we can't stop - * logging all this information until the data we've copied - * into the disk buffer is written to disk. If we did we might - * overwrite the copy of the inode in the log with all the - * data after re-logging only part of it, and in the face of - * a crash we wouldn't have all the data we need to recover. + * We've recorded everything logged in the inode, so we'd like to clear + * the ili_fields bits so we don't log and flush things unnecessarily. 
+ * However, we can't stop logging all this information until the data + * we've copied into the disk buffer is written to disk. If we did we + * might overwrite the copy of the inode in the log with all the data + * after re-logging only part of it, and in the face of a crash we + * wouldn't have all the data we need to recover. * - * What we do is move the bits to the ili_last_fields field. - * When logging the inode, these bits are moved back to the - * ilf_fields field. In the xfs_iflush_done() routine we - * clear ili_last_fields, since we know that the information - * those bits represent is permanently on disk. As long as - * the flush completes before the inode is logged again, then - * both ilf_fields and ili_last_fields will be cleared. + * What we do is move the bits to the ili_last_fields field. When + * logging the inode, these bits are moved back to the ili_fields field. + * In the xfs_iflush_done() routine we clear ili_last_fields, since we + * know that the information those bits represent is permanently on + * disk. As long as the flush completes before the inode is logged + * again, then both ili_fields and ili_last_fields will be cleared. * - * We can play with the ilf_fields bits here, because the inode - * lock must be held exclusively in order to set bits there - * and the flush lock protects the ili_last_fields bits. - * Set ili_logged so the flush done - * routine can tell whether or not to look in the AIL. - * Also, store the current LSN of the inode so that we can tell - * whether the item has moved in the AIL from xfs_iflush_done(). - * In order to read the lsn we need the AIL lock, because - * it is a 64 bit value that cannot be read atomically. + * We can play with the ili_fields bits here, because the inode lock + * must be held exclusively in order to set bits there and the flush + * lock protects the ili_last_fields bits. Set ili_logged so the flush + * done routine can tell whether or not to look in the AIL. Also, store + * the current LSN of the inode so that we can tell whether the item has + * moved in the AIL from xfs_iflush_done(). In order to read the lsn we + * need the AIL lock, because it is a 64 bit value that cannot be read + * atomically. 
*/ - if (iip != NULL && iip->ili_format.ilf_fields != 0) { - iip->ili_last_fields = iip->ili_format.ilf_fields; - iip->ili_format.ilf_fields = 0; + if (iip != NULL && iip->ili_fields != 0) { + iip->ili_last_fields = iip->ili_fields; + iip->ili_fields = 0; iip->ili_logged = 1; xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 965d3d083625..8becef4f9e6a 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -59,20 +59,20 @@ xfs_inode_item_size( switch (ip->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: - if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) && + if ((iip->ili_fields & XFS_ILOG_DEXT) && ip->i_d.di_nextents > 0 && ip->i_df.if_bytes > 0) nvecs++; break; case XFS_DINODE_FMT_BTREE: - if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) && + if ((iip->ili_fields & XFS_ILOG_DBROOT) && ip->i_df.if_broot_bytes > 0) nvecs++; break; case XFS_DINODE_FMT_LOCAL: - if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) && + if ((iip->ili_fields & XFS_ILOG_DDATA) && ip->i_df.if_bytes > 0) nvecs++; break; @@ -95,20 +95,20 @@ xfs_inode_item_size( */ switch (ip->i_d.di_aformat) { case XFS_DINODE_FMT_EXTENTS: - if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) && + if ((iip->ili_fields & XFS_ILOG_AEXT) && ip->i_d.di_anextents > 0 && ip->i_afp->if_bytes > 0) nvecs++; break; case XFS_DINODE_FMT_BTREE: - if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) && + if ((iip->ili_fields & XFS_ILOG_ABROOT) && ip->i_afp->if_broot_bytes > 0) nvecs++; break; case XFS_DINODE_FMT_LOCAL: - if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) && + if ((iip->ili_fields & XFS_ILOG_ADATA) && ip->i_afp->if_bytes > 0) nvecs++; break; @@ -185,7 +185,6 @@ xfs_inode_item_format( vecp->i_type = XLOG_REG_TYPE_ICORE; vecp++; nvecs++; - iip->ili_format.ilf_fields |= XFS_ILOG_CORE; /* * If this is really an old format inode, then we need to @@ -218,11 +217,11 @@ xfs_inode_item_format( switch (ip->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV | XFS_ILOG_UUID); - if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) && + if ((iip->ili_fields & XFS_ILOG_DEXT) && ip->i_d.di_nextents > 0 && ip->i_df.if_bytes > 0) { ASSERT(ip->i_df.if_u1.if_extents != NULL); @@ -251,16 +250,16 @@ xfs_inode_item_format( vecp++; nvecs++; } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_DEXT; + iip->ili_fields &= ~XFS_ILOG_DEXT; } break; case XFS_DINODE_FMT_BTREE: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV | XFS_ILOG_UUID); - if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) && + if ((iip->ili_fields & XFS_ILOG_DBROOT) && ip->i_df.if_broot_bytes > 0) { ASSERT(ip->i_df.if_broot != NULL); vecp->i_addr = ip->i_df.if_broot; @@ -270,7 +269,7 @@ xfs_inode_item_format( nvecs++; iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; } else { - ASSERT(!(iip->ili_format.ilf_fields & + ASSERT(!(iip->ili_fields & XFS_ILOG_DBROOT)); #ifdef XFS_TRANS_DEBUG if (iip->ili_root_size > 0) { @@ -283,15 +282,15 @@ xfs_inode_item_format( ASSERT(ip->i_df.if_broot_bytes == 0); } #endif - iip->ili_format.ilf_fields &= ~XFS_ILOG_DBROOT; + iip->ili_fields &= ~XFS_ILOG_DBROOT; } break; case XFS_DINODE_FMT_LOCAL: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV | XFS_ILOG_UUID); - if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) && + if ((iip->ili_fields & XFS_ILOG_DDATA) && ip->i_df.if_bytes > 0) { 
ASSERT(ip->i_df.if_u1.if_data != NULL); ASSERT(ip->i_d.di_size > 0); @@ -311,25 +310,25 @@ xfs_inode_item_format( nvecs++; iip->ili_format.ilf_dsize = (unsigned)data_bytes; } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_DDATA; + iip->ili_fields &= ~XFS_ILOG_DDATA; } break; case XFS_DINODE_FMT_DEV: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT | XFS_ILOG_UUID); - if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { + if (iip->ili_fields & XFS_ILOG_DEV) { iip->ili_format.ilf_u.ilfu_rdev = ip->i_df.if_u2.if_rdev; } break; case XFS_DINODE_FMT_UUID: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT | XFS_ILOG_DEV); - if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { + if (iip->ili_fields & XFS_ILOG_UUID) { iip->ili_format.ilf_u.ilfu_uuid = ip->i_df.if_u2.if_uuid; } @@ -344,18 +343,17 @@ xfs_inode_item_format( * If there are no attributes associated with the file, then we're done. */ if (!XFS_IFORK_Q(ip)) { - iip->ili_format.ilf_size = nvecs; - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); - return; + goto out; } switch (ip->i_d.di_aformat) { case XFS_DINODE_FMT_EXTENTS: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT); - if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) && + if ((iip->ili_fields & XFS_ILOG_AEXT) && ip->i_d.di_anextents > 0 && ip->i_afp->if_bytes > 0) { ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) == @@ -378,15 +376,15 @@ xfs_inode_item_format( vecp++; nvecs++; } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_AEXT; + iip->ili_fields &= ~XFS_ILOG_AEXT; } break; case XFS_DINODE_FMT_BTREE: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); - if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) && + if ((iip->ili_fields & XFS_ILOG_ABROOT) && ip->i_afp->if_broot_bytes > 0) { ASSERT(ip->i_afp->if_broot != NULL); @@ -397,15 +395,15 @@ xfs_inode_item_format( nvecs++; iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_ABROOT; + iip->ili_fields &= ~XFS_ILOG_ABROOT; } break; case XFS_DINODE_FMT_LOCAL: - iip->ili_format.ilf_fields &= + iip->ili_fields &= ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); - if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) && + if ((iip->ili_fields & XFS_ILOG_ADATA) && ip->i_afp->if_bytes > 0) { ASSERT(ip->i_afp->if_u1.if_data != NULL); @@ -424,7 +422,7 @@ xfs_inode_item_format( nvecs++; iip->ili_format.ilf_asize = (unsigned)data_bytes; } else { - iip->ili_format.ilf_fields &= ~XFS_ILOG_ADATA; + iip->ili_fields &= ~XFS_ILOG_ADATA; } break; @@ -433,6 +431,14 @@ xfs_inode_item_format( break; } +out: + /* + * Now update the log format that goes out to disk from the in-core + * values. We always write the inode core to make the arithmetic + * games in recovery easier, which isn't a big deal as just about any + * transaction would dirty it anyway. 
+ */ + iip->ili_format.ilf_fields = XFS_ILOG_CORE | iip->ili_fields; iip->ili_format.ilf_size = nvecs; } @@ -517,7 +523,7 @@ xfs_inode_item_trylock( #ifdef DEBUG if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { - ASSERT(iip->ili_format.ilf_fields != 0); + ASSERT(iip->ili_fields != 0); ASSERT(iip->ili_logged == 0); ASSERT(lip->li_flags & XFS_LI_IN_AIL); } @@ -549,7 +555,7 @@ xfs_inode_item_unlock( if (iip->ili_extents_buf != NULL) { ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS); ASSERT(ip->i_d.di_nextents > 0); - ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT); + ASSERT(iip->ili_fields & XFS_ILOG_DEXT); ASSERT(ip->i_df.if_bytes > 0); kmem_free(iip->ili_extents_buf); iip->ili_extents_buf = NULL; @@ -557,7 +563,7 @@ xfs_inode_item_unlock( if (iip->ili_aextents_buf != NULL) { ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS); ASSERT(ip->i_d.di_anextents > 0); - ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT); + ASSERT(iip->ili_fields & XFS_ILOG_AEXT); ASSERT(ip->i_afp->if_bytes > 0); kmem_free(iip->ili_aextents_buf); iip->ili_aextents_buf = NULL; @@ -672,8 +678,7 @@ xfs_inode_item_push( * lock without sleeping, then there must not have been * anyone in the process of flushing the inode. */ - ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || - iip->ili_format.ilf_fields != 0); + ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || iip->ili_fields != 0); /* * Push the inode to it's backing buffer. This will not remove the @@ -896,7 +901,7 @@ xfs_iflush_abort( * Clear the inode logging fields so no more flushes are * attempted. */ - iip->ili_format.ilf_fields = 0; + iip->ili_fields = 0; } /* * Release the inode's flush lock since we're done with it. diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 25784b066568..bc183d81ad32 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -134,6 +134,7 @@ typedef struct xfs_inode_log_item { unsigned short ili_lock_flags; /* lock flags */ unsigned short ili_logged; /* flushed logged data */ unsigned int ili_last_fields; /* fields when flushed */ + unsigned int ili_fields; /* fields to be logged */ struct xfs_bmbt_rec *ili_extents_buf; /* array of logged data exts */ struct xfs_bmbt_rec *ili_aextents_buf; /* array of logged @@ -148,8 +149,7 @@ typedef struct xfs_inode_log_item { static inline int xfs_inode_clean(xfs_inode_t *ip) { - return !ip->i_itemp || - !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL); + return !ip->i_itemp || !(ip->i_itemp->ili_fields & XFS_ILOG_ALL); } extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 892763effdf1..7a7442c03f2b 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -130,12 +130,12 @@ xfs_trans_log_inode( /* * Always OR in the bits from the ili_last_fields field. * This is to coordinate with the xfs_iflush() and xfs_iflush_done() - * routines in the eventual clearing of the ilf_fields bits. + * routines in the eventual clearing of the ili_fields bits. * See the big comment in xfs_iflush() for an explanation of * this coordination mechanism. 
*/ flags |= ip->i_itemp->ili_last_fields; - ip->i_itemp->ili_format.ilf_fields |= flags; + ip->i_itemp->ili_fields |= flags; } #ifdef XFS_TRANS_DEBUG -- cgit From 8f639ddea0c4978ae9b4e46ea041c9e5afe0ee8d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 29 Feb 2012 09:53:55 +0000 Subject: xfs: reimplement fdatasync support Add an in-memory only flag to say we logged timestamps only, and use it to check if fdatasync can optimize away the log force. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_file.c | 7 +++++-- fs/xfs/xfs_inode_item.c | 3 ++- fs/xfs/xfs_inode_item.h | 11 ++++++++++- fs/xfs/xfs_super.c | 2 +- 4 files changed, 18 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 78d8b0299592..54a67dd9ac0a 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -197,8 +197,11 @@ xfs_file_fsync( * to flush the log up to the latest LSN that touched the inode. */ xfs_ilock(ip, XFS_ILOCK_SHARED); - if (xfs_ipincount(ip)) - lsn = ip->i_itemp->ili_last_lsn; + if (xfs_ipincount(ip)) { + if (!datasync || + (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP)) + lsn = ip->i_itemp->ili_last_lsn; + } xfs_iunlock(ip, XFS_ILOCK_SHARED); if (lsn) diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 8becef4f9e6a..05d924efceaf 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -438,7 +438,8 @@ out: * games in recovery easier, which isn't a big deal as just about any * transaction would dirty it anyway. */ - iip->ili_format.ilf_fields = XFS_ILOG_CORE | iip->ili_fields; + iip->ili_format.ilf_fields = XFS_ILOG_CORE | + (iip->ili_fields & ~XFS_ILOG_TIMESTAMP); iip->ili_format.ilf_size = nvecs; } diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index bc183d81ad32..41d61c3b7a36 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -86,6 +86,15 @@ typedef struct xfs_inode_log_format_64 { #define XFS_ILOG_AEXT 0x080 /* log i_af.if_extents */ #define XFS_ILOG_ABROOT 0x100 /* log i_af.i_broot */ + +/* + * The timestamps are dirty, but not necessarily anything else in the inode + * core. Unlike the other fields above this one must never make it to disk + * in the ilf_fields of the inode_log_format, but is purely store in-memory in + * ili_fields in the inode_log_item. 
+ */ +#define XFS_ILOG_TIMESTAMP 0x4000 + #define XFS_ILOG_NONCORE (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ XFS_ILOG_DBROOT | XFS_ILOG_DEV | \ XFS_ILOG_UUID | XFS_ILOG_ADATA | \ @@ -101,7 +110,7 @@ typedef struct xfs_inode_log_format_64 { XFS_ILOG_DEXT | XFS_ILOG_DBROOT | \ XFS_ILOG_DEV | XFS_ILOG_UUID | \ XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ - XFS_ILOG_ABROOT) + XFS_ILOG_ABROOT | XFS_ILOG_TIMESTAMP) static inline int xfs_ilog_fbroot(int w) { diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index e602c8c67c5c..e9ad7894648e 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -907,7 +907,7 @@ xfs_fs_dirty_inode( ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec; xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); error = xfs_trans_commit(tp, 0); if (error) goto trouble; -- cgit From 5318a29c1943e9719e71495db6efb6fc084a45a9 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Tue, 13 Mar 2012 20:44:26 -0700 Subject: pnfs-obj: Uglify objio_segment allocation for the sake of the principle :-( At some past instance Linus Trovalds wrote: > From: Linus Torvalds > commit a84a79e4d369a73c0130b5858199e949432da4c6 upstream. > > The size is always valid, but variable-length arrays generate worse code > for no good reason (unless the function happens to be inlined and the > compiler sees the length for the simple constant it is). > > Also, there seems to be some code generation problem on POWER, where > Henrik Bakken reports that register r28 can get corrupted under some > subtle circumstances (interrupt happening at the wrong time?). That all > indicates some seriously broken compiler issues, but since variable > length arrays are bad regardless, there's little point in trying to > chase it down. > > "Just don't do that, then". Since then any use of "variable length arrays" has become blasphemous. Even in perfectly good, beautiful, perfectly safe code like the one below where the variable length arrays are only used as a sizeof() parameter, for type-safe dynamic structure allocations. GCC is not executing any stack allocation code. I have produced a small file which defines two functions main1(unsigned numdevs) and main2(unsigned numdevs). main1 uses code as before with call to malloc and main2 uses code as of after this patch. I compiled it as: gcc -O2 -S see_asm.c and here is what I get: main1: .LFB7: .cfi_startproc mov %edi, %edi leaq 4(%rdi,%rdi), %rdi salq $3, %rdi jmp malloc .cfi_endproc .LFE7: .size main1, .-main1 .p2align 4,,15 .globl main2 .type main2, @function main2: .LFB8: .cfi_startproc mov %edi, %edi addq $2, %rdi salq $4, %rdi jmp malloc .cfi_endproc .LFE8: .size main2, .-main2 .section .text.startup,"ax",@progbits .p2align 4,,15 *Exact* same code !!! So please seriously consider not accepting this patch and leave the perfectly good code intact. 
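As an illustration only (not part of the patch): the sizing-and-layout trick the patch switches to -- a single allocation covering the header plus the two trailing arrays, with the array pointers derived by pointer arithmetic -- can be sketched in plain userspace C as below. The names (struct segment, struct dev, struct comp, alloc_segment) are hypothetical stand-ins for the objio/ore types, and malloc()+memset() stand in for kzalloc().

#include <stdlib.h>
#include <string.h>

struct dev;			/* stand-in for struct ore_dev */
struct comp { int id; };	/* stand-in for struct ore_comp */

struct segment {
	unsigned int numdevs;
	struct dev  **ods;	/* points just past the header */
	struct comp *comps;	/* points just past the ods[] array */
};

/* One allocation holds the header and both variable-length arrays. */
static struct segment *alloc_segment(unsigned int numdevs)
{
	size_t size = sizeof(struct segment) +
		      numdevs * sizeof(struct dev *) +
		      numdevs * sizeof(struct comp);
	struct segment *seg = malloc(size);

	if (!seg)
		return NULL;
	memset(seg, 0, size);
	seg->numdevs = numdevs;
	seg->ods   = (void *)(seg + 1);		/* first trailing array */
	seg->comps = (void *)(seg->ods + numdevs);	/* second trailing array */
	return seg;
}

As the assembly quoted above shows, the compiler emits the same size computation either way; the explicit form merely trades the anonymous wrapper struct (and its variable-length array members) for hand-written pointer arithmetic.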
CC: Linus Torvalds Signed-off-by: Boaz Harrosh Signed-off-by: Trond Myklebust --- fs/nfs/objlayout/objio_osd.c | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 405a62bdb9b4..3a621a2fd321 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -205,25 +205,36 @@ static void copy_single_comp(struct ore_components *oc, unsigned c, int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags, struct objio_segment **pseg) { - struct __alloc_objio_segment { - struct objio_segment olseg; - struct ore_dev *ods[numdevs]; - struct ore_comp comps[numdevs]; - } *aolseg; - - aolseg = kzalloc(sizeof(*aolseg), gfp_flags); - if (unlikely(!aolseg)) { +/* This is the in memory structure of the objio_segment + * + * struct __alloc_objio_segment { + * struct objio_segment olseg; + * struct ore_dev *ods[numdevs]; + * struct ore_comp comps[numdevs]; + * } *aolseg; + * NOTE: The code as above compiles and runs perfectly. It is elegant, + * type safe and compact. At some Past time Linus has decided he does not + * like variable length arrays, For the sake of this principal we uglify + * the code as below. + */ + struct objio_segment *lseg; + size_t lseg_size = sizeof(*lseg) + + numdevs * sizeof(lseg->oc.ods[0]) + + numdevs * sizeof(*lseg->oc.comps); + + lseg = kzalloc(lseg_size, gfp_flags); + if (unlikely(!lseg)) { dprintk("%s: Faild allocation numdevs=%d size=%zd\n", __func__, - numdevs, sizeof(*aolseg)); + numdevs, lseg_size); return -ENOMEM; } - aolseg->olseg.oc.numdevs = numdevs; - aolseg->olseg.oc.single_comp = EC_MULTPLE_COMPS; - aolseg->olseg.oc.comps = aolseg->comps; - aolseg->olseg.oc.ods = aolseg->ods; + lseg->oc.numdevs = numdevs; + lseg->oc.single_comp = EC_MULTPLE_COMPS; + lseg->oc.ods = (void *)(lseg + 1); + lseg->oc.comps = (void *)(lseg->oc.ods + numdevs); - *pseg = &aolseg->olseg; + *pseg = lseg; return 0; } -- cgit From 96dcadc2fdd111dca90d559f189a30c65394451a Mon Sep 17 00:00:00 2001 From: William Dauchy Date: Wed, 14 Mar 2012 12:32:04 +0100 Subject: NFSv4: Rate limit the state manager for lock reclaim warning messages Adding rate limit on `Lock reclaim failed` messages since it could fill up system logs Signed-off-by: William Dauchy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index cb708b20a775..119006b0815a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1261,7 +1261,8 @@ restart: spin_lock(&state->state_lock); list_for_each_entry(lock, &state->lock_states, ls_locks) { if (!(lock->ls_flags & NFS_LOCK_INITIALIZED)) - printk("NFS: %s: Lock reclaim " + pr_warn_ratelimited("NFS: " + "%s: Lock reclaim " "failed!\n", __func__); } spin_unlock(&state->state_lock); -- cgit From 48776fd22344ad80adcbac0abc9c0da60c6481d2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 2012 08:52:33 +0000 Subject: xfs: use common code for quota statistics Switch the quota code over to use the generic XFS statistics infrastructure. While the legacy /proc/fs/xfs/xqm and /proc/fs/xfs/xqmstats interfaces are preserved for now the statistics that still have a meaning with the current code are now also available from /proc/fs/xfs/stats. 
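As an illustration only (not part of the patch): the practical effect for tooling is that the quota counters become one more line in /proc/fs/xfs/stat, prefixed "qm". A minimal userspace C sketch for picking that line out might look like the following; the function name is hypothetical and error handling is kept to the bare minimum.

#include <stdio.h>
#include <string.h>

/* Print the quota-manager ("qm") line from the generic XFS stats file. */
static int print_xfs_qm_stats(void)
{
	char line[512];
	FILE *f = fopen("/proc/fs/xfs/stat", "r");
	int ret = -1;

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "qm ", 3)) {
			fputs(line, stdout);
			ret = 0;
			break;
		}
	}
	fclose(f);
	return ret;
}

The counters on that line appear in the order the patch adds them to struct xfsstats: dqreclaims, dqreclaim_misses, dquot_dups, dqcachemisses, dqcachehits, dqwants, followed by the incore and unused dquot counts.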
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/Makefile | 3 -- fs/xfs/xfs_dquot.c | 14 ++++--- fs/xfs/xfs_qm.c | 9 +++-- fs/xfs/xfs_qm.h | 2 - fs/xfs/xfs_qm_bhv.c | 2 - fs/xfs/xfs_qm_stats.c | 105 -------------------------------------------------- fs/xfs/xfs_qm_stats.h | 53 ------------------------- fs/xfs/xfs_stats.c | 99 +++++++++++++++++++++++++++++++++++++++++------ fs/xfs/xfs_stats.h | 10 +++++ 9 files changed, 110 insertions(+), 187 deletions(-) delete mode 100644 fs/xfs/xfs_qm_stats.c delete mode 100644 fs/xfs/xfs_qm_stats.h (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 427a4e82a588..0a9977983f92 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -96,9 +96,6 @@ xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \ xfs_qm_bhv.o \ xfs_qm.o \ xfs_quotaops.o -ifeq ($(CONFIG_XFS_QUOTA),y) -xfs-$(CONFIG_PROC_FS) += xfs_qm_stats.o -endif xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o xfs-$(CONFIG_PROC_FS) += xfs_stats.o diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 71e615fef174..98d7e25947fa 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -74,7 +74,7 @@ xfs_qm_dqdestroy( mutex_destroy(&dqp->q_qlock); kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); - atomic_dec(&xfs_Gqm->qm_totaldquots); + XFS_STATS_DEC(xs_qm_dquot); } /* @@ -516,7 +516,7 @@ xfs_qm_dqread( if (!(type & XFS_DQ_USER)) lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); - atomic_inc(&xfs_Gqm->qm_totaldquots); + XFS_STATS_INC(xs_qm_dquot); trace_xfs_dqread(dqp); @@ -712,12 +712,12 @@ restart: */ switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) { case -1: - XQM_STATS_INC(xqmstats.xs_qm_dquot_dups); + XFS_STATS_INC(xs_qm_dquot_dups); mutex_unlock(&h->qh_lock); delay(1); goto restart; case 0: - XQM_STATS_INC(xqmstats.xs_qm_dqcachehits); + XFS_STATS_INC(xs_qm_dqcachehits); /* * The dquot was found, moved to the front of the chain, * taken off the freelist if it was on it, and locked @@ -729,7 +729,7 @@ restart: trace_xfs_dqget_hit(*O_dqpp); return 0; /* success */ default: - XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses); + XFS_STATS_INC(xs_qm_dqcachemisses); break; } @@ -804,7 +804,7 @@ restart: xfs_qm_dqput(tmpdqp); mutex_unlock(&h->qh_lock); xfs_qm_dqdestroy(dqp); - XQM_STATS_INC(xqmstats.xs_qm_dquot_dups); + XFS_STATS_INC(xs_qm_dquot_dups); goto restart; default: break; @@ -873,6 +873,7 @@ recurse: if (list_empty(&dqp->q_freelist)) { list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); xfs_Gqm->qm_dqfrlist_cnt++; + XFS_STATS_INC(xs_qm_dquot_unused); } mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); @@ -1178,6 +1179,7 @@ xfs_qm_dqpurge( ASSERT(!list_empty(&dqp->q_freelist)); list_del_init(&dqp->q_freelist); xfs_Gqm->qm_dqfrlist_cnt--; + XFS_STATS_DEC(xs_qm_dquot_unused); mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); xfs_qm_dqdestroy(dqp); diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index c872feaf3697..0dde1f48c280 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -134,7 +134,6 @@ xfs_Gqm_init(void) } else xqm->qm_dqtrxzone = qm_dqtrxzone; - atomic_set(&xqm->qm_totaldquots, 0); xqm->qm_nrefs = 0; return xqm; @@ -1637,10 +1636,11 @@ xfs_qm_dqreclaim_one( xfs_dqunlock(dqp); trace_xfs_dqreclaim_want(dqp); - XQM_STATS_INC(xqmstats.xs_qm_dqwants); + XFS_STATS_INC(xs_qm_dqwants); list_del_init(&dqp->q_freelist); xfs_Gqm->qm_dqfrlist_cnt--; + XFS_STATS_DEC(xs_qm_dquot_unused); return; } @@ -1690,9 +1690,10 @@ xfs_qm_dqreclaim_one( ASSERT(dqp->q_nrefs == 0); list_move_tail(&dqp->q_freelist, dispose_list); 
xfs_Gqm->qm_dqfrlist_cnt--; + XFS_STATS_DEC(xs_qm_dquot_unused); trace_xfs_dqreclaim_done(dqp); - XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); + XFS_STATS_INC(xs_qm_dqreclaims); return; out_busy: @@ -1704,7 +1705,7 @@ out_busy: list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); trace_xfs_dqreclaim_busy(dqp); - XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); + XFS_STATS_INC(xs_qm_dqreclaim_misses); } STATIC int diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 9a9b997e1a0a..89f213f7252a 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -21,7 +21,6 @@ #include "xfs_dquot_item.h" #include "xfs_dquot.h" #include "xfs_quota_priv.h" -#include "xfs_qm_stats.h" struct xfs_qm; struct xfs_inode; @@ -60,7 +59,6 @@ typedef struct xfs_qm { struct list_head qm_dqfrlist; /* freelist of dquots */ struct mutex qm_dqfrlist_lock; int qm_dqfrlist_cnt; - atomic_t qm_totaldquots; /* total incore dquots */ uint qm_nrefs; /* file systems with quota on */ kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index e4e37877f867..809f86857c6d 100644 --- a/fs/xfs/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c @@ -162,13 +162,11 @@ xfs_qm_init(void) { printk(KERN_INFO "SGI XFS Quota Management subsystem\n"); mutex_init(&xfs_Gqm_lock); - xfs_qm_init_procfs(); } void __exit xfs_qm_exit(void) { - xfs_qm_cleanup_procfs(); if (qm_dqzone) kmem_zone_destroy(qm_dqzone); if (qm_dqtrxzone) diff --git a/fs/xfs/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c deleted file mode 100644 index 5729ba570877..000000000000 --- a/fs/xfs/xfs_qm_stats.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2000-2003 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_itable.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_qm.h" - -struct xqmstats xqmstats; - -static int xqm_proc_show(struct seq_file *m, void *v) -{ - /* maximum; incore; ratio free to inuse; freelist */ - seq_printf(m, "%d\t%d\t%d\t%u\n", - 0, - xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, - 0, - xfs_Gqm? 
xfs_Gqm->qm_dqfrlist_cnt : 0); - return 0; -} - -static int xqm_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, xqm_proc_show, NULL); -} - -static const struct file_operations xqm_proc_fops = { - .owner = THIS_MODULE, - .open = xqm_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int xqmstat_proc_show(struct seq_file *m, void *v) -{ - /* quota performance statistics */ - seq_printf(m, "qm %u %u %u %u %u %u %u %u\n", - xqmstats.xs_qm_dqreclaims, - xqmstats.xs_qm_dqreclaim_misses, - xqmstats.xs_qm_dquot_dups, - xqmstats.xs_qm_dqcachemisses, - xqmstats.xs_qm_dqcachehits, - xqmstats.xs_qm_dqwants, - xqmstats.xs_qm_dqshake_reclaims, - xqmstats.xs_qm_dqinact_reclaims); - return 0; -} - -static int xqmstat_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, xqmstat_proc_show, NULL); -} - -static const struct file_operations xqmstat_proc_fops = { - .owner = THIS_MODULE, - .open = xqmstat_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void -xfs_qm_init_procfs(void) -{ - proc_create("fs/xfs/xqmstat", 0, NULL, &xqmstat_proc_fops); - proc_create("fs/xfs/xqm", 0, NULL, &xqm_proc_fops); -} - -void -xfs_qm_cleanup_procfs(void) -{ - remove_proc_entry("fs/xfs/xqm", NULL); - remove_proc_entry("fs/xfs/xqmstat", NULL); -} diff --git a/fs/xfs/xfs_qm_stats.h b/fs/xfs/xfs_qm_stats.h deleted file mode 100644 index 5b964fc0dc09..000000000000 --- a/fs/xfs/xfs_qm_stats.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2002 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_QM_STATS_H__ -#define __XFS_QM_STATS_H__ - -#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) - -/* - * XQM global statistics - */ -struct xqmstats { - __uint32_t xs_qm_dqreclaims; - __uint32_t xs_qm_dqreclaim_misses; - __uint32_t xs_qm_dquot_dups; - __uint32_t xs_qm_dqcachemisses; - __uint32_t xs_qm_dqcachehits; - __uint32_t xs_qm_dqwants; - __uint32_t xs_qm_dqshake_reclaims; - __uint32_t xs_qm_dqinact_reclaims; -}; - -extern struct xqmstats xqmstats; - -# define XQM_STATS_INC(count) ( (count)++ ) - -extern void xfs_qm_init_procfs(void); -extern void xfs_qm_cleanup_procfs(void); - -#else - -# define XQM_STATS_INC(count) do { } while (0) - -static inline void xfs_qm_init_procfs(void) { }; -static inline void xfs_qm_cleanup_procfs(void) { }; - -#endif - -#endif /* __XFS_QM_STATS_H__ */ diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c index 76fdc5861932..ce372b7d5644 100644 --- a/fs/xfs/xfs_stats.c +++ b/fs/xfs/xfs_stats.c @@ -20,9 +20,18 @@ DEFINE_PER_CPU(struct xfsstats, xfsstats); +static int counter_val(int idx) +{ + int val = 0, cpu; + + for_each_possible_cpu(cpu) + val += *(((__u32 *)&per_cpu(xfsstats, cpu) + idx)); + return val; +} + static int xfs_stat_proc_show(struct seq_file *m, void *v) { - int c, i, j, val; + int i, j; __uint64_t xs_xstrat_bytes = 0; __uint64_t xs_write_bytes = 0; __uint64_t xs_read_bytes = 0; @@ -50,20 +59,16 @@ static int xfs_stat_proc_show(struct seq_file *m, void *v) { "abtc2", XFSSTAT_END_ABTC_V2 }, { "bmbt2", XFSSTAT_END_BMBT_V2 }, { "ibt2", XFSSTAT_END_IBT_V2 }, + /* we print both series of quota information together */ + { "qm", XFSSTAT_END_QM }, }; /* Loop over all stats groups */ - for (i=j = 0; i < ARRAY_SIZE(xstats); i++) { + for (i = j = 0; i < ARRAY_SIZE(xstats); i++) { seq_printf(m, "%s", xstats[i].desc); /* inner loop does each group */ - while (j < xstats[i].endpoint) { - val = 0; - /* sum over all cpus */ - for_each_possible_cpu(c) - val += *(((__u32*)&per_cpu(xfsstats, c) + j)); - seq_printf(m, " %u", val); - j++; - } + for (; j < xstats[i].endpoint; j++) + seq_printf(m, " %u", counter_val(j)); seq_putc(m, '\n'); } /* extra precision counters */ @@ -97,6 +102,58 @@ static const struct file_operations xfs_stat_proc_fops = { .release = single_release, }; +/* legacy quota interfaces */ +#ifdef CONFIG_XFS_QUOTA +static int xqm_proc_show(struct seq_file *m, void *v) +{ + /* maximum; incore; ratio free to inuse; freelist */ + seq_printf(m, "%d\t%d\t%d\t%u\n", + 0, + counter_val(XFSSTAT_END_XQMSTAT), + 0, + counter_val(XFSSTAT_END_XQMSTAT + 1)); + return 0; +} + +static int xqm_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, xqm_proc_show, NULL); +} + +static const struct file_operations xqm_proc_fops = { + .owner = THIS_MODULE, + .open = xqm_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* legacy quota stats interface no 2 */ +static int xqmstat_proc_show(struct seq_file *m, void *v) +{ + int j; + + seq_printf(m, "qm"); + for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++) + seq_printf(m, " %u", counter_val(j)); + seq_putc(m, '\n'); + return 0; +} + +static int xqmstat_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, xqmstat_proc_show, NULL); +} + +static const struct file_operations xqmstat_proc_fops 
= { + .owner = THIS_MODULE, + .open = xqmstat_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif /* CONFIG_XFS_QUOTA */ + int xfs_init_procfs(void) { @@ -105,10 +162,24 @@ xfs_init_procfs(void) if (!proc_create("fs/xfs/stat", 0, NULL, &xfs_stat_proc_fops)) - goto out_remove_entry; + goto out_remove_xfs_dir; +#ifdef CONFIG_XFS_QUOTA + if (!proc_create("fs/xfs/xqmstat", 0, NULL, + &xqmstat_proc_fops)) + goto out_remove_stat_file; + if (!proc_create("fs/xfs/xqm", 0, NULL, + &xqm_proc_fops)) + goto out_remove_xqmstat_file; +#endif return 0; - out_remove_entry: +#ifdef CONFIG_XFS_QUOTA + out_remove_xqmstat_file: + remove_proc_entry("fs/xfs/xqmstat", NULL); + out_remove_stat_file: + remove_proc_entry("fs/xfs/stat", NULL); +#endif + out_remove_xfs_dir: remove_proc_entry("fs/xfs", NULL); out: return -ENOMEM; @@ -117,6 +188,10 @@ xfs_init_procfs(void) void xfs_cleanup_procfs(void) { +#ifdef CONFIG_XFS_QUOTA + remove_proc_entry("fs/xfs/xqm", NULL); + remove_proc_entry("fs/xfs/xqmstat", NULL); +#endif remove_proc_entry("fs/xfs/stat", NULL); remove_proc_entry("fs/xfs", NULL); } diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h index 736854b1ca1a..c03ad38ceaeb 100644 --- a/fs/xfs/xfs_stats.h +++ b/fs/xfs/xfs_stats.h @@ -183,6 +183,16 @@ struct xfsstats { __uint32_t xs_ibt_2_alloc; __uint32_t xs_ibt_2_free; __uint32_t xs_ibt_2_moves; +#define XFSSTAT_END_XQMSTAT (XFSSTAT_END_IBT_V2+6) + __uint32_t xs_qm_dqreclaims; + __uint32_t xs_qm_dqreclaim_misses; + __uint32_t xs_qm_dquot_dups; + __uint32_t xs_qm_dqcachemisses; + __uint32_t xs_qm_dqcachehits; + __uint32_t xs_qm_dqwants; +#define XFSSTAT_END_QM (XFSSTAT_END_XQMSTAT+2) + __uint32_t xs_qm_dquot; + __uint32_t xs_qm_dquot_unused; /* Extra precision counters */ __uint64_t xs_xstrat_bytes; __uint64_t xs_write_bytes; -- cgit From f8739c3ce2ac9a01515b56026b6a066c0808234b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 2012 08:52:34 +0000 Subject: xfs: per-filesystem dquot LRU lists Replace the global dquot lru lists with a per-filesystem one. Note that the shrinker isn't wire up to the per-superblock VFS shrinker infrastructure as would have problems summing up and splitting the counts for inodes and dquots. I don't think this is a major problem as the quota cache isn't as interwinded with the inode cache as the dentry cache is, because an inode that is dropped from the cache will generally release a dquot reference, but most of the time it won't be the last one. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Ben Myers --- fs/xfs/xfs_dquot.c | 84 ++++++++++++++++++++++++++++-------------------------- fs/xfs/xfs_dquot.h | 2 +- fs/xfs/xfs_qm.c | 59 ++++++++++++++++---------------------- fs/xfs/xfs_qm.h | 7 +++-- 4 files changed, 74 insertions(+), 78 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 98d7e25947fa..fec1a3d78e9f 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -47,7 +47,7 @@ * qi->qi_dqlist_lock * dquot->q_qlock (xfs_dqlock() and friends) * dquot->q_flush (xfs_dqflock() and friends) - * xfs_Gqm->qm_dqfrlist_lock + * qi->qi_lru_lock * * If two dquots need to be locked the order is user before group/project, * otherwise by the lowest id first, see xfs_dqlock2. 
@@ -69,7 +69,7 @@ void xfs_qm_dqdestroy( xfs_dquot_t *dqp) { - ASSERT(list_empty(&dqp->q_freelist)); + ASSERT(list_empty(&dqp->q_lru)); mutex_destroy(&dqp->q_qlock); kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); @@ -497,7 +497,7 @@ xfs_qm_dqread( dqp->dq_flags = type; dqp->q_core.d_id = cpu_to_be32(id); dqp->q_mount = mp; - INIT_LIST_HEAD(&dqp->q_freelist); + INIT_LIST_HEAD(&dqp->q_lru); mutex_init(&dqp->q_qlock); init_waitqueue_head(&dqp->q_pinwait); @@ -844,38 +844,22 @@ restart: } -/* - * Release a reference to the dquot (decrement ref-count) - * and unlock it. If there is a group quota attached to this - * dquot, carefully release that too without tripping over - * deadlocks'n'stuff. - */ -void -xfs_qm_dqput( +STATIC void +xfs_qm_dqput_final( struct xfs_dquot *dqp) { + struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo; struct xfs_dquot *gdqp; - ASSERT(dqp->q_nrefs > 0); - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - - trace_xfs_dqput(dqp); - -recurse: - if (--dqp->q_nrefs > 0) { - xfs_dqunlock(dqp); - return; - } - trace_xfs_dqput_free(dqp); - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - if (list_empty(&dqp->q_freelist)) { - list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); - xfs_Gqm->qm_dqfrlist_cnt++; + mutex_lock(&qi->qi_lru_lock); + if (list_empty(&dqp->q_lru)) { + list_add_tail(&dqp->q_lru, &qi->qi_lru_list); + qi->qi_lru_count++; XFS_STATS_INC(xs_qm_dquot_unused); } - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + mutex_unlock(&qi->qi_lru_lock); /* * If we just added a udquot to the freelist, then we want to release @@ -892,10 +876,29 @@ recurse: /* * If we had a group quota hint, release it now. */ - if (gdqp) { - dqp = gdqp; - goto recurse; - } + if (gdqp) + xfs_qm_dqput(gdqp); +} + +/* + * Release a reference to the dquot (decrement ref-count) and unlock it. + * + * If there is a group quota attached to this dquot, carefully release that + * too without tripping over deadlocks'n'stuff. + */ +void +xfs_qm_dqput( + struct xfs_dquot *dqp) +{ + ASSERT(dqp->q_nrefs > 0); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + trace_xfs_dqput(dqp); + + if (--dqp->q_nrefs > 0) + xfs_dqunlock(dqp); + else + xfs_qm_dqput_final(dqp); } /* @@ -1115,6 +1118,7 @@ xfs_qm_dqpurge( { struct xfs_mount *mp = dqp->q_mount; struct xfs_dqhash *qh = dqp->q_hash; + struct xfs_quotainfo *qi = mp->m_quotainfo; xfs_dqlock(dqp); @@ -1165,22 +1169,22 @@ xfs_qm_dqpurge( qh->qh_version++; mutex_unlock(&qh->qh_lock); - mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); + mutex_lock(&qi->qi_dqlist_lock); list_del_init(&dqp->q_mplist); - mp->m_quotainfo->qi_dqreclaims++; - mp->m_quotainfo->qi_dquots--; - mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); + qi->qi_dqreclaims++; + qi->qi_dquots--; + mutex_unlock(&qi->qi_dqlist_lock); /* * We move dquots to the freelist as soon as their reference count * hits zero, so it really should be on the freelist here. 
*/ - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - ASSERT(!list_empty(&dqp->q_freelist)); - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; + mutex_lock(&qi->qi_lru_lock); + ASSERT(!list_empty(&dqp->q_lru)); + list_del_init(&dqp->q_lru); + qi->qi_lru_count--; XFS_STATS_DEC(xs_qm_dquot_unused); - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + mutex_unlock(&qi->qi_lru_lock); xfs_qm_dqdestroy(dqp); } diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 60b0d72b0241..f291c25e5992 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -47,7 +47,7 @@ struct xfs_trans; */ typedef struct xfs_dquot { uint dq_flags; /* various flags (XFS_DQ_*) */ - struct list_head q_freelist; /* global free list of dquots */ + struct list_head q_lru; /* global free list of dquots */ struct list_head q_mplist; /* mount's list of dquots */ struct list_head q_hashlist; /* gloabl hash list of dquots */ xfs_dqhash_t *q_hash; /* the hashchain header */ diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 0dde1f48c280..a2579e1d687f 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -61,11 +61,6 @@ STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); -static struct shrinker xfs_qm_shaker = { - .shrink = xfs_qm_shake, - .seeks = DEFAULT_SEEKS, -}; - /* * Initialize the XQM structure. * Note that there is not one quota manager per file system. @@ -105,13 +100,6 @@ xfs_Gqm_init(void) xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); } - /* - * Freelist of all dquots of all file systems - */ - INIT_LIST_HEAD(&xqm->qm_dqfrlist); - xqm->qm_dqfrlist_cnt = 0; - mutex_init(&xqm->qm_dqfrlist_lock); - /* * dquot zone. we register our own low-memory callback. */ @@ -122,8 +110,6 @@ xfs_Gqm_init(void) } else xqm->qm_dqzone = qm_dqzone; - register_shrinker(&xfs_qm_shaker); - /* * The t_dqinfo portion of transactions. */ @@ -155,12 +141,6 @@ xfs_qm_destroy( ASSERT(xqm != NULL); ASSERT(xqm->qm_nrefs == 0); - unregister_shrinker(&xfs_qm_shaker); - - mutex_lock(&xqm->qm_dqfrlist_lock); - ASSERT(list_empty(&xqm->qm_dqfrlist)); - mutex_unlock(&xqm->qm_dqfrlist_lock); - hsize = xqm->qm_dqhashmask + 1; for (i = 0; i < hsize; i++) { xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); @@ -826,6 +806,10 @@ xfs_qm_init_quotainfo( mutex_init(&qinf->qi_dqlist_lock); lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class); + INIT_LIST_HEAD(&qinf->qi_lru_list); + qinf->qi_lru_count = 0; + mutex_init(&qinf->qi_lru_lock); + qinf->qi_dqreclaims = 0; /* mutex used to serialize quotaoffs */ @@ -893,6 +877,9 @@ xfs_qm_init_quotainfo( qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; } + qinf->qi_shrinker.shrink = xfs_qm_shake; + qinf->qi_shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&qinf->qi_shrinker); return 0; } @@ -912,6 +899,8 @@ xfs_qm_destroy_quotainfo( ASSERT(qi != NULL); ASSERT(xfs_Gqm != NULL); + unregister_shrinker(&qi->qi_shrinker); + /* * Release the reference that XQM kept, so that we know * when the XQM structure should be freed. 
We cannot assume @@ -1623,6 +1612,7 @@ xfs_qm_dqreclaim_one( struct list_head *dispose_list) { struct xfs_mount *mp = dqp->q_mount; + struct xfs_quotainfo *qi = mp->m_quotainfo; int error; if (!xfs_dqlock_nowait(dqp)) @@ -1638,8 +1628,8 @@ xfs_qm_dqreclaim_one( trace_xfs_dqreclaim_want(dqp); XFS_STATS_INC(xs_qm_dqwants); - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; + list_del_init(&dqp->q_lru); + qi->qi_lru_count--; XFS_STATS_DEC(xs_qm_dquot_unused); return; } @@ -1688,8 +1678,8 @@ xfs_qm_dqreclaim_one( xfs_dqunlock(dqp); ASSERT(dqp->q_nrefs == 0); - list_move_tail(&dqp->q_freelist, dispose_list); - xfs_Gqm->qm_dqfrlist_cnt--; + list_move_tail(&dqp->q_lru, dispose_list); + qi->qi_lru_count--; XFS_STATS_DEC(xs_qm_dquot_unused); trace_xfs_dqreclaim_done(dqp); @@ -1702,7 +1692,7 @@ out_busy: /* * Move the dquot to the tail of the list so that we don't spin on it. */ - list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); + list_move_tail(&dqp->q_lru, &qi->qi_lru_list); trace_xfs_dqreclaim_busy(dqp); XFS_STATS_INC(xs_qm_dqreclaim_misses); @@ -1713,6 +1703,8 @@ xfs_qm_shake( struct shrinker *shrink, struct shrink_control *sc) { + struct xfs_quotainfo *qi = + container_of(shrink, struct xfs_quotainfo, qi_shrinker); int nr_to_scan = sc->nr_to_scan; LIST_HEAD (dispose_list); struct xfs_dquot *dqp; @@ -1722,24 +1714,23 @@ xfs_qm_shake( if (!nr_to_scan) goto out; - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - while (!list_empty(&xfs_Gqm->qm_dqfrlist)) { + mutex_lock(&qi->qi_lru_lock); + while (!list_empty(&qi->qi_lru_list)) { if (nr_to_scan-- <= 0) break; - dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot, - q_freelist); + dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot, + q_lru); xfs_qm_dqreclaim_one(dqp, &dispose_list); } - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + mutex_unlock(&qi->qi_lru_lock); while (!list_empty(&dispose_list)) { - dqp = list_first_entry(&dispose_list, struct xfs_dquot, - q_freelist); - list_del_init(&dqp->q_freelist); + dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru); + list_del_init(&dqp->q_lru); xfs_qm_dqfree_one(dqp); } out: - return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure; + return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure; } /* diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 89f213f7252a..c236bba9bfab 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -56,9 +56,6 @@ typedef struct xfs_qm { xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */ xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */ uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */ - struct list_head qm_dqfrlist; /* freelist of dquots */ - struct mutex qm_dqfrlist_lock; - int qm_dqfrlist_cnt; uint qm_nrefs; /* file systems with quota on */ kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ @@ -71,6 +68,9 @@ typedef struct xfs_qm { typedef struct xfs_quotainfo { xfs_inode_t *qi_uquotaip; /* user quota inode */ xfs_inode_t *qi_gquotaip; /* group quota inode */ + struct list_head qi_lru_list; + struct mutex qi_lru_lock; + int qi_lru_count; struct list_head qi_dqlist; /* all dquots in filesys */ struct mutex qi_dqlist_lock; int qi_dquots; @@ -91,6 +91,7 @@ typedef struct xfs_quotainfo { xfs_qcnt_t qi_isoftlimit; /* default inode count soft limit */ xfs_qcnt_t qi_rtbhardlimit;/* default realtime blk hard limit */ xfs_qcnt_t qi_rtbsoftlimit;/* default realtime blk soft limit */ + struct shrinker qi_shrinker; } xfs_quotainfo_t; -- cgit From 
9f920f116426806bfa34c1422742e1bf7b7a2b4b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 2012 08:52:35 +0000 Subject: xfs: use per-filesystem radix trees for dquot lookup Replace the global hash tables for looking up in-memory dquot structures with per-filesystem radix trees to allow scaling to a large number of in-memory dquot structures. Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_dquot.c | 188 +++++++++++++----------------------------------- fs/xfs/xfs_dquot.h | 12 ---- fs/xfs/xfs_qm.c | 95 +++--------------------- fs/xfs/xfs_qm.h | 19 +++-- fs/xfs/xfs_quota_priv.h | 11 --- fs/xfs/xfs_trace.h | 4 +- 6 files changed, 66 insertions(+), 263 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index fec1a3d78e9f..49456e555cfa 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -43,7 +43,7 @@ * Lock order: * * ip->i_lock - * qh->qh_lock + * qi->qi_tree_lock * qi->qi_dqlist_lock * dquot->q_qlock (xfs_dqlock() and friends) * dquot->q_flush (xfs_dqflock() and friends) @@ -601,60 +601,6 @@ error0: return error; } -/* - * Lookup a dquot in the incore dquot hashtable. We keep two separate - * hashtables for user and group dquots; and, these are global tables - * inside the XQM, not per-filesystem tables. - * The hash chain must be locked by caller, and it is left locked - * on return. Returning dquot is locked. - */ -STATIC int -xfs_qm_dqlookup( - xfs_mount_t *mp, - xfs_dqid_t id, - xfs_dqhash_t *qh, - xfs_dquot_t **O_dqpp) -{ - xfs_dquot_t *dqp; - - ASSERT(mutex_is_locked(&qh->qh_lock)); - - /* - * Traverse the hashchain looking for a match - */ - list_for_each_entry(dqp, &qh->qh_list, q_hashlist) { - /* - * We already have the hashlock. We don't need the - * dqlock to look at the id field of the dquot, since the - * id can't be modified without the hashlock anyway. - */ - if (be32_to_cpu(dqp->q_core.d_id) != id || dqp->q_mount != mp) - continue; - - trace_xfs_dqlookup_found(dqp); - - xfs_dqlock(dqp); - if (dqp->dq_flags & XFS_DQ_FREEING) { - *O_dqpp = NULL; - xfs_dqunlock(dqp); - return -1; - } - - dqp->q_nrefs++; - - /* - * move the dquot to the front of the hashchain - */ - list_move(&dqp->q_hashlist, &qh->qh_list); - trace_xfs_dqlookup_done(dqp); - *O_dqpp = dqp; - return 0; - } - - *O_dqpp = NULL; - return 1; -} - /* * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a * a locked dquot, doing an allocation (if requested) as needed. @@ -672,10 +618,10 @@ xfs_qm_dqget( uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */ xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ { - xfs_dquot_t *dqp, *dqp1; - xfs_dqhash_t *h; - uint version; - int error; + struct xfs_quotainfo *qi = mp->m_quotainfo; + struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type); + struct xfs_dquot *dqp; + int error; ASSERT(XFS_IS_QUOTA_RUNNING(mp)); if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || @@ -683,7 +629,6 @@ xfs_qm_dqget( (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { return (ESRCH); } - h = XFS_DQ_HASH(mp, id, type); #ifdef DEBUG if (xfs_do_dqerror) { @@ -704,34 +649,28 @@ xfs_qm_dqget( #endif restart: - mutex_lock(&h->qh_lock); + mutex_lock(&qi->qi_tree_lock); + dqp = radix_tree_lookup(tree, id); + if (dqp) { + xfs_dqlock(dqp); + if (dqp->dq_flags & XFS_DQ_FREEING) { + xfs_dqunlock(dqp); + mutex_unlock(&qi->qi_tree_lock); + trace_xfs_dqget_freeing(dqp); + delay(1); + goto restart; + } - /* - * Look in the cache (hashtable). - * The chain is kept locked during lookup. 
- */ - switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) { - case -1: - XFS_STATS_INC(xs_qm_dquot_dups); - mutex_unlock(&h->qh_lock); - delay(1); - goto restart; - case 0: + dqp->q_nrefs++; + mutex_unlock(&qi->qi_tree_lock); + + trace_xfs_dqget_hit(dqp); XFS_STATS_INC(xs_qm_dqcachehits); - /* - * The dquot was found, moved to the front of the chain, - * taken off the freelist if it was on it, and locked - * at this point. Just unlock the hashchain and return. - */ - ASSERT(*O_dqpp); - ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp)); - mutex_unlock(&h->qh_lock); - trace_xfs_dqget_hit(*O_dqpp); - return 0; /* success */ - default: - XFS_STATS_INC(xs_qm_dqcachemisses); - break; + *O_dqpp = dqp; + return 0; } + mutex_unlock(&qi->qi_tree_lock); + XFS_STATS_INC(xs_qm_dqcachemisses); /* * Dquot cache miss. We don't want to keep the inode lock across @@ -742,12 +681,6 @@ restart: */ if (ip) xfs_iunlock(ip, XFS_ILOCK_EXCL); - /* - * Save the hashchain version stamp, and unlock the chain, so that - * we don't keep the lock across a disk read - */ - version = h->qh_version; - mutex_unlock(&h->qh_lock); error = xfs_qm_dqread(mp, id, type, flags, &dqp); @@ -757,15 +690,14 @@ restart: if (error) return error; - /* - * Dquot lock comes after hashlock in the lock ordering - */ if (ip) { /* * A dquot could be attached to this inode by now, since * we had dropped the ilock. */ if (xfs_this_quota_on(mp, type)) { + struct xfs_dquot *dqp1; + dqp1 = xfs_inode_dquot(ip, type); if (dqp1) { xfs_qm_dqdestroy(dqp); @@ -780,51 +712,27 @@ restart: } } - /* - * Hashlock comes after ilock in lock order - */ - mutex_lock(&h->qh_lock); - if (version != h->qh_version) { - xfs_dquot_t *tmpdqp; + mutex_lock(&qi->qi_tree_lock); + error = -radix_tree_insert(tree, id, dqp); + if (unlikely(error)) { + WARN_ON(error != EEXIST); + /* - * Now, see if somebody else put the dquot in the - * hashtable before us. This can happen because we didn't - * keep the hashchain lock. We don't have to worry about - * lock order between the two dquots here since dqp isn't - * on any findable lists yet. + * Duplicate found. Just throw away the new dquot and start + * over. */ - switch (xfs_qm_dqlookup(mp, id, h, &tmpdqp)) { - case 0: - case -1: - /* - * Duplicate found, either in cache or on its way out. - * Just throw away the new dquot and start over. - */ - if (tmpdqp) - xfs_qm_dqput(tmpdqp); - mutex_unlock(&h->qh_lock); - xfs_qm_dqdestroy(dqp); - XFS_STATS_INC(xs_qm_dquot_dups); - goto restart; - default: - break; - } + mutex_unlock(&qi->qi_tree_lock); + trace_xfs_dqget_dup(dqp); + xfs_qm_dqdestroy(dqp); + XFS_STATS_INC(xs_qm_dquot_dups); + goto restart; } - /* - * Put the dquot at the beginning of the hash-chain and mp's list - * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock .. 
- */ - ASSERT(mutex_is_locked(&h->qh_lock)); - dqp->q_hash = h; - list_add(&dqp->q_hashlist, &h->qh_list); - h->qh_version++; - /* * Attach this dquot to this filesystem's list of all dquots, * kept inside the mount structure in m_quotainfo field */ - mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); + mutex_lock(&qi->qi_dqlist_lock); /* * We return a locked dquot to the caller, with a reference taken @@ -832,10 +740,11 @@ restart: xfs_dqlock(dqp); dqp->q_nrefs = 1; - list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist); - mp->m_quotainfo->qi_dquots++; - mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); - mutex_unlock(&h->qh_lock); + list_add(&dqp->q_mplist, &qi->qi_dqlist); + qi->qi_dquots++; + mutex_unlock(&qi->qi_dqlist_lock); + mutex_unlock(&qi->qi_tree_lock); + dqret: ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); trace_xfs_dqget_miss(dqp); @@ -1117,7 +1026,6 @@ xfs_qm_dqpurge( struct xfs_dquot *dqp) { struct xfs_mount *mp = dqp->q_mount; - struct xfs_dqhash *qh = dqp->q_hash; struct xfs_quotainfo *qi = mp->m_quotainfo; xfs_dqlock(dqp); @@ -1164,10 +1072,10 @@ xfs_qm_dqpurge( xfs_dqfunlock(dqp); xfs_dqunlock(dqp); - mutex_lock(&qh->qh_lock); - list_del_init(&dqp->q_hashlist); - qh->qh_version++; - mutex_unlock(&qh->qh_lock); + mutex_lock(&qi->qi_tree_lock); + radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags), + be32_to_cpu(dqp->q_core.d_id)); + mutex_unlock(&qi->qi_tree_lock); mutex_lock(&qi->qi_dqlist_lock); list_del_init(&dqp->q_mplist); diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index f291c25e5992..4061f1731271 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -29,16 +29,6 @@ * when quotas are off. */ -/* - * The hash chain headers (hash buckets) - */ -typedef struct xfs_dqhash { - struct list_head qh_list; - struct mutex qh_lock; - uint qh_version; /* ever increasing version */ - uint qh_nelems; /* number of dquots on the list */ -} xfs_dqhash_t; - struct xfs_mount; struct xfs_trans; @@ -49,8 +39,6 @@ typedef struct xfs_dquot { uint dq_flags; /* various flags (XFS_DQ_*) */ struct list_head q_lru; /* global free list of dquots */ struct list_head q_mplist; /* mount's list of dquots */ - struct list_head q_hashlist; /* gloabl hash list of dquots */ - xfs_dqhash_t *q_hash; /* the hashchain header */ struct xfs_mount*q_mount; /* filesystem this relates to */ struct xfs_trans*q_transp; /* trans this belongs to currently */ uint q_nrefs; /* # active refs from inodes */ diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index a2579e1d687f..bb884e701cd9 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -54,9 +54,6 @@ struct xfs_qm *xfs_Gqm; kmem_zone_t *qm_dqzone; kmem_zone_t *qm_dqtrxzone; -STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); -STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); - STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); @@ -68,37 +65,9 @@ STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); STATIC struct xfs_qm * xfs_Gqm_init(void) { - xfs_dqhash_t *udqhash, *gdqhash; xfs_qm_t *xqm; - size_t hsize; - uint i; - - /* - * Initialize the dquot hash tables. 
- */ - udqhash = kmem_zalloc_greedy(&hsize, - XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t), - XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t)); - if (!udqhash) - goto out; - - gdqhash = kmem_zalloc_large(hsize); - if (!gdqhash) - goto out_free_udqhash; - - hsize /= sizeof(xfs_dqhash_t); xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); - xqm->qm_dqhashmask = hsize - 1; - xqm->qm_usr_dqhtable = udqhash; - xqm->qm_grp_dqhtable = gdqhash; - ASSERT(xqm->qm_usr_dqhtable != NULL); - ASSERT(xqm->qm_grp_dqhtable != NULL); - - for (i = 0; i < hsize; i++) { - xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i); - xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); - } /* * dquot zone. we register our own low-memory callback. @@ -122,11 +91,6 @@ xfs_Gqm_init(void) xqm->qm_nrefs = 0; return xqm; - - out_free_udqhash: - kmem_free_large(udqhash); - out: - return NULL; } /* @@ -136,22 +100,9 @@ STATIC void xfs_qm_destroy( struct xfs_qm *xqm) { - int hsize, i; - ASSERT(xqm != NULL); ASSERT(xqm->qm_nrefs == 0); - hsize = xqm->qm_dqhashmask + 1; - for (i = 0; i < hsize; i++) { - xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); - xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); - } - kmem_free_large(xqm->qm_usr_dqhtable); - kmem_free_large(xqm->qm_grp_dqhtable); - xqm->qm_usr_dqhtable = NULL; - xqm->qm_grp_dqhtable = NULL; - xqm->qm_dqhashmask = 0; - kmem_free(xqm); } @@ -761,14 +712,6 @@ xfs_qm_dqdetach( } } -/* - * The hash chains and the mplist use the same xfs_dqhash structure as - * their list head, but we can take the mplist qh_lock and one of the - * hash qh_locks at the same time without any problem as they aren't - * related. - */ -static struct lock_class_key xfs_quota_mplist_class; - /* * This initializes all the quota information that's kept in the * mount structure @@ -802,9 +745,12 @@ xfs_qm_init_quotainfo( return error; } + INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS); + INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS); + mutex_init(&qinf->qi_tree_lock); + INIT_LIST_HEAD(&qinf->qi_dqlist); mutex_init(&qinf->qi_dqlist_lock); - lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class); INIT_LIST_HEAD(&qinf->qi_lru_list); qinf->qi_lru_count = 0; @@ -924,30 +870,6 @@ xfs_qm_destroy_quotainfo( mp->m_quotainfo = NULL; } - - -/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */ - -/* ARGSUSED */ -STATIC void -xfs_qm_list_init( - xfs_dqlist_t *list, - char *str, - int n) -{ - mutex_init(&list->qh_lock); - INIT_LIST_HEAD(&list->qh_list); - list->qh_version = 0; - list->qh_nelems = 0; -} - -STATIC void -xfs_qm_list_destroy( - xfs_dqlist_t *list) -{ - mutex_destroy(&(list->qh_lock)); -} - /* * Create an inode and return with a reference already taken, but unlocked * This is how we create quota inodes @@ -1592,10 +1514,10 @@ xfs_qm_dqfree_one( struct xfs_mount *mp = dqp->q_mount; struct xfs_quotainfo *qi = mp->m_quotainfo; - mutex_lock(&dqp->q_hash->qh_lock); - list_del_init(&dqp->q_hashlist); - dqp->q_hash->qh_version++; - mutex_unlock(&dqp->q_hash->qh_lock); + mutex_lock(&qi->qi_tree_lock); + radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags), + be32_to_cpu(dqp->q_core.d_id)); + mutex_unlock(&qi->qi_tree_lock); mutex_lock(&qi->qi_dqlist_lock); list_del_init(&dqp->q_mplist); @@ -1634,7 +1556,6 @@ xfs_qm_dqreclaim_one( return; } - ASSERT(dqp->q_hash); ASSERT(!list_empty(&dqp->q_mplist)); /* diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index c236bba9bfab..8f4b117823cc 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -30,12 +30,6 @@ extern struct xfs_qm 
*xfs_Gqm; extern kmem_zone_t *qm_dqzone; extern kmem_zone_t *qm_dqtrxzone; -/* - * Dquot hashtable constants/threshold values. - */ -#define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) -#define XFS_QM_HASHSIZE_HIGH ((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t)) - /* * This defines the unit of allocation of dquots. * Currently, it is just one file system block, and a 4K blk contains 30 @@ -47,15 +41,10 @@ extern kmem_zone_t *qm_dqtrxzone; */ #define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 -typedef xfs_dqhash_t xfs_dqlist_t; - /* * Quota Manager (global) structure. Lives only in core. */ typedef struct xfs_qm { - xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */ - xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */ - uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */ uint qm_nrefs; /* file systems with quota on */ kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ @@ -66,6 +55,9 @@ typedef struct xfs_qm { * The mount structure keeps a pointer to this. */ typedef struct xfs_quotainfo { + struct radix_tree_root qi_uquota_tree; + struct radix_tree_root qi_gquota_tree; + struct mutex qi_tree_lock; xfs_inode_t *qi_uquotaip; /* user quota inode */ xfs_inode_t *qi_gquotaip; /* group quota inode */ struct list_head qi_lru_list; @@ -94,6 +86,11 @@ typedef struct xfs_quotainfo { struct shrinker qi_shrinker; } xfs_quotainfo_t; +#define XFS_DQUOT_TREE(qi, type) \ + ((type & XFS_DQ_USER) ? \ + &((qi)->qi_uquota_tree) : \ + &((qi)->qi_gquota_tree)) + extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, diff --git a/fs/xfs/xfs_quota_priv.h b/fs/xfs/xfs_quota_priv.h index 94a3d927d716..6d86219d93da 100644 --- a/fs/xfs/xfs_quota_priv.h +++ b/fs/xfs/xfs_quota_priv.h @@ -24,17 +24,6 @@ */ #define XFS_DQITER_MAP_SIZE 10 -/* - * Hash into a bucket in the dquot hash table, based on . - */ -#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ - (__psunsigned_t)(id)) & \ - (xfs_Gqm->qm_dqhashmask - 1)) -#define XFS_DQ_HASH(mp, id, type) (type == XFS_DQ_USER ? \ - (xfs_Gqm->qm_usr_dqhtable + \ - XFS_DQ_HASHVAL(mp, id)) : \ - (xfs_Gqm->qm_grp_dqhtable + \ - XFS_DQ_HASHVAL(mp, id))) #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ !dqp->q_core.d_blk_hardlimit && \ !dqp->q_core.d_blk_softlimit && \ diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index ceaf6fe67e41..75eb54af4d58 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -741,10 +741,10 @@ DEFINE_DQUOT_EVENT(xfs_dqalloc); DEFINE_DQUOT_EVENT(xfs_dqtobp_read); DEFINE_DQUOT_EVENT(xfs_dqread); DEFINE_DQUOT_EVENT(xfs_dqread_fail); -DEFINE_DQUOT_EVENT(xfs_dqlookup_found); -DEFINE_DQUOT_EVENT(xfs_dqlookup_done); DEFINE_DQUOT_EVENT(xfs_dqget_hit); DEFINE_DQUOT_EVENT(xfs_dqget_miss); +DEFINE_DQUOT_EVENT(xfs_dqget_freeing); +DEFINE_DQUOT_EVENT(xfs_dqget_dup); DEFINE_DQUOT_EVENT(xfs_dqput); DEFINE_DQUOT_EVENT(xfs_dqput_wait); DEFINE_DQUOT_EVENT(xfs_dqput_free); -- cgit From b84a3a96751f93071c1863f2962273973c8b8f5e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Mar 2012 11:53:34 -0500 Subject: xfs: remove the per-filesystem list of dquots Instead of keeping a separate per-filesystem list of dquots we can walk the radix tree for the two places where we need to iterate all quota structures. 
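As an illustration only (not part of the patch): the walk that replaces the per-filesystem list is built on the batched radix-tree gang lookup, condensed in the sketch below. The names tree_walk() and item_index() are hypothetical; the real xfs_qm_dquot_walk() added in the diff that follows additionally takes qi_tree_lock around each batch, retries entries that return EAGAIN and bails out on EFSCORRUPTED.

#include <linux/radix-tree.h>

#define WALK_BATCH	32

/*
 * Visit every entry in @tree.  The gang lookup returns items, not
 * their indices, so the caller supplies item_index() to tell the walk
 * where to resume; XFS derives this from the dquot id,
 * be32_to_cpu(q_core.d_id) + 1.
 */
static void tree_walk(struct radix_tree_root *tree,
		      unsigned long (*item_index)(void *item),
		      void (*execute)(void *item))
{
	unsigned long next_index = 0;

	for (;;) {
		void *batch[WALK_BATCH];
		int nr_found, i;

		nr_found = radix_tree_gang_lookup(tree, batch,
						  next_index, WALK_BATCH);
		if (!nr_found)
			break;

		for (i = 0; i < nr_found; i++) {
			/* resume just past the last entry we have seen */
			next_index = item_index(batch[i]) + 1;
			execute(batch[i]);
		}
	}
}

Relying on an id stored in the object itself is what lets the real walk drop and re-take the tree lock between batches without losing its place.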
Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_dquot.c | 95 +---------- fs/xfs/xfs_dquot.h | 2 - fs/xfs/xfs_qm.c | 415 +++++++++++++++++++++++++---------------------- fs/xfs/xfs_qm.h | 6 +- fs/xfs/xfs_qm_syscalls.c | 12 +- 5 files changed, 226 insertions(+), 304 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 49456e555cfa..2896ac953ed6 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -44,10 +44,9 @@ * * ip->i_lock * qi->qi_tree_lock - * qi->qi_dqlist_lock - * dquot->q_qlock (xfs_dqlock() and friends) - * dquot->q_flush (xfs_dqflock() and friends) - * qi->qi_lru_lock + * dquot->q_qlock (xfs_dqlock() and friends) + * dquot->q_flush (xfs_dqflock() and friends) + * qi->qi_lru_lock * * If two dquots need to be locked the order is user before group/project, * otherwise by the lowest id first, see xfs_dqlock2. @@ -728,21 +727,13 @@ restart: goto restart; } - /* - * Attach this dquot to this filesystem's list of all dquots, - * kept inside the mount structure in m_quotainfo field - */ - mutex_lock(&qi->qi_dqlist_lock); - /* * We return a locked dquot to the caller, with a reference taken */ xfs_dqlock(dqp); dqp->q_nrefs = 1; - list_add(&dqp->q_mplist, &qi->qi_dqlist); qi->qi_dquots++; - mutex_unlock(&qi->qi_dqlist_lock); mutex_unlock(&qi->qi_tree_lock); dqret: @@ -1017,86 +1008,6 @@ xfs_dqlock2( } } -/* - * Take a dquot out of the mount's dqlist as well as the hashlist. This is - * called via unmount as well as quotaoff, and the purge will always succeed. - */ -void -xfs_qm_dqpurge( - struct xfs_dquot *dqp) -{ - struct xfs_mount *mp = dqp->q_mount; - struct xfs_quotainfo *qi = mp->m_quotainfo; - - xfs_dqlock(dqp); - - /* - * If we're turning off quotas, we have to make sure that, for - * example, we don't delete quota disk blocks while dquots are - * in the process of getting written to those disk blocks. - * This dquot might well be on AIL, and we can't leave it there - * if we're turning off quotas. Basically, we need this flush - * lock, and are willing to block on it. - */ - if (!xfs_dqflock_nowait(dqp)) { - /* - * Block on the flush lock after nudging dquot buffer, - * if it is incore. - */ - xfs_dqflock_pushbuf_wait(dqp); - } - - /* - * If we are turning this type of quotas off, we don't care - * about the dirty metadata sitting in this dquot. OTOH, if - * we're unmounting, we do care, so we flush it and wait. - */ - if (XFS_DQ_IS_DIRTY(dqp)) { - int error; - - /* - * We don't care about getting disk errors here. We need - * to purge this dquot anyway, so we go ahead regardless. - */ - error = xfs_qm_dqflush(dqp, SYNC_WAIT); - if (error) - xfs_warn(mp, "%s: dquot %p flush failed", - __func__, dqp); - xfs_dqflock(dqp); - } - - ASSERT(atomic_read(&dqp->q_pincount) == 0); - ASSERT(XFS_FORCED_SHUTDOWN(mp) || - !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); - - xfs_dqfunlock(dqp); - xfs_dqunlock(dqp); - - mutex_lock(&qi->qi_tree_lock); - radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags), - be32_to_cpu(dqp->q_core.d_id)); - mutex_unlock(&qi->qi_tree_lock); - - mutex_lock(&qi->qi_dqlist_lock); - list_del_init(&dqp->q_mplist); - qi->qi_dqreclaims++; - qi->qi_dquots--; - mutex_unlock(&qi->qi_dqlist_lock); - - /* - * We move dquots to the freelist as soon as their reference count - * hits zero, so it really should be on the freelist here. 
- */ - mutex_lock(&qi->qi_lru_lock); - ASSERT(!list_empty(&dqp->q_lru)); - list_del_init(&dqp->q_lru); - qi->qi_lru_count--; - XFS_STATS_DEC(xs_qm_dquot_unused); - mutex_unlock(&qi->qi_lru_lock); - - xfs_qm_dqdestroy(dqp); -} - /* * Give the buffer a little push if it is incore and * wait on the flush lock. diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 4061f1731271..ef9190bd8b30 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -38,7 +38,6 @@ struct xfs_trans; typedef struct xfs_dquot { uint dq_flags; /* various flags (XFS_DQ_*) */ struct list_head q_lru; /* global free list of dquots */ - struct list_head q_mplist; /* mount's list of dquots */ struct xfs_mount*q_mount; /* filesystem this relates to */ struct xfs_trans*q_transp; /* trans this belongs to currently */ uint q_nrefs; /* # active refs from inodes */ @@ -143,7 +142,6 @@ extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint, uint, struct xfs_dquot **); extern void xfs_qm_dqdestroy(xfs_dquot_t *); extern int xfs_qm_dqflush(xfs_dquot_t *, uint); -extern void xfs_qm_dqpurge(xfs_dquot_t *); extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, xfs_disk_dquot_t *); diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index bb884e701cd9..2f92d3b0d8a8 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -168,6 +168,187 @@ xfs_qm_rele_quotafs_ref( mutex_unlock(&xfs_Gqm_lock); } +/* + * We use the batch lookup interface to iterate over the dquots as it + * currently is the only interface into the radix tree code that allows + * fuzzy lookups instead of exact matches. Holding the lock over multiple + * operations is fine as all callers are used either during mount/umount + * or quotaoff. + */ +#define XFS_DQ_LOOKUP_BATCH 32 + +STATIC int +xfs_qm_dquot_walk( + struct xfs_mount *mp, + int type, + int (*execute)(struct xfs_dquot *dqp)) +{ + struct xfs_quotainfo *qi = mp->m_quotainfo; + struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type); + uint32_t next_index; + int last_error = 0; + int skipped; + int nr_found; + +restart: + skipped = 0; + next_index = 0; + nr_found = 0; + + while (1) { + struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH]; + int error = 0; + int i; + + mutex_lock(&qi->qi_tree_lock); + nr_found = radix_tree_gang_lookup(tree, (void **)batch, + next_index, XFS_DQ_LOOKUP_BATCH); + if (!nr_found) { + mutex_unlock(&qi->qi_tree_lock); + break; + } + + for (i = 0; i < nr_found; i++) { + struct xfs_dquot *dqp = batch[i]; + + next_index = be32_to_cpu(dqp->q_core.d_id) + 1; + + error = execute(batch[i]); + if (error == EAGAIN) { + skipped++; + continue; + } + if (error && last_error != EFSCORRUPTED) + last_error = error; + } + + mutex_unlock(&qi->qi_tree_lock); + + /* bail out if the filesystem is corrupted. */ + if (last_error == EFSCORRUPTED) { + skipped = 0; + break; + } + } + + if (skipped) { + delay(1); + goto restart; + } + + return last_error; +} + + +/* + * Purge a dquot from all tracking data structures and free it. + */ +STATIC int +xfs_qm_dqpurge( + struct xfs_dquot *dqp) +{ + struct xfs_mount *mp = dqp->q_mount; + struct xfs_quotainfo *qi = mp->m_quotainfo; + struct xfs_dquot *gdqp = NULL; + + xfs_dqlock(dqp); + if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { + xfs_dqunlock(dqp); + return EAGAIN; + } + + /* + * If this quota has a group hint attached, prepare for releasing it + * now. 
+ */ + gdqp = dqp->q_gdquot; + if (gdqp) { + xfs_dqlock(gdqp); + dqp->q_gdquot = NULL; + } + + dqp->dq_flags |= XFS_DQ_FREEING; + + /* + * If we're turning off quotas, we have to make sure that, for + * example, we don't delete quota disk blocks while dquots are + * in the process of getting written to those disk blocks. + * This dquot might well be on AIL, and we can't leave it there + * if we're turning off quotas. Basically, we need this flush + * lock, and are willing to block on it. + */ + if (!xfs_dqflock_nowait(dqp)) { + /* + * Block on the flush lock after nudging dquot buffer, + * if it is incore. + */ + xfs_dqflock_pushbuf_wait(dqp); + } + + /* + * If we are turning this type of quotas off, we don't care + * about the dirty metadata sitting in this dquot. OTOH, if + * we're unmounting, we do care, so we flush it and wait. + */ + if (XFS_DQ_IS_DIRTY(dqp)) { + int error; + + /* + * We don't care about getting disk errors here. We need + * to purge this dquot anyway, so we go ahead regardless. + */ + error = xfs_qm_dqflush(dqp, SYNC_WAIT); + if (error) + xfs_warn(mp, "%s: dquot %p flush failed", + __func__, dqp); + xfs_dqflock(dqp); + } + + ASSERT(atomic_read(&dqp->q_pincount) == 0); + ASSERT(XFS_FORCED_SHUTDOWN(mp) || + !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); + + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + + radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags), + be32_to_cpu(dqp->q_core.d_id)); + qi->qi_dquots--; + + /* + * We move dquots to the freelist as soon as their reference count + * hits zero, so it really should be on the freelist here. + */ + mutex_lock(&qi->qi_lru_lock); + ASSERT(!list_empty(&dqp->q_lru)); + list_del_init(&dqp->q_lru); + qi->qi_lru_count--; + XFS_STATS_DEC(xs_qm_dquot_unused); + mutex_unlock(&qi->qi_lru_lock); + + xfs_qm_dqdestroy(dqp); + + if (gdqp) + xfs_qm_dqput(gdqp); + return 0; +} + +/* + * Purge the dquot cache. + */ +void +xfs_qm_dqpurge_all( + struct xfs_mount *mp, + uint flags) +{ + if (flags & XFS_QMOPT_UQUOTA) + xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge); + if (flags & XFS_QMOPT_GQUOTA) + xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge); + if (flags & XFS_QMOPT_PQUOTA) + xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge); +} + /* * Just destroy the quotainfo structure. */ @@ -306,175 +487,6 @@ xfs_qm_unmount_quotas( } } -/* - * Flush all dquots of the given file system to disk. The dquots are - * _not_ purged from memory here, just their data written to disk. - */ -STATIC int -xfs_qm_dqflush_all( - struct xfs_mount *mp) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - int recl; - struct xfs_dquot *dqp; - int error; - - if (!q) - return 0; -again: - mutex_lock(&q->qi_dqlist_lock); - list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { - xfs_dqlock(dqp); - if ((dqp->dq_flags & XFS_DQ_FREEING) || - !XFS_DQ_IS_DIRTY(dqp)) { - xfs_dqunlock(dqp); - continue; - } - - /* XXX a sentinel would be better */ - recl = q->qi_dqreclaims; - if (!xfs_dqflock_nowait(dqp)) { - /* - * If we can't grab the flush lock then check - * to see if the dquot has been flushed delayed - * write. If so, grab its buffer and send it - * out immediately. We'll be able to acquire - * the flush lock when the I/O completes. - */ - xfs_dqflock_pushbuf_wait(dqp); - } - /* - * Let go of the mplist lock. We don't want to hold it - * across a disk write. 
- */ - mutex_unlock(&q->qi_dqlist_lock); - error = xfs_qm_dqflush(dqp, 0); - xfs_dqunlock(dqp); - if (error) - return error; - - mutex_lock(&q->qi_dqlist_lock); - if (recl != q->qi_dqreclaims) { - mutex_unlock(&q->qi_dqlist_lock); - /* XXX restart limit */ - goto again; - } - } - - mutex_unlock(&q->qi_dqlist_lock); - /* return ! busy */ - return 0; -} - -/* - * Release the group dquot pointers the user dquots may be - * carrying around as a hint. mplist is locked on entry and exit. - */ -STATIC void -xfs_qm_detach_gdquots( - struct xfs_mount *mp) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_dquot *dqp, *gdqp; - - again: - ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); - list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { - xfs_dqlock(dqp); - if (dqp->dq_flags & XFS_DQ_FREEING) { - xfs_dqunlock(dqp); - mutex_unlock(&q->qi_dqlist_lock); - delay(1); - mutex_lock(&q->qi_dqlist_lock); - goto again; - } - - gdqp = dqp->q_gdquot; - if (gdqp) - dqp->q_gdquot = NULL; - xfs_dqunlock(dqp); - - if (gdqp) - xfs_qm_dqrele(gdqp); - } -} - -/* - * Go through all the incore dquots of this file system and take them - * off the mplist and hashlist, if the dquot type matches the dqtype - * parameter. This is used when turning off quota accounting for - * users and/or groups, as well as when the filesystem is unmounting. - */ -STATIC int -xfs_qm_dqpurge_int( - struct xfs_mount *mp, - uint flags) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_dquot *dqp, *n; - uint dqtype; - int nmisses = 0; - LIST_HEAD (dispose_list); - - if (!q) - return 0; - - dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; - dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; - dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; - - mutex_lock(&q->qi_dqlist_lock); - - /* - * In the first pass through all incore dquots of this filesystem, - * we release the group dquot pointers the user dquots may be - * carrying around as a hint. We need to do this irrespective of - * what's being turned off. - */ - xfs_qm_detach_gdquots(mp); - - /* - * Try to get rid of all of the unwanted dquots. - */ - list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) { - xfs_dqlock(dqp); - if ((dqp->dq_flags & dqtype) != 0 && - !(dqp->dq_flags & XFS_DQ_FREEING)) { - if (dqp->q_nrefs == 0) { - dqp->dq_flags |= XFS_DQ_FREEING; - list_move_tail(&dqp->q_mplist, &dispose_list); - } else - nmisses++; - } - xfs_dqunlock(dqp); - } - mutex_unlock(&q->qi_dqlist_lock); - - list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist) - xfs_qm_dqpurge(dqp); - - return nmisses; -} - -int -xfs_qm_dqpurge_all( - xfs_mount_t *mp, - uint flags) -{ - int ndquots; - - /* - * Purge the dquot cache. - * None of the dquots should really be busy at this point. 
- */ - if (mp->m_quotainfo) { - while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) { - delay(ndquots * 10); - } - } - return 0; -} - STATIC int xfs_qm_dqattach_one( xfs_inode_t *ip, @@ -749,15 +761,10 @@ xfs_qm_init_quotainfo( INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS); mutex_init(&qinf->qi_tree_lock); - INIT_LIST_HEAD(&qinf->qi_dqlist); - mutex_init(&qinf->qi_dqlist_lock); - INIT_LIST_HEAD(&qinf->qi_lru_list); qinf->qi_lru_count = 0; mutex_init(&qinf->qi_lru_lock); - qinf->qi_dqreclaims = 0; - /* mutex used to serialize quotaoffs */ mutex_init(&qinf->qi_quotaofflock); @@ -854,9 +861,6 @@ xfs_qm_destroy_quotainfo( */ xfs_qm_rele_quotafs_ref(mp); - ASSERT(list_empty(&qi->qi_dqlist)); - mutex_destroy(&qi->qi_dqlist_lock); - if (qi->qi_uquotaip) { IRELE(qi->qi_uquotaip); qi->qi_uquotaip = NULL; /* paranoia */ @@ -1307,6 +1311,28 @@ error0: return error; } +STATIC int +xfs_qm_flush_one( + struct xfs_dquot *dqp) +{ + int error = 0; + + xfs_dqlock(dqp); + if (dqp->dq_flags & XFS_DQ_FREEING) + goto out_unlock; + if (!XFS_DQ_IS_DIRTY(dqp)) + goto out_unlock; + + if (!xfs_dqflock_nowait(dqp)) + xfs_dqflock_pushbuf_wait(dqp); + + error = xfs_qm_dqflush(dqp, 0); + +out_unlock: + xfs_dqunlock(dqp); + return error; +} + /* * Walk thru all the filesystem inodes and construct a consistent view * of the disk quota world. If the quotacheck fails, disable quotas. @@ -1315,7 +1341,7 @@ int xfs_qm_quotacheck( xfs_mount_t *mp) { - int done, count, error; + int done, count, error, error2; xfs_ino_t lastino; size_t structsz; xfs_inode_t *uip, *gip; @@ -1329,12 +1355,6 @@ xfs_qm_quotacheck( ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - /* - * There should be no cached dquots. The (simplistic) quotacheck - * algorithm doesn't like that. - */ - ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); - xfs_notice(mp, "Quotacheck needed: Please wait."); /* @@ -1373,12 +1393,21 @@ xfs_qm_quotacheck( } while (!done); /* - * We've made all the changes that we need to make incore. - * Flush them down to disk buffers if everything was updated - * successfully. + * We've made all the changes that we need to make incore. Flush them + * down to disk buffers if everything was updated successfully. */ - if (!error) - error = xfs_qm_dqflush_all(mp); + if (XFS_IS_UQUOTA_ON(mp)) + error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one); + if (XFS_IS_GQUOTA_ON(mp)) { + error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one); + if (!error) + error = error2; + } + if (XFS_IS_PQUOTA_ON(mp)) { + error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one); + if (!error) + error = error2; + } /* * We can get this error if we couldn't do a dquot allocation inside @@ -1517,13 +1546,9 @@ xfs_qm_dqfree_one( mutex_lock(&qi->qi_tree_lock); radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags), be32_to_cpu(dqp->q_core.d_id)); - mutex_unlock(&qi->qi_tree_lock); - mutex_lock(&qi->qi_dqlist_lock); - list_del_init(&dqp->q_mplist); qi->qi_dquots--; - qi->qi_dqreclaims++; - mutex_unlock(&qi->qi_dqlist_lock); + mutex_unlock(&qi->qi_tree_lock); xfs_qm_dqdestroy(dqp); } @@ -1556,8 +1581,6 @@ xfs_qm_dqreclaim_one( return; } - ASSERT(!list_empty(&dqp->q_mplist)); - /* * Try to grab the flush lock. If this dquot is in the process of * getting flushed to disk, we don't want to reclaim it. 
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 8f4b117823cc..76447060cd47 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -63,11 +63,7 @@ typedef struct xfs_quotainfo { struct list_head qi_lru_list; struct mutex qi_lru_lock; int qi_lru_count; - struct list_head qi_dqlist; /* all dquots in filesys */ - struct mutex qi_dqlist_lock; int qi_dquots; - int qi_dqreclaims; /* a change here indicates - a removal in the dqlist */ time_t qi_btimelimit; /* limit for blks timer */ time_t qi_itimelimit; /* limit for inodes timer */ time_t qi_rtbtimelimit;/* limit for rt blks timer */ @@ -126,7 +122,7 @@ extern int xfs_qm_quotacheck(xfs_mount_t *); extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); /* dquot stuff */ -extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); +extern void xfs_qm_dqpurge_all(xfs_mount_t *, uint); extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); /* quota ops */ diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index b9ac268a2d7c..c4f396e437a8 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -66,7 +66,6 @@ xfs_qm_scall_quotaoff( int error; uint inactivate_flags; xfs_qoff_logitem_t *qoffstart; - int nculprits; /* * No file system can have quotas enabled on disk but not in core. @@ -172,18 +171,13 @@ xfs_qm_scall_quotaoff( * This isn't protected by a particular lock directly, because we * don't want to take a mrlock every time we depend on quotas being on. */ - mp->m_qflags &= ~(flags); + mp->m_qflags &= ~flags; /* * Go through all the dquots of this file system and purge them, - * according to what was turned off. We may not be able to get rid - * of all dquots, because dquots can have temporary references that - * are not attached to inodes. eg. xfs_setattr, xfs_create. - * So, if we couldn't purge all the dquots from the filesystem, - * we can't get rid of the incore data structures. + * according to what was turned off. */ - while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype))) - delay(10 * nculprits); + xfs_qm_dqpurge_all(mp, dqtype); /* * Transactions that had started before ACTIVE state bit was cleared -- cgit From a05931ceb0160deadbd7798d60d01b17f2d81b09 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 2012 08:52:37 +0000 Subject: xfs: remove the global xfs_Gqm structure If we initialize the slab caches for the quota code when XFS is loaded there is no need for a global and reference counted quota manager structure. Drop all this overhead and also fix the error handling during quota initialization. 
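[Illustration, not part of the patch: a minimal sketch of the pattern adopted here, creating the caches once at module load and unwinding with gotos if a later step fails so nothing is leaked on the error path. It uses the generic kmem_cache_* API and made-up "demo" names purely for illustration; the patch itself goes through XFS's kmem_zone_* wrappers, as the xfs_qm_init()/xfs_qm_exit() hunks below show.]

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_dquot { int id; };		/* placeholder payloads */
struct demo_dqtrx { int delta; };

static struct kmem_cache *demo_dquot_cache;
static struct kmem_cache *demo_dqtrx_cache;

static int __init demo_quota_init(void)
{
	demo_dquot_cache = kmem_cache_create("demo_dquot",
			sizeof(struct demo_dquot), 0, 0, NULL);
	if (!demo_dquot_cache)
		goto out;

	demo_dqtrx_cache = kmem_cache_create("demo_dqtrx",
			sizeof(struct demo_dqtrx), 0, 0, NULL);
	if (!demo_dqtrx_cache)
		goto out_free_dquot_cache;

	return 0;

out_free_dquot_cache:
	kmem_cache_destroy(demo_dquot_cache);
out:
	return -ENOMEM;
}

static void __exit demo_quota_exit(void)
{
	/* Tear down in reverse order of creation. */
	kmem_cache_destroy(demo_dqtrx_cache);
	kmem_cache_destroy(demo_dquot_cache);
}

module_init(demo_quota_init);
module_exit(demo_quota_exit);
MODULE_LICENSE("GPL");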
Reviewed-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_dquot.c | 35 ++++++++++++- fs/xfs/xfs_qm.c | 132 ----------------------------------------------- fs/xfs/xfs_qm.h | 15 +----- fs/xfs/xfs_qm_bhv.c | 16 ------ fs/xfs/xfs_super.c | 10 ++-- fs/xfs/xfs_super.h | 8 ++- fs/xfs/xfs_trans_dquot.c | 4 +- 7 files changed, 46 insertions(+), 174 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 2896ac953ed6..4be16a0cbe5a 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -59,6 +59,9 @@ int xfs_dqreq_num; int xfs_dqerror_mod = 33; #endif +struct kmem_zone *xfs_qm_dqtrxzone; +static struct kmem_zone *xfs_qm_dqzone; + static struct lock_class_key xfs_dquot_other_class; /* @@ -71,7 +74,7 @@ xfs_qm_dqdestroy( ASSERT(list_empty(&dqp->q_lru)); mutex_destroy(&dqp->q_qlock); - kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); + kmem_zone_free(xfs_qm_dqzone, dqp); XFS_STATS_DEC(xs_qm_dquot); } @@ -491,7 +494,7 @@ xfs_qm_dqread( int cancelflags = 0; - dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); + dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP); dqp->dq_flags = type; dqp->q_core.d_id = cpu_to_be32(id); @@ -1040,3 +1043,31 @@ xfs_dqflock_pushbuf_wait( out_lock: xfs_dqflock(dqp); } + +int __init +xfs_qm_init(void) +{ + xfs_qm_dqzone = + kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot"); + if (!xfs_qm_dqzone) + goto out; + + xfs_qm_dqtrxzone = + kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx"); + if (!xfs_qm_dqtrxzone) + goto out_free_dqzone; + + return 0; + +out_free_dqzone: + kmem_zone_destroy(xfs_qm_dqzone); +out: + return -ENOMEM; +} + +void __exit +xfs_qm_exit(void) +{ + kmem_zone_destroy(xfs_qm_dqtrxzone); + kmem_zone_destroy(xfs_qm_dqzone); +} diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 2f92d3b0d8a8..55c6afedc879 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -48,126 +48,10 @@ * quota functionality, including maintaining the freelist and hash * tables of dquots. */ -struct mutex xfs_Gqm_lock; -struct xfs_qm *xfs_Gqm; - -kmem_zone_t *qm_dqzone; -kmem_zone_t *qm_dqtrxzone; - STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); -/* - * Initialize the XQM structure. - * Note that there is not one quota manager per file system. - */ -STATIC struct xfs_qm * -xfs_Gqm_init(void) -{ - xfs_qm_t *xqm; - - xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); - - /* - * dquot zone. we register our own low-memory callback. - */ - if (!qm_dqzone) { - xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t), - "xfs_dquots"); - qm_dqzone = xqm->qm_dqzone; - } else - xqm->qm_dqzone = qm_dqzone; - - /* - * The t_dqinfo portion of transactions. - */ - if (!qm_dqtrxzone) { - xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t), - "xfs_dqtrx"); - qm_dqtrxzone = xqm->qm_dqtrxzone; - } else - xqm->qm_dqtrxzone = qm_dqtrxzone; - - xqm->qm_nrefs = 0; - return xqm; -} - -/* - * Destroy the global quota manager when its reference count goes to zero. - */ -STATIC void -xfs_qm_destroy( - struct xfs_qm *xqm) -{ - ASSERT(xqm != NULL); - ASSERT(xqm->qm_nrefs == 0); - - kmem_free(xqm); -} - -/* - * Called at mount time to let XQM know that another file system is - * starting quotas. This isn't crucial information as the individual mount - * structures are pretty independent, but it helps the XQM keep a - * global view of what's going on. 
- */ -/* ARGSUSED */ -STATIC int -xfs_qm_hold_quotafs_ref( - struct xfs_mount *mp) -{ - /* - * Need to lock the xfs_Gqm structure for things like this. For example, - * the structure could disappear between the entry to this routine and - * a HOLD operation if not locked. - */ - mutex_lock(&xfs_Gqm_lock); - - if (!xfs_Gqm) { - xfs_Gqm = xfs_Gqm_init(); - if (!xfs_Gqm) { - mutex_unlock(&xfs_Gqm_lock); - return ENOMEM; - } - } - - /* - * We can keep a list of all filesystems with quotas mounted for - * debugging and statistical purposes, but ... - * Just take a reference and get out. - */ - xfs_Gqm->qm_nrefs++; - mutex_unlock(&xfs_Gqm_lock); - - return 0; -} - - -/* - * Release the reference that a filesystem took at mount time, - * so that we know when we need to destroy the entire quota manager. - */ -/* ARGSUSED */ -STATIC void -xfs_qm_rele_quotafs_ref( - struct xfs_mount *mp) -{ - ASSERT(xfs_Gqm); - ASSERT(xfs_Gqm->qm_nrefs > 0); - - /* - * Destroy the entire XQM. If somebody mounts with quotaon, this'll - * be restarted. - */ - mutex_lock(&xfs_Gqm_lock); - if (--xfs_Gqm->qm_nrefs == 0) { - xfs_qm_destroy(xfs_Gqm); - xfs_Gqm = NULL; - } - mutex_unlock(&xfs_Gqm_lock); -} - /* * We use the batch lookup interface to iterate over the dquots as it * currently is the only interface into the radix tree code that allows @@ -738,13 +622,6 @@ xfs_qm_init_quotainfo( ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - /* - * Tell XQM that we exist as soon as possible. - */ - if ((error = xfs_qm_hold_quotafs_ref(mp))) { - return error; - } - qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); /* @@ -850,17 +727,9 @@ xfs_qm_destroy_quotainfo( qi = mp->m_quotainfo; ASSERT(qi != NULL); - ASSERT(xfs_Gqm != NULL); unregister_shrinker(&qi->qi_shrinker); - /* - * Release the reference that XQM kept, so that we know - * when the XQM structure should be freed. We cannot assume - * that xfs_Gqm is non-null after this point. - */ - xfs_qm_rele_quotafs_ref(mp); - if (qi->qi_uquotaip) { IRELE(qi->qi_uquotaip); qi->qi_uquotaip = NULL; /* paranoia */ @@ -1447,7 +1316,6 @@ xfs_qm_quotacheck( * We must turn off quotas. */ ASSERT(mp->m_quotainfo != NULL); - ASSERT(xfs_Gqm != NULL); xfs_qm_destroy_quotainfo(mp); if (xfs_mount_reset_sbqflags(mp)) { xfs_warn(mp, diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 76447060cd47..44b858b79d71 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -22,13 +22,9 @@ #include "xfs_dquot.h" #include "xfs_quota_priv.h" -struct xfs_qm; struct xfs_inode; -extern struct mutex xfs_Gqm_lock; -extern struct xfs_qm *xfs_Gqm; -extern kmem_zone_t *qm_dqzone; -extern kmem_zone_t *qm_dqtrxzone; +extern struct kmem_zone *xfs_qm_dqtrxzone; /* * This defines the unit of allocation of dquots. @@ -41,15 +37,6 @@ extern kmem_zone_t *qm_dqtrxzone; */ #define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 -/* - * Quota Manager (global) structure. Lives only in core. - */ -typedef struct xfs_qm { - uint qm_nrefs; /* file systems with quota on */ - kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ - kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ -} xfs_qm_t; - /* * Various quota information for individual filesystems. * The mount structure keeps a pointer to this. 
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index 809f86857c6d..e6986b5d80d8 100644 --- a/fs/xfs/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c @@ -156,19 +156,3 @@ xfs_qm_newmount( return 0; } - -void __init -xfs_qm_init(void) -{ - printk(KERN_INFO "SGI XFS Quota Management subsystem\n"); - mutex_init(&xfs_Gqm_lock); -} - -void __exit -xfs_qm_exit(void) -{ - if (qm_dqzone) - kmem_zone_destroy(qm_dqzone); - if (qm_dqtrxzone) - kmem_zone_destroy(qm_dqtrxzone); -} diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index e9ad7894648e..06d23b976f4c 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1654,13 +1654,17 @@ init_xfs_fs(void) if (error) goto out_cleanup_procfs; - vfs_initquota(); + error = xfs_qm_init(); + if (error) + goto out_sysctl_unregister; error = register_filesystem(&xfs_fs_type); if (error) - goto out_sysctl_unregister; + goto out_qm_exit; return 0; + out_qm_exit: + xfs_qm_exit(); out_sysctl_unregister: xfs_sysctl_unregister(); out_cleanup_procfs: @@ -1682,7 +1686,7 @@ init_xfs_fs(void) STATIC void __exit exit_xfs_fs(void) { - vfs_exitquota(); + xfs_qm_exit(); unregister_filesystem(&xfs_fs_type); xfs_sysctl_unregister(); xfs_cleanup_procfs(); diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h index 50a3266c999e..09b0c26b2245 100644 --- a/fs/xfs/xfs_super.h +++ b/fs/xfs/xfs_super.h @@ -21,13 +21,11 @@ #include #ifdef CONFIG_XFS_QUOTA -extern void xfs_qm_init(void); +extern int xfs_qm_init(void); extern void xfs_qm_exit(void); -# define vfs_initquota() xfs_qm_init() -# define vfs_exitquota() xfs_qm_exit() #else -# define vfs_initquota() do { } while (0) -# define vfs_exitquota() do { } while (0) +# define xfs_qm_init() (0) +# define xfs_qm_exit() do { } while (0) #endif #ifdef CONFIG_XFS_POSIX_ACL diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 877fe6367d2d..279099717ed2 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -875,7 +875,7 @@ STATIC void xfs_trans_alloc_dqinfo( xfs_trans_t *tp) { - tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); + tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP); } void @@ -884,6 +884,6 @@ xfs_trans_free_dqinfo( { if (!tp->t_dqinfo) return; - kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo); + kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo); tp->t_dqinfo = NULL; } -- cgit From 8d2a5e6ee37f205b3d76c223d4f3f88ba1d06220 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 7 Mar 2012 04:50:19 +0000 Subject: xfs: clean up minor sparse warnings Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_dir2_block.c | 1 + fs/xfs/xfs_ioctl32.c | 2 +- fs/xfs/xfs_iops.c | 13 ++++++++----- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 9245e029b8ea..d3b63aefd01d 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -29,6 +29,7 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" +#include "xfs_dir2.h" #include "xfs_dir2_format.h" #include "xfs_dir2_priv.h" #include "xfs_error.h" diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index f9ccb7b7c043..a849a5473aff 100644 --- a/fs/xfs/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c @@ -293,7 +293,7 @@ xfs_compat_ioc_bulkstat( int res; error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, - sizeof(compat_xfs_bstat_t), 0, &res); + sizeof(compat_xfs_bstat_t), NULL, &res); } else if (cmd == XFS_IOC_FSBULKSTAT_32) { error = xfs_bulkstat(mp, &inlast, 
&count, xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 7c01cda16727..3011b879f850 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -50,12 +50,15 @@ #include #include -int xfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, - void *fs_info) +static int +xfs_initxattrs( + struct inode *inode, + const struct xattr *xattr_array, + void *fs_info) { - const struct xattr *xattr; - struct xfs_inode *ip = XFS_I(inode); - int error = 0; + const struct xattr *xattr; + struct xfs_inode *ip = XFS_I(inode); + int error = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { error = xfs_attr_set(ip, xattr->name, xattr->value, -- cgit From 95a13f7b33be87d85d8e6652126a3f4d64d164db Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 14 Mar 2012 21:55:01 -0400 Subject: NFS: Fix a compile error when !defined NFS_DEBUG We should use the 'ifdebug' wrapper rather than trying to inline tests of nfs_debug, so that the code compiles correctly when we don't define NFS_DEBUG. Reported-by: Paul Gortmaker Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f7e064d997f6..c74fdb114b48 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3440,7 +3440,7 @@ static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path) status = decode_opaque_inline(xdr, &component->len, &component->data); if (unlikely(status != 0)) goto out_eio; - if (unlikely(nfs_debug & NFSDBG_XDR)) + ifdebug (XDR) pr_cont("%s%.*s ", (path->ncomponents != n ? "/ " : ""), component->len, component->data); -- cgit From f30d500f809eca67a21704347ab14bb35877b5ee Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 7 Mar 2012 04:50:25 +0000 Subject: xfs: fix inode lookup race When we get concurrent lookups of the same inode that is not in the per-AG inode cache, there is a race condition that triggers warnings in unlock_new_inode() indicating that we are initialising an inode that isn't in a the correct state for a new inode. When we do an inode lookup via a file handle or a bulkstat, we don't serialise lookups at a higher level through the dentry cache (i.e. pathless lookup), and so we can get concurrent lookups of the same inode. The race condition is between the insertion of the inode into the cache in the case of a cache miss and a concurrently lookup: Thread 1 Thread 2 xfs_iget() xfs_iget_cache_miss() xfs_iread() lock radix tree radix_tree_insert() rcu_read_lock radix_tree_lookup lock inode flags XFS_INEW not set igrab() unlock inode flags rcu_read_unlock use uninitialised inode ..... lock inode flags set XFS_INEW unlock inode flags unlock radix tree xfs_setup_inode() inode flags = I_NEW unlock_new_inode() WARNING as inode flags != I_NEW This can lead to inode corruption, inode list corruption, etc, and is generally a bad thing to occur. Fix this by setting XFS_INEW before inserting the inode into the radix tree. This will ensure any concurrent lookup will find the new inode with XFS_INEW set and that forces the lookup to wait until the XFS_INEW flag is removed before allowing the lookup to succeed. 
cc: # for 3.0.x, 3.2.x Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_iget.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index af3f30a3d9c2..a98cb4524e6c 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -349,9 +349,20 @@ xfs_iget_cache_miss( BUG(); } - spin_lock(&pag->pag_ici_lock); + /* + * These values must be set before inserting the inode into the radix + * tree as the moment it is inserted a concurrent lookup (allowed by the + * RCU locking mechanism) can find it and that lookup must see that this + * is an inode currently under construction (i.e. that XFS_INEW is set). + * The ip->i_flags_lock that protects the XFS_INEW flag forms the + * memory barrier that ensures this detection works correctly at lookup + * time. + */ + ip->i_udquot = ip->i_gdquot = NULL; + xfs_iflags_set(ip, XFS_INEW); /* insert the new inode */ + spin_lock(&pag->pag_ici_lock); error = radix_tree_insert(&pag->pag_ici_root, agino, ip); if (unlikely(error)) { WARN_ON(error != -EEXIST); @@ -359,11 +370,6 @@ xfs_iget_cache_miss( error = EAGAIN; goto out_preload_end; } - - /* These values _must_ be set before releasing the radix tree lock! */ - ip->i_udquot = ip->i_gdquot = NULL; - xfs_iflags_set(ip, XFS_INEW); - spin_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); -- cgit From 6eb2466036358078aed9a65d702cbc97baf0ce65 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 7 Mar 2012 04:50:24 +0000 Subject: xfs: remove remaining scraps of struct xfs_iomap Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_vnode.h | 1 - fs/xfs/xfs_vnodeops.h | 3 --- 2 files changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_vnode.h b/fs/xfs/xfs_vnode.h index 7c220b4227bc..db14d0c08682 100644 --- a/fs/xfs/xfs_vnode.h +++ b/fs/xfs/xfs_vnode.h @@ -22,7 +22,6 @@ struct file; struct xfs_inode; -struct xfs_iomap; struct attrlist_cursor_kern; /* diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 0c877cbde142..447e146b2ba6 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -10,7 +10,6 @@ struct kiocb; struct pipe_inode_info; struct uio; struct xfs_inode; -struct xfs_iomap; int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, int flags); @@ -49,8 +48,6 @@ int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name, int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags); int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize, int flags, struct attrlist_cursor_kern *cursor); -int xfs_bmap(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, - int flags, struct xfs_iomap *iomapp, int *niomaps); void xfs_tosspages(struct xfs_inode *inode, xfs_off_t first, xfs_off_t last, int fiopt); int xfs_flushinval_pages(struct xfs_inode *ip, xfs_off_t first, -- cgit From ad650f5b27bc9858360b42aaa0d9204d16115316 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 7 Mar 2012 04:50:21 +0000 Subject: xfs: fallback to vmalloc for large buffers in xfs_attrmulti_attr_get xfsdump uses for a large buffer for extended attributes, which has a kmalloc'd shadow buffer in the kernel. This can fail after the system has been running for some time as it is a high order allocation. Add a fallback to vmalloc so that it doesn't require contiguous memory and so won't randomly fail while xfsdump is running. 
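[Illustration, not part of the patch: the same "try kmalloc first, fall back to vmalloc" idea reduced to a generic helper pair. The demo_* names are invented; the patch itself uses XFS's kmem_zalloc()/kmem_zalloc_large()/kmem_free_large() wrappers, and only the stock kernel calls shown here (kzalloc, vzalloc, is_vmalloc_addr, vfree, kfree) are assumed.]

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/*
 * Try a physically contiguous allocation first; if memory is too
 * fragmented for a high-order kmalloc, fall back to vmalloc, which
 * only needs virtually contiguous pages.
 */
static void *demo_large_zalloc(size_t len)
{
	void *buf = kzalloc(len, GFP_KERNEL | __GFP_NOWARN);

	if (!buf)
		buf = vzalloc(len);
	return buf;
}

/* The free side must check which allocator handed the buffer out. */
static void demo_large_free(void *buf)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
}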
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_ioctl.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 76f3ca5cfc36..f588320dc4b9 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -450,9 +450,12 @@ xfs_attrmulti_attr_get( if (*len > XATTR_SIZE_MAX) return EINVAL; - kbuf = kmalloc(*len, GFP_KERNEL); - if (!kbuf) - return ENOMEM; + kbuf = kmem_zalloc(*len, KM_SLEEP | KM_MAYFAIL); + if (!kbuf) { + kbuf = kmem_zalloc_large(*len); + if (!kbuf) + return ENOMEM; + } error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); if (error) @@ -462,7 +465,10 @@ xfs_attrmulti_attr_get( error = EFAULT; out_kfree: - kfree(kbuf); + if (is_vmalloc_addr(kbuf)) + kmem_free_large(kbuf); + else + kmem_free(kbuf); return error; } -- cgit From f074211f6041305b645669464343d504f4e6a290 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 7 Mar 2012 04:50:22 +0000 Subject: xfs: fallback to vmalloc for large buffers in xfs_getbmap xfs_getbmap uses for a large buffer for extents, which is kmalloc'd. This can fail after the system has been running for some time as it is a high order allocation. Add a fallback to vmalloc so that it doesn't require contiguous memory and so won't randomly fail on files with large extent lists. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Mark Tinguely Signed-off-by: Ben Myers --- fs/xfs/xfs_bmap.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 188ef2fbd628..3548c6f75593 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -5536,8 +5536,12 @@ xfs_getbmap( if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx)) return XFS_ERROR(ENOMEM); out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL); - if (!out) - return XFS_ERROR(ENOMEM); + if (!out) { + out = kmem_zalloc_large(bmv->bmv_count * + sizeof(struct getbmapx)); + if (!out) + return XFS_ERROR(ENOMEM); + } xfs_ilock(ip, XFS_IOLOCK_SHARED); if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) { @@ -5661,7 +5665,10 @@ xfs_getbmap( break; } - kmem_free(out); + if (is_vmalloc_addr(out)) + kmem_free_large(out); + else + kmem_free(out); return error; } -- cgit From 381b872cf7942ab8c95de156ce403bd906f3915d Mon Sep 17 00:00:00 2001 From: Seiji Aguchi Date: Fri, 16 Mar 2012 15:36:59 -0700 Subject: pstore: Introduce get_reason_str() to pstore Recently, there has been some changes in kmsg_dump() below and they have been applied to linus-tree. (1) kmsg_dump(KMSG_DUMP_KEXEC) was removed. http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=commitdiff;h=a3dd3323058d281abd584b15ad4c5b65064d7a61 (2) The order of "enum kmsg_dump_reason" was modified. http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=commitdiff;h=c22ab332902333f83766017478c1ef6607ace681 Replace the fragile reason_str array with a more robust solution that will not be broken by future re-arrangements of the enum values. 
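[Illustration, not part of the patch: a small standalone contrast between an enum-indexed string table and a switch statement. The enum and strings are hypothetical stand-ins, not the kernel's actual kmsg_dump_reason values.]

#include <stdio.h>

enum demo_reason { DEMO_PANIC, DEMO_OOPS, DEMO_RESTART, DEMO_NREASONS };

/*
 * Fragile: only correct while the table order exactly matches the enum;
 * re-ordering the enum silently misreports the reason, and new values
 * map to NULL entries unless the table is updated in lockstep.
 */
static const char * const demo_table[DEMO_NREASONS] = {
	"Panic", "Oops", "Restart"
};

/*
 * Robust: each case names the value it handles, and anything the switch
 * does not handle degrades gracefully to "Unknown".
 */
static const char *demo_reason_str(enum demo_reason reason)
{
	switch (reason) {
	case DEMO_PANIC:
		return "Panic";
	case DEMO_OOPS:
		return "Oops";
	case DEMO_RESTART:
		return "Restart";
	default:
		return "Unknown";
	}
}

int main(void)
{
	printf("%s / %s\n", demo_table[DEMO_OOPS], demo_reason_str(DEMO_OOPS));
	return 0;
}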
Signed-off-by: Seiji Aguchi Signed-off-by: Adrian Hunter Acked-by: Don Zickus Link: https://lkml.org/lkml/2012/3/16/417 Signed-off-by: Tony Luck --- fs/pstore/platform.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 9ec22d3b4293..82c585f715e3 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -68,9 +68,25 @@ void pstore_set_kmsg_bytes(int bytes) /* Tag each group of saved records with a sequence number */ static int oopscount; -static char *reason_str[] = { - "Oops", "Panic", "Kexec", "Restart", "Halt", "Poweroff", "Emergency" -}; +static const char *get_reason_str(enum kmsg_dump_reason reason) +{ + switch (reason) { + case KMSG_DUMP_PANIC: + return "Panic"; + case KMSG_DUMP_OOPS: + return "Oops"; + case KMSG_DUMP_EMERG: + return "Emergency"; + case KMSG_DUMP_RESTART: + return "Restart"; + case KMSG_DUMP_HALT: + return "Halt"; + case KMSG_DUMP_POWEROFF: + return "Poweroff"; + default: + return "Unknown"; + } +} /* * callback from kmsg_dump. (s2,l2) has the most recently @@ -85,17 +101,15 @@ static void pstore_dump(struct kmsg_dumper *dumper, unsigned long s1_start, s2_start; unsigned long l1_cpy, l2_cpy; unsigned long size, total = 0; - char *dst, *why; + char *dst; + const char *why; u64 id; int hsize, ret; unsigned int part = 1; unsigned long flags = 0; int is_locked = 0; - if (reason < ARRAY_SIZE(reason_str)) - why = reason_str[reason]; - else - why = "Unknown"; + why = get_reason_str(reason); if (in_nmi()) { is_locked = spin_trylock(&psinfo->buf_lock); -- cgit From 2c724fb92732c0b2a5629eb8af74e82eb62ac947 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 16 Mar 2012 10:28:07 +0000 Subject: afs: Read of file returns EBADMSG A read of a large file on an afs mount failed: # cat junk.file > /dev/null cat: junk.file: Bad message Looking at the trace, call->offset wrapped since it is only an unsigned short. In afs_extract_data: _enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count); ... if (call->offset < count) { if (last) { _leave(" = -EBADMSG [%d < %zu]", call->offset, count); return -EBADMSG; } Which matches the trace: [cat ] ==> afs_extract_data({65132},{524},1,,65536) [cat ] <== afs_extract_data() = -EBADMSG [0 < 65536] call->offset went from 65132 to 0. Fix this by making call->offset an unsigned int. Signed-off-by: Anton Blanchard Signed-off-by: David Howells Cc: Signed-off-by: Linus Torvalds --- fs/afs/internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/afs/internal.h b/fs/afs/internal.h index d2b0888126d4..a306bb6d88d9 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -109,7 +109,7 @@ struct afs_call { unsigned reply_size; /* current size of reply */ unsigned first_offset; /* offset into mapping[first] */ unsigned last_to; /* amount of mapping[last] */ - unsigned short offset; /* offset into received data store */ + unsigned offset; /* offset into received data store */ unsigned char unmarshall; /* unmarshalling phase */ bool incoming; /* T if incoming call */ bool send_pages; /* T if data from mapping should be sent */ -- cgit From c0173863528a8c9212c53e080d63a1aaae5ef4f4 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 16 Mar 2012 10:28:19 +0000 Subject: afs: Remote abort can cause BUG in rxrpc code When writing files to afs I sometimes hit a BUG: kernel BUG at fs/afs/rxrpc.c:179! 
With a backtrace of: afs_free_call afs_make_call afs_fs_store_data afs_vnode_store_data afs_write_back_from_locked_page afs_writepages_region afs_writepages The cause is: ASSERT(skb_queue_empty(&call->rx_queue)); Looking at a tcpdump of the session the abort happens because we are exceeding our disk quota: rx abort fs reply store-data error diskquota exceeded (32) So the abort error is valid. We hit the BUG because we haven't freed all the resources for the call. By freeing any skbs in call->rx_queue before calling afs_free_call we avoid hitting leaking memory and avoid hitting the BUG. Signed-off-by: Anton Blanchard Signed-off-by: David Howells Cc: Signed-off-by: Linus Torvalds --- fs/afs/rxrpc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index e45a323aebb4..8ad8c2a0703a 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -314,6 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, struct msghdr msg; struct kvec iov[1]; int ret; + struct sk_buff *skb; _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); @@ -380,6 +381,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, error_do_abort: rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT); + while ((skb = skb_dequeue(&call->rx_queue))) + afs_free_skb(skb); rxrpc_kernel_end_call(rxcall); call->rxcall = NULL; error_kill_call: -- cgit From 3d777a64066f3b9db8a94834aaed6a9cf09808fd Mon Sep 17 00:00:00 2001 From: Haogang Chen Date: Fri, 16 Mar 2012 17:08:38 -0700 Subject: nilfs2: clamp ns_r_segments_percentage to [1, 99] ns_r_segments_percentage is read from the disk. Bogus or malicious value could cause integer overflow and malfunction due to meaningless disk usage calculation. This patch reports error when mounting such bogus volumes. Signed-off-by: Haogang Chen Signed-off-by: Ryusuke Konishi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nilfs2/the_nilfs.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index d32714094375..8a759016c2e3 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -409,6 +409,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block); nilfs->ns_r_segments_percentage = le32_to_cpu(sbp->s_r_segments_percentage); + if (nilfs->ns_r_segments_percentage < 1 || + nilfs->ns_r_segments_percentage > 99) { + printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n"); + return -EINVAL; + } + nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments)); nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed); return 0; -- cgit From d7178c79d9b7c5518f9943188091a75fc6ce0675 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 16 Mar 2012 17:08:39 -0700 Subject: nilfs2: fix NULL pointer dereference in nilfs_load_super_block() According to the report from Slicky Devil, nilfs caused kernel oops at nilfs_load_super_block function during mount after he shrank the partition without resizing the filesystem: BUG: unable to handle kernel NULL pointer dereference at 00000048 IP: [] nilfs_load_super_block+0x17e/0x280 [nilfs2] *pde = 00000000 Oops: 0000 [#1] PREEMPT SMP ... 
Call Trace: [] init_nilfs+0x4b/0x2e0 [nilfs2] [] nilfs_mount+0x447/0x5b0 [nilfs2] [] mount_fs+0x36/0x180 [] vfs_kern_mount+0x51/0xa0 [] do_kern_mount+0x3e/0xe0 [] do_mount+0x169/0x700 [] sys_mount+0x6b/0xa0 [] sysenter_do_call+0x12/0x28 Code: 53 18 8b 43 20 89 4b 18 8b 4b 24 89 53 1c 89 43 24 89 4b 20 8b 43 20 c7 43 2c 00 00 00 00 23 75 e8 8b 50 68 89 53 28 8b 54 b3 20 <8b> 72 48 8b 7a 4c 8b 55 08 89 b3 84 00 00 00 89 bb 88 00 00 00 EIP: [] nilfs_load_super_block+0x17e/0x280 [nilfs2] SS:ESP 0068:ca9bbdcc CR2: 0000000000000048 This turned out due to a defect in an error path which runs if the calculated location of the secondary super block was invalid. This patch fixes it and eliminates the reported oops. Reported-by: Slicky Devil Signed-off-by: Ryusuke Konishi Tested-by: Slicky Devil Cc: [2.6.30+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nilfs2/the_nilfs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 8a759016c2e3..501b7f8b739f 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -521,6 +521,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, brelse(sbh[1]); sbh[1] = NULL; sbp[1] = NULL; + valid[1] = 0; swp = 0; } if (!valid[swp]) { -- cgit From 8dd3775889345850ecddd689b5c200cdd91bd8c9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 15 Mar 2012 17:16:40 -0400 Subject: NFSv4.1: Clean ups and bugfixes for the pNFS read/writeback/commit code Move more pnfs-isms out of the generic commit code. Bugfixes: - filelayout_scan_commit_lists doesn't need to get/put the lseg. In fact since it is run under the inode->i_lock, the lseg_put() can deadlock. - Ensure that we distinguish between what needs to be done for commit-to-data server and what needs to be done for commit-to-MDS using the new flag PG_COMMIT_TO_DS. Otherwise we may end up calling put_lseg() on a bucket for a struct nfs_page that got written through the MDS. - Fix a case where we were using list_del() on an nfs_page->wb_list instead of list_del_init(). - filelayout_initiate_commit needs to call filelayout_commit_release on error instead of the mds_ops->rpc_release(). Otherwise it won't clear the commit lock. Cleanups: - Let the files layout manage the commit lists for the pNFS case. Don't expose stuff like pnfs_choose_commit_list, and the fact that the commit buckets hold references to the layout segment in common code. - Cast out the put_lseg() calls for the struct nfs_read/write_data->lseg into the pNFS layer from whence they came. - Let the pNFS layer manage the NFS_INO_PNFS_COMMIT bit. 
Signed-off-by: Trond Myklebust Cc: Fred Isaman --- fs/nfs/internal.h | 4 +- fs/nfs/nfs4filelayout.c | 82 ++++++++++++++++++++++++-------- fs/nfs/pnfs.c | 3 ++ fs/nfs/pnfs.h | 55 ++++++++++----------- fs/nfs/read.c | 1 - fs/nfs/write.c | 124 ++++++++++++++++++++++++++++++------------------ 6 files changed, 172 insertions(+), 97 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 04a914704e7b..2476dc69365f 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -308,8 +308,6 @@ extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); extern void nfs_readdata_release(struct nfs_read_data *rdata); /* write.c */ -extern int nfs_scan_commit_list(struct list_head *src, struct list_head *dst, - int max); extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head); extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio, @@ -334,6 +332,8 @@ void nfs_retry_commit(struct list_head *page_list, void nfs_commit_clear_lock(struct nfs_inode *nfsi); void nfs_commitdata_release(void *data); void nfs_commit_release_pages(struct nfs_write_data *data); +void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head); +void nfs_request_remove_commit_list(struct nfs_page *req); #ifdef CONFIG_MIGRATION extern int nfs_migrate_page(struct address_space *, diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 379a085f8f25..c24e077c2820 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -224,6 +224,7 @@ static void filelayout_read_release(void *data) { struct nfs_read_data *rdata = (struct nfs_read_data *)data; + put_lseg(rdata->lseg); rdata->mds_ops->rpc_release(data); } @@ -310,6 +311,7 @@ static void filelayout_write_release(void *data) { struct nfs_write_data *wdata = (struct nfs_write_data *)data; + put_lseg(wdata->lseg); wdata->mds_ops->rpc_release(data); } @@ -320,6 +322,7 @@ static void filelayout_commit_release(void *data) nfs_commit_release_pages(wdata); if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding)) nfs_commit_clear_lock(NFS_I(wdata->inode)); + put_lseg(wdata->lseg); nfs_commitdata_release(wdata); } @@ -779,11 +782,16 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) /* The generic layer is about to remove the req from the commit list. * If this will make the bucket empty, it will need to put the lseg reference. - * Note inode lock is held, so we can't do the put here. */ -static struct pnfs_layout_segment * -filelayout_remove_commit_req(struct nfs_page *req) +static void +filelayout_clear_request_commit(struct nfs_page *req) { + struct pnfs_layout_segment *freeme = NULL; + struct inode *inode = req->wb_context->dentry->d_inode; + + spin_lock(&inode->i_lock); + if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags)) + goto out; if (list_is_singular(&req->wb_list)) { struct inode *inode = req->wb_context->dentry->d_inode; struct pnfs_layout_segment *lseg; @@ -792,11 +800,16 @@ filelayout_remove_commit_req(struct nfs_page *req) * since there is only one relevant lseg... 
*/ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { - if (lseg->pls_range.iomode == IOMODE_RW) - return lseg; + if (lseg->pls_range.iomode == IOMODE_RW) { + freeme = lseg; + break; + } } } - return NULL; +out: + nfs_request_remove_commit_list(req); + spin_unlock(&inode->i_lock); + put_lseg(freeme); } static struct list_head * @@ -829,9 +842,20 @@ filelayout_choose_commit_list(struct nfs_page *req, */ get_lseg(lseg); } + set_bit(PG_COMMIT_TO_DS, &req->wb_flags); return list; } +static void +filelayout_mark_request_commit(struct nfs_page *req, + struct pnfs_layout_segment *lseg) +{ + struct list_head *list; + + list = filelayout_choose_commit_list(req, lseg); + nfs_request_add_commit_list(req, list); +} + static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) { struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); @@ -872,7 +896,7 @@ static int filelayout_initiate_commit(struct nfs_write_data *data, int how) set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags); set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); prepare_to_resend_writes(data); - data->mds_ops->rpc_release(data); + filelayout_commit_release(data); return -EAGAIN; } dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how); @@ -895,7 +919,7 @@ find_only_write_lseg_locked(struct inode *inode) list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) if (lseg->pls_range.iomode == IOMODE_RW) - return get_lseg(lseg); + return lseg; return NULL; } @@ -905,10 +929,33 @@ static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode) spin_lock(&inode->i_lock); rv = find_only_write_lseg_locked(inode); + if (rv) + get_lseg(rv); spin_unlock(&inode->i_lock); return rv; } +static int +filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max) +{ + struct list_head *src = &bucket->written; + struct list_head *dst = &bucket->committing; + struct nfs_page *req, *tmp; + int ret = 0; + + list_for_each_entry_safe(req, tmp, src, wb_list) { + if (!nfs_lock_request(req)) + continue; + nfs_request_remove_commit_list(req); + clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); + nfs_list_add_request(req, dst); + ret++; + if (ret == max) + break; + } + return ret; +} + /* Move reqs from written to committing lists, returning count of number moved. * Note called with i_lock held. 
*/ @@ -920,21 +967,16 @@ static int filelayout_scan_commit_lists(struct inode *inode, int max) lseg = find_only_write_lseg_locked(inode); if (!lseg) - return 0; + goto out_done; fl = FILELAYOUT_LSEG(lseg); if (fl->commit_through_mds) - goto out_put; - for (i = 0; i < fl->number_of_buckets; i++) { - if (list_empty(&fl->commit_buckets[i].written)) - continue; - cnt = nfs_scan_commit_list(&fl->commit_buckets[i].written, - &fl->commit_buckets[i].committing, - max); + goto out_done; + for (i = 0; i < fl->number_of_buckets && max != 0; i++) { + cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i], max); max -= cnt; rv += cnt; } -out_put: - put_lseg(lseg); +out_done: return rv; } @@ -1033,8 +1075,8 @@ static struct pnfs_layoutdriver_type filelayout_type = { .free_lseg = filelayout_free_lseg, .pg_read_ops = &filelayout_pg_read_ops, .pg_write_ops = &filelayout_pg_write_ops, - .choose_commit_list = filelayout_choose_commit_list, - .remove_commit_req = filelayout_remove_commit_req, + .mark_request_commit = filelayout_mark_request_commit, + .clear_request_commit = filelayout_clear_request_commit, .scan_commit_lists = filelayout_scan_commit_lists, .commit_pagelist = filelayout_commit_pagelist, .read_pagelist = filelayout_read_pagelist, diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 6f1c1e3d12bc..b5d451586943 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1210,6 +1210,7 @@ void pnfs_ld_write_done(struct nfs_write_data *data) } data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages); } + put_lseg(data->lseg); data->mds_ops->rpc_release(data); } EXPORT_SYMBOL_GPL(pnfs_ld_write_done); @@ -1223,6 +1224,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, nfs_list_add_request(data->req, &desc->pg_list); nfs_pageio_reset_write_mds(desc); desc->pg_recoalesce = 1; + put_lseg(data->lseg); nfs_writedata_release(data); } @@ -1323,6 +1325,7 @@ void pnfs_ld_read_done(struct nfs_read_data *data) data->mds_ops->rpc_call_done(&data->task, data); } else pnfs_ld_handle_read_error(data); + put_lseg(data->lseg); data->mds_ops->rpc_release(data); } EXPORT_SYMBOL_GPL(pnfs_ld_read_done); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index ef92f676cf1e..e98ff3027d3a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -94,9 +94,9 @@ struct pnfs_layoutdriver_type { const struct nfs_pageio_ops *pg_read_ops; const struct nfs_pageio_ops *pg_write_ops; - struct list_head * (*choose_commit_list) (struct nfs_page *req, + void (*mark_request_commit) (struct nfs_page *req, struct pnfs_layout_segment *lseg); - struct pnfs_layout_segment *(*remove_commit_req) (struct nfs_page *req); + void (*clear_request_commit) (struct nfs_page *req); int (*scan_commit_lists) (struct inode *inode, int max); int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how); @@ -269,39 +269,42 @@ pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how); } -static inline struct list_head * -pnfs_choose_commit_list(struct nfs_page *req, struct pnfs_layout_segment *lseg) +static inline bool +pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) { struct inode *inode = req->wb_context->dentry->d_inode; - struct list_head *rv; + struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; - if (lseg && NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list) - rv = NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list(req, lseg); - else - rv = &NFS_I(inode)->commit_list; - 
return rv; + if (lseg == NULL || ld->mark_request_commit == NULL) + return false; + ld->mark_request_commit(req, lseg); + return true; } -static inline struct pnfs_layout_segment * +static inline bool pnfs_clear_request_commit(struct nfs_page *req) { struct inode *inode = req->wb_context->dentry->d_inode; + struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; - if (NFS_SERVER(inode)->pnfs_curr_ld && - NFS_SERVER(inode)->pnfs_curr_ld->remove_commit_req) - return NFS_SERVER(inode)->pnfs_curr_ld->remove_commit_req(req); - else - return NULL; + if (ld == NULL || ld->clear_request_commit == NULL) + return false; + ld->clear_request_commit(req); + return true; } static inline int pnfs_scan_commit_lists(struct inode *inode, int max) { - if (NFS_SERVER(inode)->pnfs_curr_ld && - NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists) - return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(inode, max); - else + struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; + int ret; + + if (ld == NULL || ld->scan_commit_lists == NULL) return 0; + ret = ld->scan_commit_lists(inode, max); + if (ret != 0) + set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags); + return ret; } /* Should the pNFS client commit and return the layout upon a setattr */ @@ -403,18 +406,16 @@ pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) return PNFS_NOT_ATTEMPTED; } -static inline struct list_head * -pnfs_choose_commit_list(struct nfs_page *req, struct pnfs_layout_segment *lseg) +static inline bool +pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) { - struct inode *inode = req->wb_context->dentry->d_inode; - - return &NFS_I(inode)->commit_list; + return false; } -static inline struct pnfs_layout_segment * +static inline bool pnfs_clear_request_commit(struct nfs_page *req) { - return NULL; + return false; } static inline int diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 3c2540d532c7..2662c0298dd0 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -66,7 +66,6 @@ void nfs_readdata_free(struct nfs_read_data *p) void nfs_readdata_release(struct nfs_read_data *rdata) { - put_lseg(rdata->lseg); put_nfs_open_context(rdata->args.context); nfs_readdata_free(rdata); } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index a630ad65d64c..0de19f413f92 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -100,7 +100,6 @@ void nfs_writedata_free(struct nfs_write_data *p) void nfs_writedata_release(struct nfs_write_data *wdata) { - put_lseg(wdata->lseg); put_nfs_open_context(wdata->args.context); nfs_writedata_free(wdata); } @@ -393,8 +392,6 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) spin_unlock(&inode->i_lock); } -static struct pnfs_layout_segment *nfs_clear_request_commit(struct nfs_page *req); - /* * Remove a write request from an inode */ @@ -402,18 +399,15 @@ static void nfs_inode_remove_request(struct nfs_page *req) { struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); - struct pnfs_layout_segment *lseg; BUG_ON (!NFS_WBACK_BUSY(req)); spin_lock(&inode->i_lock); - lseg = nfs_clear_request_commit(req); set_page_private(req->wb_page, 0); ClearPagePrivate(req->wb_page); clear_bit(PG_MAPPED, &req->wb_flags); nfsi->npages--; spin_unlock(&inode->i_lock); - put_lseg(lseg); nfs_release_request(req); } @@ -424,26 +418,69 @@ nfs_mark_request_dirty(struct nfs_page *req) } #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) -/* - * Add a request to the inode's commit list. 
+/** + * nfs_request_add_commit_list - add request to a commit list + * @req: pointer to a struct nfs_page + * @head: commit list head + * + * This sets the PG_CLEAN bit, updates the inode global count of + * number of outstanding requests requiring a commit as well as + * the MM page stats. + * + * The caller must _not_ hold the inode->i_lock, but must be + * holding the nfs_page lock. */ -static void -nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) +void +nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head) { struct inode *inode = req->wb_context->dentry->d_inode; - struct nfs_inode *nfsi = NFS_I(inode); - struct list_head *clist; - clist = pnfs_choose_commit_list(req, lseg); - spin_lock(&inode->i_lock); set_bit(PG_CLEAN, &(req)->wb_flags); - nfs_list_add_request(req, clist); - nfsi->ncommit++; + spin_lock(&inode->i_lock); + nfs_list_add_request(req, head); + NFS_I(inode)->ncommit++; spin_unlock(&inode->i_lock); inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); __mark_inode_dirty(inode, I_DIRTY_DATASYNC); } +EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); + +/** + * nfs_request_remove_commit_list - Remove request from a commit list + * @req: pointer to a nfs_page + * + * This clears the PG_CLEAN bit, and updates the inode global count of + * number of outstanding requests requiring a commit + * It does not update the MM page stats. + * + * The caller _must_ hold the inode->i_lock and the nfs_page lock. + */ +void +nfs_request_remove_commit_list(struct nfs_page *req) +{ + struct inode *inode = req->wb_context->dentry->d_inode; + + if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) + return; + nfs_list_remove_request(req); + NFS_I(inode)->ncommit--; +} +EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); + + +/* + * Add a request to the inode's commit list. 
+ */ +static void +nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) +{ + struct inode *inode = req->wb_context->dentry->d_inode; + + if (pnfs_mark_request_commit(req, lseg)) + return; + nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list); +} static void nfs_clear_page_commit(struct page *page) @@ -452,18 +489,19 @@ nfs_clear_page_commit(struct page *page) dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE); } -static struct pnfs_layout_segment * +static void nfs_clear_request_commit(struct nfs_page *req) { - struct pnfs_layout_segment *lseg = NULL; + if (test_bit(PG_CLEAN, &req->wb_flags)) { + struct inode *inode = req->wb_context->dentry->d_inode; - if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) { + if (!pnfs_clear_request_commit(req)) { + spin_lock(&inode->i_lock); + nfs_request_remove_commit_list(req); + spin_unlock(&inode->i_lock); + } nfs_clear_page_commit(req->wb_page); - lseg = pnfs_clear_request_commit(req); - NFS_I(req->wb_context->dentry->d_inode)->ncommit--; - list_del(&req->wb_list); } - return lseg; } static inline @@ -490,15 +528,14 @@ int nfs_reschedule_unstable_write(struct nfs_page *req, return 0; } #else -static inline void +static void nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) { } -static inline struct pnfs_layout_segment * +static void nfs_clear_request_commit(struct nfs_page *req) { - return NULL; } static inline @@ -523,25 +560,23 @@ nfs_need_commit(struct nfs_inode *nfsi) } /* i_lock held by caller */ -int +static int nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max) { struct nfs_page *req, *tmp; int ret = 0; list_for_each_entry_safe(req, tmp, src, wb_list) { - if (nfs_lock_request_dontget(req)) { - kref_get(&req->wb_kref); - list_move_tail(&req->wb_list, dst); - clear_bit(PG_CLEAN, &(req)->wb_flags); - ret++; - if (ret == max) - break; - } + if (!nfs_lock_request(req)) + continue; + nfs_request_remove_commit_list(req); + nfs_list_add_request(req, dst); + ret++; + if (ret == max) + break; } return ret; } -EXPORT_SYMBOL_GPL(nfs_scan_commit_list); /* * nfs_scan_commit - Scan an inode for commit requests @@ -559,14 +594,12 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst) spin_lock(&inode->i_lock); if (nfsi->ncommit > 0) { + const int max = INT_MAX; int pnfs_ret; - ret = nfs_scan_commit_list(&nfsi->commit_list, dst, INT_MAX); - pnfs_ret = pnfs_scan_commit_lists(inode, INT_MAX - ret); - if (pnfs_ret) { - ret += pnfs_ret; - set_bit(NFS_INO_PNFS_COMMIT, &nfsi->flags); - } + ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max); + pnfs_ret = pnfs_scan_commit_lists(inode, max - ret); + ret += pnfs_ret; nfsi->ncommit -= ret; } spin_unlock(&inode->i_lock); @@ -601,7 +634,6 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, unsigned int rqend; unsigned int end; int error; - struct pnfs_layout_segment *lseg = NULL; if (!PagePrivate(page)) return NULL; @@ -637,8 +669,6 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, spin_lock(&inode->i_lock); } - lseg = nfs_clear_request_commit(req); - /* Okay, the request matches. 
Update the region */ if (offset < req->wb_offset) { req->wb_offset = offset; @@ -650,7 +680,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, req->wb_bytes = rqend - req->wb_offset; out_unlock: spin_unlock(&inode->i_lock); - put_lseg(lseg); + nfs_clear_request_commit(req); return req; out_flushme: spin_unlock(&inode->i_lock); @@ -1337,7 +1367,6 @@ void nfs_commitdata_release(void *data) { struct nfs_write_data *wdata = data; - put_lseg(wdata->lseg); put_nfs_open_context(wdata->args.context); nfs_commit_free(wdata); } @@ -1647,6 +1676,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) if (req == NULL) break; if (nfs_lock_request_dontget(req)) { + nfs_clear_request_commit(req); nfs_inode_remove_request(req); /* * In case nfs_inode_remove_request has marked the -- cgit From 9390f42546339cf111edd23c16d6cf74ca41974c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Mar 2012 13:52:45 -0400 Subject: NFSv4.1: Fix a few issues in filelayout_commit_pagelist - Fix a race in which NFS_I(inode)->commits_outstanding could potentially go to zero (triggering a call to nfs_commit_clear_lock()) before we're done sending out all the commit RPC calls. - If nfs_commitdata_alloc fails, there is no reason why we shouldn't try to send off all the commits-to-ds. - Simplify the error handling. - Change pnfs_commit_list() to always return either PNFS_ATTEMPTED or PNFS_NOT_ATTEMPTED. Signed-off-by: Trond Myklebust Cc: Fred Isaman --- fs/nfs/nfs4filelayout.c | 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index c24e077c2820..e0bdbf4fe454 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -980,12 +980,14 @@ out_done: return rv; } -static int alloc_ds_commits(struct inode *inode, struct list_head *list) +static unsigned int +alloc_ds_commits(struct inode *inode, struct list_head *list) { struct pnfs_layout_segment *lseg; struct nfs4_filelayout_segment *fl; struct nfs_write_data *data; int i, j; + unsigned int nreq = 0; /* Won't need this when non-whole file layout segments are supported * instead we will use a pnfs_layout_hdr structure */ @@ -998,15 +1000,14 @@ static int alloc_ds_commits(struct inode *inode, struct list_head *list) continue; data = nfs_commitdata_alloc(); if (!data) - goto out_bad; + break; data->ds_commit_index = i; data->lseg = lseg; list_add(&data->pages, list); + nreq++; } - put_lseg(lseg); - return 0; -out_bad: + /* Clean up on error */ for (j = i; j < fl->number_of_buckets; j++) { if (list_empty(&fl->commit_buckets[i].committing)) continue; @@ -1015,7 +1016,7 @@ out_bad: } put_lseg(lseg); /* Caller will clean up entries put on list */ - return -ENOMEM; + return nreq; } /* This follows nfs_commit_list pretty closely */ @@ -1025,21 +1026,29 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, { struct nfs_write_data *data, *tmp; LIST_HEAD(list); + unsigned int nreq = 0; if (!list_empty(mds_pages)) { data = nfs_commitdata_alloc(); - if (!data) - goto out_bad; - data->lseg = NULL; - list_add(&data->pages, &list); + if (data != NULL) { + data->lseg = NULL; + list_add(&data->pages, &list); + nreq++; + } else + nfs_retry_commit(mds_pages, NULL); } - if (alloc_ds_commits(inode, &list)) - goto out_bad; + nreq += alloc_ds_commits(inode, &list); + + if (nreq == 0) { + nfs_commit_clear_lock(NFS_I(inode)); + goto out; + } + + atomic_add(nreq, 
&NFS_I(inode)->commits_outstanding); list_for_each_entry_safe(data, tmp, &list, pages) { list_del_init(&data->pages); - atomic_inc(&NFS_I(inode)->commits_outstanding); if (!data->lseg) { nfs_init_commit(data, mds_pages, NULL); nfs_initiate_commit(data, NFS_CLIENT(inode), @@ -1049,16 +1058,8 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, filelayout_initiate_commit(data, how); } } - return 0; - out_bad: - list_for_each_entry_safe(data, tmp, &list, pages) { - nfs_retry_commit(&data->pages, data->lseg); - list_del_init(&data->pages); - nfs_commit_free(data); - } - nfs_retry_commit(mds_pages, NULL); - nfs_commit_clear_lock(NFS_I(inode)); - return -ENOMEM; +out: + return PNFS_ATTEMPTED; } static void -- cgit From e49a29bd0eacce9d4956c4daf777a330115b369d Mon Sep 17 00:00:00 2001 From: Sachin Prabhu Date: Fri, 16 Mar 2012 19:25:52 +0000 Subject: Try using machine credentials for RENEW calls Using user credentials for RENEW calls will fail when the user credentials have expired. To avoid this, try using the machine credentials when making RENEW calls. If no machine credentials have been set, fall back to using user credentials as before. Signed-off-by: Sachin Prabhu Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 119006b0815a..12b068f2ec91 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -146,6 +146,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) struct rpc_cred *cred = NULL; struct nfs_server *server; + /* Use machine credentials if available */ + cred = nfs4_get_machine_cred_locked(clp); + if (cred != NULL) + goto out; + rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { cred = nfs4_get_renew_cred_server_locked(server); @@ -153,6 +158,8 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) break; } rcu_read_unlock(); + +out: return cred; } -- cgit From 93dc6107a76daed81c07f50215fa6ae77691634f Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 16 Mar 2012 16:34:03 -0400 Subject: Don't limit non-nested epoll paths Commit 28d82dc1c4ed ("epoll: limit paths") that I did to limit the number of possible wakeup paths in epoll is causing a few applications to longer work (dovecot for one). The original patch is really about limiting the amount of epoll nesting (since epoll fds can be attached to other fds). Thus, we probably can allow an unlimited number of paths of depth 1. My current patch limits it at 1000. And enforce the limits on paths that have a greater depth. 
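For context, the check being relaxed here sits next to a small per-depth limits table that is not visible in the hunk below. The following is a minimal sketch of that surrounding code; PATH_ARR_SIZE and the path_limits values are assumed from the earlier "epoll: limit paths" patch rather than taken from this one:

/* Sketch only: per-depth caps assumed from the earlier "epoll: limit
 * paths" patch. Depth-0 (direct) attachments are now exempt from the
 * caps; deeper nesting still counts against its per-depth limit. */
#define PATH_ARR_SIZE 5
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}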
This is captured in: https://bugzilla.redhat.com/show_bug.cgi?id=681578 Signed-off-by: Jason Baron Cc: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ea54cdef04dd..4d9d3a45e356 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -988,6 +988,10 @@ static int path_count[PATH_ARR_SIZE]; static int path_count_inc(int nests) { + /* Allow an arbitrary number of depth 1 paths */ + if (nests == 0) + return 0; + if (++path_count[nests] > path_limits[nests]) return -1; return 0; -- cgit From ce85852b90a214cf577fc1b4f49d99fd7e98784a Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Sat, 17 Mar 2012 09:46:55 +0300 Subject: CIFS: Fix a spurious error in cifs_push_posix_locks Signed-off-by: Pavel Shilovsky Reviewed-by: Jeff Layton Reported-by: Ben Hutchings Signed-off-by: Steve French --- fs/cifs/file.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5e64748a2917..8e02dbd88ae1 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -960,9 +960,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) INIT_LIST_HEAD(&locks_to_send); /* - * Allocating count locks is enough because no locks can be added to - * the list while we are holding cinode->lock_mutex that protects - * locking operations of this inode. + * Allocating count locks is enough because no FL_POSIX locks can be + * added to the list while we are holding cinode->lock_mutex that + * protects locking operations of this inode. */ for (; i < count; i++) { lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); @@ -973,18 +973,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) list_add_tail(&lck->llist, &locks_to_send); } - i = 0; el = locks_to_send.next; lock_flocks(); cifs_for_each_lock(cfile->dentry->d_inode, before) { + flock = *before; + if ((flock->fl_flags & FL_POSIX) == 0) + continue; if (el == &locks_to_send) { - /* something is really wrong */ + /* + * The list ended. We don't have enough allocated + * structures - something is really wrong. + */ cERROR(1, "Can't push all brlocks!"); break; } - flock = *before; - if ((flock->fl_flags & FL_POSIX) == 0) - continue; length = 1 + flock->fl_end - flock->fl_start; if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) type = CIFS_RDLCK; @@ -996,7 +998,6 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) lck->length = length; lck->type = type; lck->offset = flock->fl_start; - i++; el = el->next; } unlock_flocks(); -- cgit From 6d7d1a0dc735ea8412769edae7154885021107a9 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 19 Mar 2012 16:19:53 -0700 Subject: vfs: get rid of batshit-insane pointless dentry hash calculations For some odd historical reason, the final mixing round for the dentry cache hash table lookup had an insane "xor with big constant" logic. In two places. The big constant that is being xor'ed is GOLDEN_RATIO_PRIME, which is a fairly random-looking number that is designed to be *multiplied* with so that the bits get spread out over a whole long-word. But xor'ing with it is insane. It doesn't really even change the hash - it really only shifts the hash around in the hash table. To make matters worse, the insane big constant is different on 32-bit and 64-bit builds, even though the name hash bits we use are always 32-bit (and the bits from the pointer we mix in effectively are too). It's all total voodoo programming, in other words. 
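The "only shifts the hash around" observation can be checked directly: xor with a fixed constant is a bijection on bucket indices, so it relabels which bucket a chain lands in without changing how entries spread across chains. Below is a small standalone demonstration, not kernel code; the bucket count and constant are arbitrary stand-ins for (D_HASHMASK + 1) and GOLDEN_RATIO_PRIME:

#include <stdio.h>

#define BUCKETS  16          /* stand-in for (D_HASHMASK + 1)   */
#define CONSTANT 0x9e3779b9u /* stand-in for GOLDEN_RATIO_PRIME */

int main(void)
{
	unsigned int plain[BUCKETS] = { 0 }, xored[BUCKETS] = { 0 };
	unsigned int mix = CONSTANT & (BUCKETS - 1);
	unsigned int h, i;

	/* Bucket a run of synthetic hash values, with and without the
	 * final "xor a big constant" step. */
	for (h = 0; h < 100000; h += 7) {
		plain[h & (BUCKETS - 1)]++;
		xored[(h ^ CONSTANT) & (BUCKETS - 1)]++;
	}

	/* Every chain simply moved from bucket i to bucket i ^ mix;
	 * the chain-length distribution is identical. */
	for (i = 0; i < BUCKETS; i++)
		printf("bucket %2u: %6u entries; with xor, bucket %2u: %6u\n",
		       i, plain[i], i ^ mix, xored[i ^ mix]);
	return 0;
}

Multiplying by GOLDEN_RATIO_PRIME, by contrast, does spread input bits across the word, which is why the commit message notes that the constant is designed to be multiplied with, not xor'ed.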
Now, some testing and analysis of the hash chains shows that the rest of the hash function seems to be fairly good. It does pick the right bits of the parent dentry pointer, for example, and while it's generally a bad idea to use an xor to mix down the upper bits (because if there is a repeating pattern, the xor can cause "destructive interference"), it seems to not have been a disaster. For example, replacing the hash with the normal "hash_long()" code (that uses the GOLDEN_RATIO_PRIME constant correctly, btw) actually just makes the hash worse. The hand-picked hash knew which bits of the pointer had the highest entropy, and hash_long() ends up mixing bits less optimally at least in some trivial tests. So the hash function overall seems fine, it just has that really odd "shift result around by a constant xor". So get rid of the silly xor, and replace the down-mixing of the bits with an add instead of an xor that tends to not have the same kind of destructive interference issues. Some stats on the resulting hash chains shows that they look statistically identical before and after, but the code is simpler and no longer makes you go "WTF?". Also, the incoming hash really is just "unsigned int", not a long, and there's no real point to worry about the high 26 bits of the dentry pointer for the 64-bit case, because they are all going to be identical anyway. So also change the hashing to be done in the more natural 'unsigned int' that is the real size of the actual hashed data anyway. Signed-off-by: Linus Torvalds --- fs/dcache.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index bcbdb33fcc20..5f00a6f63c9e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly; static struct hlist_bl_head *dentry_hashtable __read_mostly; static inline struct hlist_bl_head *d_hash(const struct dentry *parent, - unsigned long hash) + unsigned int hash) { - hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; - hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS); + hash += (unsigned long) parent / L1_CACHE_BYTES; + hash = hash + (hash >> D_HASHBITS); return dentry_hashtable + (hash & D_HASHMASK); } -- cgit From 220cca2a4f5867db595135e0450381032eb54902 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 19 Mar 2012 15:25:50 -0400 Subject: GFS2: Change truncate page allocation to be GFP_NOFS This patch changes the page allocation in gfs2_block_truncate_page and two others to GFP_NOFS to avoid deadlock in low-memory conditions. 
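The deadlock being avoided comes from the allocation context: grab_cache_page() allocates with the mapping's default GFP mask, which normally includes __GFP_FS and so allows reclaim to re-enter the filesystem while GFS2 already holds its own locks, whereas find_or_create_page() lets the caller narrow that. A rough sketch of the idiom the patch switches to follows; the helper name is made up for illustration and is not part of the patch:

#include <linux/pagemap.h>

/* Hypothetical helper, for illustration only: look up or create the page
 * at @index while forbidding reclaim from calling back into the
 * filesystem (GFP_NOFS). The patch does this open-coded in
 * gfs2_unstuffer_page(), gfs2_block_truncate_page() and
 * gfs2_adjust_quota(). */
static struct page *gfs2_grab_page_nofs(struct address_space *mapping,
					pgoff_t index)
{
	return find_or_create_page(mapping, index, GFP_NOFS);
}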
Signed-off-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/bmap.c | 4 ++-- fs/gfs2/quota.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 14a704015970..197c5c47e577 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -60,7 +60,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, int release = 0; if (!page || page->index) { - page = grab_cache_page(inode->i_mapping, 0); + page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) return -ENOMEM; release = 1; @@ -930,7 +930,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) struct page *page; int err; - page = grab_cache_page(mapping, index); + page = find_or_create_page(mapping, index, GFP_NOFS); if (!page) return 0; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index a45b21b03915..4856c66640bf 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -681,7 +681,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, ptr = qp; nbytes = sizeof(struct gfs2_quota); get_a_page: - page = grab_cache_page(mapping, index); + page = find_or_create_page(mapping, index, GFP_NOFS); if (!page) return -ENOMEM; -- cgit From f1f996b66cc3908a8f5ffccc2ff41840e92f3b10 Mon Sep 17 00:00:00 2001 From: Laura Vasilescu Date: Mon, 19 Mar 2012 15:41:15 +0200 Subject: kcore: fix spelling in read_kcore() comment Signed-off-by: Laura Vasilescu Signed-off-by: Jiri Kosina --- fs/proc/kcore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index d245cb23dd72..e5e69aff6c69 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -513,7 +513,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) n = copy_to_user(buffer, (char *)start, tsz); /* - * We cannot distingush between fault on source + * We cannot distinguish between fault on source * and fault on destination. When this happens * we clear too and hope it will trigger the * EFAULT again. -- cgit From e8e3c3d66fd9d1ee2250f68d778cc48c1346d228 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:27 +0800 Subject: fs: remove the second argument of k[un]map_atomic() Acked-by: Benjamin LaHaise Signed-off-by: Cong Wang --- fs/aio.c | 30 +++++++++++++++--------------- fs/bio-integrity.c | 10 +++++----- fs/exec.c | 4 ++-- fs/namei.c | 4 ++-- fs/pipe.c | 8 ++++---- fs/splice.c | 7 ++----- 6 files changed, 30 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index b9d64d89a043..5b600cb8779e 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx) info->nr = nr_events; /* trusted copy */ - ring = kmap_atomic(info->ring_pages[0], KM_USER0); + ring = kmap_atomic(info->ring_pages[0]); ring->nr = nr_events; /* user copy */ ring->id = ctx->user_id; ring->head = ring->tail = 0; @@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx) ring->compat_features = AIO_RING_COMPAT_FEATURES; ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; ring->header_length = sizeof(struct aio_ring); - kunmap_atomic(ring, KM_USER0); + kunmap_atomic(ring); return 0; } /* aio_ring_event: returns a pointer to the event at the given index from - * kmap_atomic(, km). Release the pointer with put_aio_ring_event(); + * kmap_atomic(). 
Release the pointer with put_aio_ring_event(); */ #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) -#define aio_ring_event(info, nr, km) ({ \ +#define aio_ring_event(info, nr) ({ \ unsigned pos = (nr) + AIO_EVENTS_OFFSET; \ struct io_event *__event; \ __event = kmap_atomic( \ - (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \ + (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \ __event += pos % AIO_EVENTS_PER_PAGE; \ __event; \ }) -#define put_aio_ring_event(event, km) do { \ +#define put_aio_ring_event(event) do { \ struct io_event *__event = (event); \ (void)__event; \ - kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \ + kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \ } while(0) static void ctx_rcu_free(struct rcu_head *head) @@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2) if (kiocbIsCancelled(iocb)) goto put_rq; - ring = kmap_atomic(info->ring_pages[0], KM_IRQ1); + ring = kmap_atomic(info->ring_pages[0]); tail = info->tail; - event = aio_ring_event(info, tail, KM_IRQ0); + event = aio_ring_event(info, tail); if (++tail >= info->nr) tail = 0; @@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2) info->tail = tail; ring->tail = tail; - put_aio_ring_event(event, KM_IRQ0); - kunmap_atomic(ring, KM_IRQ1); + put_aio_ring_event(event); + kunmap_atomic(ring); pr_debug("added to ring %p at [%lu]\n", iocb, tail); @@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) unsigned long head; int ret = 0; - ring = kmap_atomic(info->ring_pages[0], KM_USER0); + ring = kmap_atomic(info->ring_pages[0]); dprintk("in aio_read_evt h%lu t%lu m%lu\n", (unsigned long)ring->head, (unsigned long)ring->tail, (unsigned long)ring->nr); @@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) head = ring->head % info->nr; if (head != ring->tail) { - struct io_event *evp = aio_ring_event(info, head, KM_USER1); + struct io_event *evp = aio_ring_event(info, head); *ent = *evp; head = (head + 1) % info->nr; smp_mb(); /* finish reading the event before updatng the head */ ring->head = head; ret = 1; - put_aio_ring_event(evp, KM_USER1); + put_aio_ring_event(evp); } spin_unlock(&info->ring_lock); out: - kunmap_atomic(ring, KM_USER0); + kunmap_atomic(ring); dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, (unsigned long)ring->head, (unsigned long)ring->tail); return ret; diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index c2183f3917cd..e85c04b9f61c 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -357,7 +357,7 @@ static void bio_integrity_generate(struct bio *bio) bix.sector_size = bi->sector_size; bio_for_each_segment(bv, bio, i) { - void *kaddr = kmap_atomic(bv->bv_page, KM_USER0); + void *kaddr = kmap_atomic(bv->bv_page); bix.data_buf = kaddr + bv->bv_offset; bix.data_size = bv->bv_len; bix.prot_buf = prot_buf; @@ -371,7 +371,7 @@ static void bio_integrity_generate(struct bio *bio) total += sectors * bi->tuple_size; BUG_ON(total > bio->bi_integrity->bip_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } } @@ -498,7 +498,7 @@ static int bio_integrity_verify(struct bio *bio) bix.sector_size = bi->sector_size; bio_for_each_segment(bv, bio, i) { - void *kaddr = kmap_atomic(bv->bv_page, KM_USER0); + void *kaddr = kmap_atomic(bv->bv_page); 
bix.data_buf = kaddr + bv->bv_offset; bix.data_size = bv->bv_len; bix.prot_buf = prot_buf; @@ -507,7 +507,7 @@ static int bio_integrity_verify(struct bio *bio) ret = bi->verify_fn(&bix); if (ret) { - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); return ret; } @@ -517,7 +517,7 @@ static int bio_integrity_verify(struct bio *bio) total += sectors * bi->tuple_size; BUG_ON(total > bio->bi_integrity->bip_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } return ret; diff --git a/fs/exec.c b/fs/exec.c index 153dee14fe55..1a07d1c2d78e 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1339,13 +1339,13 @@ int remove_arg_zero(struct linux_binprm *bprm) ret = -EFAULT; goto out; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); for (; offset < PAGE_SIZE && kaddr[offset]; offset++, bprm->p++) ; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); put_arg_page(page); if (offset == PAGE_SIZE) diff --git a/fs/namei.c b/fs/namei.c index 46ea9cc16647..d135da74ce04 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3371,9 +3371,9 @@ retry: if (err) goto fail; - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy(kaddr, symname, len-1); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, page, fsdata); diff --git a/fs/pipe.c b/fs/pipe.c index a932ced92a16..fe0502f9beb2 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -230,7 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe, { if (atomic) { buf->flags |= PIPE_BUF_FLAG_ATOMIC; - return kmap_atomic(buf->page, KM_USER0); + return kmap_atomic(buf->page); } return kmap(buf->page); @@ -251,7 +251,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe, { if (buf->flags & PIPE_BUF_FLAG_ATOMIC) { buf->flags &= ~PIPE_BUF_FLAG_ATOMIC; - kunmap_atomic(map_data, KM_USER0); + kunmap_atomic(map_data); } else kunmap(buf->page); } @@ -565,14 +565,14 @@ redo1: iov_fault_in_pages_read(iov, chars); redo2: if (atomic) - src = kmap_atomic(page, KM_USER0); + src = kmap_atomic(page); else src = kmap(page); error = pipe_iov_copy_from_user(src, iov, chars, atomic); if (atomic) - kunmap_atomic(src, KM_USER0); + kunmap_atomic(src); else kunmap(page); diff --git a/fs/splice.c b/fs/splice.c index 1ec0493266b3..f16402ed915c 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -737,15 +737,12 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, goto out; if (buf->page != page) { - /* - * Careful, ->map() uses KM_USER0! 
- */ char *src = buf->ops->map(pipe, buf, 1); - char *dst = kmap_atomic(page, KM_USER1); + char *dst = kmap_atomic(page); memcpy(dst + offset, src + buf->offset, this_len); flush_dcache_page(page); - kunmap_atomic(dst, KM_USER1); + kunmap_atomic(dst); buf->ops->unmap(pipe, buf, src); } ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len, -- cgit From 7ac687d9e047b3fa335f04e18c7188db6a170334 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:28 +0800 Subject: btrfs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/btrfs/compression.c | 12 ++++++------ fs/btrfs/extent_io.c | 16 ++++++++-------- fs/btrfs/file-item.c | 4 ++-- fs/btrfs/inode.c | 26 +++++++++++++------------- fs/btrfs/lzo.c | 4 ++-- fs/btrfs/scrub.c | 8 ++++---- fs/btrfs/zlib.c | 4 ++-- 7 files changed, 37 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d02c27cd14c7..b805afb37fa8 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -120,10 +120,10 @@ static int check_compressed_csum(struct inode *inode, page = cb->compressed_pages[i]; csum = ~(u32)0; - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE); btrfs_csum_final(csum, (char *)&csum); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (csum != *cb_sum) { printk(KERN_INFO "btrfs csum failed ino %llu " @@ -521,10 +521,10 @@ static noinline int add_ra_bio_pages(struct inode *inode, if (zero_offset) { int zeros; zeros = PAGE_CACHE_SIZE - zero_offset; - userpage = kmap_atomic(page, KM_USER0); + userpage = kmap_atomic(page); memset(userpage + zero_offset, 0, zeros); flush_dcache_page(page); - kunmap_atomic(userpage, KM_USER0); + kunmap_atomic(userpage); } } @@ -993,9 +993,9 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, bytes = min(PAGE_CACHE_SIZE - *pg_offset, PAGE_CACHE_SIZE - buf_offset); bytes = min(bytes, working_bytes); - kaddr = kmap_atomic(page_out, KM_USER0); + kaddr = kmap_atomic(page_out); memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); flush_dcache_page(page_out); *pg_offset += bytes; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a55fbe6252de..2862454bcdb3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2546,10 +2546,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree, if (zero_offset) { iosize = PAGE_CACHE_SIZE - zero_offset; - userpage = kmap_atomic(page, KM_USER0); + userpage = kmap_atomic(page); memset(userpage + zero_offset, 0, iosize); flush_dcache_page(page); - kunmap_atomic(userpage, KM_USER0); + kunmap_atomic(userpage); } } while (cur <= end) { @@ -2558,10 +2558,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct extent_state *cached = NULL; iosize = PAGE_CACHE_SIZE - pg_offset; - userpage = kmap_atomic(page, KM_USER0); + userpage = kmap_atomic(page); memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); - kunmap_atomic(userpage, KM_USER0); + kunmap_atomic(userpage); set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); unlock_extent_cached(tree, cur, cur + iosize - 1, @@ -2607,10 +2607,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree, char *userpage; struct extent_state *cached = NULL; - userpage = kmap_atomic(page, KM_USER0); + userpage = kmap_atomic(page); memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); 
- kunmap_atomic(userpage, KM_USER0); + kunmap_atomic(userpage); set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); @@ -2756,10 +2756,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (page->index == end_index) { char *userpage; - userpage = kmap_atomic(page, KM_USER0); + userpage = kmap_atomic(page); memset(userpage + pg_offset, 0, PAGE_CACHE_SIZE - pg_offset); - kunmap_atomic(userpage, KM_USER0); + kunmap_atomic(userpage); flush_dcache_page(page); } pg_offset = 0; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index c7fb3a4247d3..078b4fd54500 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -447,13 +447,13 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, sums->bytenr = ordered->start; } - data = kmap_atomic(bvec->bv_page, KM_USER0); + data = kmap_atomic(bvec->bv_page); sector_sum->sum = ~(u32)0; sector_sum->sum = btrfs_csum_data(root, data + bvec->bv_offset, sector_sum->sum, bvec->bv_len); - kunmap_atomic(data, KM_USER0); + kunmap_atomic(data); btrfs_csum_final(sector_sum->sum, (char *)§or_sum->sum); sector_sum->bytenr = disk_bytenr; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 892b34785ccc..3a0b5c1f9d31 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -173,9 +173,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, cur_size = min_t(unsigned long, compressed_size, PAGE_CACHE_SIZE); - kaddr = kmap_atomic(cpage, KM_USER0); + kaddr = kmap_atomic(cpage); write_extent_buffer(leaf, kaddr, ptr, cur_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); i++; ptr += cur_size; @@ -187,10 +187,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, page = find_get_page(inode->i_mapping, start >> PAGE_CACHE_SHIFT); btrfs_set_file_extent_compression(leaf, ei, 0); - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); offset = start & (PAGE_CACHE_SIZE - 1); write_extent_buffer(leaf, kaddr + offset, ptr, size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); page_cache_release(page); } btrfs_mark_buffer_dirty(leaf); @@ -422,10 +422,10 @@ again: * sending it down to disk */ if (offset) { - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } will_compress = 1; } @@ -1873,7 +1873,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } else { ret = get_state_private(io_tree, start, &private); } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); if (ret) goto zeroit; @@ -1882,7 +1882,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, if (csum != private) goto zeroit; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); good: return 0; @@ -1894,7 +1894,7 @@ zeroit: (unsigned long long)private); memset(kaddr + offset, 1, end - start + 1); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (private == 0) return 0; return -EIO; @@ -4937,12 +4937,12 @@ static noinline int uncompress_inline(struct btrfs_path *path, ret = btrfs_decompress(compress_type, tmp, page, extent_offset, inline_size, max_size); if (ret) { - char *kaddr = kmap_atomic(page, KM_USER0); + char *kaddr = kmap_atomic(page); unsigned long copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, max_size - extent_offset); memset(kaddr + pg_offset, 0, copy_size); - kunmap_atomic(kaddr, KM_USER0); + 
kunmap_atomic(kaddr); } kfree(tmp); return 0; @@ -5719,11 +5719,11 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) unsigned long flags; local_irq_save(flags); - kaddr = kmap_atomic(page, KM_IRQ0); + kaddr = kmap_atomic(page); csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, csum, bvec->bv_len); btrfs_csum_final(csum, (char *)&csum); - kunmap_atomic(kaddr, KM_IRQ0); + kunmap_atomic(kaddr); local_irq_restore(flags); flush_dcache_page(bvec->bv_page); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index a178f5ebea78..743b86fa4fcb 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -411,9 +411,9 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in, bytes = min_t(unsigned long, destlen, out_len - start_byte); - kaddr = kmap_atomic(dest_page, KM_USER0); + kaddr = kmap_atomic(dest_page); memcpy(kaddr, workspace->buf + start_byte, bytes); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); out: return ret; } diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index abc0fbffa510..390e7102b0ff 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -591,7 +591,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix) u64 flags = sbio->spag[ix].flags; page = sbio->bio->bi_io_vec[ix].bv_page; - buffer = kmap_atomic(page, KM_USER0); + buffer = kmap_atomic(page); if (flags & BTRFS_EXTENT_FLAG_DATA) { ret = scrub_checksum_data(sbio->sdev, sbio->spag + ix, buffer); @@ -603,7 +603,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix) } else { WARN_ON(1); } - kunmap_atomic(buffer, KM_USER0); + kunmap_atomic(buffer); return ret; } @@ -792,7 +792,7 @@ static void scrub_checksum(struct btrfs_work *work) } for (i = 0; i < sbio->count; ++i) { page = sbio->bio->bi_io_vec[i].bv_page; - buffer = kmap_atomic(page, KM_USER0); + buffer = kmap_atomic(page); flags = sbio->spag[i].flags; logical = sbio->logical + i * PAGE_SIZE; ret = 0; @@ -807,7 +807,7 @@ static void scrub_checksum(struct btrfs_work *work) } else { WARN_ON(1); } - kunmap_atomic(buffer, KM_USER0); + kunmap_atomic(buffer); if (ret) { ret = scrub_recheck_error(sbio, i); if (!ret) { diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index faccd47c6c46..92c20654cc55 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -370,9 +370,9 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, PAGE_CACHE_SIZE - buf_offset); bytes = min(bytes, bytes_left); - kaddr = kmap_atomic(dest_page, KM_USER0); + kaddr = kmap_atomic(dest_page); memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); pg_offset += bytes; bytes_left -= bytes; -- cgit From da4aa36d0140ca8ef1e67df3e829b9085d369dca Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:26 +0800 Subject: afs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/afs/fsclient.c | 8 ++++---- fs/afs/mntpt.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 2f213d109c21..b960ff05ea0b 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -365,10 +365,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, _debug("extract data"); if (call->count > 0) { page = call->reply3; - buffer = kmap_atomic(page, KM_USER0); + buffer = kmap_atomic(page); ret = afs_extract_data(call, skb, last, buffer, call->count); - kunmap_atomic(buffer, KM_USER0); + kunmap_atomic(buffer); switch (ret) { case 0: break; case -EAGAIN: return 0; @@ -411,9 +411,9 @@ static int 
afs_deliver_fs_fetch_data(struct afs_call *call, if (call->count < PAGE_SIZE) { _debug("clear"); page = call->reply3; - buffer = kmap_atomic(page, KM_USER0); + buffer = kmap_atomic(page); memset(buffer + call->count, 0, PAGE_SIZE - call->count); - kunmap_atomic(buffer, KM_USER0); + kunmap_atomic(buffer); } _leave(" = 0 [done]"); diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 8f4ce2658b7d..298cf8919ec7 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -200,9 +200,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) if (PageError(page)) goto error; - buf = kmap_atomic(page, KM_USER0); + buf = kmap_atomic(page); memcpy(devname, buf, size); - kunmap_atomic(buf, KM_USER0); + kunmap_atomic(buf); page_cache_release(page); page = NULL; } -- cgit From bf7014b67fd931003e5f12e6742b1fc5f6c0a045 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:29 +0800 Subject: exofs: remove the second argument of k[un]map_atomic() Ack-by: Boaz Harrosh Signed-off-by: Cong Wang --- fs/exofs/dir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index 80405836ba6e..c61e62ac231c 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -597,7 +597,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent) goto fail; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); de = (struct exofs_dir_entry *)kaddr; de->name_len = 1; de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1)); @@ -611,7 +611,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent) de->inode_no = cpu_to_le64(parent->i_ino); memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR)); exofs_set_de_type(de, inode); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); err = exofs_commit_chunk(page, 0, chunk_size); fail: page_cache_release(page); -- cgit From d4a23aee23710dabeaa44c30950b87d33bf104be Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:29 +0800 Subject: ext2: remove the second argument of k[un]map_atomic() Acked-by: Jan Kara Signed-off-by: Cong Wang --- fs/ext2/dir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index d37df352d324..0f4f5c929257 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -645,7 +645,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent) unlock_page(page); goto fail; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr, 0, chunk_size); de = (struct ext2_dir_entry_2 *)kaddr; de->name_len = 1; @@ -660,7 +660,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent) de->inode = cpu_to_le32(parent->i_ino); memcpy (de->name, "..\0", 4); ext2_set_de_type (de, inode); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); err = ext2_commit_chunk(page, 0, chunk_size); fail: page_cache_release(page); -- cgit From 2408f6ef6bf58620f8330b37181d2bdca2e7c7c5 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:30 +0800 Subject: fuse: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/fuse/dev.c | 4 ++-- fs/fuse/file.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 5f3368ab0fa9..7df2b5e8fbe1 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -838,10 +838,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, } } if (page) { - void *mapaddr = kmap_atomic(page, KM_USER0); + void *mapaddr = kmap_atomic(page); void 
*buf = mapaddr + offset; offset += fuse_copy_do(cs, &buf, &count); - kunmap_atomic(mapaddr, KM_USER0); + kunmap_atomic(mapaddr); } else offset += fuse_copy_do(cs, NULL, &count); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 4a199fd93fbd..a841868bf9ce 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1887,11 +1887,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) goto out; - vaddr = kmap_atomic(pages[0], KM_USER0); + vaddr = kmap_atomic(pages[0]); err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); - kunmap_atomic(vaddr, KM_USER0); + kunmap_atomic(vaddr); if (err) goto out; -- cgit From d93492855ff307ce9d699e36d966af3420b80bb3 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:30 +0800 Subject: gfs2: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/gfs2/aops.c | 12 ++++++------ fs/gfs2/lops.c | 8 ++++---- fs/gfs2/quota.c | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 501e5cba09b3..38b7a74a0f91 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -434,12 +434,12 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) if (error) return error; - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); flush_dcache_page(page); brelse(dibh); SetPageUptodate(page); @@ -542,9 +542,9 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state, page = read_cache_page(mapping, index, __gfs2_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); - p = kmap_atomic(page, KM_USER0); + p = kmap_atomic(page); memcpy(buf + copied, p + offset, amt); - kunmap_atomic(p, KM_USER0); + kunmap_atomic(p); mark_page_accessed(page); page_cache_release(page); copied += amt; @@ -788,11 +788,11 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode))); - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy(buf + pos, kaddr + pos, copied); memset(kaddr + pos + copied, 0, len - copied); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (!PageUptodate(page)) SetPageUptodate(page); diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 0301be655b12..df7c6e8d0764 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -553,11 +553,11 @@ static void gfs2_check_magic(struct buffer_head *bh) __be32 *ptr; clear_buffer_escaped(bh); - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); ptr = kaddr + bh_offset(bh); if (*ptr == cpu_to_be32(GFS2_MAGIC)) set_buffer_escaped(bh); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh, @@ -594,10 +594,10 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh, if (buffer_escaped(bd->bd_bh)) { void *kaddr; bh1 = gfs2_log_get_buf(sdp); - kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0); + kaddr = kmap_atomic(bd->bd_bh->b_page); memcpy(bh1->b_data, kaddr + 
bh_offset(bd->bd_bh), bh1->b_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); *(__be32 *)bh1->b_data = 0; clear_buffer_escaped(bd->bd_bh); unlock_buffer(bd->bd_bh); diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index a45b21b03915..c0f8904f0860 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -720,12 +720,12 @@ get_a_page: gfs2_trans_add_bh(ip->i_gl, bh, 0); - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) nbytes = PAGE_CACHE_SIZE - offset; memcpy(kaddr + offset, ptr, nbytes); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); unlock_page(page); page_cache_release(page); -- cgit From 8fb53c46d9c9322fc5a8e53038ceb0d243059c25 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:31 +0800 Subject: jbd: remove the second argument of k[un]map_atomic() Acked-by: Jan Kara Signed-off-by: Cong Wang --- fs/jbd/journal.c | 12 ++++++------ fs/jbd/transaction.c | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 59c09f9541b5..e49e81bb80ef 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -328,7 +328,7 @@ repeat: new_offset = offset_in_page(jh2bh(jh_in)->b_data); } - mapped_data = kmap_atomic(new_page, KM_USER0); + mapped_data = kmap_atomic(new_page); /* * Check for escaping */ @@ -337,7 +337,7 @@ repeat: need_copy_out = 1; do_escape = 1; } - kunmap_atomic(mapped_data, KM_USER0); + kunmap_atomic(mapped_data); /* * Do we need to do a data copy? @@ -354,9 +354,9 @@ repeat: } jh_in->b_frozen_data = tmp; - mapped_data = kmap_atomic(new_page, KM_USER0); + mapped_data = kmap_atomic(new_page); memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size); - kunmap_atomic(mapped_data, KM_USER0); + kunmap_atomic(mapped_data); new_page = virt_to_page(tmp); new_offset = offset_in_page(tmp); @@ -368,9 +368,9 @@ repeat: * copying, we can finally do so. 
*/ if (do_escape) { - mapped_data = kmap_atomic(new_page, KM_USER0); + mapped_data = kmap_atomic(new_page); *((unsigned int *)(mapped_data + new_offset)) = 0; - kunmap_atomic(mapped_data, KM_USER0); + kunmap_atomic(mapped_data); } set_bh_page(new_bh, new_page, new_offset); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 7fce94b04bc3..b2a7e5244e39 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -718,9 +718,9 @@ done: "Possible IO failure.\n"); page = jh2bh(jh)->b_page; offset = offset_in_page(jh2bh(jh)->b_data); - source = kmap_atomic(page, KM_USER0); + source = kmap_atomic(page); memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); - kunmap_atomic(source, KM_USER0); + kunmap_atomic(source); } jbd_unlock_bh_state(bh); -- cgit From 303a8f2afc7ba01083b50b7fceac2a412f28da4e Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:31 +0800 Subject: jbd2: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/jbd2/commit.c | 4 ++-- fs/jbd2/journal.c | 12 ++++++------ fs/jbd2/transaction.c | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 5069b8475150..c067a8cae63b 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -286,10 +286,10 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh) char *addr; __u32 checksum; - addr = kmap_atomic(page, KM_USER0); + addr = kmap_atomic(page); checksum = crc32_be(crc32_sum, (void *)(addr + offset_in_page(bh->b_data)), bh->b_size); - kunmap_atomic(addr, KM_USER0); + kunmap_atomic(addr); return checksum; } diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c0a5f9f1b127..5ff8940b8f02 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -345,7 +345,7 @@ repeat: new_offset = offset_in_page(jh2bh(jh_in)->b_data); } - mapped_data = kmap_atomic(new_page, KM_USER0); + mapped_data = kmap_atomic(new_page); /* * Fire data frozen trigger if data already wasn't frozen. Do this * before checking for escaping, as the trigger may modify the magic @@ -364,7 +364,7 @@ repeat: need_copy_out = 1; do_escape = 1; } - kunmap_atomic(mapped_data, KM_USER0); + kunmap_atomic(mapped_data); /* * Do we need to do a data copy? @@ -385,9 +385,9 @@ repeat: } jh_in->b_frozen_data = tmp; - mapped_data = kmap_atomic(new_page, KM_USER0); + mapped_data = kmap_atomic(new_page); memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size); - kunmap_atomic(mapped_data, KM_USER0); + kunmap_atomic(mapped_data); new_page = virt_to_page(tmp); new_offset = offset_in_page(tmp); @@ -406,9 +406,9 @@ repeat: * copying, we can finally do so. 
*/ if (do_escape) { - mapped_data = kmap_atomic(new_page, KM_USER0); + mapped_data = kmap_atomic(new_page); *((unsigned int *)(mapped_data + new_offset)) = 0; - kunmap_atomic(mapped_data, KM_USER0); + kunmap_atomic(mapped_data); } set_bh_page(new_bh, new_page, new_offset); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 35ae096bed5d..e5aba56e1fd5 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -783,12 +783,12 @@ done: "Possible IO failure.\n"); page = jh2bh(jh)->b_page; offset = offset_in_page(jh2bh(jh)->b_data); - source = kmap_atomic(page, KM_USER0); + source = kmap_atomic(page); /* Fire data frozen trigger just before we copy the data */ jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers); memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); - kunmap_atomic(source, KM_USER0); + kunmap_atomic(source); /* * Now that the frozen data is saved off, we need to store -- cgit From 50bc9b65b6e32f146c3c9812a9d62fe7ff518b5a Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:31 +0800 Subject: logfs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/logfs/dir.c | 18 +++++++++--------- fs/logfs/readwrite.c | 38 +++++++++++++++++++------------------- fs/logfs/segment.c | 4 ++-- 3 files changed, 30 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 3de7a32cadbe..1b6e21dda286 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -177,17 +177,17 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry) (filler_t *)logfs_readpage, NULL); if (IS_ERR(page)) return page; - dd = kmap_atomic(page, KM_USER0); + dd = kmap_atomic(page); BUG_ON(dd->namelen == 0); if (name->len != be16_to_cpu(dd->namelen) || memcmp(name->name, dd->name, name->len)) { - kunmap_atomic(dd, KM_USER0); + kunmap_atomic(dd); page_cache_release(page); continue; } - kunmap_atomic(dd, KM_USER0); + kunmap_atomic(dd); return page; } return NULL; @@ -365,9 +365,9 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, return NULL; } index = page->index; - dd = kmap_atomic(page, KM_USER0); + dd = kmap_atomic(page); ino = be64_to_cpu(dd->ino); - kunmap_atomic(dd, KM_USER0); + kunmap_atomic(dd); page_cache_release(page); inode = logfs_iget(dir->i_sb, ino); @@ -402,12 +402,12 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry, if (!page) return -ENOMEM; - dd = kmap_atomic(page, KM_USER0); + dd = kmap_atomic(page); memset(dd, 0, sizeof(*dd)); dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); logfs_set_name(dd, &dentry->d_name); - kunmap_atomic(dd, KM_USER0); + kunmap_atomic(dd); err = logfs_write_buf(dir, page, WF_LOCK); unlock_page(page); @@ -579,9 +579,9 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry, if (IS_ERR(page)) return PTR_ERR(page); *pos = page->index; - map = kmap_atomic(page, KM_USER0); + map = kmap_atomic(page); memcpy(dd, map, sizeof(*dd)); - kunmap_atomic(map, KM_USER0); + kunmap_atomic(map); page_cache_release(page); return 0; } diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index 4153e65b0148..e3ab5e5a904c 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c @@ -517,9 +517,9 @@ static int indirect_write_alias(struct super_block *sb, ino = page->mapping->host->i_ino; logfs_unpack_index(page->index, &bix, &level); - child = kmap_atomic(page, KM_USER0); + child = kmap_atomic(page); val = child[pos]; - kunmap_atomic(child, KM_USER0); + kunmap_atomic(child); err = 
write_one_alias(sb, ino, bix, level, pos, val); if (err) return err; @@ -673,9 +673,9 @@ static void alloc_indirect_block(struct inode *inode, struct page *page, alloc_data_block(inode, page); block = logfs_block(page); - array = kmap_atomic(page, KM_USER0); + array = kmap_atomic(page); initialize_block_counters(page, block, array, page_is_empty); - kunmap_atomic(array, KM_USER0); + kunmap_atomic(array); } static void block_set_pointer(struct page *page, int index, u64 ptr) @@ -685,10 +685,10 @@ static void block_set_pointer(struct page *page, int index, u64 ptr) u64 oldptr; BUG_ON(!block); - array = kmap_atomic(page, KM_USER0); + array = kmap_atomic(page); oldptr = be64_to_cpu(array[index]); array[index] = cpu_to_be64(ptr); - kunmap_atomic(array, KM_USER0); + kunmap_atomic(array); SetPageUptodate(page); block->full += !!(ptr & LOGFS_FULLY_POPULATED) @@ -701,9 +701,9 @@ static u64 block_get_pointer(struct page *page, int index) __be64 *block; u64 ptr; - block = kmap_atomic(page, KM_USER0); + block = kmap_atomic(page); ptr = be64_to_cpu(block[index]); - kunmap_atomic(block, KM_USER0); + kunmap_atomic(block); return ptr; } @@ -850,7 +850,7 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data) } slot = get_bits(bix, SUBLEVEL(level)); - rblock = kmap_atomic(page, KM_USER0); + rblock = kmap_atomic(page); while (slot < LOGFS_BLOCK_FACTOR) { if (data && (rblock[slot] != 0)) break; @@ -861,12 +861,12 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data) bix &= ~(increment - 1); } if (slot >= LOGFS_BLOCK_FACTOR) { - kunmap_atomic(rblock, KM_USER0); + kunmap_atomic(rblock); logfs_put_read_page(page); return bix; } bofs = be64_to_cpu(rblock[slot]); - kunmap_atomic(rblock, KM_USER0); + kunmap_atomic(rblock); logfs_put_read_page(page); if (!bofs) { BUG_ON(data); @@ -1961,9 +1961,9 @@ int logfs_read_inode(struct inode *inode) if (IS_ERR(page)) return PTR_ERR(page); - di = kmap_atomic(page, KM_USER0); + di = kmap_atomic(page); logfs_disk_to_inode(di, inode); - kunmap_atomic(di, KM_USER0); + kunmap_atomic(di); move_page_to_inode(inode, page); page_cache_release(page); return 0; @@ -1982,9 +1982,9 @@ static struct page *inode_to_page(struct inode *inode) if (!page) return NULL; - di = kmap_atomic(page, KM_USER0); + di = kmap_atomic(page); logfs_inode_to_disk(inode, di); - kunmap_atomic(di, KM_USER0); + kunmap_atomic(di); move_inode_to_page(page, inode); return page; } @@ -2041,13 +2041,13 @@ static void logfs_mod_segment_entry(struct super_block *sb, u32 segno, if (write) alloc_indirect_block(inode, page, 0); - se = kmap_atomic(page, KM_USER0); + se = kmap_atomic(page); change_se(se + child_no, arg); if (write) { logfs_set_alias(sb, logfs_block(page), child_no); BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize); } - kunmap_atomic(se, KM_USER0); + kunmap_atomic(se); logfs_put_write_page(page); } @@ -2245,10 +2245,10 @@ int logfs_inode_write(struct inode *inode, const void *buf, size_t count, if (!page) return -ENOMEM; - pagebuf = kmap_atomic(page, KM_USER0); + pagebuf = kmap_atomic(page); memcpy(pagebuf, buf, count); flush_dcache_page(page); - kunmap_atomic(pagebuf, KM_USER0); + kunmap_atomic(pagebuf); if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE) i_size_write(inode, pos + LOGFS_BLOCKSIZE); diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index ab798ed1cc88..e28d090c98d6 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c @@ -543,9 +543,9 @@ void move_page_to_btree(struct page *page) BUG_ON(!item); /* mempool empty */ memset(item, 0, 
sizeof(*item)); - child = kmap_atomic(page, KM_USER0); + child = kmap_atomic(page); item->val = child[pos]; - kunmap_atomic(child, KM_USER0); + kunmap_atomic(child); item->child_no = pos; list_add(&item->list, &block->item_list); } -- cgit From 27a6d5c742ceff68b09396bb99cd6344afa85330 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:32 +0800 Subject: minix: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/minix/dir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/minix/dir.c b/fs/minix/dir.c index 085a9262c692..685b2d981b87 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -335,7 +335,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir) goto fail; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr, 0, PAGE_CACHE_SIZE); if (sbi->s_version == MINIX_V3) { @@ -355,7 +355,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir) de->inode = dir->i_ino; strcpy(de->name, ".."); } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); fail: -- cgit From 2b86ce2db3349f5c2af81769c2596ee0d5a63a47 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:33 +0800 Subject: nfs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/nfs/dir.c | 8 ++++---- fs/nfs/nfs4proc.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fd9a872fada0..32aa6917265a 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -260,10 +260,10 @@ void nfs_readdir_clear_array(struct page *page) struct nfs_cache_array *array; int i; - array = kmap_atomic(page, KM_USER0); + array = kmap_atomic(page); for (i = 0; i < array->size; i++) kfree(array->array[i].string.name); - kunmap_atomic(array, KM_USER0); + kunmap_atomic(array); } /* @@ -1870,11 +1870,11 @@ static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *sym if (!page) return -ENOMEM; - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy(kaddr, symname, pathlen); if (pathlen < PAGE_SIZE) memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr); if (error != 0) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ec9f6ef6c5dd..caf92d05c3a9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -193,7 +193,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent * when talking to the server, we always send cookie 0 * instead of 1 or 2. 
*/ - start = p = kmap_atomic(*readdir->pages, KM_USER0); + start = p = kmap_atomic(*readdir->pages); if (cookie == 0) { *p++ = xdr_one; /* next */ @@ -221,7 +221,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent readdir->pgbase = (char *)p - (char *)start; readdir->count -= readdir->pgbase; - kunmap_atomic(start, KM_USER0); + kunmap_atomic(start); } static int nfs4_wait_clnt_recover(struct nfs_client *clp) -- cgit From 7b9c0976ac5e549feb1d4731bc76b9acf9ac47a8 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:33 +0800 Subject: nilfs2: remove the second argument of k[un]map_atomic() Acked-by: Ryusuke Konishi Signed-off-by: Cong Wang --- fs/nilfs2/cpfile.c | 94 ++++++++++++++++++++++++++-------------------------- fs/nilfs2/dat.c | 38 ++++++++++----------- fs/nilfs2/dir.c | 4 +-- fs/nilfs2/ifile.c | 4 +-- fs/nilfs2/mdt.c | 4 +-- fs/nilfs2/page.c | 8 ++--- fs/nilfs2/recovery.c | 4 +-- fs/nilfs2/segbuf.c | 4 +-- fs/nilfs2/sufile.c | 68 ++++++++++++++++++------------------- 9 files changed, 114 insertions(+), 114 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index c9b342c8b503..dab5c4c6dfaf 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -218,11 +218,11 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile, kaddr, 1); mark_buffer_dirty(cp_bh); - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_ncheckpoints, 1); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); } @@ -313,7 +313,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, continue; } - kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); + kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint( cpfile, cno, cp_bh, kaddr); nicps = 0; @@ -334,7 +334,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, cpfile, cp_bh, kaddr, nicps); if (count == 0) { /* make hole */ - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(cp_bh); ret = nilfs_cpfile_delete_checkpoint_block( @@ -349,18 +349,18 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, } } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(cp_bh); } if (tnicps > 0) { - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } brelse(header_bh); @@ -408,7 +408,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, continue; /* skip hole */ } - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { if (!nilfs_checkpoint_invalid(cp)) { @@ -418,7 +418,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, n++; } } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(bh); } @@ -451,10 +451,10 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, ret = nilfs_cpfile_get_header_block(cpfile, &bh); if (ret < 0) goto out; - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); header = 
nilfs_cpfile_block_get_header(cpfile, bh, kaddr); curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(bh); if (curr == 0) { ret = 0; @@ -472,7 +472,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, ret = 0; /* No snapshots (started from a hole block) */ goto out; } - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); while (n < nci) { cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); curr = ~(__u64)0; /* Terminator */ @@ -488,7 +488,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next); if (curr_blkoff != next_blkoff) { - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(bh); ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0, &bh); @@ -496,12 +496,12 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, WARN_ON(ret == -ENOENT); goto out; } - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); } curr = next; curr_blkoff = next_blkoff; } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(bh); *cnop = curr; ret = n; @@ -592,24 +592,24 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) goto out_sem; - kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); + kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); if (nilfs_checkpoint_invalid(cp)) { ret = -ENOENT; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); goto out_cp; } if (nilfs_checkpoint_snapshot(cp)) { ret = 0; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); goto out_cp; } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) goto out_cp; - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); list = &header->ch_snapshot_list; curr_bh = header_bh; @@ -621,13 +621,13 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev); curr = prev; if (curr_blkoff != prev_blkoff) { - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(curr_bh); ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &curr_bh); if (ret < 0) goto out_header; - kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); + kaddr = kmap_atomic(curr_bh->b_page); } curr_blkoff = prev_blkoff; cp = nilfs_cpfile_block_get_checkpoint( @@ -635,7 +635,7 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) list = &cp->cp_snapshot_list; prev = le64_to_cpu(list->ssl_prev); } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (prev != 0) { ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, @@ -647,29 +647,29 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) get_bh(prev_bh); } - kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); + kaddr = kmap_atomic(curr_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, curr, curr_bh, kaddr); list->ssl_prev = cpu_to_le64(cno); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); - kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); + kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); cp->cp_snapshot_list.ssl_next = 
cpu_to_le64(curr); cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev); nilfs_checkpoint_set_snapshot(cp); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); - kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); + kaddr = kmap_atomic(prev_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, prev, prev_bh, kaddr); list->ssl_next = cpu_to_le64(cno); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_nsnapshots, 1); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(prev_bh); mark_buffer_dirty(curr_bh); @@ -710,23 +710,23 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) goto out_sem; - kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); + kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); if (nilfs_checkpoint_invalid(cp)) { ret = -ENOENT; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); goto out_cp; } if (!nilfs_checkpoint_snapshot(cp)) { ret = 0; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); goto out_cp; } list = &cp->cp_snapshot_list; next = le64_to_cpu(list->ssl_next); prev = le64_to_cpu(list->ssl_prev); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) @@ -750,29 +750,29 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) get_bh(prev_bh); } - kaddr = kmap_atomic(next_bh->b_page, KM_USER0); + kaddr = kmap_atomic(next_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, next, next_bh, kaddr); list->ssl_prev = cpu_to_le64(prev); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); - kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); + kaddr = kmap_atomic(prev_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, prev, prev_bh, kaddr); list->ssl_next = cpu_to_le64(next); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); - kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); + kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); cp->cp_snapshot_list.ssl_next = cpu_to_le64(0); cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0); nilfs_checkpoint_clear_snapshot(cp); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_nsnapshots, -1); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(next_bh); mark_buffer_dirty(prev_bh); @@ -829,13 +829,13 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); if (ret < 0) goto out; - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); if (nilfs_checkpoint_invalid(cp)) ret = -ENOENT; else ret = nilfs_checkpoint_snapshot(cp); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(bh); out: @@ -912,12 +912,12 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) ret = nilfs_cpfile_get_header_block(cpfile, &bh); if (ret < 0) goto out_sem; - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = 
kmap_atomic(bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); cpstat->cs_cno = nilfs_mdt_cno(cpfile); cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints); cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(bh); out_sem: diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index fcc2f869af16..b5c13f3576b9 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req) struct nilfs_dat_entry *entry; void *kaddr; - kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MAX); entry->de_blocknr = cpu_to_le64(0); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); nilfs_palloc_commit_alloc_entry(dat, req); nilfs_dat_commit_entry(dat, req); @@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat, struct nilfs_dat_entry *entry; void *kaddr; - kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MIN); entry->de_blocknr = cpu_to_le64(0); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); nilfs_palloc_commit_free_entry(dat, req); @@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, struct nilfs_dat_entry *entry; void *kaddr; - kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); entry->de_blocknr = cpu_to_le64(blocknr); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); } @@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) return ret; } - kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); start = le64_to_cpu(entry->de_start); blocknr = le64_to_cpu(entry->de_blocknr); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (blocknr == 0) { ret = nilfs_palloc_prepare_free_entry(dat, req); @@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req, sector_t blocknr; void *kaddr; - kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); end = start = le64_to_cpu(entry->de_start); @@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req, } entry->de_end = cpu_to_le64(end); blocknr = le64_to_cpu(entry->de_blocknr); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (blocknr == 0) nilfs_dat_commit_free(dat, req); @@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req) sector_t blocknr; void *kaddr; - kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = 
nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); start = le64_to_cpu(entry->de_start); blocknr = le64_to_cpu(entry->de_blocknr); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (start == nilfs_mdt_cno(dat) && blocknr == 0) nilfs_palloc_abort_free_entry(dat, req); @@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) } } - kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__, (unsigned long long)vblocknr, (unsigned long long)le64_to_cpu(entry->de_start), (unsigned long long)le64_to_cpu(entry->de_end)); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(entry_bh); return -EINVAL; } WARN_ON(blocknr == 0); entry->de_blocknr = cpu_to_le64(blocknr); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(entry_bh); nilfs_mdt_mark_dirty(dat); @@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) } } - kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); blocknr = le64_to_cpu(entry->de_blocknr); if (blocknr == 0) { @@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) *blocknrp = blocknr; out: - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(entry_bh); return ret; } @@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, 0, &entry_bh); if (ret < 0) return ret; - kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(entry_bh->b_page); /* last virtual block number in this block */ first = vinfo->vi_vblocknr; do_div(first, entries_per_block); @@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, vinfo->vi_end = le64_to_cpu(entry->de_end); vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(entry_bh); } diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index ca35b3a46d17..df1a7fb238d1 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) unlock_page(page); goto fail; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr, 0, chunk_size); de = (struct nilfs_dir_entry *)kaddr; de->name_len = 1; @@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) de->inode = cpu_to_le64(parent->i_ino); memcpy(de->name, "..\0", 4); nilfs_set_de_type(de, inode); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); nilfs_commit_chunk(page, mapping, 0, chunk_size); fail: page_cache_release(page); diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index 684d76300a80..5a48df79d674 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) return ret; } - kaddr = kmap_atomic(req.pr_entry_bh->b_page, KM_USER0); + kaddr = kmap_atomic(req.pr_entry_bh->b_page); raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr, req.pr_entry_bh, kaddr); raw_inode->i_flags = 0; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(req.pr_entry_bh); brelse(req.pr_entry_bh); diff --git a/fs/nilfs2/mdt.c 
b/fs/nilfs2/mdt.c index 800e8d78a83b..f9897d09c693 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block, set_buffer_mapped(bh); - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits); if (init_block) init_block(inode, bh, kaddr); flush_dcache_page(bh->b_page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); set_buffer_uptodate(bh); mark_buffer_dirty(bh); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 65221a04c6f0..3e7b2a0dc0c8 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) struct page *spage = sbh->b_page, *dpage = dbh->b_page; struct buffer_head *bh; - kaddr0 = kmap_atomic(spage, KM_USER0); - kaddr1 = kmap_atomic(dpage, KM_USER1); + kaddr0 = kmap_atomic(spage); + kaddr1 = kmap_atomic(dpage); memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size); - kunmap_atomic(kaddr1, KM_USER1); - kunmap_atomic(kaddr0, KM_USER0); + kunmap_atomic(kaddr1); + kunmap_atomic(kaddr0); dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS; dbh->b_blocknr = sbh->b_blocknr; diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index a604ac0331b2..f1626f5011c5 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs, if (unlikely(!bh_org)) return -EIO; - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(bh_org); return 0; } diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 850a7c0228fb..dc9a913784ab 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf, crc = crc32_le(crc, bh->b_data, bh->b_size); } list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } raw_sum->ss_datasum = cpu_to_le32(crc); } diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 0a0aba617d8a..c5b7653a4391 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, struct nilfs_sufile_header *header; void *kaddr; - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); le64_add_cpu(&header->sh_ncleansegs, ncleanadd); le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); } @@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); ncleansegs = le64_to_cpu(header->sh_ncleansegs); last_alloc = le64_to_cpu(header->sh_last_alloc); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); nsegments = nilfs_sufile_get_nsegments(sufile); maxsegnum = sui->allocmax; @@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode 
*sufile, __u64 *segnump) &su_bh); if (ret < 0) goto out_header; - kaddr = kmap_atomic(su_bh->b_page, KM_USER0); + kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); @@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) continue; /* found a clean segment */ nilfs_segment_usage_set_dirty(su); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); le64_add_cpu(&header->sh_ncleansegs, -1); le64_add_cpu(&header->sh_ndirtysegs, 1); header->sh_last_alloc = cpu_to_le64(segnum); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); sui->ncleansegs--; mark_buffer_dirty(header_bh); @@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) goto out_header; } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(su_bh); } @@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, struct nilfs_segment_usage *su; void *kaddr; - kaddr = kmap_atomic(su_bh->b_page, KM_USER0); + kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (unlikely(!nilfs_segment_usage_clean(su))) { printk(KERN_WARNING "%s: segment %llu must be clean\n", __func__, (unsigned long long)segnum); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); return; } nilfs_segment_usage_set_dirty(su); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); nilfs_sufile_mod_counter(header_bh, -1, 1); NILFS_SUI(sufile)->ncleansegs--; @@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, void *kaddr; int clean, dirty; - kaddr = kmap_atomic(su_bh->b_page, KM_USER0); + kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) && su->su_nblocks == cpu_to_le32(0)) { - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); return; } clean = nilfs_segment_usage_clean(su); @@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, su->su_lastmod = cpu_to_le64(0); su->su_nblocks = cpu_to_le32(0); su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1); NILFS_SUI(sufile)->ncleansegs -= clean; @@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, void *kaddr; int sudirty; - kaddr = kmap_atomic(su_bh->b_page, KM_USER0); + kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (nilfs_segment_usage_clean(su)) { printk(KERN_WARNING "%s: segment %llu is already clean\n", __func__, (unsigned long long)segnum); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); return; } WARN_ON(nilfs_segment_usage_error(su)); @@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, sudirty = nilfs_segment_usage_dirty(su); nilfs_segment_usage_set_clean(su); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(su_bh); nilfs_sufile_mod_counter(header_bh, 1, sudirty ? 
(u64)-1 : 0); @@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, if (ret < 0) goto out_sem; - kaddr = kmap_atomic(bh->b_page, KM_USER0); + kaddr = kmap_atomic(bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); WARN_ON(nilfs_segment_usage_error(su)); if (modtime) su->su_lastmod = cpu_to_le64(modtime); su->su_nblocks = cpu_to_le32(nblocks); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(sufile); @@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) if (ret < 0) goto out_sem; - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile); sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs); @@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) spin_lock(&nilfs->ns_last_segment_lock); sustat->ss_prot_seq = nilfs->ns_prot_seq; spin_unlock(&nilfs->ns_last_segment_lock); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(header_bh); out_sem: @@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, void *kaddr; int suclean; - kaddr = kmap_atomic(su_bh->b_page, KM_USER0); + kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (nilfs_segment_usage_error(su)) { - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); return; } suclean = nilfs_segment_usage_clean(su); nilfs_segment_usage_set_error(su); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (suclean) { nilfs_sufile_mod_counter(header_bh, -1, 0); @@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, /* hole */ continue; } - kaddr = kmap_atomic(su_bh->b_page, KM_USER0); + kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); su2 = su; @@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) || nilfs_segment_is_active(nilfs, segnum + j)) { ret = -EBUSY; - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(su_bh); goto out_header; } @@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, nc++; } } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (nc > 0) { mark_buffer_dirty(su_bh); ncleaned += nc; @@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) sui->ncleansegs -= nsegs - newnsegs; } - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(sufile); @@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, continue; } - kaddr = kmap_atomic(su_bh->b_page, KM_USER0); + kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); for (j = 0; j < n; @@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, si->sui_flags |= (1UL << NILFS_SEGMENT_USAGE_ACTIVE); } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(su_bh); } ret = nsegs; @@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block 
*sb, size_t susize, goto failed; sui = NILFS_SUI(sufile); - kaddr = kmap_atomic(header_bh->b_page, KM_USER0); + kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); brelse(header_bh); sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1; -- cgit From a3ac1414eb601136ba3475e841fe76ccbab5e88d Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:34 +0800 Subject: ntfs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/ntfs/aops.c | 20 ++++++++++---------- fs/ntfs/attrib.c | 20 ++++++++++---------- fs/ntfs/file.c | 16 ++++++++-------- fs/ntfs/super.c | 8 ++++---- 4 files changed, 32 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 0b1e885b8cf8..fa9c05f97af4 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -94,11 +94,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) if (file_ofs < init_size) ofs = init_size - file_ofs; local_irq_save(flags); - kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); + kaddr = kmap_atomic(page); memset(kaddr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); + kunmap_atomic(kaddr); local_irq_restore(flags); } } else { @@ -147,11 +147,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) /* Should have been verified before we got here... */ BUG_ON(!recs); local_irq_save(flags); - kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); + kaddr = kmap_atomic(page); for (i = 0; i < recs; i++) post_read_mst_fixup((NTFS_RECORD*)(kaddr + i * rec_size), rec_size); - kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); + kunmap_atomic(kaddr); local_irq_restore(flags); flush_dcache_page(page); if (likely(page_uptodate && !PageError(page))) @@ -504,7 +504,7 @@ retry_readpage: /* Race with shrinking truncate. */ attr_len = i_size; } - addr = kmap_atomic(page, KM_USER0); + addr = kmap_atomic(page); /* Copy the data to the page. */ memcpy(addr, (u8*)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset), @@ -512,7 +512,7 @@ retry_readpage: /* Zero the remainder of the page. */ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); flush_dcache_page(page); - kunmap_atomic(addr, KM_USER0); + kunmap_atomic(addr); put_unm_err_out: ntfs_attr_put_search_ctx(ctx); unm_err_out: @@ -746,14 +746,14 @@ lock_retry_remap: unsigned long *bpos, *bend; /* Check if the buffer is zero. */ - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); bpos = (unsigned long *)(kaddr + bh_offset(bh)); bend = (unsigned long *)((u8*)bpos + blocksize); do { if (unlikely(*bpos)) break; } while (likely(++bpos < bend)); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (bpos == bend) { /* * Buffer is zero and sparse, no need to write @@ -1495,14 +1495,14 @@ retry_writepage: /* Shrinking cannot fail. */ BUG_ON(err); } - addr = kmap_atomic(page, KM_USER0); + addr = kmap_atomic(page); /* Copy the data from the page to the mft record. */ memcpy((u8*)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset), addr, attr_len); /* Zero out of bounds area in the page cache page. */ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); - kunmap_atomic(addr, KM_USER0); + kunmap_atomic(addr); flush_dcache_page(page); flush_dcache_mft_record_page(ctx->ntfs_ino); /* We are done with the page. 
*/ diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index e0281992ddc3..a27e3fecefaf 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -1656,12 +1656,12 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) attr_size = le32_to_cpu(a->data.resident.value_length); BUG_ON(attr_size != data_size); if (page && !PageUptodate(page)) { - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy(kaddr, (u8*)a + le16_to_cpu(a->data.resident.value_offset), attr_size); memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); flush_dcache_page(page); SetPageUptodate(page); } @@ -1806,9 +1806,9 @@ undo_err_out: sizeof(a->data.resident.reserved)); /* Copy the data from the page back to the attribute value. */ if (page) { - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy((u8*)a + mp_ofs, kaddr, attr_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } /* Setup the allocated size in the ntfs inode in case it changed. */ write_lock_irqsave(&ni->size_lock, flags); @@ -2540,10 +2540,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) size = PAGE_CACHE_SIZE; if (idx == end) size = end_ofs; - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr + start_ofs, val, size - start_ofs); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); set_page_dirty(page); page_cache_release(page); balance_dirty_pages_ratelimited(mapping); @@ -2561,10 +2561,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) "page (index 0x%lx).", idx); return -ENOMEM; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr, val, PAGE_CACHE_SIZE); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); /* * If the page has buffers, mark them uptodate since buffer * state and not page state is definitive in 2.6 kernels. @@ -2598,10 +2598,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) "(error, index 0x%lx).", idx); return PTR_ERR(page); } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr, val, end_ofs); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); set_page_dirty(page); page_cache_release(page); balance_dirty_pages_ratelimited(mapping); diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index c587e2d27183..8639169221c7 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -704,7 +704,7 @@ map_buffer_cached: u8 *kaddr; unsigned pofs; - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); if (bh_pos < pos) { pofs = bh_pos & ~PAGE_CACHE_MASK; memset(kaddr + pofs, 0, pos - bh_pos); @@ -713,7 +713,7 @@ map_buffer_cached: pofs = end & ~PAGE_CACHE_MASK; memset(kaddr + pofs, 0, bh_end - end); } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); flush_dcache_page(page); } continue; @@ -1287,9 +1287,9 @@ static inline size_t ntfs_copy_from_user(struct page **pages, len = PAGE_CACHE_SIZE - ofs; if (len > bytes) len = bytes; - addr = kmap_atomic(*pages, KM_USER0); + addr = kmap_atomic(*pages); left = __copy_from_user_inatomic(addr + ofs, buf, len); - kunmap_atomic(addr, KM_USER0); + kunmap_atomic(addr); if (unlikely(left)) { /* Do it the slow way. 
*/ addr = kmap(*pages); @@ -1401,10 +1401,10 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages, len = PAGE_CACHE_SIZE - ofs; if (len > bytes) len = bytes; - addr = kmap_atomic(*pages, KM_USER0); + addr = kmap_atomic(*pages); copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, *iov, *iov_ofs, len); - kunmap_atomic(addr, KM_USER0); + kunmap_atomic(addr); if (unlikely(copied != len)) { /* Do it the slow way. */ addr = kmap(*pages); @@ -1691,7 +1691,7 @@ static int ntfs_commit_pages_after_write(struct page **pages, BUG_ON(end > le32_to_cpu(a->length) - le16_to_cpu(a->data.resident.value_offset)); kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset); - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); /* Copy the received data from the page to the mft record. */ memcpy(kattr + pos, kaddr + pos, bytes); /* Update the attribute length if necessary. */ @@ -1713,7 +1713,7 @@ static int ntfs_commit_pages_after_write(struct page **pages, flush_dcache_page(page); SetPageUptodate(page); } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); /* Update initialized_size/i_size if necessary. */ read_lock_irqsave(&ni->size_lock, flags); initialized_size = ni->initialized_size; diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index f907611cca73..28d4e6ab6634 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -2473,7 +2473,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) nr_free -= PAGE_CACHE_SIZE * 8; continue; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); /* * Subtract the number of set bits. If this * is the last page and it is partial we don't really care as @@ -2483,7 +2483,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) */ nr_free -= bitmap_weight(kaddr, PAGE_CACHE_SIZE * BITS_PER_BYTE); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); page_cache_release(page); } ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1); @@ -2544,7 +2544,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, nr_free -= PAGE_CACHE_SIZE * 8; continue; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); /* * Subtract the number of set bits. If this * is the last page and it is partial we don't really care as @@ -2554,7 +2554,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, */ nr_free -= bitmap_weight(kaddr, PAGE_CACHE_SIZE * BITS_PER_BYTE); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); page_cache_release(page); } ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.", -- cgit From c4bc8dcbbe7a7876d76e3f3e129a2ccec46d7cdb Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:34 +0800 Subject: ocfs2: remove the second argument of k[un]map_atomic() Acked-by: Joel Becker Signed-off-by: Cong Wang --- fs/ocfs2/aops.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 78b68af3b0e3..657743254eb9 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -102,7 +102,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, * copy, the data is still good. 
*/ if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) { - kaddr = kmap_atomic(bh_result->b_page, KM_USER0); + kaddr = kmap_atomic(bh_result->b_page); if (!kaddr) { mlog(ML_ERROR, "couldn't kmap!\n"); goto bail; @@ -110,7 +110,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, memcpy(kaddr + (bh_result->b_size * iblock), buffer_cache_bh->b_data, bh_result->b_size); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); set_buffer_uptodate(bh_result); } brelse(buffer_cache_bh); @@ -236,13 +236,13 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, return -EROFS; } - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); if (size) memcpy(kaddr, di->id2.i_data.id_data, size); /* Clear the remaining part of the page */ memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); SetPageUptodate(page); @@ -689,7 +689,7 @@ static void ocfs2_clear_page_regions(struct page *page, ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); if (from || to) { if (from > cluster_start) @@ -700,7 +700,7 @@ static void ocfs2_clear_page_regions(struct page *page, memset(kaddr + cluster_start, 0, cluster_end - cluster_start); } - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } /* @@ -1981,9 +1981,9 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, } } - kaddr = kmap_atomic(wc->w_target_page, KM_USER0); + kaddr = kmap_atomic(wc->w_target_page); memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, -- cgit From 883da600b00eb6fa9f8db5687759732b3c6dd357 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:35 +0800 Subject: reiserfs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/reiserfs/stree.c | 4 ++-- fs/reiserfs/tail_conversion.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 313d39d639eb..77df82f9e70a 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1284,12 +1284,12 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, ** -clm */ - data = kmap_atomic(un_bh->b_page, KM_USER0); + data = kmap_atomic(un_bh->b_page); off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); memcpy(data + off, B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih), ret_value); - kunmap_atomic(data, KM_USER0); + kunmap_atomic(data); } /* Perform balancing after all resources have been collected at once. 
*/ do_balance(&s_del_balance, NULL, NULL, M_DELETE); diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index d7f6e51bef2a..8f546bd473b8 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -128,9 +128,9 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, if (up_to_date_bh) { unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); - char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0); + char *kaddr = kmap_atomic(up_to_date_bh->b_page); memset(kaddr + pgoff, 0, blk_size - total_tail); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); } REISERFS_I(inode)->i_first_direct_byte = U32_MAX; -- cgit From 53b55e55893fbce8d78caeb22e17a28d909a2132 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:36 +0800 Subject: squashfs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/squashfs/file.c | 8 ++++---- fs/squashfs/symlink.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 38bb1c640559..8ca62c28fe12 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -464,10 +464,10 @@ static int squashfs_readpage(struct file *file, struct page *page) if (PageUptodate(push_page)) goto skip_page; - pageaddr = kmap_atomic(push_page, KM_USER0); + pageaddr = kmap_atomic(push_page); squashfs_copy_data(pageaddr, buffer, offset, avail); memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); - kunmap_atomic(pageaddr, KM_USER0); + kunmap_atomic(pageaddr); flush_dcache_page(push_page); SetPageUptodate(push_page); skip_page: @@ -484,9 +484,9 @@ skip_page: error_out: SetPageError(page); out: - pageaddr = kmap_atomic(page, KM_USER0); + pageaddr = kmap_atomic(page); memset(pageaddr, 0, PAGE_CACHE_SIZE); - kunmap_atomic(pageaddr, KM_USER0); + kunmap_atomic(pageaddr); flush_dcache_page(page); if (!PageError(page)) SetPageUptodate(page); diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c index 1191817264cc..12806dffb345 100644 --- a/fs/squashfs/symlink.c +++ b/fs/squashfs/symlink.c @@ -90,14 +90,14 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page) goto error_out; } - pageaddr = kmap_atomic(page, KM_USER0); + pageaddr = kmap_atomic(page); copied = squashfs_copy_data(pageaddr + bytes, entry, offset, length - bytes); if (copied == length - bytes) memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); else block = entry->next_index; - kunmap_atomic(pageaddr, KM_USER0); + kunmap_atomic(pageaddr); squashfs_cache_put(entry); } -- cgit From a1c7c13783c9d2d1d67f53c49dc4eaf34811a290 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:36 +0800 Subject: ubifs: remove the second argument of k[un]map_atomic() Signed-off-by: Cong Wang --- fs/ubifs/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index f9c234bf33d3..5c8f6dc1d28b 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1042,10 +1042,10 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." 
*/ - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memset(kaddr + len, 0, PAGE_CACHE_SIZE - len); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); if (i_size > synced_i_size) { err = inode->i_sb->s_op->write_inode(inode, NULL); -- cgit From 7c0fb227529102d38603c0afc76a2c18a7581afa Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 25 Nov 2011 23:14:36 +0800 Subject: udf: remove the second argument of k[un]map_atomic() Acked-by: Jan Kara Signed-off-by: Cong Wang --- fs/udf/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/udf/file.c b/fs/udf/file.c index d567b8448dfc..7f3f7ba3df6e 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -87,10 +87,10 @@ static int udf_adinicb_write_end(struct file *file, char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); - kaddr = kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, kaddr + offset, copied); - kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); return simple_write_end(file, mapping, pos, len, copied, page, fsdata); } -- cgit From 10b9b98e41ba248a899f6175ce96ee91431b6194 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 20 Mar 2012 12:55:09 +0300 Subject: CIFS: Respect negotiated MaxMpxCount Some servers sets this value less than 50 that was hardcoded and we lost the connection if when we exceed this limit. Fix this by respecting this value - not sending more than the server allows. Cc: stable@kernel.org Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 8 ++++---- fs/cifs/cifsglob.h | 10 +++------- fs/cifs/cifssmb.c | 9 +++++++-- fs/cifs/connect.c | 11 ++++------- fs/cifs/dir.c | 6 ++++-- fs/cifs/file.c | 4 ++-- fs/cifs/transport.c | 4 ++-- 7 files changed, 26 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index b1fd382d1952..6ee1cb45ca0d 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -76,7 +76,7 @@ MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 " unsigned int cifs_max_pending = CIFS_MAX_REQ; module_param(cifs_max_pending, int, 0444); MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. " - "Default: 50 Range: 2 to 256"); + "Default: 32767 Range: 2 to 32767."); unsigned short echo_retries = 5; module_param(echo_retries, ushort, 0644); MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and " @@ -1116,9 +1116,9 @@ init_cifs(void) if (cifs_max_pending < 2) { cifs_max_pending = 2; cFYI(1, "cifs_max_pending set to min of 2"); - } else if (cifs_max_pending > 256) { - cifs_max_pending = 256; - cFYI(1, "cifs_max_pending set to max of 256"); + } else if (cifs_max_pending > CIFS_MAX_REQ) { + cifs_max_pending = CIFS_MAX_REQ; + cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ); } rc = cifs_fscache_register(); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 76e7d8b6da17..d47d20aac670 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -55,14 +55,9 @@ /* * MAX_REQ is the maximum number of requests that WE will send - * on one socket concurrently. It also matches the most common - * value of max multiplex returned by servers. We may - * eventually want to use the negotiated value (in case - * future servers can handle more) when we are more confident that - * we will not have problems oveloading the socket with pending - * write data. + * on one socket concurrently. 
*/ -#define CIFS_MAX_REQ 50 +#define CIFS_MAX_REQ 32767 #define RFC1001_NAME_LEN 15 #define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1) @@ -263,6 +258,7 @@ struct TCP_Server_Info { bool session_estab; /* mark when very first sess is established */ u16 dialect; /* dialect index that server chose */ enum securityEnum secType; + bool oplocks:1; /* enable oplocks */ unsigned int maxReq; /* Clients should submit no more */ /* than maxReq distinct unanswered SMBs to the server when using */ /* multiplexed reads or writes */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 8b7794c31591..cd66b76e3282 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -458,7 +458,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) goto neg_err_exit; } server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode); - server->maxReq = le16_to_cpu(rsp->MaxMpxCount); + server->maxReq = min_t(unsigned int, + le16_to_cpu(rsp->MaxMpxCount), + cifs_max_pending); + server->oplocks = server->maxReq > 1 ? enable_oplocks : false; server->maxBuf = le16_to_cpu(rsp->MaxBufSize); server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); /* even though we do not use raw we might as well set this @@ -564,7 +567,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) /* one byte, so no need to convert this or EncryptionKeyLen from little endian */ - server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); + server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount), + cifs_max_pending); + server->oplocks = server->maxReq > 1 ? enable_oplocks : false; /* probably no need to store and check maxvcs */ server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 602f77c304c9..03f71fb40a8a 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -642,14 +642,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) spin_unlock(&GlobalMid_Lock); wake_up_all(&server->response_q); - /* - * Check if we have blocked requests that need to free. Note that - * cifs_max_pending is normally 50, but can be set at module install - * time to as little as two. - */ + /* Check if we have blocked requests that need to free. */ spin_lock(&GlobalMid_Lock); - if (atomic_read(&server->inFlight) >= cifs_max_pending) - atomic_set(&server->inFlight, cifs_max_pending - 1); + if (atomic_read(&server->inFlight) >= server->maxReq) + atomic_set(&server->inFlight, server->maxReq - 1); /* * We do not want to set the max_pending too low or we could end up * with the counter going negative. @@ -1910,6 +1906,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->noautotune = volume_info->noautotune; tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; atomic_set(&tcp_ses->inFlight, 0); + tcp_ses->maxReq = 1; /* enough to send negotiate request */ init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); INIT_LIST_HEAD(&tcp_ses->pending_mid_q); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index bc7e24420ac0..d172c8ed9017 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode, } tcon = tlink_tcon(tlink); - if (enable_oplocks) + if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; if (nd) @@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, { int xid; int rc = 0; /* to get around spurious gcc warning, set to zero here */ - __u32 oplock = enable_oplocks ? 
REQ_OPLOCK : 0; + __u32 oplock; __u16 fileHandle = 0; bool posix_open = false; struct cifs_sb_info *cifs_sb; @@ -518,6 +518,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } pTcon = tlink_tcon(tlink); + oplock = pTcon->ses->server->oplocks ? REQ_OPLOCK : 0; + /* * Don't allow the separator character in a path component. * The VFS will not allow "/", but "\" is allowed by posix. diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8e02dbd88ae1..159fcc56dc2d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -380,7 +380,7 @@ int cifs_open(struct inode *inode, struct file *file) cFYI(1, "inode = 0x%p file flags are 0x%x for %s", inode, file->f_flags, full_path); - if (enable_oplocks) + if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; @@ -505,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, pCifsFile->f_flags, full_path); - if (enable_oplocks) + if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 0cc9584f5889..99a27cfa6cd2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -265,12 +265,12 @@ static int wait_for_free_request(struct TCP_Server_Info *server, spin_lock(&GlobalMid_Lock); while (1) { - if (atomic_read(&server->inFlight) >= cifs_max_pending) { + if (atomic_read(&server->inFlight) >= server->maxReq) { spin_unlock(&GlobalMid_Lock); cifs_num_waiters_inc(server); wait_event(server->request_q, atomic_read(&server->inFlight) - < cifs_max_pending); + < server->maxReq); cifs_num_waiters_dec(server); spin_lock(&GlobalMid_Lock); } else { -- cgit From c4f1b62a4b50a01e8d820717906b674807ef9ca3 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Tue, 20 Mar 2012 12:51:24 -0400 Subject: NFS: ncommit count is being double decremented The decrement is handled by each call to nfs_request_remove_commit_list, no need to do it again in nfs_scan_commit. Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 0de19f413f92..628d9a69d0a2 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -595,12 +595,9 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst) spin_lock(&inode->i_lock); if (nfsi->ncommit > 0) { const int max = INT_MAX; - int pnfs_ret; ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max); - pnfs_ret = pnfs_scan_commit_lists(inode, max - ret); - ret += pnfs_ret; - nfsi->ncommit -= ret; + ret += pnfs_scan_commit_lists(inode, max - ret); } spin_unlock(&inode->i_lock); return ret; -- cgit From 5ae67c4fee869c9b3c87b727a9ea511b6326b834 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 19 Mar 2012 16:17:18 -0400 Subject: NFSv4: It is not safe to dereference lsp->ls_state in release_lockowner It is quite possible for the release_lockowner RPC call to race with the close RPC call, in which case, we cannot dereference lsp->ls_state in order to find the nfs_server. 
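In code terms, the shape of the fix is to capture the nfs_server pointer while lsp->ls_state is still known to be valid and hand it to the release path explicitly, so the asynchronous callback never chases lsp->ls_state->owner->so_server after a racing close may have torn the open state down. The sketch below is illustrative only: struct release_ctx and the helper names are invented for the example, and only the two-argument nfs4_free_lock_state() mirrors the actual change.

#include <linux/slab.h>

/* Invented container for the sketch; the real patch extends
 * struct nfs_release_lockowner_data in the same spirit. */
struct release_ctx {
	struct nfs4_lock_state *lsp;
	struct nfs_server *server;
};

static struct release_ctx *prepare_release(struct nfs4_lock_state *lsp,
					   struct nfs_server *server)
{
	struct release_ctx *ctx = kmalloc(sizeof(*ctx), GFP_NOFS);

	if (!ctx)
		return NULL;
	ctx->lsp = lsp;
	ctx->server = server;	/* taken from the caller, not from lsp->ls_state */
	return ctx;
}

static void finish_release(struct release_ctx *ctx)
{
	/* Safe even if a concurrent close has already released the
	 * nfs4_state that lsp->ls_state used to point at. */
	nfs4_free_lock_state(ctx->server, ctx->lsp);
	kfree(ctx);
}
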
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4proc.c | 4 +++- fs/nfs/nfs4state.c | 8 +++----- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index b47bdb9c1612..97ecc863dd76 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -340,7 +340,7 @@ extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); extern void nfs_release_seqid(struct nfs_seqid *seqid); extern void nfs_free_seqid(struct nfs_seqid *seqid); -extern void nfs4_free_lock_state(struct nfs4_lock_state *lsp); +extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp); extern const nfs4_stateid zero_stateid; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 5e0961acfef4..d41d97fb4cb9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4760,13 +4760,14 @@ out: struct nfs_release_lockowner_data { struct nfs4_lock_state *lsp; + struct nfs_server *server; struct nfs_release_lockowner_args args; }; static void nfs4_release_lockowner_release(void *calldata) { struct nfs_release_lockowner_data *data = calldata; - nfs4_free_lock_state(data->lsp); + nfs4_free_lock_state(data->server, data->lsp); kfree(calldata); } @@ -4788,6 +4789,7 @@ int nfs4_release_lockowner(struct nfs4_lock_state *lsp) if (!data) return -ENOMEM; data->lsp = lsp; + data->server = server; data->args.lock_owner.clientid = server->nfs_client->cl_clientid; data->args.lock_owner.id = lsp->ls_seqid.owner_id; data->args.lock_owner.s_dev = server->s_dev; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 12b068f2ec91..0f43414eb25a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -791,10 +791,8 @@ out_free: return NULL; } -void nfs4_free_lock_state(struct nfs4_lock_state *lsp) +void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) { - struct nfs_server *server = lsp->ls_state->owner->so_server; - ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id); nfs4_destroy_seqid_counter(&lsp->ls_seqid); kfree(lsp); @@ -828,7 +826,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ } spin_unlock(&state->state_lock); if (new != NULL) - nfs4_free_lock_state(new); + nfs4_free_lock_state(state->owner->so_server, new); return lsp; } @@ -853,7 +851,7 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp) if (nfs4_release_lockowner(lsp) == 0) return; } - nfs4_free_lock_state(lsp); + nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp); } static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) -- cgit From 3b3be88d67cc17d0f0ab6edaf131516793fc947e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 17 Mar 2012 11:59:30 -0400 Subject: NFS: Use cond_resched_lock() to reduce latencies in the commit scans Ensure that we conditionally drop the inode->i_lock when it is safe to do so in the commit loops. We do so after locking the nfs_page, but before removing it from the commit list. We can then use list_safe_reset_next to recover the loop after the lock is retaken. 
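The idiom generalises beyond NFS. Below is a simplified, self-contained sketch of the pattern (struct item and scan_list are invented names for illustration): the caller holds 'lock', cond_resched_lock() may drop and retake it, and list_safe_reset_next() repairs the cached next pointer in case the list changed while the lock was released.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

struct item {
	struct list_head link;
};

/* Caller holds 'lock'.  Moves every entry from src to dst, yielding the
 * lock now and then to keep latencies down. */
static int scan_list(struct list_head *src, struct list_head *dst,
		     spinlock_t *lock)
{
	struct item *it, *tmp;
	int moved = 0;

	list_for_each_entry_safe(it, tmp, src, link) {
		/*
		 * The current entry must be pinned by some other means
		 * before the lock can be dropped; in the NFS code that is
		 * what nfs_lock_request() provides.  cond_resched_lock()
		 * returns nonzero if it dropped and retook the lock, in
		 * which case the cached next pointer may be stale and is
		 * recomputed with list_safe_reset_next().
		 */
		if (cond_resched_lock(lock))
			list_safe_reset_next(it, tmp, link);
		list_move_tail(&it->link, dst);
		moved++;
	}
	return moved;
}
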
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 11 ++++++++--- fs/nfs/pnfs.h | 8 ++++---- fs/nfs/write.c | 11 ++++++++--- 3 files changed, 20 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index e0bdbf4fe454..634c0bcb4fd6 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -936,7 +936,8 @@ static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode) } static int -filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max) +filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max, + spinlock_t *lock) { struct list_head *src = &bucket->written; struct list_head *dst = &bucket->committing; @@ -946,6 +947,8 @@ filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max) list_for_each_entry_safe(req, tmp, src, wb_list) { if (!nfs_lock_request(req)) continue; + if (cond_resched_lock(lock)) + list_safe_reset_next(req, tmp, wb_list); nfs_request_remove_commit_list(req); clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); nfs_list_add_request(req, dst); @@ -959,7 +962,8 @@ filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max) /* Move reqs from written to committing lists, returning count of number moved. * Note called with i_lock held. */ -static int filelayout_scan_commit_lists(struct inode *inode, int max) +static int filelayout_scan_commit_lists(struct inode *inode, int max, + spinlock_t *lock) { struct pnfs_layout_segment *lseg; struct nfs4_filelayout_segment *fl; @@ -972,7 +976,8 @@ static int filelayout_scan_commit_lists(struct inode *inode, int max) if (fl->commit_through_mds) goto out_done; for (i = 0; i < fl->number_of_buckets && max != 0; i++) { - cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i], max); + cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i], + max, lock); max -= cnt; rv += cnt; } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index e98ff3027d3a..07802652f5a3 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -97,7 +97,7 @@ struct pnfs_layoutdriver_type { void (*mark_request_commit) (struct nfs_page *req, struct pnfs_layout_segment *lseg); void (*clear_request_commit) (struct nfs_page *req); - int (*scan_commit_lists) (struct inode *inode, int max); + int (*scan_commit_lists) (struct inode *inode, int max, spinlock_t *lock); int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how); /* @@ -294,14 +294,14 @@ pnfs_clear_request_commit(struct nfs_page *req) } static inline int -pnfs_scan_commit_lists(struct inode *inode, int max) +pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock) { struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; int ret; if (ld == NULL || ld->scan_commit_lists == NULL) return 0; - ret = ld->scan_commit_lists(inode, max); + ret = ld->scan_commit_lists(inode, max, lock); if (ret != 0) set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags); return ret; @@ -419,7 +419,7 @@ pnfs_clear_request_commit(struct nfs_page *req) } static inline int -pnfs_scan_commit_lists(struct inode *inode, int max) +pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock) { return 0; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 628d9a69d0a2..bd93d40099f9 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -561,7 +561,8 @@ nfs_need_commit(struct nfs_inode *nfsi) /* i_lock held by caller */ static int -nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max) +nfs_scan_commit_list(struct 
list_head *src, struct list_head *dst, int max, + spinlock_t *lock) { struct nfs_page *req, *tmp; int ret = 0; @@ -569,6 +570,8 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max) list_for_each_entry_safe(req, tmp, src, wb_list) { if (!nfs_lock_request(req)) continue; + if (cond_resched_lock(lock)) + list_safe_reset_next(req, tmp, wb_list); nfs_request_remove_commit_list(req); nfs_list_add_request(req, dst); ret++; @@ -596,8 +599,10 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst) if (nfsi->ncommit > 0) { const int max = INT_MAX; - ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max); - ret += pnfs_scan_commit_lists(inode, max - ret); + ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max, + &inode->i_lock); + ret += pnfs_scan_commit_lists(inode, max - ret, + &inode->i_lock); } spin_unlock(&inode->i_lock); return ret; -- cgit From e27d359e9b7e446190362cd5c8fe281d02194896 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 18 Mar 2012 14:07:42 -0400 Subject: SUNRPC/NFS: Add Kbuild dependencies for NFS_DEBUG/RPC_DEBUG This allows us to turn on/off the dprintk() debugging interfaces for those distributions that don't ship the 'rpcdebug' utility. It also allows us to add Kbuild dependencies. Specifically, we already know that dprintk() in general relies on CONFIG_SYSCTL. Now it turns out that the NFS dprintks depend on CONFIG_CRC32 after we added support for the filehandle hash. Reported-by: Paul Gortmaker Signed-off-by: Trond Myklebust --- fs/nfs/Kconfig | 6 ++++++ fs/nfs/inode.c | 2 +- fs/nfs/mount_clnt.c | 2 +- fs/nfs/nfsroot.c | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 7bce64c7060e..2a0e6c599147 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -144,3 +144,9 @@ config NFS_USE_KERNEL_DNS depends on NFS_V4 && !NFS_USE_LEGACY_DNS select DNS_RESOLVER default y + +config NFS_DEBUG + bool + depends on NFS_FS && SUNRPC_DEBUG + select CRC32 + default y diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 1a19f8d30c14..7bb4d13c1cd5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1047,7 +1047,7 @@ struct nfs_fh *nfs_alloc_fhandle(void) return fh; } -#ifdef RPC_DEBUG +#ifdef NFS_DEBUG /* * _nfs_display_fhandle_hash - calculate the crc32 hash for the filehandle * in the same way that wireshark does diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index b37ca34af903..8e65c7f1f87c 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -16,7 +16,7 @@ #include #include "internal.h" -#ifdef RPC_DEBUG +#ifdef NFS_DEBUG # define NFSDBG_FACILITY NFSDBG_MOUNT #endif diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index c4744e1d513c..cd3c910d2d12 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -104,7 +104,7 @@ static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = ""; /* server:export path string passed to super.c */ static char nfs_root_device[NFS_MAXPATHLEN + 1] __initdata = ""; -#ifdef RPC_DEBUG +#ifdef NFS_DEBUG /* * When the "nfsrootdebug" kernel command line option is specified, * enable debugging messages for NFSROOT. -- cgit From ad2a8e6078a16d3b61b530f1447110841c36ae56 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 20 Mar 2012 16:58:06 +0000 Subject: AFS: checking wrong bit in afs_readpages() We should be testing "if (vnode->flags & (1 << 4))" instead of "if (vnode->flags & 4) {". The current test checks if the data was modified instead of deleted. 
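Concretely: the AFS vnode flag constants are bit numbers meant for test_bit()/set_bit(), not bit masks. With AFS_VNODE_DELETED == 4, masking the flags word with the constant itself tests bit 2 (the "modified" bit, per the description above) rather than bit 4. A minimal sketch of the difference (the MODIFIED value is shown only for illustration):

#include <linux/bitops.h>
#include <linux/types.h>

#define AFS_VNODE_MODIFIED	2	/* bit number (illustrative) */
#define AFS_VNODE_DELETED	4	/* bit number */

static bool vnode_deleted_buggy(unsigned long flags)
{
	return flags & AFS_VNODE_DELETED;	/* masks with 0x4 == 1 << 2, i.e. MODIFIED */
}

static bool vnode_deleted_fixed(unsigned long flags)
{
	return test_bit(AFS_VNODE_DELETED, &flags);	/* really tests bit 4 */
}
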
Signed-off-by: Dan Carpenter Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/afs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/afs/file.c b/fs/afs/file.c index 14d89fa58fee..8f6e9234d565 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -251,7 +251,7 @@ static int afs_readpages(struct file *file, struct address_space *mapping, ASSERT(key != NULL); vnode = AFS_FS_I(mapping->host); - if (vnode->flags & AFS_VNODE_DELETED) { + if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { _leave(" = -ESTALE"); return -ESTALE; } -- cgit From e636825346b36a07ccfc8e30946d52855e21f681 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 Mar 2012 17:03:22 +0100 Subject: exit_signal: simplify the "we have changed execution domain" logic exit_notify() checks "tsk->self_exec_id != tsk->parent_exec_id" to handle the "we have changed execution domain" case. We can change de_thread() to always set ->exit_signal = SIGCHLD and remove this check to simplify the code. We could change setup_new_exec() instead; this looks more logical because it increments ->self_exec_id. But note that de_thread() already resets ->exit_signal if it changes the leader, so let's keep both changes close to each other. Note that we change ->exit_signal locklessly; this changes the rules. Thereafter ->exit_signal is not stable under tasklist_lock, but this is fine: the only possible change is OLDSIG -> SIGCHLD. This can race with eligible_child(), but the race is harmless. We can race with reparent_leader(), which changes our ->exit_signal in parallel, but it does the same change to SIGCHLD. The noticeable user-visible change is that the execing task is not "visible" to do_wait()->eligible_child(__WCLONE) right after exec. To me this looks more logical, and it is consistent with the multi-threaded case. Signed-off-by: Oleg Nesterov Signed-off-by: Linus Torvalds --- fs/exec.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index b0695a9900ef..1e94d2263ae0 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -977,6 +977,9 @@ static int de_thread(struct task_struct *tsk) sig->notify_count = 0; no_thread_group: + /* we have changed execution domain */ + tsk->exit_signal = SIGCHLD; + if (current->mm) setmax_mm_hiwater_rss(&sig->maxrss, current->mm); -- cgit From 701085b219016d38f105b031381b9cee6200253a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 Mar 2012 17:04:01 +0100 Subject: exec: move de_thread()->setmax_mm_hiwater_rss() into exec_mmap() Minor cleanup. de_thread()->setmax_mm_hiwater_rss() looks a bit strange; move it into exec_mmap(), which plays with old_mm.
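Editor's note, illustration only, with simplified stand-in structures rather than the kernel's mm_struct/signal_struct: the helper being relocated just folds an mm's peak RSS into a running maximum, so the natural place to call it is exec_mmap(), the last point that still holds a reference to old_mm.

#include <stdio.h>

struct mm_stub     { unsigned long hiwater_rss; }; /* hypothetical stand-in */
struct signal_stub { unsigned long maxrss; };      /* hypothetical stand-in */

/* Roughly what setmax_mm_hiwater_rss() does: keep the running maximum. */
static void setmax_mm_hiwater_rss(unsigned long *maxrss, const struct mm_stub *mm)
{
        if (mm->hiwater_rss > *maxrss)
                *maxrss = mm->hiwater_rss;
}

int main(void)
{
        struct signal_stub sig = { 100 };
        struct mm_stub old_mm  = { 250 };

        /* Fold in the peak of the mm we are about to drop, right before the
         * reference goes away. */
        setmax_mm_hiwater_rss(&sig.maxrss, &old_mm);
        printf("maxrss = %lu\n", sig.maxrss); /* 250 */
        return 0;
}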
Signed-off-by: Oleg Nesterov Signed-off-by: Linus Torvalds --- fs/exec.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 1e94d2263ae0..95551c6da090 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -850,6 +850,7 @@ static int exec_mmap(struct mm_struct *mm) if (old_mm) { up_read(&old_mm->mmap_sem); BUG_ON(active_mm != old_mm); + setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm); mm_update_next_owner(old_mm); mmput(old_mm); return 0; @@ -980,9 +981,6 @@ no_thread_group: /* we have changed execution domain */ tsk->exit_signal = SIGCHLD; - if (current->mm) - setmax_mm_hiwater_rss(&sig->maxrss, current->mm); - exit_itimers(sig); flush_itimer_signals(); -- cgit From 8de52778798fe39660a8d6b26f290e0c93202761 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 6 Feb 2012 12:45:27 -0500 Subject: vfs: check i_nlink limits in vfs_{mkdir,rename_dir,link} New field of struct super_block - ->s_max_links. Maximal allowed value of ->i_nlink, or 0; in the latter case all checks still need to be done in ->link/->mkdir/->rename instances. Note that this limit applies both to directories and to non-directories. Signed-off-by: Al Viro --- fs/exofs/namei.c | 13 +------------ fs/exofs/super.c | 1 + fs/ext2/namei.c | 13 +------------ fs/ext2/super.c | 1 + fs/jfs/namei.c | 13 ------------- fs/jfs/super.c | 1 + fs/logfs/dir.c | 3 --- fs/logfs/super.c | 1 + fs/minix/inode.c | 10 +++++----- fs/minix/minix.h | 1 - fs/minix/namei.c | 14 +------------- fs/namei.c | 13 +++++++++++++ fs/nilfs2/namei.c | 11 ----------- fs/nilfs2/super.c | 1 + fs/sysv/namei.c | 12 +----------- fs/sysv/super.c | 24 ++++++++++++------------ fs/sysv/sysv.h | 1 - fs/udf/namei.c | 13 ------------- fs/udf/super.c | 3 +++ fs/ufs/namei.c | 14 +------------- fs/ufs/super.c | 1 + fs/xfs/xfs_rename.c | 11 ----------- fs/xfs/xfs_super.c | 1 + fs/xfs/xfs_utils.c | 2 -- fs/xfs/xfs_vnodeops.c | 16 ---------------- 25 files changed, 45 insertions(+), 149 deletions(-) (limited to 'fs') diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 9dbf0c301030..fc7161d6bf6b 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -143,9 +143,6 @@ static int exofs_link(struct dentry *old_dentry, struct inode *dir, { struct inode *inode = old_dentry->d_inode; - if (inode->i_nlink >= EXOFS_LINK_MAX) - return -EMLINK; - inode->i_ctime = CURRENT_TIME; inode_inc_link_count(inode); ihold(inode); @@ -156,10 +153,7 @@ static int exofs_link(struct dentry *old_dentry, struct inode *dir, static int exofs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; - int err = -EMLINK; - - if (dir->i_nlink >= EXOFS_LINK_MAX) - goto out; + int err; inode_inc_link_count(dir); @@ -275,11 +269,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, if (err) goto out_dir; } else { - if (dir_de) { - err = -EMLINK; - if (new_dir->i_nlink >= EXOFS_LINK_MAX) - goto out_dir; - } err = exofs_add_link(new_dentry, old_inode); if (err) goto out_dir; diff --git a/fs/exofs/super.c b/fs/exofs/super.c index d22cd168c6ee..6cafcadfc3c8 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -754,6 +754,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) sb->s_blocksize = EXOFS_BLKSIZE; sb->s_blocksize_bits = EXOFS_BLKSHIFT; sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_max_links = EXOFS_LINK_MAX; atomic_set(&sbi->s_curr_pending, 0); sb->s_bdev = NULL; sb->s_dev = 0; diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 080419814bae..dffb86536285 100644 ---
a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -195,9 +195,6 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, struct inode *inode = old_dentry->d_inode; int err; - if (inode->i_nlink >= EXT2_LINK_MAX) - return -EMLINK; - dquot_initialize(dir); inode->i_ctime = CURRENT_TIME_SEC; @@ -217,10 +214,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) { struct inode * inode; - int err = -EMLINK; - - if (dir->i_nlink >= EXT2_LINK_MAX) - goto out; + int err; dquot_initialize(dir); @@ -346,11 +340,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, drop_nlink(new_inode); inode_dec_link_count(new_inode); } else { - if (dir_de) { - err = -EMLINK; - if (new_dir->i_nlink >= EXT2_LINK_MAX) - goto out_dir; - } err = ext2_add_link(new_dentry, old_inode); if (err) goto out_dir; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 0090595beb28..9f6766a3ac1e 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -919,6 +919,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) } sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits); + sb->s_max_links = EXT2_LINK_MAX; if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) { sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE; diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 5f7c160ea64f..07c91ca6017d 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -220,12 +220,6 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) dquot_initialize(dip); - /* link count overflow on parent directory ? */ - if (dip->i_nlink == JFS_LINK_MAX) { - rc = -EMLINK; - goto out1; - } - /* * search parent directory for entry/freespace * (dtSearch() returns parent directory page pinned) @@ -806,9 +800,6 @@ static int jfs_link(struct dentry *old_dentry, jfs_info("jfs_link: %s %s", old_dentry->d_name.name, dentry->d_name.name); - if (ip->i_nlink == JFS_LINK_MAX) - return -EMLINK; - dquot_initialize(dir); tid = txBegin(ip->i_sb, 0); @@ -1138,10 +1129,6 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, rc = -ENOTEMPTY; goto out3; } - } else if ((new_dir != old_dir) && - (new_dir->i_nlink == JFS_LINK_MAX)) { - rc = -EMLINK; - goto out3; } } else if (new_ip) { IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 682bca642f38..4661ad705130 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -441,6 +441,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; sb->s_fs_info = sbi; + sb->s_max_links = JFS_LINK_MAX; sbi->sb = sb; sbi->uid = sbi->gid = sbi->umask = -1; diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 3de7a32cadbe..4aea231fc9e6 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -558,9 +558,6 @@ static int logfs_link(struct dentry *old_dentry, struct inode *dir, { struct inode *inode = old_dentry->d_inode; - if (inode->i_nlink >= LOGFS_LINK_MAX) - return -EMLINK; - inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; ihold(inode); inc_nlink(inode); diff --git a/fs/logfs/super.c b/fs/logfs/super.c index c9ee7f5d1caf..b1a491a5fe78 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c @@ -542,6 +542,7 @@ static struct dentry *logfs_get_sb_device(struct logfs_super *super, * the filesystem incompatible with 32bit systems. 
*/ sb->s_maxbytes = (1ull << 43) - 1; + sb->s_max_links = LOGFS_LINK_MAX; sb->s_op = &logfs_super_operations; sb->s_flags = flags | MS_NOATIME; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index fa8b612b8ce2..62c697caffb9 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -190,24 +190,24 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_version = MINIX_V1; sbi->s_dirsize = 16; sbi->s_namelen = 14; - sbi->s_link_max = MINIX_LINK_MAX; + s->s_max_links = MINIX_LINK_MAX; } else if (s->s_magic == MINIX_SUPER_MAGIC2) { sbi->s_version = MINIX_V1; sbi->s_dirsize = 32; sbi->s_namelen = 30; - sbi->s_link_max = MINIX_LINK_MAX; + s->s_max_links = MINIX_LINK_MAX; } else if (s->s_magic == MINIX2_SUPER_MAGIC) { sbi->s_version = MINIX_V2; sbi->s_nzones = ms->s_zones; sbi->s_dirsize = 16; sbi->s_namelen = 14; - sbi->s_link_max = MINIX2_LINK_MAX; + s->s_max_links = MINIX2_LINK_MAX; } else if (s->s_magic == MINIX2_SUPER_MAGIC2) { sbi->s_version = MINIX_V2; sbi->s_nzones = ms->s_zones; sbi->s_dirsize = 32; sbi->s_namelen = 30; - sbi->s_link_max = MINIX2_LINK_MAX; + s->s_max_links = MINIX2_LINK_MAX; } else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) { m3s = (struct minix3_super_block *) bh->b_data; s->s_magic = m3s->s_magic; @@ -221,9 +221,9 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_dirsize = 64; sbi->s_namelen = 60; sbi->s_version = MINIX_V3; - sbi->s_link_max = MINIX2_LINK_MAX; sbi->s_mount_state = MINIX_VALID_FS; sb_set_blocksize(s, m3s->s_blocksize); + s->s_max_links = MINIX2_LINK_MAX; } else goto out_no_fs; diff --git a/fs/minix/minix.h b/fs/minix/minix.h index c889ef0aa571..1ebd11854622 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -34,7 +34,6 @@ struct minix_sb_info { unsigned long s_max_size; int s_dirsize; int s_namelen; - int s_link_max; struct buffer_head ** s_imap; struct buffer_head ** s_zmap; struct buffer_head * s_sbh; diff --git a/fs/minix/namei.c b/fs/minix/namei.c index 2f76e38c2065..2d0ee1786305 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -94,9 +94,6 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, { struct inode *inode = old_dentry->d_inode; - if (inode->i_nlink >= minix_sb(inode->i_sb)->s_link_max) - return -EMLINK; - inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); ihold(inode); @@ -106,10 +103,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, static int minix_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode) { struct inode * inode; - int err = -EMLINK; - - if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) - goto out; + int err; inode_inc_link_count(dir); @@ -181,7 +175,6 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, struct inode * new_dir, struct dentry *new_dentry) { - struct minix_sb_info * info = minix_sb(old_dir->i_sb); struct inode * old_inode = old_dentry->d_inode; struct inode * new_inode = new_dentry->d_inode; struct page * dir_page = NULL; @@ -219,11 +212,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, drop_nlink(new_inode); inode_dec_link_count(new_inode); } else { - if (dir_de) { - err = -EMLINK; - if (new_dir->i_nlink >= info->s_link_max) - goto out_dir; - } err = minix_add_link(new_dentry, old_inode); if (err) goto out_dir; diff --git a/fs/namei.c b/fs/namei.c index 46ea9cc16647..a0b82762e8fc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2569,6 
+2569,7 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { int error = may_create(dir, dentry); + unsigned max_links = dir->i_sb->s_max_links; if (error) return error; @@ -2581,6 +2582,9 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (error) return error; + if (max_links && dir->i_nlink >= max_links) + return -EMLINK; + error = dir->i_op->mkdir(dir, dentry, mode); if (!error) fsnotify_mkdir(dir, dentry); @@ -2911,6 +2915,7 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { struct inode *inode = old_dentry->d_inode; + unsigned max_links = dir->i_sb->s_max_links; int error; if (!inode) @@ -2941,6 +2946,8 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0) error = -ENOENT; + else if (max_links && inode->i_nlink >= max_links) + error = -EMLINK; else error = dir->i_op->link(old_dentry, dir, new_dentry); mutex_unlock(&inode->i_mutex); @@ -3050,6 +3057,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, { int error = 0; struct inode *target = new_dentry->d_inode; + unsigned max_links = new_dir->i_sb->s_max_links; /* * If we are going to change the parent - check write permissions, @@ -3073,6 +3081,11 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry)) goto out; + error = -EMLINK; + if (max_links && !target && new_dir != old_dir && + new_dir->i_nlink >= max_links) + goto out; + if (target) shrink_dcache_parent(new_dentry); error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 1cd3f624dffc..fce2bbee66d4 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -193,9 +193,6 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir, struct nilfs_transaction_info ti; int err; - if (inode->i_nlink >= NILFS_LINK_MAX) - return -EMLINK; - err = nilfs_transaction_begin(dir->i_sb, &ti, 1); if (err) return err; @@ -219,9 +216,6 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) struct nilfs_transaction_info ti; int err; - if (dir->i_nlink >= NILFS_LINK_MAX) - return -EMLINK; - err = nilfs_transaction_begin(dir->i_sb, &ti, 1); if (err) return err; @@ -400,11 +394,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, drop_nlink(new_inode); nilfs_mark_inode_dirty(new_inode); } else { - if (dir_de) { - err = -EMLINK; - if (new_dir->i_nlink >= NILFS_LINK_MAX) - goto out_dir; - } err = nilfs_add_link(new_dentry, old_inode); if (err) goto out_dir; diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 08e3d4f9df18..1fc9ad3c1d14 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1059,6 +1059,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_export_op = &nilfs_export_ops; sb->s_root = NULL; sb->s_time_gran = 1; + sb->s_max_links = NILFS_LINK_MAX; bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; sb->s_bdi = bdi ? 
: &default_backing_dev_info; diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index b217797e621b..d7466e293614 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -121,9 +121,6 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, { struct inode *inode = old_dentry->d_inode; - if (inode->i_nlink >= SYSV_SB(inode->i_sb)->s_link_max) - return -EMLINK; - inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); ihold(inode); @@ -134,10 +131,8 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, static int sysv_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode) { struct inode * inode; - int err = -EMLINK; + int err; - if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) - goto out; inode_inc_link_count(dir); inode = sysv_new_inode(dir, S_IFDIR|mode); @@ -251,11 +246,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, drop_nlink(new_inode); inode_dec_link_count(new_inode); } else { - if (dir_de) { - err = -EMLINK; - if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) - goto out_dir; - } err = sysv_add_link(new_dentry, old_inode); if (err) goto out_dir; diff --git a/fs/sysv/super.c b/fs/sysv/super.c index f60c196913ea..f467740e088c 100644 --- a/fs/sysv/super.c +++ b/fs/sysv/super.c @@ -44,7 +44,7 @@ enum { JAN_1_1980 = (10*365 + 2) * 24 * 60 * 60 }; -static void detected_xenix(struct sysv_sb_info *sbi) +static void detected_xenix(struct sysv_sb_info *sbi, unsigned *max_links) { struct buffer_head *bh1 = sbi->s_bh1; struct buffer_head *bh2 = sbi->s_bh2; @@ -59,7 +59,7 @@ static void detected_xenix(struct sysv_sb_info *sbi) sbd2 = (struct xenix_super_block *) (bh2->b_data - 512); } - sbi->s_link_max = XENIX_LINK_MAX; + *max_links = XENIX_LINK_MAX; sbi->s_fic_size = XENIX_NICINOD; sbi->s_flc_size = XENIX_NICFREE; sbi->s_sbd1 = (char *)sbd1; @@ -75,7 +75,7 @@ static void detected_xenix(struct sysv_sb_info *sbi) sbi->s_nzones = fs32_to_cpu(sbi, sbd1->s_fsize); } -static void detected_sysv4(struct sysv_sb_info *sbi) +static void detected_sysv4(struct sysv_sb_info *sbi, unsigned *max_links) { struct sysv4_super_block * sbd; struct buffer_head *bh1 = sbi->s_bh1; @@ -86,7 +86,7 @@ static void detected_sysv4(struct sysv_sb_info *sbi) else sbd = (struct sysv4_super_block *) bh2->b_data; - sbi->s_link_max = SYSV_LINK_MAX; + *max_links = SYSV_LINK_MAX; sbi->s_fic_size = SYSV_NICINOD; sbi->s_flc_size = SYSV_NICFREE; sbi->s_sbd1 = (char *)sbd; @@ -103,7 +103,7 @@ static void detected_sysv4(struct sysv_sb_info *sbi) sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize); } -static void detected_sysv2(struct sysv_sb_info *sbi) +static void detected_sysv2(struct sysv_sb_info *sbi, unsigned *max_links) { struct sysv2_super_block *sbd; struct buffer_head *bh1 = sbi->s_bh1; @@ -114,7 +114,7 @@ static void detected_sysv2(struct sysv_sb_info *sbi) else sbd = (struct sysv2_super_block *) bh2->b_data; - sbi->s_link_max = SYSV_LINK_MAX; + *max_links = SYSV_LINK_MAX; sbi->s_fic_size = SYSV_NICINOD; sbi->s_flc_size = SYSV_NICFREE; sbi->s_sbd1 = (char *)sbd; @@ -131,14 +131,14 @@ static void detected_sysv2(struct sysv_sb_info *sbi) sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize); } -static void detected_coherent(struct sysv_sb_info *sbi) +static void detected_coherent(struct sysv_sb_info *sbi, unsigned *max_links) { struct coh_super_block * sbd; struct buffer_head *bh1 = sbi->s_bh1; sbd = (struct coh_super_block *) bh1->b_data; - sbi->s_link_max = COH_LINK_MAX; + *max_links = COH_LINK_MAX; sbi->s_fic_size = COH_NICINOD; sbi->s_flc_size = 
COH_NICFREE; sbi->s_sbd1 = (char *)sbd; @@ -154,12 +154,12 @@ static void detected_coherent(struct sysv_sb_info *sbi) sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize); } -static void detected_v7(struct sysv_sb_info *sbi) +static void detected_v7(struct sysv_sb_info *sbi, unsigned *max_links) { struct buffer_head *bh2 = sbi->s_bh2; struct v7_super_block *sbd = (struct v7_super_block *)bh2->b_data; - sbi->s_link_max = V7_LINK_MAX; + *max_links = V7_LINK_MAX; sbi->s_fic_size = V7_NICINOD; sbi->s_flc_size = V7_NICFREE; sbi->s_sbd1 = (char *)sbd; @@ -290,7 +290,7 @@ static char *flavour_names[] = { [FSTYPE_AFS] = "AFS", }; -static void (*flavour_setup[])(struct sysv_sb_info *) = { +static void (*flavour_setup[])(struct sysv_sb_info *, unsigned *) = { [FSTYPE_XENIX] = detected_xenix, [FSTYPE_SYSV4] = detected_sysv4, [FSTYPE_SYSV2] = detected_sysv2, @@ -310,7 +310,7 @@ static int complete_read_super(struct super_block *sb, int silent, int size) sbi->s_firstinodezone = 2; - flavour_setup[sbi->s_type](sbi); + flavour_setup[sbi->s_type](sbi, &sb->s_max_links); sbi->s_truncate = 1; sbi->s_ndatazones = sbi->s_nzones - sbi->s_firstdatazone; diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index 0e4b821c5691..11b07672f6c5 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h @@ -24,7 +24,6 @@ struct sysv_sb_info { char s_bytesex; /* bytesex (le/be/pdp) */ char s_truncate; /* if 1: names > SYSV_NAMELEN chars are truncated */ /* if 0: they are disallowed (ENAMETOOLONG) */ - nlink_t s_link_max; /* max number of hard links to a file */ unsigned int s_inodes_per_block; /* number of inodes per block */ unsigned int s_inodes_per_block_1; /* inodes_per_block - 1 */ unsigned int s_inodes_per_block_bits; /* log2(inodes_per_block) */ diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 08bf46edf9c4..38de8f234b94 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -32,8 +32,6 @@ #include #include -enum { UDF_MAX_LINKS = 0xffff }; - static inline int udf_match(int len1, const unsigned char *name1, int len2, const unsigned char *name2) { @@ -649,10 +647,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) struct udf_inode_info *dinfo = UDF_I(dir); struct udf_inode_info *iinfo; - err = -EMLINK; - if (dir->i_nlink >= UDF_MAX_LINKS) - goto out; - err = -EIO; inode = udf_new_inode(dir, S_IFDIR | mode, &err); if (!inode) @@ -1032,9 +1026,6 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, struct fileIdentDesc cfi, *fi; int err; - if (inode->i_nlink >= UDF_MAX_LINKS) - return -EMLINK; - fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { return err; @@ -1126,10 +1117,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, if (udf_get_lb_pblock(old_inode->i_sb, &tloc, 0) != old_dir->i_ino) goto end_rename; - - retval = -EMLINK; - if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS) - goto end_rename; } if (!nfi) { nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, diff --git a/fs/udf/super.c b/fs/udf/super.c index c09a84daaf50..8d8b25336fbb 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -75,6 +75,8 @@ #define UDF_DEFAULT_BLOCKSIZE 2048 +enum { UDF_MAX_LINKS = 0xffff }; + /* These are the "meat" - everything else is stuffing */ static int udf_fill_super(struct super_block *, void *, int); static void udf_put_super(struct super_block *); @@ -2042,6 +2044,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) goto error_out; } sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_max_links = UDF_MAX_LINKS; return 0; error_out: diff 
--git a/fs/ufs/namei.c b/fs/ufs/namei.c index 38cac199edff..a2281cadefa1 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -166,10 +166,6 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, int error; lock_ufs(dir->i_sb); - if (inode->i_nlink >= UFS_LINK_MAX) { - unlock_ufs(dir->i_sb); - return -EMLINK; - } inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); @@ -183,10 +179,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) { struct inode * inode; - int err = -EMLINK; - - if (dir->i_nlink >= UFS_LINK_MAX) - goto out; + int err; lock_ufs(dir->i_sb); inode_inc_link_count(dir); @@ -305,11 +298,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, drop_nlink(new_inode); inode_dec_link_count(new_inode); } else { - if (dir_de) { - err = -EMLINK; - if (new_dir->i_nlink >= UFS_LINK_MAX) - goto out_dir; - } err = ufs_add_link(new_dentry, old_inode); if (err) goto out_dir; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 5246ee3e5607..ec25d09fcaa8 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1157,6 +1157,7 @@ magic_found: "fast symlink size (%u)\n", uspi->s_maxsymlinklen); uspi->s_maxsymlinklen = maxsymlen; } + sb->s_max_links = UFS_LINK_MAX; inode = ufs_iget(sb, UFS_ROOTINO); if (IS_ERR(inode)) { diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 866de277079a..e44ef7ee8ce8 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c @@ -118,17 +118,6 @@ xfs_rename( new_parent = (src_dp != target_dp); src_is_directory = S_ISDIR(src_ip->i_d.di_mode); - if (src_is_directory) { - /* - * Check for link count overflow on target_dp - */ - if (target_ip == NULL && new_parent && - target_dp->i_d.di_nlink >= XFS_MAXLINK) { - error = XFS_ERROR(EMLINK); - goto std_return; - } - } - xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, inodes, &num_inodes); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index ee5b695c99a7..0e4c5c017fba 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1341,6 +1341,7 @@ xfs_fs_fill_super( sb->s_blocksize = mp->m_sb.sb_blocksize; sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); + sb->s_max_links = XFS_MAXLINK; sb->s_time_gran = 1; set_posix_acl_flag(sb); diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 89dbb4a50872..79c05ac85bfe 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c @@ -296,8 +296,6 @@ xfs_bumplink( xfs_trans_t *tp, xfs_inode_t *ip) { - if (ip->i_d.di_nlink >= XFS_MAXLINK) - return XFS_ERROR(EMLINK); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); ASSERT(ip->i_d.di_nlink > 0); diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index ebdb88840a47..64981d7e7375 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -917,14 +917,6 @@ xfs_create( xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); unlock_dp_on_error = B_TRUE; - /* - * Check for directory link count overflow. - */ - if (is_dir && dp->i_d.di_nlink >= XFS_MAXLINK) { - error = XFS_ERROR(EMLINK); - goto out_trans_cancel; - } - xfs_bmap_init(&free_list, &first_block); /* @@ -1428,14 +1420,6 @@ xfs_link( xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); - /* - * If the source has too many links, we can't make any more to it. 
- */ - if (sip->i_d.di_nlink >= XFS_MAXLINK) { - error = XFS_ERROR(EMLINK); - goto error_return; - } - /* * If we are using project inheritance, we only allow hard link * creation in our tree when the project IDs are the same; else -- cgit From b57ce9694ec43dcb6ef6f189d6540e4b3d2c5e7a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 02:38:16 -0500 Subject: vfs: drop_file_write_access() made static Signed-off-by: Al Viro --- fs/file_table.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/file_table.c b/fs/file_table.c index 20002e39754d..70f2a0fd6aec 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -204,7 +204,7 @@ EXPORT_SYMBOL(alloc_file); * to write to @file, along with access to write through * its vfsmount. */ -void drop_file_write_access(struct file *file) +static void drop_file_write_access(struct file *file) { struct vfsmount *mnt = file->f_path.mnt; struct dentry *dentry = file->f_path.dentry; @@ -219,7 +219,6 @@ void drop_file_write_access(struct file *file) mnt_drop_write(mnt); file_release_write(file); } -EXPORT_SYMBOL_GPL(drop_file_write_access); /* the real guts of fput() - releasing the last reference to file */ -- cgit From e28e832c3e1e1197873cfd0b6ce86868cf5c391d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 02:58:14 -0500 Subject: ecryptfs: don't bother with ->drop_inode() generic_drop_inode() is the default Signed-off-by: Al Viro --- fs/ecryptfs/super.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index cf152823bbf4..2dd946b636d2 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c @@ -184,7 +184,6 @@ static int ecryptfs_show_options(struct seq_file *m, struct dentry *root) const struct super_operations ecryptfs_sops = { .alloc_inode = ecryptfs_alloc_inode, .destroy_inode = ecryptfs_destroy_inode, - .drop_inode = generic_drop_inode, .statfs = ecryptfs_statfs, .remount_fs = NULL, .evict_inode = ecryptfs_evict_inode, -- cgit From 9bcb4b733c22b7dbc4cf847e707ac98f751e9180 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 19:43:17 -0500 Subject: vfs: turn generic_drop_inode() into static inline Once upon a time it used to be much bigger, but these days there's no point whatsoever keeping it in fs/inode.c, especially since it's not even needed as initializer for ->drop_inode() - it's the default and leaving ->drop_inode NULL will do just as well. Signed-off-by: Al Viro --- fs/inode.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 83ab215baab1..92de04b0baa2 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1368,17 +1368,6 @@ int generic_delete_inode(struct inode *inode) } EXPORT_SYMBOL(generic_delete_inode); -/* - * Normal UNIX filesystem behaviour: delete the - * inode when the usage count drops to zero, and - * i_nlink is zero. - */ -int generic_drop_inode(struct inode *inode) -{ - return !inode->i_nlink || inode_unhashed(inode); -} -EXPORT_SYMBOL_GPL(generic_drop_inode); - /* * Called when we're dropping the last reference * to an inode. -- cgit From 064326c0773af8a0e8bb82d895cceaedc8a51b9e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 21:06:12 -0500 Subject: clean up the failure exits in cifs_read_super() no need to make that iput() conditional, just take it to the right place... 
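Editor's note, a generic sketch with made-up names rather than code from the patch: the shape of this cleanup is to drop a resource on the one failure path that still owns it, so the shared error label needs no conditional release.

#include <stdlib.h>

struct inode { int unused; };                        /* hypothetical stand-in */

static struct inode *get_root_inode(void)           { return malloc(sizeof(struct inode)); }
static void iput(struct inode *inode)               { free(inode); }
static void *alloc_root_dentry(struct inode *inode) { (void)inode; return malloc(16); }

static int fill_super_sketch(void **rootp)
{
        struct inode *inode = get_root_inode();
        if (!inode)
                goto out_err;                /* nothing held yet, nothing to drop */

        *rootp = alloc_root_dentry(inode);
        if (!*rootp) {
                iput(inode);                 /* release it exactly where the failure happens */
                goto out_err;
        }
        return 0;

out_err:
        return -1;                           /* the shared exit stays trivial: no "if (inode)" */
}

int main(void)
{
        void *root = NULL;
        return fill_super_sketch(&root) ? 1 : 0;
}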
Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index b1fd382d1952..8b7d7ff88792 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -119,7 +119,6 @@ cifs_read_super(struct super_block *sb) if (IS_ERR(inode)) { rc = PTR_ERR(inode); - inode = NULL; goto out_no_root; } @@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb) if (!sb->s_root) { rc = -ENOMEM; + iput(inode); goto out_no_root; } @@ -147,9 +147,6 @@ cifs_read_super(struct super_block *sb) out_no_root: cERROR(1, "cifs_read_super: get root inode failed"); - if (inode) - iput(inode); - return rc; } -- cgit From f56b0fbc6477e50303a503ee1453ed94e20f154a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 21:15:58 -0500 Subject: coda: clean failure exits in coda_fill_super() same as for cifs, move iput() to the right place, make it unconditional Signed-off-by: Al Viro --- fs/coda/inode.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 5e2e1b3f068d..32dafc875c14 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -208,7 +208,6 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) if (IS_ERR(root)) { error = PTR_ERR(root); printk("Failure of coda_cnode_make for root: error %d\n", error); - root = NULL; goto error; } @@ -216,15 +215,13 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) root->i_ino, root->i_sb->s_id); sb->s_root = d_alloc_root(root); if (!sb->s_root) { + iput(root); error = -EINVAL; goto error; } return 0; error: - if (root) - iput(root); - mutex_lock(&vc->vc_mutex); bdi_destroy(&vc->bdi); vc->vc_sb = NULL; -- cgit From be0d93f0aa5682a24a2a9ec0dd26fffaad608cce Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 21:46:49 -0500 Subject: ... and the same failure exits cleanup for ocfs2 Signed-off-by: Al Viro --- fs/ocfs2/super.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 604e12c4e979..2b1184f7097f 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1154,12 +1154,12 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) } status = ocfs2_mount_volume(sb); - if (osb->root_inode) - inode = igrab(osb->root_inode); - if (status < 0) goto read_super_error; + if (osb->root_inode) + inode = igrab(osb->root_inode); + if (!inode) { status = -EIO; mlog_errno(status); @@ -1168,6 +1168,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) root = d_alloc_root(inode); if (!root) { + iput(inode); status = -ENOMEM; mlog_errno(status); goto read_super_error; @@ -1220,9 +1221,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) read_super_error: brelse(bh); - if (inode) - iput(inode); - if (osb) { atomic_set(&osb->vol_state, VOLUME_DISABLED); wake_up(&osb->osb_mount_event); -- cgit From 6b4231e2f92adbcf96fb2a3fa751d7ca0a61b21f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 21:56:08 -0500 Subject: procfs: clean proc_fill_super() up First of all, there's no need to zero ->i_uid/->i_gid on root inode - both had been set to zero already. Moreover, let's take the iput() on failure to the failure exit it belongs to... 
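Editor's note: these open-coded "allocate the root dentry, iput() the inode ourselves on failure" sequences are exactly what the d_make_root() conversion a little further below does away with. For context, a compilable approximation of that helper's contract, using userspace stand-in types (an assumption for illustration, not a copy of fs/dcache.c):

#include <stdio.h>
#include <stdlib.h>

struct inode  { int unused; };            /* hypothetical stand-in */
struct dentry { struct inode *d_inode; }; /* hypothetical stand-in */

static void iput(struct inode *inode) { free(inode); }

static struct dentry *d_alloc_root(struct inode *inode)
{
        struct dentry *d = malloc(sizeof(*d));
        if (d)
                d->d_inode = inode;
        return d;
}

/* Assumed contract of d_make_root(): the inode reference is consumed whether or
 * not the dentry allocation succeeds, and a NULL inode simply yields NULL. */
static struct dentry *d_make_root(struct inode *root_inode)
{
        struct dentry *res = NULL;

        if (root_inode) {
                res = d_alloc_root(root_inode);
                if (!res)
                        iput(root_inode);
        }
        return res;
}

int main(void)
{
        printf("d_make_root(NULL) -> %p\n", (void *)d_make_root(NULL));
        return 0;
}

Because the reference is consumed even on failure, callers only ever check the returned dentry, and several of the conversions below pass a possibly-NULL "get root inode" result straight in.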
Signed-off-by: Al Viro --- fs/proc/inode.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 84fd3235a590..a70af3a44f45 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -499,16 +499,15 @@ int proc_fill_super(struct super_block *s) root_inode = proc_get_inode(s, &proc_root); if (!root_inode) goto out_no_root; - root_inode->i_uid = 0; - root_inode->i_gid = 0; s->s_root = d_alloc_root(root_inode); - if (!s->s_root) + if (!s->s_root) { + iput(root_inode); goto out_no_root; + } return 0; out_no_root: printk("proc_read_super: get root inode failed\n"); - iput(root_inode); pde_put(&proc_root); return -ENOMEM; } -- cgit From 48fde701aff662559b38d9a609574068f22d00fe Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 8 Jan 2012 22:15:13 -0500 Subject: switch open-coded instances of d_make_root() to new helper Signed-off-by: Al Viro --- fs/9p/vfs_super.c | 3 +-- fs/adfs/super.c | 3 +-- fs/affs/super.c | 7 ++----- fs/afs/super.c | 7 ++----- fs/autofs4/inode.c | 10 ++-------- fs/befs/linuxvfs.c | 3 +-- fs/bfs/inode.c | 3 +-- fs/btrfs/super.c | 8 ++------ fs/ceph/super.c | 3 +-- fs/cifs/cifsfs.c | 4 +--- fs/coda/inode.c | 3 +-- fs/configfs/mount.c | 3 +-- fs/cramfs/inode.c | 6 ++---- fs/devpts/inode.c | 3 +-- fs/ecryptfs/main.c | 3 +-- fs/efs/super.c | 3 +-- fs/exofs/super.c | 3 +-- fs/ext2/super.c | 3 +-- fs/ext3/super.c | 3 +-- fs/ext4/super.c | 3 +-- fs/freevxfs/vxfs_super.c | 3 +-- fs/fuse/inode.c | 9 ++------- fs/gfs2/ops_fstype.c | 3 +-- fs/hfs/super.c | 6 ++---- fs/hostfs/hostfs_kern.c | 4 ++-- fs/hpfs/super.c | 6 ++---- fs/hppfs/hppfs.c | 9 ++------- fs/hugetlbfs/inode.c | 13 ++----------- fs/isofs/inode.c | 3 +-- fs/jffs2/fs.c | 6 ++---- fs/jfs/super.c | 3 +-- fs/libfs.c | 6 ++---- fs/logfs/super.c | 6 ++---- fs/ncpfs/inode.c | 6 ++---- fs/nfs/getroot.c | 6 ++---- fs/nilfs2/super.c | 3 +-- fs/ocfs2/dlmfs/dlmfs.c | 14 ++------------ fs/ocfs2/super.c | 3 +-- fs/omfs/inode.c | 6 ++---- fs/openpromfs/inode.c | 3 +-- fs/proc/inode.c | 15 +++------------ fs/pstore/inode.c | 3 +-- fs/qnx4/inode.c | 6 ++---- fs/ramfs/inode.c | 12 ++---------- fs/reiserfs/super.c | 6 ++---- fs/romfs/super.c | 6 ++---- fs/squashfs/super.c | 3 +-- fs/sysfs/mount.c | 3 +-- fs/sysv/super.c | 3 +-- fs/ubifs/super.c | 6 ++---- fs/udf/super.c | 3 +-- fs/ufs/super.c | 6 ++---- fs/xfs/xfs_super.c | 6 ++---- 53 files changed, 80 insertions(+), 200 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 7b0cd87b07c2..10b7d3c9dba8 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -155,9 +155,8 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, goto release_sb; } - root = d_alloc_root(inode); + root = d_make_root(inode); if (!root) { - iput(inode); retval = -ENOMEM; goto release_sb; } diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 8e3b36ace305..06fdcc9382c4 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -483,10 +483,9 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_d_op = &adfs_dentry_operations; root = adfs_iget(sb, &root_obj); - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { int i; - iput(root); for (i = 0; i < asb->s_map_size; i++) brelse(asb->s_map[i].dm_bh); kfree(asb->s_map); diff --git a/fs/affs/super.c b/fs/affs/super.c index 8ba73fed7964..0782653a05a2 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -473,7 +473,7 @@ got_root: root_inode = affs_iget(sb, root_block); if 
(IS_ERR(root_inode)) { ret = PTR_ERR(root_inode); - goto out_error_noinode; + goto out_error; } if (AFFS_SB(sb)->s_flags & SF_INTL) @@ -481,7 +481,7 @@ got_root: else sb->s_d_op = &affs_dentry_operations; - sb->s_root = d_alloc_root(root_inode); + sb->s_root = d_make_root(root_inode); if (!sb->s_root) { printk(KERN_ERR "AFFS: Get root inode failed\n"); goto out_error; @@ -494,9 +494,6 @@ got_root: * Begin the cascaded cleanup ... */ out_error: - if (root_inode) - iput(root_inode); -out_error_noinode: kfree(sbi->s_bitmap); affs_brelse(root_bh); kfree(sbi->s_prefix); diff --git a/fs/afs/super.c b/fs/afs/super.c index 983ec59fc80d..f02b31e7e648 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -301,7 +301,6 @@ static int afs_fill_super(struct super_block *sb, { struct afs_super_info *as = sb->s_fs_info; struct afs_fid fid; - struct dentry *root = NULL; struct inode *inode = NULL; int ret; @@ -327,18 +326,16 @@ static int afs_fill_super(struct super_block *sb, set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags); ret = -ENOMEM; - root = d_alloc_root(inode); - if (!root) + sb->s_root = d_make_root(inode); + if (!sb->s_root) goto error; sb->s_d_op = &afs_fs_dentry_operations; - sb->s_root = root; _leave(" = 0"); return 0; error: - iput(inode); _leave(" = %d", ret); return ret; } diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 06858d955120..d8dc002e9cc3 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -247,12 +247,9 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) if (!ino) goto fail_free; root_inode = autofs4_get_inode(s, S_IFDIR | 0755); - if (!root_inode) - goto fail_ino; - - root = d_alloc_root(root_inode); + root = d_make_root(root_inode); if (!root) - goto fail_iput; + goto fail_ino; pipe = NULL; root->d_fsdata = ino; @@ -317,9 +314,6 @@ fail_fput: fail_dput: dput(root); goto fail_free; -fail_iput: - printk("autofs: get root dentry failed\n"); - iput(root_inode); fail_ino: kfree(ino); fail_free: diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 6e6d536767fe..e18da23d42b5 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -852,9 +852,8 @@ befs_fill_super(struct super_block *sb, void *data, int silent) ret = PTR_ERR(root); goto unacquire_priv_sbp; } - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { - iput(root); befs_error(sb, "get root inode failed"); goto unacquire_priv_sbp; } diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index b0391bc402b1..e23dc7c8b884 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -367,9 +367,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) ret = PTR_ERR(inode); goto out2; } - s->s_root = d_alloc_root(inode); + s->s_root = d_make_root(inode); if (!s->s_root) { - iput(inode); ret = -ENOMEM; goto out2; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3ce97b217cbe..81df3fec6a6d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -629,7 +629,6 @@ static int btrfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; - struct dentry *root_dentry; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_key key; int err; @@ -660,15 +659,12 @@ static int btrfs_fill_super(struct super_block *sb, goto fail_close; } - root_dentry = d_alloc_root(inode); - if (!root_dentry) { - iput(inode); + sb->s_root = d_make_root(inode); + if (!sb->s_root) { err = -ENOMEM; goto fail_close; } - sb->s_root = root_dentry; - save_mount_options(sb, data); cleancache_init_fs(sb); sb->s_flags |= MS_ACTIVE; diff --git 
a/fs/ceph/super.c b/fs/ceph/super.c index 00de2c9568cd..256f85221926 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -655,9 +655,8 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, dout("open_root_inode success\n"); if (ceph_ino(inode) == CEPH_INO_ROOT && fsc->sb->s_root == NULL) { - root = d_alloc_root(inode); + root = d_make_root(inode); if (!root) { - iput(inode); root = ERR_PTR(-ENOMEM); goto out; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8b7d7ff88792..418fc42fb8b2 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -122,11 +122,9 @@ cifs_read_super(struct super_block *sb) goto out_no_root; } - sb->s_root = d_alloc_root(inode); - + sb->s_root = d_make_root(inode); if (!sb->s_root) { rc = -ENOMEM; - iput(inode); goto out_no_root; } diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 32dafc875c14..05156c17b551 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -213,9 +213,8 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) printk("coda_read_super: rootinode is %ld dev %s\n", root->i_ino, root->i_sb->s_id); - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { - iput(root); error = -EINVAL; goto error; } diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index 276e15cafd58..07f60455f1c1 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -91,10 +91,9 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; } - root = d_alloc_root(inode); + root = d_make_root(inode); if (!root) { pr_debug("%s: could not get root dentry!\n",__func__); - iput(inode); return -ENOMEM; } config_group_init(&configfs_root_group); diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index a2ee8f9f5a38..853480d2b3d1 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -318,11 +318,9 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) root = get_cramfs_inode(sb, &super.root, 0); if (IS_ERR(root)) goto out; - sb->s_root = d_alloc_root(root); - if (!sb->s_root) { - iput(root); + sb->s_root = d_make_root(root); + if (!sb->s_root) goto out; - } return 0; out: kfree(sbi); diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index c4e2a58a2e82..57dae0baedf2 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -309,12 +309,11 @@ devpts_fill_super(struct super_block *s, void *data, int silent) inode->i_fop = &simple_dir_operations; set_nlink(inode, 2); - s->s_root = d_alloc_root(inode); + s->s_root = d_make_root(inode); if (s->s_root) return 0; printk(KERN_ERR "devpts: get root dentry failed\n"); - iput(inode); fail: return -ENOMEM; diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index b4a6befb1216..6e0e017e6932 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -550,9 +550,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags if (IS_ERR(inode)) goto out_free; - s->s_root = d_alloc_root(inode); + s->s_root = d_make_root(inode); if (!s->s_root) { - iput(inode); rc = -ENOMEM; goto out_free; } diff --git a/fs/efs/super.c b/fs/efs/super.c index 981106429a9f..e755ec746c69 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -317,10 +317,9 @@ static int efs_fill_super(struct super_block *s, void *d, int silent) goto out_no_fs; } - s->s_root = d_alloc_root(root); + s->s_root = d_make_root(root); if (!(s->s_root)) { printk(KERN_ERR "EFS: get root dentry failed\n"); - iput(root); ret = -ENOMEM; goto out_no_fs; } diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 6cafcadfc3c8..7f2b590a36b7 
100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -819,9 +819,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) ret = PTR_ERR(root); goto free_sbi; } - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { - iput(root); EXOFS_ERR("ERROR: get root inode failed\n"); ret = -ENOMEM; goto free_sbi; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 9f6766a3ac1e..e1025c7a437a 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -1088,9 +1088,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount3; } - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { - iput(root); ext2_msg(sb, KERN_ERR, "error: get root inode failed"); ret = -ENOMEM; goto failed_mount3; diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 726c7ef6cdf1..e0b45b93327b 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -2046,10 +2046,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck"); goto failed_mount3; } - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { ext3_msg(sb, KERN_ERR, "error: get root dentry failed"); - iput(root); ret = -ENOMEM; goto failed_mount3; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 502c61fd7392..d2baea7bcf30 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3735,9 +3735,8 @@ no_journal: iput(root); goto failed_mount4; } - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { - iput(root); ext4_msg(sb, KERN_ERR, "get root dentry failed"); ret = -ENOMEM; goto failed_mount4; diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c index 9d1c99558389..d4fabd26084e 100644 --- a/fs/freevxfs/vxfs_super.c +++ b/fs/freevxfs/vxfs_super.c @@ -224,9 +224,8 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) ret = PTR_ERR(root); goto out; } - sbp->s_root = d_alloc_root(root); + sbp->s_root = d_make_root(root); if (!sbp->s_root) { - iput(root); printk(KERN_WARNING "vxfs: unable to get root dentry.\n"); goto out_free_ilist; } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 64cf8d07393e..4aec5995867e 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -988,14 +988,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) err = -ENOMEM; root = fuse_get_root_inode(sb, d.rootmode); - if (!root) + root_dentry = d_make_root(root); + if (!root_dentry) goto err_put_conn; - - root_dentry = d_alloc_root(root); - if (!root_dentry) { - iput(root); - goto err_put_conn; - } /* only now - we want root dentry with NULL ->d_op */ sb->s_d_op = &fuse_dentry_operations; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 24f609c9ef91..10e848c6d1b5 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -431,10 +431,9 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr, fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); return PTR_ERR(inode); } - dentry = d_alloc_root(inode); + dentry = d_make_root(inode); if (!dentry) { fs_err(sdp, "can't alloc %s dentry\n", name); - iput(inode); return -ENOMEM; } *dptr = dentry; diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 8137fb3e6780..7b4c537d6e13 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -430,15 +430,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_d_op = &hfs_dentry_operations; res = -ENOMEM; - sb->s_root = 
d_alloc_root(root_inode); + sb->s_root = d_make_root(root_inode); if (!sb->s_root) - goto bail_iput; + goto bail_no_root; /* everything's okay */ return 0; -bail_iput: - iput(root_inode); bail_no_root: printk(KERN_ERR "hfs: get root inode failed.\n"); bail: diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index e130bd46d671..588d45885a6f 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -966,9 +966,9 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) } err = -ENOMEM; - sb->s_root = d_alloc_root(root_inode); + sb->s_root = d_make_root(root_inode); if (sb->s_root == NULL) - goto out_put; + goto out; return 0; diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 3690467c944e..54f6eccb79d9 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -625,11 +625,9 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) hpfs_init_inode(root); hpfs_read_inode(root); unlock_new_inode(root); - s->s_root = d_alloc_root(root); - if (!s->s_root) { - iput(root); + s->s_root = d_make_root(root); + if (!s->s_root) goto bail0; - } /* * find the root directory's . pointer & finish filling in the inode diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index d92f4ce80925..a80e45a690ac 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -726,17 +726,12 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent) err = -ENOMEM; root_inode = get_inode(sb, dget(proc_mnt->mnt_root)); - if (!root_inode) - goto out_mntput; - - sb->s_root = d_alloc_root(root_inode); + sb->s_root = d_make_root(root_inode); if (!sb->s_root) - goto out_iput; + goto out_mntput; return 0; - out_iput: - iput(root_inode); out_mntput: mntput(proc_mnt); out: diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 1e85a7ac0217..81932fa1861a 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -831,8 +831,6 @@ bad_val: static int hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) { - struct inode * inode; - struct dentry * root; int ret; struct hugetlbfs_config config; struct hugetlbfs_sb_info *sbinfo; @@ -865,16 +863,9 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; sb->s_time_gran = 1; - inode = hugetlbfs_get_root(sb, &config); - if (!inode) - goto out_free; - - root = d_alloc_root(inode); - if (!root) { - iput(inode); + sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config)); + if (!sb->s_root) goto out_free; - } - sb->s_root = root; return 0; out_free: kfree(sbinfo); diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index bd62c76fb5df..29037c365ba4 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -947,9 +947,8 @@ root_found: s->s_d_op = &isofs_dentry_ops[table]; /* get the root dentry */ - s->s_root = d_alloc_root(inode); + s->s_root = d_make_root(inode); if (!(s->s_root)) { - iput(inode); error = -ENOMEM; goto out_no_inode; } diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 2e0123867cb1..c0d5c9d770da 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -561,9 +561,9 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) ret = -ENOMEM; D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); - sb->s_root = d_alloc_root(root_i); + sb->s_root = d_make_root(root_i); if (!sb->s_root) - goto out_root_i; + goto out_root; sb->s_maxbytes = 0xFFFFFFFF; sb->s_blocksize = PAGE_CACHE_SIZE; @@ -573,8 +573,6 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) 
jffs2_start_garbage_collect_thread(c); return 0; - out_root_i: - iput(root_i); out_root: jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 4661ad705130..b3bb95504479 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -522,7 +522,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) ret = PTR_ERR(inode); goto out_no_rw; } - sb->s_root = d_alloc_root(inode); + sb->s_root = d_make_root(inode); if (!sb->s_root) goto out_no_root; @@ -540,7 +540,6 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) out_no_root: jfs_err("jfs_read_super: get root dentry failed"); - iput(inode); out_no_rw: rc = jfs_umount(sb); diff --git a/fs/libfs.c b/fs/libfs.c index 5b2dbb3ba4fc..7c895a763a1e 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -491,11 +491,9 @@ int simple_fill_super(struct super_block *s, unsigned long magic, inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; set_nlink(inode, 2); - root = d_alloc_root(inode); - if (!root) { - iput(inode); + root = d_make_root(inode); + if (!root) return -ENOMEM; - } for (i = 0; !files->name || files->name[0]; i++, files++) { if (!files->name) continue; diff --git a/fs/logfs/super.c b/fs/logfs/super.c index b1a491a5fe78..7de18c3021fe 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c @@ -315,11 +315,9 @@ static int logfs_get_sb_final(struct super_block *sb) if (IS_ERR(rootdir)) goto fail; - sb->s_root = d_alloc_root(rootdir); - if (!sb->s_root) { - iput(rootdir); + sb->s_root = d_make_root(rootdir); + if (!sb->s_root) goto fail; - } /* at that point we know that ->put_super() will be called */ super->s_erase_page = alloc_pages(GFP_KERNEL, 0); diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 3d1e34f8a68e..49df0e7f8379 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -716,13 +716,11 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) if (!root_inode) goto out_disconnect; DPRINTK("ncp_fill_super: root vol=%d\n", NCP_FINFO(root_inode)->volNumber); - sb->s_root = d_alloc_root(root_inode); + sb->s_root = d_make_root(root_inode); if (!sb->s_root) - goto out_no_root; + goto out_disconnect; return 0; -out_no_root: - iput(root_inode); out_disconnect: ncp_lock_server(server); ncp_disconnect(server); diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index dcb61548887f..801d6d830787 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c @@ -49,11 +49,9 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i { /* The mntroot acts as the dummy root dentry for this superblock */ if (sb->s_root == NULL) { - sb->s_root = d_alloc_root(inode); - if (sb->s_root == NULL) { - iput(inode); + sb->s_root = d_make_root(inode); + if (sb->s_root == NULL) return -ENOMEM; - } ihold(inode); /* * Ensure that this dentry is invisible to d_find_alias(). 
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 1fc9ad3c1d14..1099a76cee59 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -917,9 +917,8 @@ static int nilfs_get_root_dentry(struct super_block *sb, if (root->cno == NILFS_CPTREE_CURRENT_CNO) { dentry = d_find_alias(inode); if (!dentry) { - dentry = d_alloc_root(inode); + dentry = d_make_root(inode); if (!dentry) { - iput(inode); ret = -ENOMEM; goto failed_dentry; } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index abfac0d7ae9c..3b5825ef3193 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -582,24 +582,14 @@ static int dlmfs_fill_super(struct super_block * sb, void * data, int silent) { - struct inode * inode; - struct dentry * root; - sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = DLMFS_MAGIC; sb->s_op = &dlmfs_ops; - inode = dlmfs_get_root_inode(sb); - if (!inode) - return -ENOMEM; - - root = d_alloc_root(inode); - if (!root) { - iput(inode); + sb->s_root = d_make_root(dlmfs_get_root_inode(sb)); + if (!sb->s_root) return -ENOMEM; - } - sb->s_root = root; return 0; } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 2b1184f7097f..337687c3e233 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1166,9 +1166,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) goto read_super_error; } - root = d_alloc_root(inode); + root = d_make_root(inode); if (!root) { - iput(inode); status = -ENOMEM; mlog_errno(status); goto read_super_error; diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index 6065bb0ba207..dbc842222589 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -539,11 +539,9 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent) goto out_brelse_bh2; } - sb->s_root = d_alloc_root(root); - if (!sb->s_root) { - iput(root); + sb->s_root = d_make_root(root); + if (!sb->s_root) goto out_brelse_bh2; - } printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); ret = 0; diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index a88c03bc749d..bc49c975d501 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -408,13 +408,12 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent) oi->type = op_inode_node; oi->u.node = of_find_node_by_path("/"); - s->s_root = d_alloc_root(root_inode); + s->s_root = d_make_root(root_inode); if (!s->s_root) goto out_no_root_dentry; return 0; out_no_root_dentry: - iput(root_inode); ret = -ENOMEM; out_no_root: printk("openprom_fill_super: get root inode failed\n"); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index a70af3a44f45..8461a7b82fdb 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -486,8 +486,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) int proc_fill_super(struct super_block *s) { - struct inode * root_inode; - s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC; s->s_blocksize = 1024; s->s_blocksize_bits = 10; @@ -496,17 +494,10 @@ int proc_fill_super(struct super_block *s) s->s_time_gran = 1; pde_get(&proc_root); - root_inode = proc_get_inode(s, &proc_root); - if (!root_inode) - goto out_no_root; - s->s_root = d_alloc_root(root_inode); - if (!s->s_root) { - iput(root_inode); - goto out_no_root; - } - return 0; + s->s_root = d_make_root(proc_get_inode(s, &proc_root)); + if (s->s_root) + return 0; -out_no_root: printk("proc_read_super: get root inode failed\n"); pde_put(&proc_root); return -ENOMEM; diff --git a/fs/pstore/inode.c 
b/fs/pstore/inode.c index b3b426edb2fd..ec7d1fb6f35a 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -303,7 +303,7 @@ int pstore_fill_super(struct super_block *sb, void *data, int silent) /* override ramfs "dir" options so we catch unlink(2) */ inode->i_op = &pstore_dir_inode_operations; - root = d_alloc_root(inode); + root = d_make_root(inode); sb->s_root = root; if (!root) { err = -ENOMEM; @@ -314,7 +314,6 @@ int pstore_fill_super(struct super_block *sb, void *data, int silent) return 0; fail: - iput(inode); return err; } diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 6b009548d2e0..db18d866d981 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -260,15 +260,13 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) } ret = -ENOMEM; - s->s_root = d_alloc_root(root); + s->s_root = d_make_root(root); if (s->s_root == NULL) - goto outi; + goto outb; brelse(bh); return 0; - outi: - iput(root); outb: kfree(qs->BitMap); out: diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index aec766abe3af..b6612d2ed718 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -210,7 +210,6 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent) { struct ramfs_fs_info *fsi; struct inode *inode = NULL; - struct dentry *root; int err; save_mount_options(sb, data); @@ -234,14 +233,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_time_gran = 1; inode = ramfs_get_inode(sb, NULL, S_IFDIR | fsi->mount_opts.mode, 0); - if (!inode) { - err = -ENOMEM; - goto fail; - } - - root = d_alloc_root(inode); - sb->s_root = root; - if (!root) { + sb->s_root = d_make_root(inode); + if (!sb->s_root) { err = -ENOMEM; goto fail; } @@ -250,7 +243,6 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent) fail: kfree(fsi); sb->s_fs_info = NULL; - iput(inode); return err; } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index e12d8b97cd4d..208dfd144409 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -1874,11 +1874,9 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) unlock_new_inode(root_inode); } - s->s_root = d_alloc_root(root_inode); - if (!s->s_root) { - iput(root_inode); + s->s_root = d_make_root(root_inode); + if (!s->s_root) goto error; - } // define and initialize hash function sbi->s_hash_function = hash_function(s); if (sbi->s_hash_function == NULL) { diff --git a/fs/romfs/super.c b/fs/romfs/super.c index bb36ab74eb45..e64f6b5f7ae5 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -538,14 +538,12 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) if (IS_ERR(root)) goto error; - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) - goto error_i; + goto error; return 0; -error_i: - iput(root); error: return -EINVAL; error_rsb_inval: diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index ecaa2f7bdb8f..970b1167e7cb 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -316,11 +316,10 @@ check_directory_table: } insert_inode_hash(root); - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (sb->s_root == NULL) { ERROR("Root inode create failed\n"); err = -ENOMEM; - iput(root); goto failed_mount; } diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index e34f0d99ea4e..2243f8ec64d5 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -61,10 +61,9 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent) } /* instantiate and link root dentry */ - root = 
d_alloc_root(inode); + root = d_make_root(inode); if (!root) { pr_debug("%s: could not get root dentry!\n",__func__); - iput(inode); return -ENOMEM; } root->d_fsdata = &sysfs_root; diff --git a/fs/sysv/super.c b/fs/sysv/super.c index f467740e088c..7491c33b6468 100644 --- a/fs/sysv/super.c +++ b/fs/sysv/super.c @@ -341,9 +341,8 @@ static int complete_read_super(struct super_block *sb, int silent, int size) printk("SysV FS: get root inode failed\n"); return 0; } - sb->s_root = d_alloc_root(root_inode); + sb->s_root = d_make_root(root_inode); if (!sb->s_root) { - iput(root_inode); printk("SysV FS: get root dentry failed\n"); return 0; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 63765d58445b..76e4e0566ad6 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2076,15 +2076,13 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) goto out_umount; } - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) - goto out_iput; + goto out_umount; mutex_unlock(&c->umount_mutex); return 0; -out_iput: - iput(root); out_umount: ubifs_umount(c); out_unlock: diff --git a/fs/udf/super.c b/fs/udf/super.c index 8d8b25336fbb..85067b4c7e14 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -2037,10 +2037,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) } /* Allocate a dentry for the root inode */ - sb->s_root = d_alloc_root(inode); + sb->s_root = d_make_root(inode); if (!sb->s_root) { udf_err(sb, "Couldn't allocate root dentry\n"); - iput(inode); goto error_out; } sb->s_maxbytes = MAX_LFS_FILESIZE; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index ec25d09fcaa8..f636f6b460d0 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1164,10 +1164,10 @@ magic_found: ret = PTR_ERR(inode); goto failed; } - sb->s_root = d_alloc_root(inode); + sb->s_root = d_make_root(inode); if (!sb->s_root) { ret = -ENOMEM; - goto dalloc_failed; + goto failed; } ufs_setup_cstotal(sb); @@ -1181,8 +1181,6 @@ magic_found: UFSD("EXIT\n"); return 0; -dalloc_failed: - iput(inode); failed: if (ubh) ubh_brelse_uspi (uspi); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 0e4c5c017fba..baf40e378d35 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1362,10 +1362,10 @@ xfs_fs_fill_super( error = EINVAL; goto out_syncd_stop; } - sb->s_root = d_alloc_root(root); + sb->s_root = d_make_root(root); if (!sb->s_root) { error = ENOMEM; - goto out_iput; + goto out_syncd_stop; } return 0; @@ -1384,8 +1384,6 @@ xfs_fs_fill_super( out: return -error; - out_iput: - iput(root); out_syncd_stop: xfs_syncd_stop(mp); out_unmount: -- cgit From ea29c6950a0305f2af7fd4a2021a6e946ada0174 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 22:04:09 -0500 Subject: ntfs: switch to d_make_root() Signed-off-by: Al Viro --- fs/ntfs/super.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index f907611cca73..22020d8b1ed2 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -2908,9 +2908,10 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) ntfs_error(sb, "Failed to load system files."); goto unl_upcase_iput_tmp_ino_err_out_now; } - if ((sb->s_root = d_alloc_root(vol->root_ino))) { - /* We grab a reference, simulating an ntfs_iget(). */ - ihold(vol->root_ino); + + /* We grab a reference, simulating an ntfs_iget(). 
*/ + ihold(vol->root_ino); + if ((sb->s_root = d_make_root(vol->root_ino))) { ntfs_debug("Exiting, status successful."); /* Release the default upcase if it has no users. */ mutex_lock(&ntfs_lock); -- cgit From 1688f86046e5572623b2eacb685eb707fe21fb0a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 22:06:33 -0500 Subject: fat: switch to d_make_root() Signed-off-by: Al Viro --- fs/fat/inode.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 3ab841054d53..21687e31acc0 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1496,11 +1496,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, root_inode->i_ino = MSDOS_ROOT_INO; root_inode->i_version = 1; error = fat_read_root(root_inode); - if (error < 0) + if (error < 0) { + iput(root_inode); goto out_fail; + } error = -ENOMEM; insert_inode_hash(root_inode); - sb->s_root = d_alloc_root(root_inode); + sb->s_root = d_make_root(root_inode); if (!sb->s_root) { fat_msg(sb, KERN_ERR, "get root inode failed"); goto out_fail; @@ -1516,8 +1518,6 @@ out_invalid: out_fail: if (fat_inode) iput(fat_inode); - if (root_inode) - iput(root_inode); unload_nls(sbi->nls_io); unload_nls(sbi->nls_disk); if (sbi->options.iocharset != fat_default_iocharset) -- cgit From 68acb8e60d672cba0fd1d3545ba33343931c7a24 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 22:07:07 -0500 Subject: hfsplus: switch to d_make_root() Signed-off-by: Al Viro --- fs/hfsplus/super.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 427682ca9e48..ceb1c281eefb 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -465,6 +465,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) goto out_put_alloc_file; } + sb->s_d_op = &hfsplus_dentry_operations; + sb->s_root = d_make_root(root); + if (!sb->s_root) { + err = -ENOMEM; + goto out_put_alloc_file; + } + str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; str.name = HFSP_HIDDENDIR_NAME; err = hfs_find_init(sbi->cat_tree, &fd); @@ -515,13 +522,6 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) } } - sb->s_d_op = &hfsplus_dentry_operations; - sb->s_root = d_alloc_root(root); - if (!sb->s_root) { - err = -ENOMEM; - goto out_put_hidden_dir; - } - unload_nls(sbi->nls); sbi->nls = nls; return 0; @@ -529,7 +529,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) out_put_hidden_dir: iput(sbi->hidden_dir); out_put_root: - iput(root); + dput(sb->s_root); + sb->s_root = NULL; out_put_alloc_file: iput(sbi->alloc_file); out_close_cat_tree: -- cgit From ca85c07809ca19de3391cb79ee1198f3dd91fa8d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 22:07:43 -0500 Subject: minixfs: switch to d_make_root() Signed-off-by: Al Viro --- fs/minix/inode.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 62c697caffb9..fcb05d2c6b5f 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -254,14 +254,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) minix_set_bit(0,sbi->s_imap[0]->b_data); minix_set_bit(0,sbi->s_zmap[0]->b_data); - /* set up enough so that it can read an inode */ - s->s_op = &minix_sops; - root_inode = minix_iget(s, MINIX_ROOT_INO); - if (IS_ERR(root_inode)) { - ret = PTR_ERR(root_inode); - goto 
out_no_root; - } - /* Apparently minix can create filesystems that allocate more blocks for * the bitmaps than needed. We simply ignore that, but verify it didn't * create one with not enough blocks and bail out if so. @@ -270,7 +262,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) if (sbi->s_imap_blocks < block) { printk("MINIX-fs: file system does not have enough " "imap blocks allocated. Refusing to mount\n"); - goto out_iput; + goto out_no_bitmap; } block = minix_blocks_needed( @@ -279,13 +271,21 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) if (sbi->s_zmap_blocks < block) { printk("MINIX-fs: file system does not have enough " "zmap blocks allocated. Refusing to mount.\n"); - goto out_iput; + goto out_no_bitmap; + } + + /* set up enough so that it can read an inode */ + s->s_op = &minix_sops; + root_inode = minix_iget(s, MINIX_ROOT_INO); + if (IS_ERR(root_inode)) { + ret = PTR_ERR(root_inode); + goto out_no_root; } ret = -ENOMEM; - s->s_root = d_alloc_root(root_inode); + s->s_root = d_make_root(root_inode); if (!s->s_root) - goto out_iput; + goto out_no_root; if (!(s->s_flags & MS_RDONLY)) { if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ @@ -301,10 +301,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) return 0; -out_iput: - iput(root_inode); - goto out_freemap; - out_no_root: if (!silent) printk("MINIX-fs: get root inode failed\n"); -- cgit From 318ceed088497d1ca839b1172518ac4cc7096b82 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 22:08:01 -0500 Subject: tidy up after d_make_root() conversion Signed-off-by: Al Viro --- fs/pstore/inode.c | 24 +++++++----------------- fs/ramfs/inode.c | 20 ++++++-------------- 2 files changed, 13 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index ec7d1fb6f35a..f37c32b94525 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -278,9 +278,7 @@ fail: int pstore_fill_super(struct super_block *sb, void *data, int silent) { - struct inode *inode = NULL; - struct dentry *root; - int err; + struct inode *inode; save_mount_options(sb, data); @@ -296,25 +294,17 @@ int pstore_fill_super(struct super_block *sb, void *data, int silent) parse_options(data); inode = pstore_get_inode(sb, NULL, S_IFDIR | 0755, 0); - if (!inode) { - err = -ENOMEM; - goto fail; - } - /* override ramfs "dir" options so we catch unlink(2) */ - inode->i_op = &pstore_dir_inode_operations; - - root = d_make_root(inode); - sb->s_root = root; - if (!root) { - err = -ENOMEM; - goto fail; + if (inode) { + /* override ramfs "dir" options so we catch unlink(2) */ + inode->i_op = &pstore_dir_inode_operations; } + sb->s_root = d_make_root(inode); + if (!sb->s_root) + return -ENOMEM; pstore_get_records(0); return 0; -fail: - return err; } static struct dentry *pstore_mount(struct file_system_type *fs_type, diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index b6612d2ed718..a1fdabe21dec 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -209,21 +209,19 @@ static int ramfs_parse_options(char *data, struct ramfs_mount_opts *opts) int ramfs_fill_super(struct super_block *sb, void *data, int silent) { struct ramfs_fs_info *fsi; - struct inode *inode = NULL; + struct inode *inode; int err; save_mount_options(sb, data); fsi = kzalloc(sizeof(struct ramfs_fs_info), GFP_KERNEL); sb->s_fs_info = fsi; - if (!fsi) { - err = -ENOMEM; - goto fail; - } + if (!fsi) + return -ENOMEM; err = ramfs_parse_options(data, 
&fsi->mount_opts); if (err) - goto fail; + return err; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_CACHE_SIZE; @@ -234,16 +232,10 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent) inode = ramfs_get_inode(sb, NULL, S_IFDIR | fsi->mount_opts.mode, 0); sb->s_root = d_make_root(inode); - if (!sb->s_root) { - err = -ENOMEM; - goto fail; - } + if (!sb->s_root) + return -ENOMEM; return 0; -fail: - kfree(fsi); - sb->s_fs_info = NULL; - return err; } struct dentry *ramfs_mount(struct file_system_type *fs_type, -- cgit From 32991ab305ace7017c62f8eecbe5eb36dc32e13b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Feb 2012 22:15:47 -0500 Subject: vfs: d_alloc_root() gone all callers converted to d_make_root() by now Signed-off-by: Al Viro --- fs/dcache.c | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index bcbdb33fcc20..a78e145a4357 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1443,30 +1443,6 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) EXPORT_SYMBOL(d_instantiate_unique); -/** - * d_alloc_root - allocate root dentry - * @root_inode: inode to allocate the root for - * - * Allocate a root ("/") dentry for the inode given. The inode is - * instantiated and returned. %NULL is returned if there is insufficient - * memory or the inode passed is %NULL. - */ - -struct dentry * d_alloc_root(struct inode * root_inode) -{ - struct dentry *res = NULL; - - if (root_inode) { - static const struct qstr name = { .name = "/", .len = 1 }; - - res = __d_alloc(root_inode->i_sb, &name); - if (res) - d_instantiate(res, root_inode); - } - return res; -} -EXPORT_SYMBOL(d_alloc_root); - struct dentry *d_make_root(struct inode *root_inode) { struct dentry *res = NULL; -- cgit From 516cdb68e5b44ca1bef31619f5da8d5e9e298f88 Mon Sep 17 00:00:00 2001 From: Kai Bankett Date: Mon, 13 Feb 2012 02:43:41 +0100 Subject: qnx4fs: small cleanup Small qnx4 cleanup patch. - removes .writepage, .write_begin and .write_end (+callback functions) - removes '.' 
path checking in namei.c (handled on upper layers) Signed-off-by: Kai Bankett Signed-off-by: Al Viro --- fs/qnx4/inode.c | 27 --------------------------- fs/qnx4/namei.c | 4 ---- 2 files changed, 31 deletions(-) (limited to 'fs') diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index db18d866d981..14a0ba0f0c1c 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -286,44 +286,17 @@ static void qnx4_put_super(struct super_block *sb) return; } -static int qnx4_writepage(struct page *page, struct writeback_control *wbc) -{ - return block_write_full_page(page,qnx4_get_block, wbc); -} static int qnx4_readpage(struct file *file, struct page *page) { return block_read_full_page(page,qnx4_get_block); } -static int qnx4_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) -{ - struct qnx4_inode_info *qnx4_inode = qnx4_i(mapping->host); - int ret; - - *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, - qnx4_get_block, - &qnx4_inode->mmu_private); - if (unlikely(ret)) { - loff_t isize = mapping->host->i_size; - if (pos + len > isize) - vmtruncate(mapping->host, isize); - } - - return ret; -} static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,qnx4_get_block); } static const struct address_space_operations qnx4_aops = { .readpage = qnx4_readpage, - .writepage = qnx4_writepage, - .write_begin = qnx4_write_begin, - .write_end = generic_write_end, .bmap = qnx4_bmap }; diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c index 275327b5615e..e8eb8784ed30 100644 --- a/fs/qnx4/namei.c +++ b/fs/qnx4/namei.c @@ -39,10 +39,6 @@ static int qnx4_match(int len, const char *name, } else { namelen = QNX4_SHORT_NAME_MAX; } - /* "" means "." ---> so paths like "/usr/lib//libc.a" work */ - if (!len && (de->di_fname[0] == '.') && (de->di_fname[1] == '\0')) { - return 1; - } thislen = strlen( de->di_fname ); if ( thislen > namelen ) thislen = namelen; -- cgit From 5d026c7242201e7c9d0e12fcb2bcaffead9d59fd Mon Sep 17 00:00:00 2001 From: Kai Bankett Date: Fri, 17 Feb 2012 05:59:20 +0100 Subject: fs: initial qnx6fs addition Adds readonly support for qnx6fs to the Linux kernel. * Mount option The option mmi_fs can be used to mount Harman Becker/Audi MMI 3G HDD qnx6fs filesystems. * Documentation A high level filesystem structure description can be found in the Documentation/filesystems directory. 
(qnx6.txt) * Additional features - Active (stable) superblock selection - Superblock checksum check (enforced) - Supports mounting qnx6 filesystems with an endianness different from the host - Automatic endianness detection - Longfilename support (with non-enforcing crc check) - All blocksizes (512, 1024, 2048 and 4096 supported) Signed-off-by: Kai Bankett Signed-off-by: Al Viro --- fs/Kconfig | 1 + fs/Makefile | 1 + fs/qnx6/Kconfig | 26 ++ fs/qnx6/Makefile | 7 + fs/qnx6/README | 8 + fs/qnx6/dir.c | 291 ++++++++++++++++++++++ fs/qnx6/inode.c | 698 ++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/qnx6/namei.c | 42 ++++ fs/qnx6/qnx6.h | 135 ++++++++++ fs/qnx6/super_mmi.c | 150 +++++++++++ 10 files changed, 1359 insertions(+) create mode 100644 fs/qnx6/Kconfig create mode 100644 fs/qnx6/Makefile create mode 100644 fs/qnx6/README create mode 100644 fs/qnx6/dir.c create mode 100644 fs/qnx6/inode.c create mode 100644 fs/qnx6/namei.c create mode 100644 fs/qnx6/qnx6.h create mode 100644 fs/qnx6/super_mmi.c (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index d621f02a3f9e..1497ddf27e91 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -210,6 +210,7 @@ source "fs/minix/Kconfig" source "fs/omfs/Kconfig" source "fs/hpfs/Kconfig" source "fs/qnx4/Kconfig" +source "fs/qnx6/Kconfig" source "fs/romfs/Kconfig" source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index 93804d4d66e1..2fb977934673 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -102,6 +102,7 @@ obj-$(CONFIG_UBIFS_FS) += ubifs/ obj-$(CONFIG_AFFS_FS) += affs/ obj-$(CONFIG_ROMFS_FS) += romfs/ obj-$(CONFIG_QNX4FS_FS) += qnx4/ +obj-$(CONFIG_QNX6FS_FS) += qnx6/ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ diff --git a/fs/qnx6/Kconfig b/fs/qnx6/Kconfig new file mode 100644 index 000000000000..edbba5c17cc8 --- /dev/null +++ b/fs/qnx6/Kconfig @@ -0,0 +1,26 @@ +config QNX6FS_FS + tristate "QNX6 file system support (read only)" + depends on BLOCK && CRC32 + help + This is the file system used by the real-time operating systems + QNX 6 (also called QNX RTP). + Further information is available at . + Say Y if you intend to mount QNX hard disks or floppies formatted + with a mkqnx6fs. + However, keep in mind that this currently is a readonly driver! + + To compile this file system support as a module, choose M here: the + module will be called qnx6. + + If you don't know whether you need it, then you don't need it: + answer N. + +config QNX6FS_DEBUG + bool "QNX6 debugging information" + depends on QNX6FS_FS + help + Turns on extended debugging output. + + If you are not a developer working on the QNX6FS, you probably don't + want this: + answer N. diff --git a/fs/qnx6/Makefile b/fs/qnx6/Makefile new file mode 100644 index 000000000000..9dd06199afc9 --- /dev/null +++ b/fs/qnx6/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the linux qnx4-filesystem routines. +# + +obj-$(CONFIG_QNX6FS_FS) += qnx6.o + +qnx6-objs := inode.o dir.o namei.o super_mmi.o diff --git a/fs/qnx6/README b/fs/qnx6/README new file mode 100644 index 000000000000..116d622026cc --- /dev/null +++ b/fs/qnx6/README @@ -0,0 +1,8 @@ + + This is a snapshot of the QNX6 filesystem for Linux. + Please send diffs and remarks to . + +Credits : + +Al Viro (endless patience with me & support ;)) +Kai Bankett (Maintainer) diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c new file mode 100644 index 000000000000..dc597353db3b --- /dev/null +++ b/fs/qnx6/dir.c @@ -0,0 +1,291 @@ +/* + * QNX6 file system, Linux implementation. 
+ * + * Version : 1.0.0 + * + * History : + * + * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release. + * 16-02-2012 pagemap extension by Al Viro + * + */ + +#include "qnx6.h" + +static unsigned qnx6_lfile_checksum(char *name, unsigned size) +{ + unsigned crc = 0; + char *end = name + size; + while (name < end) { + crc = ((crc >> 1) + *(name++)) ^ + ((crc & 0x00000001) ? 0x80000000 : 0); + } + return crc; +} + +static struct page *qnx6_get_page(struct inode *dir, unsigned long n) +{ + struct address_space *mapping = dir->i_mapping; + struct page *page = read_mapping_page(mapping, n, NULL); + if (!IS_ERR(page)) + kmap(page); + return page; +} + +static inline unsigned long dir_pages(struct inode *inode) +{ + return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; +} + +static unsigned last_entry(struct inode *inode, unsigned long page_nr) +{ + unsigned long last_byte = inode->i_size; + last_byte -= page_nr << PAGE_CACHE_SHIFT; + if (last_byte > PAGE_CACHE_SIZE) + last_byte = PAGE_CACHE_SIZE; + return last_byte / QNX6_DIR_ENTRY_SIZE; +} + +static struct qnx6_long_filename *qnx6_longname(struct super_block *sb, + struct qnx6_long_dir_entry *de, + struct page **p) +{ + struct qnx6_sb_info *sbi = QNX6_SB(sb); + u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ + u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */ + /* within page */ + u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK; + struct address_space *mapping = sbi->longfile->i_mapping; + struct page *page = read_mapping_page(mapping, n, NULL); + if (IS_ERR(page)) + return ERR_CAST(page); + kmap(*p = page); + return (struct qnx6_long_filename *)(page_address(page) + offs); +} + +static int qnx6_dir_longfilename(struct inode *inode, + struct qnx6_long_dir_entry *de, + void *dirent, loff_t pos, + unsigned de_inode, filldir_t filldir) +{ + struct qnx6_long_filename *lf; + struct super_block *s = inode->i_sb; + struct qnx6_sb_info *sbi = QNX6_SB(s); + struct page *page; + int lf_size; + + if (de->de_size != 0xff) { + /* error - long filename entries always have size 0xff + in direntry */ + printk(KERN_ERR "qnx6: invalid direntry size (%i).\n", + de->de_size); + return 0; + } + lf = qnx6_longname(s, de, &page); + if (IS_ERR(lf)) { + printk(KERN_ERR "qnx6:Error reading longname\n"); + return 0; + } + + lf_size = fs16_to_cpu(sbi, lf->lf_size); + + if (lf_size > QNX6_LONG_NAME_MAX) { + QNX6DEBUG((KERN_INFO "file %s\n", lf->lf_fname)); + printk(KERN_ERR "qnx6:Filename too long (%i)\n", lf_size); + qnx6_put_page(page); + return 0; + } + + /* calc & validate longfilename checksum + mmi 3g filesystem does not have that checksum */ + if (!test_opt(s, MMI_FS) && fs32_to_cpu(sbi, de->de_checksum) != + qnx6_lfile_checksum(lf->lf_fname, lf_size)) + printk(KERN_INFO "qnx6: long filename checksum error.\n"); + + QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s inode:%u\n", + lf_size, lf->lf_fname, de_inode)); + if (filldir(dirent, lf->lf_fname, lf_size, pos, de_inode, + DT_UNKNOWN) < 0) { + qnx6_put_page(page); + return 0; + } + + qnx6_put_page(page); + /* success */ + return 1; +} + +static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + struct super_block *s = inode->i_sb; + struct qnx6_sb_info *sbi = QNX6_SB(s); + loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1); + unsigned long npages = dir_pages(inode); + unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; + 
bool done = false; + + if (filp->f_pos >= inode->i_size) + return 0; + + for ( ; !done && n < npages; n++, start = 0) { + struct page *page = qnx6_get_page(inode, n); + int limit = last_entry(inode, n); + struct qnx6_dir_entry *de; + int i = start; + + if (IS_ERR(page)) { + printk(KERN_ERR "qnx6_readdir: read failed\n"); + filp->f_pos = (n + 1) << PAGE_CACHE_SHIFT; + return PTR_ERR(page); + } + de = ((struct qnx6_dir_entry *)page_address(page)) + start; + for (; i < limit; i++, de++, pos += QNX6_DIR_ENTRY_SIZE) { + int size = de->de_size; + u32 no_inode = fs32_to_cpu(sbi, de->de_inode); + + if (!no_inode || !size) + continue; + + if (size > QNX6_SHORT_NAME_MAX) { + /* long filename detected + get the filename from long filename + structure / block */ + if (!qnx6_dir_longfilename(inode, + (struct qnx6_long_dir_entry *)de, + dirent, pos, no_inode, + filldir)) { + done = true; + break; + } + } else { + QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s" + " inode:%u\n", size, de->de_fname, + no_inode)); + if (filldir(dirent, de->de_fname, size, + pos, no_inode, DT_UNKNOWN) + < 0) { + done = true; + break; + } + } + } + qnx6_put_page(page); + } + filp->f_pos = pos; + return 0; +} + +/* + * check if the long filename is correct. + */ +static unsigned qnx6_long_match(int len, const char *name, + struct qnx6_long_dir_entry *de, struct inode *dir) +{ + struct super_block *s = dir->i_sb; + struct qnx6_sb_info *sbi = QNX6_SB(s); + struct page *page; + int thislen; + struct qnx6_long_filename *lf = qnx6_longname(s, de, &page); + + if (IS_ERR(lf)) + return 0; + + thislen = fs16_to_cpu(sbi, lf->lf_size); + if (len != thislen) { + qnx6_put_page(page); + return 0; + } + if (memcmp(name, lf->lf_fname, len) == 0) { + qnx6_put_page(page); + return fs32_to_cpu(sbi, de->de_inode); + } + qnx6_put_page(page); + return 0; +} + +/* + * check if the filename is correct. 
+ */ +static unsigned qnx6_match(struct super_block *s, int len, const char *name, + struct qnx6_dir_entry *de) +{ + struct qnx6_sb_info *sbi = QNX6_SB(s); + if (memcmp(name, de->de_fname, len) == 0) + return fs32_to_cpu(sbi, de->de_inode); + return 0; +} + + +unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, + struct page **res_page) +{ + struct super_block *s = dir->i_sb; + struct qnx6_inode_info *ei = QNX6_I(dir); + struct page *page = NULL; + unsigned long start, n; + unsigned long npages = dir_pages(dir); + unsigned ino; + struct qnx6_dir_entry *de; + struct qnx6_long_dir_entry *lde; + + *res_page = NULL; + + if (npages == 0) + return 0; + start = ei->i_dir_start_lookup; + if (start >= npages) + start = 0; + n = start; + + do { + page = qnx6_get_page(dir, n); + if (!IS_ERR(page)) { + int limit = last_entry(dir, n); + int i; + + de = (struct qnx6_dir_entry *)page_address(page); + for (i = 0; i < limit; i++, de++) { + if (len <= QNX6_SHORT_NAME_MAX) { + /* short filename */ + if (len != de->de_size) + continue; + ino = qnx6_match(s, len, name, de); + if (ino) + goto found; + } else if (de->de_size == 0xff) { + /* deal with long filename */ + lde = (struct qnx6_long_dir_entry *)de; + ino = qnx6_long_match(len, + name, lde, dir); + if (ino) + goto found; + } else + printk(KERN_ERR "qnx6: undefined " + "filename size in inode.\n"); + } + qnx6_put_page(page); + } + + if (++n >= npages) + n = 0; + } while (n != start); + return 0; + +found: + *res_page = page; + ei->i_dir_start_lookup = n; + return ino; +} + +const struct file_operations qnx6_dir_operations = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .readdir = qnx6_readdir, + .fsync = generic_file_fsync, +}; + +const struct inode_operations qnx6_dir_inode_operations = { + .lookup = qnx6_lookup, +}; diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c new file mode 100644 index 000000000000..e44012dc5645 --- /dev/null +++ b/fs/qnx6/inode.c @@ -0,0 +1,698 @@ +/* + * QNX6 file system, Linux implementation. + * + * Version : 1.0.0 + * + * History : + * + * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release. 
+ * 16-02-2012 pagemap extension by Al Viro + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qnx6.h" + +static const struct super_operations qnx6_sops; + +static void qnx6_put_super(struct super_block *sb); +static struct inode *qnx6_alloc_inode(struct super_block *sb); +static void qnx6_destroy_inode(struct inode *inode); +static int qnx6_remount(struct super_block *sb, int *flags, char *data); +static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf); +static int qnx6_show_options(struct seq_file *seq, struct dentry *root); + +static const struct super_operations qnx6_sops = { + .alloc_inode = qnx6_alloc_inode, + .destroy_inode = qnx6_destroy_inode, + .put_super = qnx6_put_super, + .statfs = qnx6_statfs, + .remount_fs = qnx6_remount, + .show_options = qnx6_show_options, +}; + +static int qnx6_show_options(struct seq_file *seq, struct dentry *root) +{ + struct super_block *sb = root->d_sb; + struct qnx6_sb_info *sbi = QNX6_SB(sb); + + if (sbi->s_mount_opt & QNX6_MOUNT_MMI_FS) + seq_puts(seq, ",mmi_fs"); + return 0; +} + +static int qnx6_remount(struct super_block *sb, int *flags, char *data) +{ + *flags |= MS_RDONLY; + return 0; +} + +static unsigned qnx6_get_devblock(struct super_block *sb, __fs32 block) +{ + struct qnx6_sb_info *sbi = QNX6_SB(sb); + return fs32_to_cpu(sbi, block) + sbi->s_blks_off; +} + +static unsigned qnx6_block_map(struct inode *inode, unsigned iblock); + +static int qnx6_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int create) +{ + unsigned phys; + + QNX6DEBUG((KERN_INFO "qnx6: qnx6_get_block inode=[%ld] iblock=[%ld]\n", + inode->i_ino, (unsigned long)iblock)); + + phys = qnx6_block_map(inode, iblock); + if (phys) { + /* logical block is before EOF */ + map_bh(bh, inode->i_sb, phys); + } + return 0; +} + +static int qnx6_check_blockptr(__fs32 ptr) +{ + if (ptr == ~(__fs32)0) { + printk(KERN_ERR "qnx6: hit unused blockpointer.\n"); + return 0; + } + return 1; +} + +static int qnx6_readpage(struct file *file, struct page *page) +{ + return mpage_readpage(page, qnx6_get_block); +} + +static int qnx6_readpages(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block); +} + +/* + * returns the block number for the no-th element in the tree + * inodebits requred as there are multiple inodes in one inode block + */ +static unsigned qnx6_block_map(struct inode *inode, unsigned no) +{ + struct super_block *s = inode->i_sb; + struct qnx6_sb_info *sbi = QNX6_SB(s); + struct qnx6_inode_info *ei = QNX6_I(inode); + unsigned block = 0; + struct buffer_head *bh; + __fs32 ptr; + int levelptr; + int ptrbits = sbi->s_ptrbits; + int bitdelta; + u32 mask = (1 << ptrbits) - 1; + int depth = ei->di_filelevels; + int i; + + bitdelta = ptrbits * depth; + levelptr = no >> bitdelta; + + if (levelptr > QNX6_NO_DIRECT_POINTERS - 1) { + printk(KERN_ERR "qnx6:Requested file block number (%u) too big.", + no); + return 0; + } + + block = qnx6_get_devblock(s, ei->di_block_ptr[levelptr]); + + for (i = 0; i < depth; i++) { + bh = sb_bread(s, block); + if (!bh) { + printk(KERN_ERR "qnx6:Error reading block (%u)\n", + block); + return 0; + } + bitdelta -= ptrbits; + levelptr = (no >> bitdelta) & mask; + ptr = ((__fs32 *)bh->b_data)[levelptr]; + + if (!qnx6_check_blockptr(ptr)) + return 0; + + block = qnx6_get_devblock(s, ptr); + brelse(bh); + } + return 
block; +} + +static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct super_block *sb = dentry->d_sb; + struct qnx6_sb_info *sbi = QNX6_SB(sb); + u64 id = huge_encode_dev(sb->s_bdev->bd_dev); + + buf->f_type = sb->s_magic; + buf->f_bsize = sb->s_blocksize; + buf->f_blocks = fs32_to_cpu(sbi, sbi->sb->sb_num_blocks); + buf->f_bfree = fs32_to_cpu(sbi, sbi->sb->sb_free_blocks); + buf->f_files = fs32_to_cpu(sbi, sbi->sb->sb_num_inodes); + buf->f_ffree = fs32_to_cpu(sbi, sbi->sb->sb_free_inodes); + buf->f_bavail = buf->f_bfree; + buf->f_namelen = QNX6_LONG_NAME_MAX; + buf->f_fsid.val[0] = (u32)id; + buf->f_fsid.val[1] = (u32)(id >> 32); + + return 0; +} + +/* + * Check the root directory of the filesystem to make sure + * it really _is_ a qnx6 filesystem, and to check the size + * of the directory entry. + */ +static const char *qnx6_checkroot(struct super_block *s) +{ + static char match_root[2][3] = {".\0\0", "..\0"}; + int i, error = 0; + struct qnx6_dir_entry *dir_entry; + struct inode *root = s->s_root->d_inode; + struct address_space *mapping = root->i_mapping; + struct page *page = read_mapping_page(mapping, 0, NULL); + if (IS_ERR(page)) + return "error reading root directory"; + kmap(page); + dir_entry = page_address(page); + for (i = 0; i < 2; i++) { + /* maximum 3 bytes - due to match_root limitation */ + if (strncmp(dir_entry[i].de_fname, match_root[i], 3)) + error = 1; + } + qnx6_put_page(page); + if (error) + return "error reading root directory."; + return NULL; +} + +#ifdef CONFIG_QNX6FS_DEBUG +void qnx6_superblock_debug(struct qnx6_super_block *sb, struct super_block *s) +{ + struct qnx6_sb_info *sbi = QNX6_SB(s); + + QNX6DEBUG((KERN_INFO "magic: %08x\n", + fs32_to_cpu(sbi, sb->sb_magic))); + QNX6DEBUG((KERN_INFO "checksum: %08x\n", + fs32_to_cpu(sbi, sb->sb_checksum))); + QNX6DEBUG((KERN_INFO "serial: %llx\n", + fs64_to_cpu(sbi, sb->sb_serial))); + QNX6DEBUG((KERN_INFO "flags: %08x\n", + fs32_to_cpu(sbi, sb->sb_flags))); + QNX6DEBUG((KERN_INFO "blocksize: %08x\n", + fs32_to_cpu(sbi, sb->sb_blocksize))); + QNX6DEBUG((KERN_INFO "num_inodes: %08x\n", + fs32_to_cpu(sbi, sb->sb_num_inodes))); + QNX6DEBUG((KERN_INFO "free_inodes: %08x\n", + fs32_to_cpu(sbi, sb->sb_free_inodes))); + QNX6DEBUG((KERN_INFO "num_blocks: %08x\n", + fs32_to_cpu(sbi, sb->sb_num_blocks))); + QNX6DEBUG((KERN_INFO "free_blocks: %08x\n", + fs32_to_cpu(sbi, sb->sb_free_blocks))); + QNX6DEBUG((KERN_INFO "inode_levels: %02x\n", + sb->Inode.levels)); +} +#endif + +enum { + Opt_mmifs, + Opt_err +}; + +static const match_table_t tokens = { + {Opt_mmifs, "mmi_fs"}, + {Opt_err, NULL} +}; + +static int qnx6_parse_options(char *options, struct super_block *sb) +{ + char *p; + struct qnx6_sb_info *sbi = QNX6_SB(sb); + substring_t args[MAX_OPT_ARGS]; + + if (!options) + return 1; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_mmifs: + set_opt(sbi->s_mount_opt, MMI_FS); + break; + default: + return 0; + } + } + return 1; +} + +static struct buffer_head *qnx6_check_first_superblock(struct super_block *s, + int offset, int silent) +{ + struct qnx6_sb_info *sbi = QNX6_SB(s); + struct buffer_head *bh; + struct qnx6_super_block *sb; + + /* Check the superblock signatures + start with the first superblock */ + bh = sb_bread(s, offset); + if (!bh) { + printk(KERN_ERR "qnx6: unable to read the first superblock\n"); + return NULL; + } + sb = (struct qnx6_super_block *)bh->b_data; + if (fs32_to_cpu(sbi, 
sb->sb_magic) != QNX6_SUPER_MAGIC) { + sbi->s_bytesex = BYTESEX_BE; + if (fs32_to_cpu(sbi, sb->sb_magic) == QNX6_SUPER_MAGIC) { + /* we got a big endian fs */ + QNX6DEBUG((KERN_INFO "qnx6: fs got different" + " endianess.\n")); + return bh; + } else + sbi->s_bytesex = BYTESEX_LE; + if (!silent) { + if (offset == 0) { + printk(KERN_ERR "qnx6: wrong signature (magic)" + " in superblock #1.\n"); + } else { + printk(KERN_INFO "qnx6: wrong signature (magic)" + " at position (0x%lx) - will try" + " alternative position (0x0000).\n", + offset * s->s_blocksize); + } + } + brelse(bh); + return NULL; + } + return bh; +} + +static struct inode *qnx6_private_inode(struct super_block *s, + struct qnx6_root_node *p); + +static int qnx6_fill_super(struct super_block *s, void *data, int silent) +{ + struct buffer_head *bh1 = NULL, *bh2 = NULL; + struct qnx6_super_block *sb1 = NULL, *sb2 = NULL; + struct qnx6_sb_info *sbi; + struct inode *root; + const char *errmsg; + struct qnx6_sb_info *qs; + int ret = -EINVAL; + u64 offset; + int bootblock_offset = QNX6_BOOTBLOCK_SIZE; + + qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL); + if (!qs) + return -ENOMEM; + s->s_fs_info = qs; + + /* Superblock always is 512 Byte long */ + if (!sb_set_blocksize(s, QNX6_SUPERBLOCK_SIZE)) { + printk(KERN_ERR "qnx6: unable to set blocksize\n"); + goto outnobh; + } + + /* parse the mount-options */ + if (!qnx6_parse_options((char *) data, s)) { + printk(KERN_ERR "qnx6: invalid mount options.\n"); + goto outnobh; + } + if (test_opt(s, MMI_FS)) { + sb1 = qnx6_mmi_fill_super(s, silent); + if (sb1) + goto mmi_success; + else + goto outnobh; + } + sbi = QNX6_SB(s); + sbi->s_bytesex = BYTESEX_LE; + /* Check the superblock signatures + start with the first superblock */ + bh1 = qnx6_check_first_superblock(s, + bootblock_offset / QNX6_SUPERBLOCK_SIZE, silent); + if (!bh1) { + /* try again without bootblock offset */ + bh1 = qnx6_check_first_superblock(s, 0, silent); + if (!bh1) { + printk(KERN_ERR "qnx6: unable to read the first superblock\n"); + goto outnobh; + } + /* seems that no bootblock at partition start */ + bootblock_offset = 0; + } + sb1 = (struct qnx6_super_block *)bh1->b_data; + +#ifdef CONFIG_QNX6FS_DEBUG + qnx6_superblock_debug(sb1, s); +#endif + + /* checksum check - start at byte 8 and end at byte 512 */ + if (fs32_to_cpu(sbi, sb1->sb_checksum) != + crc32_be(0, (char *)(bh1->b_data + 8), 504)) { + printk(KERN_ERR "qnx6: superblock #1 checksum error\n"); + goto out; + } + + /* set new blocksize */ + if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) { + printk(KERN_ERR "qnx6: unable to set blocksize\n"); + goto out; + } + /* blocksize invalidates bh - pull it back in */ + brelse(bh1); + bh1 = sb_bread(s, bootblock_offset >> s->s_blocksize_bits); + if (!bh1) + goto outnobh; + sb1 = (struct qnx6_super_block *)bh1->b_data; + + /* calculate second superblock blocknumber */ + offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) + + (bootblock_offset >> s->s_blocksize_bits) + + (QNX6_SUPERBLOCK_AREA >> s->s_blocksize_bits); + + /* set bootblock offset */ + sbi->s_blks_off = (bootblock_offset >> s->s_blocksize_bits) + + (QNX6_SUPERBLOCK_AREA >> s->s_blocksize_bits); + + /* next the second superblock */ + bh2 = sb_bread(s, offset); + if (!bh2) { + printk(KERN_ERR "qnx6: unable to read the second superblock\n"); + goto out; + } + sb2 = (struct qnx6_super_block *)bh2->b_data; + if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) { + if (!silent) + printk(KERN_ERR "qnx6: wrong signature (magic)" + " in superblock 
#2.\n"); + goto out; + } + + /* checksum check - start at byte 8 and end at byte 512 */ + if (fs32_to_cpu(sbi, sb2->sb_checksum) != + crc32_be(0, (char *)(bh2->b_data + 8), 504)) { + printk(KERN_ERR "qnx6: superblock #2 checksum error\n"); + goto out; + } + + if (fs64_to_cpu(sbi, sb1->sb_serial) >= + fs64_to_cpu(sbi, sb2->sb_serial)) { + /* superblock #1 active */ + sbi->sb_buf = bh1; + sbi->sb = (struct qnx6_super_block *)bh1->b_data; + brelse(bh2); + printk(KERN_INFO "qnx6: superblock #1 active\n"); + } else { + /* superblock #2 active */ + sbi->sb_buf = bh2; + sbi->sb = (struct qnx6_super_block *)bh2->b_data; + brelse(bh1); + printk(KERN_INFO "qnx6: superblock #2 active\n"); + } +mmi_success: + /* sanity check - limit maximum indirect pointer levels */ + if (sb1->Inode.levels > QNX6_PTR_MAX_LEVELS) { + printk(KERN_ERR "qnx6: too many inode levels (max %i, sb %i)\n", + QNX6_PTR_MAX_LEVELS, sb1->Inode.levels); + goto out; + } + if (sb1->Longfile.levels > QNX6_PTR_MAX_LEVELS) { + printk(KERN_ERR "qnx6: too many longfilename levels" + " (max %i, sb %i)\n", + QNX6_PTR_MAX_LEVELS, sb1->Longfile.levels); + goto out; + } + s->s_op = &qnx6_sops; + s->s_magic = QNX6_SUPER_MAGIC; + s->s_flags |= MS_RDONLY; /* Yup, read-only yet */ + + /* ease the later tree level calculations */ + sbi = QNX6_SB(s); + sbi->s_ptrbits = ilog2(s->s_blocksize / 4); + sbi->inodes = qnx6_private_inode(s, &sb1->Inode); + if (!sbi->inodes) + goto out; + sbi->longfile = qnx6_private_inode(s, &sb1->Longfile); + if (!sbi->longfile) + goto out1; + + /* prefetch root inode */ + root = qnx6_iget(s, QNX6_ROOT_INO); + if (IS_ERR(root)) { + printk(KERN_ERR "qnx6: get inode failed\n"); + ret = PTR_ERR(root); + goto out2; + } + + ret = -ENOMEM; + s->s_root = d_make_root(root); + if (!s->s_root) + goto out2; + + ret = -EINVAL; + errmsg = qnx6_checkroot(s); + if (errmsg != NULL) { + if (!silent) + printk(KERN_ERR "qnx6: %s\n", errmsg); + goto out3; + } + return 0; + +out3: + dput(s->s_root); + s->s_root = NULL; +out2: + iput(sbi->longfile); +out1: + iput(sbi->inodes); +out: + if (bh1) + brelse(bh1); + if (bh2) + brelse(bh2); +outnobh: + kfree(qs); + s->s_fs_info = NULL; + return ret; +} + +static void qnx6_put_super(struct super_block *sb) +{ + struct qnx6_sb_info *qs = QNX6_SB(sb); + brelse(qs->sb_buf); + iput(qs->longfile); + iput(qs->inodes); + kfree(qs); + sb->s_fs_info = NULL; + return; +} + +static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) +{ + return generic_block_bmap(mapping, block, qnx6_get_block); +} +static const struct address_space_operations qnx6_aops = { + .readpage = qnx6_readpage, + .readpages = qnx6_readpages, + .bmap = qnx6_bmap +}; + +static struct inode *qnx6_private_inode(struct super_block *s, + struct qnx6_root_node *p) +{ + struct inode *inode = new_inode(s); + if (inode) { + struct qnx6_inode_info *ei = QNX6_I(inode); + struct qnx6_sb_info *sbi = QNX6_SB(s); + inode->i_size = fs64_to_cpu(sbi, p->size); + memcpy(ei->di_block_ptr, p->ptr, sizeof(p->ptr)); + ei->di_filelevels = p->levels; + inode->i_mode = S_IFREG | S_IRUSR; /* probably wrong */ + inode->i_mapping->a_ops = &qnx6_aops; + } + return inode; +} + +struct inode *qnx6_iget(struct super_block *sb, unsigned ino) +{ + struct qnx6_sb_info *sbi = QNX6_SB(sb); + struct qnx6_inode_entry *raw_inode; + struct inode *inode; + struct qnx6_inode_info *ei; + struct address_space *mapping; + struct page *page; + u32 n, offs; + + inode = iget_locked(sb, ino); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return 
inode; + + ei = QNX6_I(inode); + + inode->i_mode = 0; + + if (ino == 0) { + printk(KERN_ERR "qnx6: bad inode number on dev %s: %u is " + "out of range\n", + sb->s_id, ino); + iget_failed(inode); + return ERR_PTR(-EIO); + } + n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS); + offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS); + mapping = sbi->inodes->i_mapping; + page = read_mapping_page(mapping, n, NULL); + if (IS_ERR(page)) { + printk(KERN_ERR "qnx6: major problem: unable to read inode from " + "dev %s\n", sb->s_id); + iget_failed(inode); + return ERR_CAST(page); + } + kmap(page); + raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs; + + inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode); + inode->i_uid = (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid); + inode->i_gid = (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid); + inode->i_size = fs64_to_cpu(sbi, raw_inode->di_size); + inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_mtime); + inode->i_mtime.tv_nsec = 0; + inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_atime); + inode->i_atime.tv_nsec = 0; + inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_ctime); + inode->i_ctime.tv_nsec = 0; + + /* calc blocks based on 512 byte blocksize */ + inode->i_blocks = (inode->i_size + 511) >> 9; + + memcpy(&ei->di_block_ptr, &raw_inode->di_block_ptr, + sizeof(raw_inode->di_block_ptr)); + ei->di_filelevels = raw_inode->di_filelevels; + + if (S_ISREG(inode->i_mode)) { + inode->i_fop = &generic_ro_fops; + inode->i_mapping->a_ops = &qnx6_aops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &qnx6_dir_inode_operations; + inode->i_fop = &qnx6_dir_operations; + inode->i_mapping->a_ops = &qnx6_aops; + } else if (S_ISLNK(inode->i_mode)) { + inode->i_op = &page_symlink_inode_operations; + inode->i_mapping->a_ops = &qnx6_aops; + } else + init_special_inode(inode, inode->i_mode, 0); + qnx6_put_page(page); + unlock_new_inode(inode); + return inode; +} + +static struct kmem_cache *qnx6_inode_cachep; + +static struct inode *qnx6_alloc_inode(struct super_block *sb) +{ + struct qnx6_inode_info *ei; + ei = kmem_cache_alloc(qnx6_inode_cachep, GFP_KERNEL); + if (!ei) + return NULL; + return &ei->vfs_inode; +} + +static void qnx6_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + INIT_LIST_HEAD(&inode->i_dentry); + kmem_cache_free(qnx6_inode_cachep, QNX6_I(inode)); +} + +static void qnx6_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, qnx6_i_callback); +} + +static void init_once(void *foo) +{ + struct qnx6_inode_info *ei = (struct qnx6_inode_info *) foo; + + inode_init_once(&ei->vfs_inode); +} + +static int init_inodecache(void) +{ + qnx6_inode_cachep = kmem_cache_create("qnx6_inode_cache", + sizeof(struct qnx6_inode_info), + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + init_once); + if (!qnx6_inode_cachep) + return -ENOMEM; + return 0; +} + +static void destroy_inodecache(void) +{ + kmem_cache_destroy(qnx6_inode_cachep); +} + +static struct dentry *qnx6_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, qnx6_fill_super); +} + +static struct file_system_type qnx6_fs_type = { + .owner = THIS_MODULE, + .name = "qnx6", + .mount = qnx6_mount, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; + +static int __init init_qnx6_fs(void) +{ + int err; + + err = init_inodecache(); + if (err) + return err; + + err = register_filesystem(&qnx6_fs_type); + 
if (err) { + destroy_inodecache(); + return err; + } + + printk(KERN_INFO "QNX6 filesystem 1.0.0 registered.\n"); + return 0; +} + +static void __exit exit_qnx6_fs(void) +{ + unregister_filesystem(&qnx6_fs_type); + destroy_inodecache(); +} + +module_init(init_qnx6_fs) +module_exit(exit_qnx6_fs) +MODULE_LICENSE("GPL"); diff --git a/fs/qnx6/namei.c b/fs/qnx6/namei.c new file mode 100644 index 000000000000..8a97289e04ad --- /dev/null +++ b/fs/qnx6/namei.c @@ -0,0 +1,42 @@ +/* + * QNX6 file system, Linux implementation. + * + * Version : 1.0.0 + * + * History : + * + * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release. + * 16-02-2012 pagemap extension by Al Viro + * + */ + +#include "qnx6.h" + +struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd) +{ + unsigned ino; + struct page *page; + struct inode *foundinode = NULL; + const char *name = dentry->d_name.name; + int len = dentry->d_name.len; + + if (len > QNX6_LONG_NAME_MAX) + return ERR_PTR(-ENAMETOOLONG); + + ino = qnx6_find_entry(len, dir, name, &page); + if (ino) { + foundinode = qnx6_iget(dir->i_sb, ino); + qnx6_put_page(page); + if (IS_ERR(foundinode)) { + QNX6DEBUG((KERN_ERR "qnx6: lookup->iget -> " + " error %ld\n", PTR_ERR(foundinode))); + return ERR_CAST(foundinode); + } + } else { + QNX6DEBUG((KERN_INFO "qnx6_lookup: not found %s\n", name)); + return NULL; + } + d_add(dentry, foundinode); + return NULL; +} diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h new file mode 100644 index 000000000000..6c5e02a0b6a8 --- /dev/null +++ b/fs/qnx6/qnx6.h @@ -0,0 +1,135 @@ +/* + * QNX6 file system, Linux implementation. + * + * Version : 1.0.0 + * + * History : + * + * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release. + * 16-02-2012 page map extension by Al Viro + * + */ + +#include +#include + +typedef __u16 __bitwise __fs16; +typedef __u32 __bitwise __fs32; +typedef __u64 __bitwise __fs64; + +#include + +#ifdef CONFIG_QNX6FS_DEBUG +#define QNX6DEBUG(X) printk X +#else +#define QNX6DEBUG(X) (void) 0 +#endif + +struct qnx6_sb_info { + struct buffer_head *sb_buf; /* superblock buffer */ + struct qnx6_super_block *sb; /* our superblock */ + int s_blks_off; /* blkoffset fs-startpoint */ + int s_ptrbits; /* indirect pointer bitfield */ + unsigned long s_mount_opt; /* all mount options */ + int s_bytesex; /* holds endianess info */ + struct inode * inodes; + struct inode * longfile; +}; + +struct qnx6_inode_info { + __fs32 di_block_ptr[QNX6_NO_DIRECT_POINTERS]; + __u8 di_filelevels; + __u32 i_dir_start_lookup; + struct inode vfs_inode; +}; + +extern struct inode *qnx6_iget(struct super_block *sb, unsigned ino); +extern struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd); + +#ifdef CONFIG_QNX6FS_DEBUG +extern void qnx6_superblock_debug(struct qnx6_super_block *, + struct super_block *); +#endif + +extern const struct inode_operations qnx6_dir_inode_operations; +extern const struct file_operations qnx6_dir_operations; + +static inline struct qnx6_sb_info *QNX6_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +static inline struct qnx6_inode_info *QNX6_I(struct inode *inode) +{ + return container_of(inode, struct qnx6_inode_info, vfs_inode); +} + +#define clear_opt(o, opt) (o &= ~(QNX6_MOUNT_##opt)) +#define set_opt(o, opt) (o |= (QNX6_MOUNT_##opt)) +#define test_opt(sb, opt) (QNX6_SB(sb)->s_mount_opt & \ + QNX6_MOUNT_##opt) +enum { + BYTESEX_LE, + BYTESEX_BE, +}; + +static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n) +{ + 
if (sbi->s_bytesex == BYTESEX_LE) + return le64_to_cpu((__force __le64)n); + else + return be64_to_cpu((__force __be64)n); +} + +static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n) +{ + if (sbi->s_bytesex == BYTESEX_LE) + return (__force __fs64)cpu_to_le64(n); + else + return (__force __fs64)cpu_to_be64(n); +} + +static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n) +{ + if (sbi->s_bytesex == BYTESEX_LE) + return le32_to_cpu((__force __le32)n); + else + return be32_to_cpu((__force __be32)n); +} + +static inline __fs32 cpu_to_fs32(struct qnx6_sb_info *sbi, __u32 n) +{ + if (sbi->s_bytesex == BYTESEX_LE) + return (__force __fs32)cpu_to_le32(n); + else + return (__force __fs32)cpu_to_be32(n); +} + +static inline __u16 fs16_to_cpu(struct qnx6_sb_info *sbi, __fs16 n) +{ + if (sbi->s_bytesex == BYTESEX_LE) + return le16_to_cpu((__force __le16)n); + else + return be16_to_cpu((__force __be16)n); +} + +static inline __fs16 cpu_to_fs16(struct qnx6_sb_info *sbi, __u16 n) +{ + if (sbi->s_bytesex == BYTESEX_LE) + return (__force __fs16)cpu_to_le16(n); + else + return (__force __fs16)cpu_to_be16(n); +} + +extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, + int silent); + +static inline void qnx6_put_page(struct page *page) +{ + kunmap(page); + page_cache_release(page); +} + +extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, + struct page **res_page); diff --git a/fs/qnx6/super_mmi.c b/fs/qnx6/super_mmi.c new file mode 100644 index 000000000000..29c32cba62d6 --- /dev/null +++ b/fs/qnx6/super_mmi.c @@ -0,0 +1,150 @@ +/* + * QNX6 file system, Linux implementation. + * + * Version : 1.0.0 + * + * History : + * + * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release. + * + */ + +#include +#include +#include +#include "qnx6.h" + +static void qnx6_mmi_copy_sb(struct qnx6_super_block *qsb, + struct qnx6_mmi_super_block *sb) +{ + qsb->sb_magic = sb->sb_magic; + qsb->sb_checksum = sb->sb_checksum; + qsb->sb_serial = sb->sb_serial; + qsb->sb_blocksize = sb->sb_blocksize; + qsb->sb_num_inodes = sb->sb_num_inodes; + qsb->sb_free_inodes = sb->sb_free_inodes; + qsb->sb_num_blocks = sb->sb_num_blocks; + qsb->sb_free_blocks = sb->sb_free_blocks; + + /* the rest of the superblock is the same */ + memcpy(&qsb->Inode, &sb->Inode, sizeof(sb->Inode)); + memcpy(&qsb->Bitmap, &sb->Bitmap, sizeof(sb->Bitmap)); + memcpy(&qsb->Longfile, &sb->Longfile, sizeof(sb->Longfile)); +} + +struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, int silent) +{ + struct buffer_head *bh1, *bh2 = NULL; + struct qnx6_mmi_super_block *sb1, *sb2; + struct qnx6_super_block *qsb = NULL; + struct qnx6_sb_info *sbi; + __u64 offset; + + /* Check the superblock signatures + start with the first superblock */ + bh1 = sb_bread(s, 0); + if (!bh1) { + printk(KERN_ERR "qnx6: Unable to read first mmi superblock\n"); + return NULL; + } + sb1 = (struct qnx6_mmi_super_block *)bh1->b_data; + sbi = QNX6_SB(s); + if (fs32_to_cpu(sbi, sb1->sb_magic) != QNX6_SUPER_MAGIC) { + if (!silent) { + printk(KERN_ERR "qnx6: wrong signature (magic) in" + " superblock #1.\n"); + goto out; + } + } + + /* checksum check - start at byte 8 and end at byte 512 */ + if (fs32_to_cpu(sbi, sb1->sb_checksum) != + crc32_be(0, (char *)(bh1->b_data + 8), 504)) { + printk(KERN_ERR "qnx6: superblock #1 checksum error\n"); + goto out; + } + + /* calculate second superblock blocknumber */ + offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) + QNX6_SUPERBLOCK_AREA / + fs32_to_cpu(sbi, 
sb1->sb_blocksize); + + /* set new blocksize */ + if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) { + printk(KERN_ERR "qnx6: unable to set blocksize\n"); + goto out; + } + /* blocksize invalidates bh - pull it back in */ + brelse(bh1); + bh1 = sb_bread(s, 0); + if (!bh1) + goto out; + sb1 = (struct qnx6_mmi_super_block *)bh1->b_data; + + /* read second superblock */ + bh2 = sb_bread(s, offset); + if (!bh2) { + printk(KERN_ERR "qnx6: unable to read the second superblock\n"); + goto out; + } + sb2 = (struct qnx6_mmi_super_block *)bh2->b_data; + if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) { + if (!silent) + printk(KERN_ERR "qnx6: wrong signature (magic) in" + " superblock #2.\n"); + goto out; + } + + /* checksum check - start at byte 8 and end at byte 512 */ + if (fs32_to_cpu(sbi, sb2->sb_checksum) + != crc32_be(0, (char *)(bh2->b_data + 8), 504)) { + printk(KERN_ERR "qnx6: superblock #1 checksum error\n"); + goto out; + } + + qsb = kmalloc(sizeof(*qsb), GFP_KERNEL); + if (!qsb) { + printk(KERN_ERR "qnx6: unable to allocate memory.\n"); + goto out; + } + + if (fs64_to_cpu(sbi, sb1->sb_serial) > + fs64_to_cpu(sbi, sb2->sb_serial)) { + /* superblock #1 active */ + qnx6_mmi_copy_sb(qsb, sb1); +#ifdef CONFIG_QNX6FS_DEBUG + qnx6_superblock_debug(qsb, s); +#endif + memcpy(bh1->b_data, qsb, sizeof(struct qnx6_super_block)); + + sbi->sb_buf = bh1; + sbi->sb = (struct qnx6_super_block *)bh1->b_data; + brelse(bh2); + printk(KERN_INFO "qnx6: superblock #1 active\n"); + } else { + /* superblock #2 active */ + qnx6_mmi_copy_sb(qsb, sb2); +#ifdef CONFIG_QNX6FS_DEBUG + qnx6_superblock_debug(qsb, s); +#endif + memcpy(bh2->b_data, qsb, sizeof(struct qnx6_super_block)); + + sbi->sb_buf = bh2; + sbi->sb = (struct qnx6_super_block *)bh2->b_data; + brelse(bh1); + printk(KERN_INFO "qnx6: superblock #2 active\n"); + } + kfree(qsb); + + /* offset for mmi_fs is just SUPERBLOCK_AREA bytes */ + sbi->s_blks_off = QNX6_SUPERBLOCK_AREA / s->s_blocksize; + + /* success */ + return sbi->sb; + +out: + if (bh1 != NULL) + brelse(bh1); + if (bh2 != NULL) + brelse(bh2); + return NULL; +} -- cgit From e23754f880f10124f0a2848f9d17e361a295378e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 6 Mar 2012 14:33:22 -0500 Subject: aio: don't bother with async freeing on failure in ioctx_alloc() Signed-off-by: Al Viro --- fs/aio.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index b9d64d89a043..d09b56090aa5 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -248,6 +248,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) struct mm_struct *mm; struct kioctx *ctx; int did_sync = 0; + int err = -ENOMEM; /* Prevent overflows */ if ((nr_events > (0x10000000U / sizeof(struct io_event))) || @@ -310,16 +311,13 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) return ctx; out_cleanup: - __put_ioctx(ctx); - return ERR_PTR(-EAGAIN); - + err = -EAGAIN; + aio_free_ring(ctx); out_freectx: mmdrop(mm); kmem_cache_free(kioctx_cachep, ctx); - ctx = ERR_PTR(-ENOMEM); - - dprintk("aio: error allocating ioctx %p\n", ctx); - return ctx; + dprintk("aio: error allocating ioctx %d\n", err); + return ERR_PTR(err); } /* aio_cancel_all -- cgit From 2dd542b7aeb1c222273cf0593a718d9b44998d9f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 10 Mar 2012 23:10:35 -0500 Subject: aio: aio_nr decrements don't need to be delayed we can do that right in __put_ioctx(); as the result, the loop in ioctx_alloc() can be killed. 
Signed-off-by: Al Viro --- fs/aio.c | 42 ++++++++++++++---------------------------- 1 file changed, 14 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index d09b56090aa5..216eb37b2c76 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -199,16 +199,7 @@ static int aio_setup_ring(struct kioctx *ctx) static void ctx_rcu_free(struct rcu_head *head) { struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); - unsigned nr_events = ctx->max_reqs; - kmem_cache_free(kioctx_cachep, ctx); - - if (nr_events) { - spin_lock(&aio_nr_lock); - BUG_ON(aio_nr - nr_events > aio_nr); - aio_nr -= nr_events; - spin_unlock(&aio_nr_lock); - } } /* __put_ioctx @@ -217,6 +208,7 @@ static void ctx_rcu_free(struct rcu_head *head) */ static void __put_ioctx(struct kioctx *ctx) { + unsigned nr_events = ctx->max_reqs; BUG_ON(ctx->reqs_active); cancel_delayed_work(&ctx->wq); @@ -224,6 +216,12 @@ static void __put_ioctx(struct kioctx *ctx) aio_free_ring(ctx); mmdrop(ctx->mm); ctx->mm = NULL; + if (nr_events) { + spin_lock(&aio_nr_lock); + BUG_ON(aio_nr - nr_events > aio_nr); + aio_nr -= nr_events; + spin_unlock(&aio_nr_lock); + } pr_debug("__put_ioctx: freeing %p\n", ctx); call_rcu(&ctx->rcu_head, ctx_rcu_free); } @@ -247,7 +245,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) { struct mm_struct *mm; struct kioctx *ctx; - int did_sync = 0; int err = -ENOMEM; /* Prevent overflows */ @@ -257,7 +254,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) return ERR_PTR(-EINVAL); } - if ((unsigned long)nr_events > aio_max_nr) + if (!nr_events || (unsigned long)nr_events > aio_max_nr) return ERR_PTR(-EAGAIN); ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); @@ -281,25 +278,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) goto out_freectx; /* limit the number of system wide aios */ - do { - spin_lock_bh(&aio_nr_lock); - if (aio_nr + nr_events > aio_max_nr || - aio_nr + nr_events < aio_nr) - ctx->max_reqs = 0; - else - aio_nr += ctx->max_reqs; + spin_lock_bh(&aio_nr_lock); + if (aio_nr + nr_events > aio_max_nr || + aio_nr + nr_events < aio_nr) { spin_unlock_bh(&aio_nr_lock); - if (ctx->max_reqs || did_sync) - break; - - /* wait for rcu callbacks to have completed before giving up */ - synchronize_rcu(); - did_sync = 1; - ctx->max_reqs = nr_events; - } while (1); - - if (ctx->max_reqs == 0) goto out_cleanup; + } + aio_nr += ctx->max_reqs; + spin_unlock_bh(&aio_nr_lock); /* now link into global list. */ spin_lock(&mm->ioctx_lock); -- cgit From 9fa1cb397fa052fc9acfaf5a9f2faff31e10f6b7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 10 Mar 2012 23:14:05 -0500 Subject: aio: aio_nr_lock is taken only synchronously now Signed-off-by: Al Viro --- fs/aio.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 216eb37b2c76..9c3de88e2ead 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -278,14 +278,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) goto out_freectx; /* limit the number of system wide aios */ - spin_lock_bh(&aio_nr_lock); + spin_lock(&aio_nr_lock); if (aio_nr + nr_events > aio_max_nr || aio_nr + nr_events < aio_nr) { - spin_unlock_bh(&aio_nr_lock); + spin_unlock(&aio_nr_lock); goto out_cleanup; } aio_nr += ctx->max_reqs; - spin_unlock_bh(&aio_nr_lock); + spin_unlock(&aio_nr_lock); /* now link into global list. 
*/ spin_lock(&mm->ioctx_lock); -- cgit From bf50722a3c4a83aae651dc20b708308a4f119eb9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 11 Mar 2012 00:58:40 -0500 Subject: aio: use cancel_delayed_work_sync() Signed-off-by: Al Viro --- fs/aio.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 9c3de88e2ead..a92d7547b6f6 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -211,8 +211,7 @@ static void __put_ioctx(struct kioctx *ctx) unsigned nr_events = ctx->max_reqs; BUG_ON(ctx->reqs_active); - cancel_delayed_work(&ctx->wq); - cancel_work_sync(&ctx->wq.work); + cancel_delayed_work_sync(&ctx->wq); aio_free_ring(ctx); mmdrop(ctx->mm); ctx->mm = NULL; -- cgit From cd1ea261ac128479833b9f518bf788ee47ada2de Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 11 Mar 2012 00:59:07 -0500 Subject: aio: don't bother with cancel_delayed_work() in exit_aio() __put_ioctx() will cover it anyway. Signed-off-by: Al Viro --- fs/aio.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index a92d7547b6f6..3174cd969b01 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -390,10 +390,6 @@ void exit_aio(struct mm_struct *mm) aio_cancel_all(ctx); wait_for_all_aios(ctx); - /* - * Ensure we don't leave the ctx on the aio_wq - */ - cancel_work_sync(&ctx->wq.work); if (1 != atomic_read(&ctx->users)) printk(KERN_DEBUG -- cgit From 9fcf03d0d6e845ed495fc8b1ec328b473ff298b3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 13 Mar 2012 22:06:28 -0400 Subject: aio: fix the comment in aio_kick_handler() It should've been changed when queue_work() became queue_delayed_work(..., 0) in there. It had always been about not needing a delay, not about not using a specific function... Signed-off-by: Al Viro --- fs/aio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 3174cd969b01..59b431a2ae0c 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -899,7 +899,7 @@ static void aio_kick_handler(struct work_struct *work) unuse_mm(mm); set_fs(oldfs); /* - * we're in a worker thread already, don't use queue_delayed_work, + * we're in a worker thread already; no point using non-zero delay */ if (requeue) queue_delayed_work(aio_wq, &ctx->wq, 0); -- cgit From 68ac1234fb949b66941d94dce4157742799fc581 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 15 Mar 2012 08:21:57 -0400 Subject: switch touch_atime to struct path Signed-off-by: Al Viro --- fs/cachefiles/namei.c | 3 ++- fs/ecryptfs/file.c | 9 ++++----- fs/inode.c | 5 +++-- fs/namei.c | 2 +- fs/nfsd/vfs.c | 11 ++++++----- fs/stat.c | 2 +- 6 files changed, 17 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index a0358c2189cb..7f0771d3894e 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -646,7 +646,8 @@ lookup_again: * (this is used to keep track of culling, and atimes are only * updated by read, write and readdir but not lookup or * open) */ - touch_atime(cache->mnt, next); + path.dentry = next; + touch_atime(&path); } /* open a file interface onto a data file */ diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index d3f95f941c47..2b17f2f9b121 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -48,8 +48,7 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb, unsigned long nr_segs, loff_t pos) { ssize_t rc; - struct dentry *lower_dentry; - struct vfsmount *lower_vfsmount; + struct path lower; struct file *file = iocb->ki_filp; rc = generic_file_aio_read(iocb, iov, 
nr_segs, pos); @@ -60,9 +59,9 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb, if (-EIOCBQUEUED == rc) rc = wait_on_sync_kiocb(iocb); if (rc >= 0) { - lower_dentry = ecryptfs_dentry_to_lower(file->f_path.dentry); - lower_vfsmount = ecryptfs_dentry_to_lower_mnt(file->f_path.dentry); - touch_atime(lower_vfsmount, lower_dentry); + lower.dentry = ecryptfs_dentry_to_lower(file->f_path.dentry); + lower.mnt = ecryptfs_dentry_to_lower_mnt(file->f_path.dentry); + touch_atime(&lower); } return rc; } diff --git a/fs/inode.c b/fs/inode.c index 92de04b0baa2..8b612813a6a7 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1499,9 +1499,10 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, * This function automatically handles read only file systems and media, * as well as the "noatime" flag and inode specific "noatime" markers. */ -void touch_atime(struct vfsmount *mnt, struct dentry *dentry) +void touch_atime(struct path *path) { - struct inode *inode = dentry->d_inode; + struct vfsmount *mnt = path->mnt; + struct inode *inode = path->dentry->d_inode; struct timespec now; if (inode->i_flags & S_NOATIME) diff --git a/fs/namei.c b/fs/namei.c index a0b82762e8fc..0ccc74ee92fb 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -642,7 +642,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p) cond_resched(); current->total_link_count++; - touch_atime(link->mnt, dentry); + touch_atime(link); nd_set_link(nd, NULL); error = security_inode_follow_link(link->dentry, nd); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index edf6d3ed8777..e59f71d0cf73 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1541,30 +1541,31 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, __be32 nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) { - struct dentry *dentry; struct inode *inode; mm_segment_t oldfs; __be32 err; int host_err; + struct path path; err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP); if (err) goto out; - dentry = fhp->fh_dentry; - inode = dentry->d_inode; + path.mnt = fhp->fh_export->ex_path.mnt; + path.dentry = fhp->fh_dentry; + inode = path.dentry->d_inode; err = nfserr_inval; if (!inode->i_op->readlink) goto out; - touch_atime(fhp->fh_export->ex_path.mnt, dentry); + touch_atime(&path); /* N.B. Why does this call need a get_fs()?? 
* Remove the set_fs and watch the fireworks:-) --okir */ oldfs = get_fs(); set_fs(KERNEL_DS); - host_err = inode->i_op->readlink(dentry, buf, *lenp); + host_err = inode->i_op->readlink(path.dentry, buf, *lenp); set_fs(oldfs); if (host_err < 0) diff --git a/fs/stat.c b/fs/stat.c index 8806b8997d2e..86f13563a463 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -307,7 +307,7 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname, if (inode->i_op->readlink) { error = security_inode_readlink(path.dentry); if (!error) { - touch_atime(path.mnt, path.dentry); + touch_atime(&path); error = inode->i_op->readlink(path.dentry, buf, bufsiz); } -- cgit From 3ee05ed0679f29ab19727067ff7c14f0c257fa9a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 15 Mar 2012 14:48:29 -0400 Subject: no need to play with fs->seq in exit_fs() Signed-off-by: Al Viro --- fs/fs_struct.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 78b519c13536..27114b413622 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -107,10 +107,8 @@ void exit_fs(struct task_struct *tsk) int kill; task_lock(tsk); spin_lock(&fs->lock); - write_seqcount_begin(&fs->seq); tsk->fs = NULL; kill = !--fs->users; - write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); task_unlock(tsk); if (kill) -- cgit From 82234e61a8fc75599f29026c0805fc0cfe2a6c87 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 15 Mar 2012 14:48:55 -0400 Subject: vfs: take path_get_longterm() out of write_seqcount scope Signed-off-by: Al Viro --- fs/fs_struct.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 27114b413622..6324c4274959 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -26,11 +26,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path) { struct path old_root; + path_get_longterm(path); spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); old_root = fs->root; fs->root = *path; - path_get_longterm(path); write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); if (old_root.dentry) @@ -45,11 +45,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path) { struct path old_pwd; + path_get_longterm(path); spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); old_pwd = fs->pwd; fs->pwd = *path; - path_get_longterm(path); write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); @@ -57,6 +57,14 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path) path_put_longterm(&old_pwd); } +static inline int replace_path(struct path *p, const struct path *old, const struct path *new) +{ + if (likely(p->dentry != old->dentry || p->mnt != old->mnt)) + return 0; + *p = *new; + return 1; +} + void chroot_fs_refs(struct path *old_root, struct path *new_root) { struct task_struct *g, *p; @@ -68,21 +76,16 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) task_lock(p); fs = p->fs; if (fs) { + int hits = 0; spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); - if (fs->root.dentry == old_root->dentry - && fs->root.mnt == old_root->mnt) { - path_get_longterm(new_root); - fs->root = *new_root; + hits += replace_path(&fs->root, old_root, new_root); + hits += replace_path(&fs->pwd, old_root, new_root); + write_seqcount_end(&fs->seq); + while (hits--) { count++; - } - if (fs->pwd.dentry == old_root->dentry - && fs->pwd.mnt == old_root->mnt) { path_get_longterm(new_root); - fs->pwd = *new_root; - count++; } - write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); } task_unlock(p); -- cgit From 
c45ac8887e778c4fa2b572c51a94a681a0955d4d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 00:59:06 -0400 Subject: take private bits of reiserfs_xattr.h to fs/reiserfs/xattr.h Signed-off-by: Al Viro --- fs/reiserfs/file.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/reiserfs/namei.c | 2 +- fs/reiserfs/super.c | 2 +- fs/reiserfs/xattr.c | 2 +- fs/reiserfs/xattr.h | 124 +++++++++++++++++++++++++++++++++++++++++++ fs/reiserfs/xattr_acl.c | 2 +- fs/reiserfs/xattr_security.c | 2 +- fs/reiserfs/xattr_trusted.c | 2 +- fs/reiserfs/xattr_user.c | 2 +- 10 files changed, 133 insertions(+), 9 deletions(-) create mode 100644 fs/reiserfs/xattr.h (limited to 'fs') diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index ace635053a36..e26ee4988e78 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "xattr.h" #include #include #include diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 9e8cd5acd79c..894c7316e9c6 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include "xattr.h" #include #include #include diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 146378865239..cb67ebf882a9 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include "xattr.h" #include #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) set_nlink(i, 1); } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 208dfd144409..a67fc7d28de5 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include "xattr.h" #include #include #include diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index c24deda8a8bc..ead5d8aab440 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include "xattr.h" #include #include #include diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h new file mode 100644 index 000000000000..367c0459db6d --- /dev/null +++ b/fs/reiserfs/xattr.h @@ -0,0 +1,124 @@ +#include +#include +#include +#include +#include +#include + +struct inode; +struct dentry; +struct iattr; +struct super_block; +struct nameidata; + +int reiserfs_xattr_register_handlers(void) __init; +void reiserfs_xattr_unregister_handlers(void); +int reiserfs_xattr_init(struct super_block *sb, int mount_flags); +int reiserfs_lookup_privroot(struct super_block *sb); +int reiserfs_delete_xattrs(struct inode *inode); +int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); +int reiserfs_permission(struct inode *inode, int mask); + +#ifdef CONFIG_REISERFS_FS_XATTR +#define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) +ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, + void *buffer, size_t size); +int reiserfs_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size); +int reiserfs_removexattr(struct dentry *dentry, const char *name); + +int reiserfs_xattr_get(struct inode *, const char *, void *, size_t); +int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int); +int reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *, + struct inode *, const char *, const void *, + size_t, int); + +extern const struct xattr_handler reiserfs_xattr_user_handler; +extern const 
struct xattr_handler reiserfs_xattr_trusted_handler; +extern const struct xattr_handler reiserfs_xattr_security_handler; +#ifdef CONFIG_REISERFS_FS_SECURITY +int reiserfs_security_init(struct inode *dir, struct inode *inode, + const struct qstr *qstr, + struct reiserfs_security_handle *sec); +int reiserfs_security_write(struct reiserfs_transaction_handle *th, + struct inode *inode, + struct reiserfs_security_handle *sec); +void reiserfs_security_free(struct reiserfs_security_handle *sec); +#endif + +static inline int reiserfs_xattrs_initialized(struct super_block *sb) +{ + return REISERFS_SB(sb)->priv_root != NULL; +} + +#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) +static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size) +{ + loff_t ret = 0; + if (reiserfs_file_data_log(inode)) { + ret = _ROUND_UP(xattr_size(size), inode->i_sb->s_blocksize); + ret >>= inode->i_sb->s_blocksize_bits; + } + return ret; +} + +/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file. + * Let's try to be smart about it. + * xattr root: We cache it. If it's not cached, we may need to create it. + * xattr dir: If anything has been loaded for this inode, we can set a flag + * saying so. + * xattr file: Since we don't cache xattrs, we can't tell. We always include + * blocks for it. + * + * However, since root and dir can be created between calls - YOU MUST SAVE + * THIS VALUE. + */ +static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode) +{ + size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); + + if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) { + nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); + if (!REISERFS_SB(inode->i_sb)->xattr_root->d_inode) + nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); + } + + return nblocks; +} + +static inline void reiserfs_init_xattr_rwsem(struct inode *inode) +{ + init_rwsem(&REISERFS_I(inode)->i_xattr_sem); +} + +#else + +#define reiserfs_getxattr NULL +#define reiserfs_setxattr NULL +#define reiserfs_listxattr NULL +#define reiserfs_removexattr NULL + +static inline void reiserfs_init_xattr_rwsem(struct inode *inode) +{ +} +#endif /* CONFIG_REISERFS_FS_XATTR */ + +#ifndef CONFIG_REISERFS_FS_SECURITY +static inline int reiserfs_security_init(struct inode *dir, + struct inode *inode, + const struct qstr *qstr, + struct reiserfs_security_handle *sec) +{ + return 0; +} +static inline int +reiserfs_security_write(struct reiserfs_transaction_handle *th, + struct inode *inode, + struct reiserfs_security_handle *sec) +{ + return 0; +} +static inline void reiserfs_security_free(struct reiserfs_security_handle *sec) +{} +#endif diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 6da0396e5052..c0a8c519b2e1 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include "xattr.h" #include #include diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index 534668fa41be..6104066e44c4 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -4,7 +4,7 @@ #include #include #include -#include +#include "xattr.h" #include #include diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c index 9883736ce3ec..f995b258c141 100644 --- a/fs/reiserfs/xattr_trusted.c +++ b/fs/reiserfs/xattr_trusted.c @@ -4,7 +4,7 @@ #include #include #include -#include +#include "xattr.h" #include static int diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c index 
45ae1a00013a..748978db6f03 100644 --- a/fs/reiserfs/xattr_user.c +++ b/fs/reiserfs/xattr_user.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include "xattr.h" #include static int -- cgit From a3063ab88fcbe5249f841cb95dfd626b8bf2674f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 01:03:10 -0400 Subject: move reiserfs_acl.h to fs/reiserfs/acl.h Signed-off-by: Al Viro --- fs/reiserfs/acl.h | 76 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/reiserfs/file.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/reiserfs/namei.c | 2 +- fs/reiserfs/super.c | 2 +- fs/reiserfs/xattr.c | 2 +- fs/reiserfs/xattr_acl.c | 2 +- 7 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 fs/reiserfs/acl.h (limited to 'fs') diff --git a/fs/reiserfs/acl.h b/fs/reiserfs/acl.h new file mode 100644 index 000000000000..f096b80e73d8 --- /dev/null +++ b/fs/reiserfs/acl.h @@ -0,0 +1,76 @@ +#include +#include + +#define REISERFS_ACL_VERSION 0x0001 + +typedef struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} reiserfs_acl_entry; + +typedef struct { + __le16 e_tag; + __le16 e_perm; +} reiserfs_acl_entry_short; + +typedef struct { + __le32 a_version; +} reiserfs_acl_header; + +static inline size_t reiserfs_acl_size(int count) +{ + if (count <= 4) { + return sizeof(reiserfs_acl_header) + + count * sizeof(reiserfs_acl_entry_short); + } else { + return sizeof(reiserfs_acl_header) + + 4 * sizeof(reiserfs_acl_entry_short) + + (count - 4) * sizeof(reiserfs_acl_entry); + } +} + +static inline int reiserfs_acl_count(size_t size) +{ + ssize_t s; + size -= sizeof(reiserfs_acl_header); + s = size - 4 * sizeof(reiserfs_acl_entry_short); + if (s < 0) { + if (size % sizeof(reiserfs_acl_entry_short)) + return -1; + return size / sizeof(reiserfs_acl_entry_short); + } else { + if (s % sizeof(reiserfs_acl_entry)) + return -1; + return s / sizeof(reiserfs_acl_entry) + 4; + } +} + +#ifdef CONFIG_REISERFS_FS_POSIX_ACL +struct posix_acl *reiserfs_get_acl(struct inode *inode, int type); +int reiserfs_acl_chmod(struct inode *inode); +int reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, + struct inode *dir, struct dentry *dentry, + struct inode *inode); +int reiserfs_cache_default_acl(struct inode *dir); +extern const struct xattr_handler reiserfs_posix_acl_default_handler; +extern const struct xattr_handler reiserfs_posix_acl_access_handler; + +#else + +#define reiserfs_cache_default_acl(inode) 0 +#define reiserfs_get_acl NULL + +static inline int reiserfs_acl_chmod(struct inode *inode) +{ + return 0; +} + +static inline int +reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, + const struct inode *dir, struct dentry *dentry, + struct inode *inode) +{ + return 0; +} +#endif diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index e26ee4988e78..3fa5915dea6e 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -4,7 +4,7 @@ #include #include -#include +#include "acl.h" #include "xattr.h" #include #include diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 894c7316e9c6..b696493d6b66 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "acl.h" #include "xattr.h" #include #include diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index cb67ebf882a9..34bdab29883b 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include "acl.h" #include "xattr.h" #include diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 
a67fc7d28de5..cf68a6ba0ec6 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include "acl.h" #include "xattr.h" #include #include diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index ead5d8aab440..61c9b5633e27 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -44,7 +44,7 @@ #include #include #include "xattr.h" -#include +#include "acl.h" #include #include #include diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index c0a8c519b2e1..f09094057eaa 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -8,7 +8,7 @@ #include #include #include "xattr.h" -#include +#include "acl.h" #include static int reiserfs_set_acl(struct reiserfs_transaction_handle *th, -- cgit From a8a4b79b53fc7cbb023afedf58b04dd4e9bbb114 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 01:06:11 -0400 Subject: kill pointless includes of reiserfs_fs_{i,sb}.h Signed-off-by: Al Viro --- fs/reiserfs/bitmap.c | 2 -- fs/reiserfs/objectid.c | 1 - fs/reiserfs/procfs.c | 1 - fs/reiserfs/resize.c | 1 - fs/reiserfs/xattr.h | 1 - 5 files changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index 70de42f09f1d..3c4636162621 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c @@ -10,8 +10,6 @@ #include #include #include -#include -#include #include #include diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c index 3a6de810bd61..efc929e6a323 100644 --- a/fs/reiserfs/objectid.c +++ b/fs/reiserfs/objectid.c @@ -6,7 +6,6 @@ #include #include #include -#include // find where objectid map starts #define objectid_map(s,rs) (old_format_only (s) ? \ diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 7a9981196c1c..f931a089bbe7 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c index 7483279b482d..e1415ad93251 100644 --- a/fs/reiserfs/resize.c +++ b/fs/reiserfs/resize.c @@ -14,7 +14,6 @@ #include #include #include -#include #include int reiserfs_resize(struct super_block *s, unsigned long block_count_new) diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h index 367c0459db6d..ccd146bb0665 100644 --- a/fs/reiserfs/xattr.h +++ b/fs/reiserfs/xattr.h @@ -2,7 +2,6 @@ #include #include #include -#include #include struct inode; -- cgit From f466c6fdb3b1f043ff1977a8d2a1d0cd4dc164fa Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 01:16:43 -0400 Subject: move private bits of reiserfs_fs.h to fs/reiserfs/reiserfs.h Signed-off-by: Al Viro --- fs/reiserfs/bitmap.c | 2 +- fs/reiserfs/dir.c | 2 +- fs/reiserfs/do_balan.c | 2 +- fs/reiserfs/file.c | 2 +- fs/reiserfs/fix_node.c | 2 +- fs/reiserfs/hashes.c | 2 +- fs/reiserfs/ibalance.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/reiserfs/ioctl.c | 2 +- fs/reiserfs/item_ops.c | 2 +- fs/reiserfs/journal.c | 2 +- fs/reiserfs/lbalance.c | 2 +- fs/reiserfs/lock.c | 2 +- fs/reiserfs/namei.c | 2 +- fs/reiserfs/objectid.c | 2 +- fs/reiserfs/prints.c | 4 +- fs/reiserfs/procfs.c | 2 +- fs/reiserfs/reiserfs.h | 2327 +++++++++++++++++++++++++++++++++++++++++ fs/reiserfs/resize.c | 2 +- fs/reiserfs/stree.c | 2 +- fs/reiserfs/super.c | 2 +- fs/reiserfs/tail_conversion.c | 2 +- fs/reiserfs/xattr.c | 2 +- fs/reiserfs/xattr.h | 1 - fs/reiserfs/xattr_acl.c | 2 +- fs/reiserfs/xattr_security.c | 2 +- fs/reiserfs/xattr_trusted.c | 2 +- fs/reiserfs/xattr_user.c | 2 +- 28 files changed, 2354 
insertions(+), 28 deletions(-) create mode 100644 fs/reiserfs/reiserfs.h (limited to 'fs') diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index 3c4636162621..4c0c7d163d15 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c @@ -4,7 +4,7 @@ /* Reiserfs block (de)allocator, bitmap-based. */ #include -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 133e9355dc6f..66c53b642a88 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 60c080440661..2b7882b508db 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c @@ -17,7 +17,7 @@ #include #include -#include +#include "reiserfs.h" #include #include diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 3fa5915dea6e..8375c922c0d5 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -3,7 +3,7 @@ */ #include -#include +#include "reiserfs.h" #include "acl.h" #include "xattr.h" #include diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index 1e4250bc3a6f..430e0658704c 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include "reiserfs.h" #include /* To make any changes in the tree we find a node, that contains item diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c index 6471c670743e..91b0cc1242a2 100644 --- a/fs/reiserfs/hashes.c +++ b/fs/reiserfs/hashes.c @@ -19,7 +19,7 @@ // #include -#include +#include "reiserfs.h" #include #define DELTA 0x9E3779B9 diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c index 2074fd95046b..e1978fd895f5 100644 --- a/fs/reiserfs/ibalance.c +++ b/fs/reiserfs/ibalance.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "reiserfs.h" #include /* this is one and only function that is used outside (do_balance.c) */ diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index b696493d6b66..494c315c7417 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -4,7 +4,7 @@ #include #include -#include +#include "reiserfs.h" #include "acl.h" #include "xattr.h" #include diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 950e3d1b5c9e..0c2185042d5f 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c index 72cb1cc51b87..ee382ef3d300 100644 --- a/fs/reiserfs/item_ops.c +++ b/fs/reiserfs/item_ops.c @@ -3,7 +3,7 @@ */ #include -#include +#include "reiserfs.h" // this contains item handlers for old item types: sd, direct, // indirect, directory diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index c3cf54fd4de3..cf9f4de00a95 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index 03d85cbf90bf..c4b73f9ccf85 100644 --- a/fs/reiserfs/lbalance.c +++ b/fs/reiserfs/lbalance.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "reiserfs.h" #include /* these are used in do_balance.c */ diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c index 7df1ce48203a..d735bc8470e3 100644 --- a/fs/reiserfs/lock.c +++ b/fs/reiserfs/lock.c @@ -1,4 +1,4 @@ -#include +#include "reiserfs.h" #include /* 
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 34bdab29883b..84e8a69cee9d 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include "reiserfs.h" #include "acl.h" #include "xattr.h" #include diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c index efc929e6a323..f732d6a5251d 100644 --- a/fs/reiserfs/objectid.c +++ b/fs/reiserfs/objectid.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "reiserfs.h" // find where objectid map starts #define objectid_map(s,rs) (old_format_only (s) ? \ diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 45de98b59466..c0b1112ab7e3 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -4,7 +4,7 @@ #include #include -#include +#include "reiserfs.h" #include #include @@ -329,7 +329,7 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...) Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it pointless complexity): - panics in reiserfs_fs.h have numbers from 1000 to 1999 + panics in reiserfs.h have numbers from 1000 to 1999 super.c 2000 to 2999 preserve.c (unused) 3000 to 3999 bitmap.c 4000 to 4999 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index f931a089bbe7..2c1ade692cc8 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include "reiserfs.h" #include #include diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h new file mode 100644 index 000000000000..b3865c84f54c --- /dev/null +++ b/fs/reiserfs/reiserfs.h @@ -0,0 +1,2327 @@ +/* + * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* the 32 bit compat definitions with int argument */ +#define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int) +#define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS +#define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS +#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION +#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION + +/* + * Locking primitives. The write lock is a per superblock + * special mutex that has properties close to the Big Kernel Lock + * which was used in the previous locking scheme. + */ +void reiserfs_write_lock(struct super_block *s); +void reiserfs_write_unlock(struct super_block *s); +int reiserfs_write_lock_once(struct super_block *s); +void reiserfs_write_unlock_once(struct super_block *s, int lock_depth); + +#ifdef CONFIG_REISERFS_CHECK +void reiserfs_lock_check_recursive(struct super_block *s); +#else +static inline void reiserfs_lock_check_recursive(struct super_block *s) { } +#endif + +/* + * Several mutexes depend on the write lock. + * However sometimes we want to relax the write lock while we hold + * these mutexes, according to the release/reacquire on schedule() + * properties of the Bkl that were used. + * Reiserfs performances and locking were based on this scheme. 
+ * Now that the write lock is a mutex and not the bkl anymore, doing so + * may result in a deadlock: + * + * A acquire write_lock + * A acquire j_commit_mutex + * A release write_lock and wait for something + * B acquire write_lock + * B can't acquire j_commit_mutex and sleep + * A can't acquire write lock anymore + * deadlock + * + * What we do here is avoiding such deadlock by playing the same game + * than the Bkl: if we can't acquire a mutex that depends on the write lock, + * we release the write lock, wait a bit and then retry. + * + * The mutexes concerned by this hack are: + * - The commit mutex of a journal list + * - The flush mutex + * - The journal lock + * - The inode mutex + */ +static inline void reiserfs_mutex_lock_safe(struct mutex *m, + struct super_block *s) +{ + reiserfs_lock_check_recursive(s); + reiserfs_write_unlock(s); + mutex_lock(m); + reiserfs_write_lock(s); +} + +static inline void +reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass, + struct super_block *s) +{ + reiserfs_lock_check_recursive(s); + reiserfs_write_unlock(s); + mutex_lock_nested(m, subclass); + reiserfs_write_lock(s); +} + +static inline void +reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s) +{ + reiserfs_lock_check_recursive(s); + reiserfs_write_unlock(s); + down_read(sem); + reiserfs_write_lock(s); +} + +/* + * When we schedule, we usually want to also release the write lock, + * according to the previous bkl based locking scheme of reiserfs. + */ +static inline void reiserfs_cond_resched(struct super_block *s) +{ + if (need_resched()) { + reiserfs_write_unlock(s); + schedule(); + reiserfs_write_lock(s); + } +} + +struct fid; + +/* in reading the #defines, it may help to understand that they employ + the following abbreviations: + + B = Buffer + I = Item header + H = Height within the tree (should be changed to LEV) + N = Number of the item in the node + STAT = stat data + DEH = Directory Entry Header + EC = Entry Count + E = Entry number + UL = Unsigned Long + BLKH = BLocK Header + UNFM = UNForMatted node + DC = Disk Child + P = Path + + These #defines are named by concatenating these abbreviations, + where first comes the arguments, and last comes the return value, + of the macro. + +*/ + +#define USE_INODE_GENERATION_COUNTER + +#define REISERFS_PREALLOCATE +#define DISPLACE_NEW_PACKING_LOCALITIES +#define PREALLOCATION_SIZE 9 + +/* n must be power of 2 */ +#define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u)) + +// to be ok for alpha and others we have to align structures to 8 byte +// boundary. +// FIXME: do not change 4 by anything else: there is code which relies on that +#define ROUND_UP(x) _ROUND_UP(x,8LL) + +/* debug levels. Right now, CONFIG_REISERFS_CHECK means print all debug +** messages. +*/ +#define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */ + +void __reiserfs_warning(struct super_block *s, const char *id, + const char *func, const char *fmt, ...); +#define reiserfs_warning(s, id, fmt, args...) \ + __reiserfs_warning(s, id, __func__, fmt, ##args) +/* assertions handling */ + +/** always check a condition and panic if it's false. */ +#define __RASSERT(cond, scond, format, args...) \ +do { \ + if (!(cond)) \ + reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \ + __FILE__ ":%i:%s: " format "\n", \ + in_interrupt() ? -1 : task_pid_nr(current), \ + __LINE__, __func__ , ##args); \ +} while (0) + +#define RASSERT(cond, format, args...) 
__RASSERT(cond, #cond, format, ##args) + +#if defined( CONFIG_REISERFS_CHECK ) +#define RFALSE(cond, format, args...) __RASSERT(!(cond), "!(" #cond ")", format, ##args) +#else +#define RFALSE( cond, format, args... ) do {;} while( 0 ) +#endif + +#define CONSTF __attribute_const__ +/* + * Disk Data Structures + */ + +/***************************************************************************/ +/* SUPER BLOCK */ +/***************************************************************************/ + +/* + * Structure of super block on disk, a version of which in RAM is often accessed as REISERFS_SB(s)->s_rs + * the version in RAM is part of a larger structure containing fields never written to disk. + */ +#define UNSET_HASH 0 // read_super will guess about, what hash names + // in directories were sorted with +#define TEA_HASH 1 +#define YURA_HASH 2 +#define R5_HASH 3 +#define DEFAULT_HASH R5_HASH + +struct journal_params { + __le32 jp_journal_1st_block; /* where does journal start from on its + * device */ + __le32 jp_journal_dev; /* journal device st_rdev */ + __le32 jp_journal_size; /* size of the journal */ + __le32 jp_journal_trans_max; /* max number of blocks in a transaction. */ + __le32 jp_journal_magic; /* random value made on fs creation (this + * was sb_journal_block_count) */ + __le32 jp_journal_max_batch; /* max number of blocks to batch into a + * trans */ + __le32 jp_journal_max_commit_age; /* in seconds, how old can an async + * commit be */ + __le32 jp_journal_max_trans_age; /* in seconds, how old can a transaction + * be */ +}; + +/* this is the super from 3.5.X, where X >= 10 */ +struct reiserfs_super_block_v1 { + __le32 s_block_count; /* blocks count */ + __le32 s_free_blocks; /* free blocks count */ + __le32 s_root_block; /* root block number */ + struct journal_params s_journal; + __le16 s_blocksize; /* block size */ + __le16 s_oid_maxsize; /* max size of object id array, see + * get_objectid() commentary */ + __le16 s_oid_cursize; /* current size of object id array */ + __le16 s_umount_state; /* this is set to 1 when filesystem was + * umounted, to 2 - when not */ + char s_magic[10]; /* reiserfs magic string indicates that + * file system is reiserfs: + * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */ + __le16 s_fs_state; /* it is set to used by fsck to mark which + * phase of rebuilding is done */ + __le32 s_hash_function_code; /* indicate, what hash function is being use + * to sort names in a directory*/ + __le16 s_tree_height; /* height of disk tree */ + __le16 s_bmap_nr; /* amount of bitmap blocks needed to address + * each block of file system */ + __le16 s_version; /* this field is only reliable on filesystem + * with non-standard journal */ + __le16 s_reserved_for_journal; /* size in blocks of journal area on main + * device, we need to keep after + * making fs with non-standard journal */ +} __attribute__ ((__packed__)); + +#define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1)) + +/* this is the on disk super block */ +struct reiserfs_super_block { + struct reiserfs_super_block_v1 s_v1; + __le32 s_inode_generation; + __le32 s_flags; /* Right now used only by inode-attributes, if enabled */ + unsigned char s_uuid[16]; /* filesystem unique identifier */ + unsigned char s_label[16]; /* filesystem volume label */ + __le16 s_mnt_count; /* Count of mounts since last fsck */ + __le16 s_max_mnt_count; /* Maximum mounts before check */ + __le32 s_lastcheck; /* Timestamp of last fsck */ + __le32 s_check_interval; /* Interval between checks */ + char s_unused[76]; /* zero 
filled by mkreiserfs and + * reiserfs_convert_objectid_map_v1() + * so any additions must be updated + * there as well. */ +} __attribute__ ((__packed__)); + +#define SB_SIZE (sizeof(struct reiserfs_super_block)) + +#define REISERFS_VERSION_1 0 +#define REISERFS_VERSION_2 2 + +// on-disk super block fields converted to cpu form +#define SB_DISK_SUPER_BLOCK(s) (REISERFS_SB(s)->s_rs) +#define SB_V1_DISK_SUPER_BLOCK(s) (&(SB_DISK_SUPER_BLOCK(s)->s_v1)) +#define SB_BLOCKSIZE(s) \ + le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_blocksize)) +#define SB_BLOCK_COUNT(s) \ + le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_block_count)) +#define SB_FREE_BLOCKS(s) \ + le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks)) +#define SB_REISERFS_MAGIC(s) \ + (SB_V1_DISK_SUPER_BLOCK(s)->s_magic) +#define SB_ROOT_BLOCK(s) \ + le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_root_block)) +#define SB_TREE_HEIGHT(s) \ + le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height)) +#define SB_REISERFS_STATE(s) \ + le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state)) +#define SB_VERSION(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_version)) +#define SB_BMAP_NR(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr)) + +#define PUT_SB_BLOCK_COUNT(s, val) \ + do { SB_V1_DISK_SUPER_BLOCK(s)->s_block_count = cpu_to_le32(val); } while (0) +#define PUT_SB_FREE_BLOCKS(s, val) \ + do { SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks = cpu_to_le32(val); } while (0) +#define PUT_SB_ROOT_BLOCK(s, val) \ + do { SB_V1_DISK_SUPER_BLOCK(s)->s_root_block = cpu_to_le32(val); } while (0) +#define PUT_SB_TREE_HEIGHT(s, val) \ + do { SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height = cpu_to_le16(val); } while (0) +#define PUT_SB_REISERFS_STATE(s, val) \ + do { SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state = cpu_to_le16(val); } while (0) +#define PUT_SB_VERSION(s, val) \ + do { SB_V1_DISK_SUPER_BLOCK(s)->s_version = cpu_to_le16(val); } while (0) +#define PUT_SB_BMAP_NR(s, val) \ + do { SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr = cpu_to_le16 (val); } while (0) + +#define SB_ONDISK_JP(s) (&SB_V1_DISK_SUPER_BLOCK(s)->s_journal) +#define SB_ONDISK_JOURNAL_SIZE(s) \ + le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_size)) +#define SB_ONDISK_JOURNAL_1st_BLOCK(s) \ + le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_1st_block)) +#define SB_ONDISK_JOURNAL_DEVICE(s) \ + le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_dev)) +#define SB_ONDISK_RESERVED_FOR_JOURNAL(s) \ + le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_reserved_for_journal)) + +#define is_block_in_log_or_reserved_area(s, block) \ + block >= SB_JOURNAL_1st_RESERVED_BLOCK(s) \ + && block < SB_JOURNAL_1st_RESERVED_BLOCK(s) + \ + ((!is_reiserfs_jr(SB_DISK_SUPER_BLOCK(s)) ? \ + SB_ONDISK_JOURNAL_SIZE(s) + 1 : SB_ONDISK_RESERVED_FOR_JOURNAL(s))) + +int is_reiserfs_3_5(struct reiserfs_super_block *rs); +int is_reiserfs_3_6(struct reiserfs_super_block *rs); +int is_reiserfs_jr(struct reiserfs_super_block *rs); + +/* ReiserFS leaves the first 64k unused, so that partition labels have + enough space. If someone wants to write a fancy bootloader that + needs more than 64k, let us know, and this will be increased in size. + This number must be larger than than the largest block size on any + platform, or code will break. 
-Hans */ +#define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024) +#define REISERFS_FIRST_BLOCK unused_define +#define REISERFS_JOURNAL_OFFSET_IN_BYTES REISERFS_DISK_OFFSET_IN_BYTES + +/* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */ +#define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024) + +/* reiserfs internal error code (used by search_by_key and fix_nodes)) */ +#define CARRY_ON 0 +#define REPEAT_SEARCH -1 +#define IO_ERROR -2 +#define NO_DISK_SPACE -3 +#define NO_BALANCING_NEEDED (-4) +#define NO_MORE_UNUSED_CONTIGUOUS_BLOCKS (-5) +#define QUOTA_EXCEEDED -6 + +typedef __u32 b_blocknr_t; +typedef __le32 unp_t; + +struct unfm_nodeinfo { + unp_t unfm_nodenum; + unsigned short unfm_freespace; +}; + +/* there are two formats of keys: 3.5 and 3.6 + */ +#define KEY_FORMAT_3_5 0 +#define KEY_FORMAT_3_6 1 + +/* there are two stat datas */ +#define STAT_DATA_V1 0 +#define STAT_DATA_V2 1 + +static inline struct reiserfs_inode_info *REISERFS_I(const struct inode *inode) +{ + return container_of(inode, struct reiserfs_inode_info, vfs_inode); +} + +static inline struct reiserfs_sb_info *REISERFS_SB(const struct super_block *sb) +{ + return sb->s_fs_info; +} + +/* Don't trust REISERFS_SB(sb)->s_bmap_nr, it's a u16 + * which overflows on large file systems. */ +static inline __u32 reiserfs_bmap_count(struct super_block *sb) +{ + return (SB_BLOCK_COUNT(sb) - 1) / (sb->s_blocksize * 8) + 1; +} + +static inline int bmap_would_wrap(unsigned bmap_nr) +{ + return bmap_nr > ((1LL << 16) - 1); +} + +/** this says about version of key of all items (but stat data) the + object consists of */ +#define get_inode_item_key_version( inode ) \ + ((REISERFS_I(inode)->i_flags & i_item_key_version_mask) ? KEY_FORMAT_3_6 : KEY_FORMAT_3_5) + +#define set_inode_item_key_version( inode, version ) \ + ({ if((version)==KEY_FORMAT_3_6) \ + REISERFS_I(inode)->i_flags |= i_item_key_version_mask; \ + else \ + REISERFS_I(inode)->i_flags &= ~i_item_key_version_mask; }) + +#define get_inode_sd_version(inode) \ + ((REISERFS_I(inode)->i_flags & i_stat_data_version_mask) ? STAT_DATA_V2 : STAT_DATA_V1) + +#define set_inode_sd_version(inode, version) \ + ({ if((version)==STAT_DATA_V2) \ + REISERFS_I(inode)->i_flags |= i_stat_data_version_mask; \ + else \ + REISERFS_I(inode)->i_flags &= ~i_stat_data_version_mask; }) + +/* This is an aggressive tail suppression policy, I am hoping it + improves our benchmarks. The principle behind it is that percentage + space saving is what matters, not absolute space saving. This is + non-intuitive, but it helps to understand it if you consider that the + cost to access 4 blocks is not much more than the cost to access 1 + block, if you have to do a seek and rotate. A tail risks a + non-linear disk access that is significant as a percentage of total + time cost for a 4 block file and saves an amount of space that is + less significant as a percentage of space, or so goes the hypothesis. 
+ -Hans */ +#define STORE_TAIL_IN_UNFM_S1(n_file_size,n_tail_size,n_block_size) \ +(\ + (!(n_tail_size)) || \ + (((n_tail_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) || \ + ( (n_file_size) >= (n_block_size) * 4 ) || \ + ( ( (n_file_size) >= (n_block_size) * 3 ) && \ + ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size))/4) ) || \ + ( ( (n_file_size) >= (n_block_size) * 2 ) && \ + ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size))/2) ) || \ + ( ( (n_file_size) >= (n_block_size) ) && \ + ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size) * 3)/4) ) ) \ +) + +/* Another strategy for tails, this one means only create a tail if all the + file would fit into one DIRECT item. + Primary intention for this one is to increase performance by decreasing + seeking. +*/ +#define STORE_TAIL_IN_UNFM_S2(n_file_size,n_tail_size,n_block_size) \ +(\ + (!(n_tail_size)) || \ + (((n_file_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) ) \ +) + +/* + * values for s_umount_state field + */ +#define REISERFS_VALID_FS 1 +#define REISERFS_ERROR_FS 2 + +// +// there are 5 item types currently +// +#define TYPE_STAT_DATA 0 +#define TYPE_INDIRECT 1 +#define TYPE_DIRECT 2 +#define TYPE_DIRENTRY 3 +#define TYPE_MAXTYPE 3 +#define TYPE_ANY 15 // FIXME: comment is required + +/***************************************************************************/ +/* KEY & ITEM HEAD */ +/***************************************************************************/ + +// +// directories use this key as well as old files +// +struct offset_v1 { + __le32 k_offset; + __le32 k_uniqueness; +} __attribute__ ((__packed__)); + +struct offset_v2 { + __le64 v; +} __attribute__ ((__packed__)); + +static inline __u16 offset_v2_k_type(const struct offset_v2 *v2) +{ + __u8 type = le64_to_cpu(v2->v) >> 60; + return (type <= TYPE_MAXTYPE) ? type : TYPE_ANY; +} + +static inline void set_offset_v2_k_type(struct offset_v2 *v2, int type) +{ + v2->v = + (v2->v & cpu_to_le64(~0ULL >> 4)) | cpu_to_le64((__u64) type << 60); +} + +static inline loff_t offset_v2_k_offset(const struct offset_v2 *v2) +{ + return le64_to_cpu(v2->v) & (~0ULL >> 4); +} + +static inline void set_offset_v2_k_offset(struct offset_v2 *v2, loff_t offset) +{ + offset &= (~0ULL >> 4); + v2->v = (v2->v & cpu_to_le64(15ULL << 60)) | cpu_to_le64(offset); +} + +/* Key of an item determines its location in the S+tree, and + is composed of 4 components */ +struct reiserfs_key { + __le32 k_dir_id; /* packing locality: by default parent + directory object id */ + __le32 k_objectid; /* object identifier */ + union { + struct offset_v1 k_offset_v1; + struct offset_v2 k_offset_v2; + } __attribute__ ((__packed__)) u; +} __attribute__ ((__packed__)); + +struct in_core_key { + __u32 k_dir_id; /* packing locality: by default parent + directory object id */ + __u32 k_objectid; /* object identifier */ + __u64 k_offset; + __u8 k_type; +}; + +struct cpu_key { + struct in_core_key on_disk_key; + int version; + int key_length; /* 3 in all cases but direct2indirect and + indirect2direct conversion */ +}; + +/* Our function for comparing keys can compare keys of different + lengths. It takes as a parameter the length of the keys it is to + compare. These defines are used in determining what is to be passed + to it as that parameter. 
*/ +#define REISERFS_FULL_KEY_LEN 4 +#define REISERFS_SHORT_KEY_LEN 2 + +/* The result of the key compare */ +#define FIRST_GREATER 1 +#define SECOND_GREATER -1 +#define KEYS_IDENTICAL 0 +#define KEY_FOUND 1 +#define KEY_NOT_FOUND 0 + +#define KEY_SIZE (sizeof(struct reiserfs_key)) +#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32)) + +/* return values for search_by_key and clones */ +#define ITEM_FOUND 1 +#define ITEM_NOT_FOUND 0 +#define ENTRY_FOUND 1 +#define ENTRY_NOT_FOUND 0 +#define DIRECTORY_NOT_FOUND -1 +#define REGULAR_FILE_FOUND -2 +#define DIRECTORY_FOUND -3 +#define BYTE_FOUND 1 +#define BYTE_NOT_FOUND 0 +#define FILE_NOT_FOUND -1 + +#define POSITION_FOUND 1 +#define POSITION_NOT_FOUND 0 + +// return values for reiserfs_find_entry and search_by_entry_key +#define NAME_FOUND 1 +#define NAME_NOT_FOUND 0 +#define GOTO_PREVIOUS_ITEM 2 +#define NAME_FOUND_INVISIBLE 3 + +/* Everything in the filesystem is stored as a set of items. The + item head contains the key of the item, its free space (for + indirect items) and specifies the location of the item itself + within the block. */ + +struct item_head { + /* Everything in the tree is found by searching for it based on + * its key.*/ + struct reiserfs_key ih_key; + union { + /* The free space in the last unformatted node of an + indirect item if this is an indirect item. This + equals 0xFFFF iff this is a direct item or stat data + item. Note that the key, not this field, is used to + determine the item type, and thus which field this + union contains. */ + __le16 ih_free_space_reserved; + /* Iff this is a directory item, this field equals the + number of directory entries in the directory item. */ + __le16 ih_entry_count; + } __attribute__ ((__packed__)) u; + __le16 ih_item_len; /* total size of the item body */ + __le16 ih_item_location; /* an offset to the item body + * within the block */ + __le16 ih_version; /* 0 for all old items, 2 for new + ones. Highest bit is set by fsck + temporary, cleaned after all + done */ +} __attribute__ ((__packed__)); +/* size of item header */ +#define IH_SIZE (sizeof(struct item_head)) + +#define ih_free_space(ih) le16_to_cpu((ih)->u.ih_free_space_reserved) +#define ih_version(ih) le16_to_cpu((ih)->ih_version) +#define ih_entry_count(ih) le16_to_cpu((ih)->u.ih_entry_count) +#define ih_location(ih) le16_to_cpu((ih)->ih_item_location) +#define ih_item_len(ih) le16_to_cpu((ih)->ih_item_len) + +#define put_ih_free_space(ih, val) do { (ih)->u.ih_free_space_reserved = cpu_to_le16(val); } while(0) +#define put_ih_version(ih, val) do { (ih)->ih_version = cpu_to_le16(val); } while (0) +#define put_ih_entry_count(ih, val) do { (ih)->u.ih_entry_count = cpu_to_le16(val); } while (0) +#define put_ih_location(ih, val) do { (ih)->ih_item_location = cpu_to_le16(val); } while (0) +#define put_ih_item_len(ih, val) do { (ih)->ih_item_len = cpu_to_le16(val); } while (0) + +#define unreachable_item(ih) (ih_version(ih) & (1 << 15)) + +#define get_ih_free_space(ih) (ih_version (ih) == KEY_FORMAT_3_6 ? 0 : ih_free_space (ih)) +#define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == KEY_FORMAT_3_6) ? 0 : (val))) + +/* these operate on indirect items, where you've got an array of ints +** at a possibly unaligned location. These are a noop on ia32 +** +** p is the array of __u32, i is the index into the array, v is the value +** to store there. 
+*/ +#define get_block_num(p, i) get_unaligned_le32((p) + (i)) +#define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i)) + +// +// in old version uniqueness field shows key type +// +#define V1_SD_UNIQUENESS 0 +#define V1_INDIRECT_UNIQUENESS 0xfffffffe +#define V1_DIRECT_UNIQUENESS 0xffffffff +#define V1_DIRENTRY_UNIQUENESS 500 +#define V1_ANY_UNIQUENESS 555 // FIXME: comment is required + +// +// here are conversion routines +// +static inline int uniqueness2type(__u32 uniqueness) CONSTF; +static inline int uniqueness2type(__u32 uniqueness) +{ + switch ((int)uniqueness) { + case V1_SD_UNIQUENESS: + return TYPE_STAT_DATA; + case V1_INDIRECT_UNIQUENESS: + return TYPE_INDIRECT; + case V1_DIRECT_UNIQUENESS: + return TYPE_DIRECT; + case V1_DIRENTRY_UNIQUENESS: + return TYPE_DIRENTRY; + case V1_ANY_UNIQUENESS: + default: + return TYPE_ANY; + } +} + +static inline __u32 type2uniqueness(int type) CONSTF; +static inline __u32 type2uniqueness(int type) +{ + switch (type) { + case TYPE_STAT_DATA: + return V1_SD_UNIQUENESS; + case TYPE_INDIRECT: + return V1_INDIRECT_UNIQUENESS; + case TYPE_DIRECT: + return V1_DIRECT_UNIQUENESS; + case TYPE_DIRENTRY: + return V1_DIRENTRY_UNIQUENESS; + case TYPE_ANY: + default: + return V1_ANY_UNIQUENESS; + } +} + +// +// key is pointer to on disk key which is stored in le, result is cpu, +// there is no way to get version of object from key, so, provide +// version to these defines +// +static inline loff_t le_key_k_offset(int version, + const struct reiserfs_key *key) +{ + return (version == KEY_FORMAT_3_5) ? + le32_to_cpu(key->u.k_offset_v1.k_offset) : + offset_v2_k_offset(&(key->u.k_offset_v2)); +} + +static inline loff_t le_ih_k_offset(const struct item_head *ih) +{ + return le_key_k_offset(ih_version(ih), &(ih->ih_key)); +} + +static inline loff_t le_key_k_type(int version, const struct reiserfs_key *key) +{ + return (version == KEY_FORMAT_3_5) ? + uniqueness2type(le32_to_cpu(key->u.k_offset_v1.k_uniqueness)) : + offset_v2_k_type(&(key->u.k_offset_v2)); +} + +static inline loff_t le_ih_k_type(const struct item_head *ih) +{ + return le_key_k_type(ih_version(ih), &(ih->ih_key)); +} + +static inline void set_le_key_k_offset(int version, struct reiserfs_key *key, + loff_t offset) +{ + (version == KEY_FORMAT_3_5) ? (void)(key->u.k_offset_v1.k_offset = cpu_to_le32(offset)) : /* jdm check */ + (void)(set_offset_v2_k_offset(&(key->u.k_offset_v2), offset)); +} + +static inline void set_le_ih_k_offset(struct item_head *ih, loff_t offset) +{ + set_le_key_k_offset(ih_version(ih), &(ih->ih_key), offset); +} + +static inline void set_le_key_k_type(int version, struct reiserfs_key *key, + int type) +{ + (version == KEY_FORMAT_3_5) ? 
+ (void)(key->u.k_offset_v1.k_uniqueness = + cpu_to_le32(type2uniqueness(type))) + : (void)(set_offset_v2_k_type(&(key->u.k_offset_v2), type)); +} + +static inline void set_le_ih_k_type(struct item_head *ih, int type) +{ + set_le_key_k_type(ih_version(ih), &(ih->ih_key), type); +} + +static inline int is_direntry_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_DIRENTRY; +} + +static inline int is_direct_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_DIRECT; +} + +static inline int is_indirect_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_INDIRECT; +} + +static inline int is_statdata_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_STAT_DATA; +} + +// +// item header has version. +// +static inline int is_direntry_le_ih(struct item_head *ih) +{ + return is_direntry_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_direct_le_ih(struct item_head *ih) +{ + return is_direct_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_indirect_le_ih(struct item_head *ih) +{ + return is_indirect_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_statdata_le_ih(struct item_head *ih) +{ + return is_statdata_le_key(ih_version(ih), &ih->ih_key); +} + +// +// key is pointer to cpu key, result is cpu +// +static inline loff_t cpu_key_k_offset(const struct cpu_key *key) +{ + return key->on_disk_key.k_offset; +} + +static inline loff_t cpu_key_k_type(const struct cpu_key *key) +{ + return key->on_disk_key.k_type; +} + +static inline void set_cpu_key_k_offset(struct cpu_key *key, loff_t offset) +{ + key->on_disk_key.k_offset = offset; +} + +static inline void set_cpu_key_k_type(struct cpu_key *key, int type) +{ + key->on_disk_key.k_type = type; +} + +static inline void cpu_key_k_offset_dec(struct cpu_key *key) +{ + key->on_disk_key.k_offset--; +} + +#define is_direntry_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRENTRY) +#define is_direct_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRECT) +#define is_indirect_cpu_key(key) (cpu_key_k_type (key) == TYPE_INDIRECT) +#define is_statdata_cpu_key(key) (cpu_key_k_type (key) == TYPE_STAT_DATA) + +/* are these used ? */ +#define is_direntry_cpu_ih(ih) (is_direntry_cpu_key (&((ih)->ih_key))) +#define is_direct_cpu_ih(ih) (is_direct_cpu_key (&((ih)->ih_key))) +#define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key))) +#define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key))) + +#define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \ + (!COMP_SHORT_KEYS(ih, key) && \ + I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize)) + +/* maximal length of item */ +#define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE) +#define MIN_ITEM_LEN 1 + +/* object identifier for root dir */ +#define REISERFS_ROOT_OBJECTID 2 +#define REISERFS_ROOT_PARENT_OBJECTID 1 + +extern struct reiserfs_key root_key; + +/* + * Picture represents a leaf of the S+tree + * ______________________________________________________ + * | | Array of | | | + * |Block | Object-Item | F r e e | Objects- | + * | head | Headers | S p a c e | Items | + * |______|_______________|___________________|___________| + */ + +/* Header of a disk block. More precisely, header of a formatted leaf + or internal node, and not the header of an unformatted node. */ +struct block_head { + __le16 blk_level; /* Level of a block in the tree. 
*/ + __le16 blk_nr_item; /* Number of keys/items in a block. */ + __le16 blk_free_space; /* Block free space in bytes. */ + __le16 blk_reserved; + /* dump this in v4/planA */ + struct reiserfs_key blk_right_delim_key; /* kept only for compatibility */ +}; + +#define BLKH_SIZE (sizeof(struct block_head)) +#define blkh_level(p_blkh) (le16_to_cpu((p_blkh)->blk_level)) +#define blkh_nr_item(p_blkh) (le16_to_cpu((p_blkh)->blk_nr_item)) +#define blkh_free_space(p_blkh) (le16_to_cpu((p_blkh)->blk_free_space)) +#define blkh_reserved(p_blkh) (le16_to_cpu((p_blkh)->blk_reserved)) +#define set_blkh_level(p_blkh,val) ((p_blkh)->blk_level = cpu_to_le16(val)) +#define set_blkh_nr_item(p_blkh,val) ((p_blkh)->blk_nr_item = cpu_to_le16(val)) +#define set_blkh_free_space(p_blkh,val) ((p_blkh)->blk_free_space = cpu_to_le16(val)) +#define set_blkh_reserved(p_blkh,val) ((p_blkh)->blk_reserved = cpu_to_le16(val)) +#define blkh_right_delim_key(p_blkh) ((p_blkh)->blk_right_delim_key) +#define set_blkh_right_delim_key(p_blkh,val) ((p_blkh)->blk_right_delim_key = val) + +/* + * values for blk_level field of the struct block_head + */ + +#define FREE_LEVEL 0 /* when node gets removed from the tree its + blk_level is set to FREE_LEVEL. It is then + used to see whether the node is still in the + tree */ + +#define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */ + +/* Given the buffer head of a formatted node, resolve to the block head of that node. */ +#define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data)) +/* Number of items that are in buffer. */ +#define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh))) +#define B_LEVEL(bh) (blkh_level(B_BLK_HEAD(bh))) +#define B_FREE_SPACE(bh) (blkh_free_space(B_BLK_HEAD(bh))) + +#define PUT_B_NR_ITEMS(bh, val) do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0) +#define PUT_B_LEVEL(bh, val) do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0) +#define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0) + +/* Get right delimiting key. -- little endian */ +#define B_PRIGHT_DELIM_KEY(bh) (&(blk_right_delim_key(B_BLK_HEAD(bh)))) + +/* Does the buffer contain a disk leaf. */ +#define B_IS_ITEMS_LEVEL(bh) (B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL) + +/* Does the buffer contain a disk internal node */ +#define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \ + && B_LEVEL(bh) <= MAX_HEIGHT) + +/***************************************************************************/ +/* STAT DATA */ +/***************************************************************************/ + +// +// old stat data is 32 bytes long. We are going to distinguish new one by +// different size +// +struct stat_data_v1 { + __le16 sd_mode; /* file type, permissions */ + __le16 sd_nlink; /* number of hard links */ + __le16 sd_uid; /* owner */ + __le16 sd_gid; /* group */ + __le32 sd_size; /* file size */ + __le32 sd_atime; /* time of last access */ + __le32 sd_mtime; /* time file was last modified */ + __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */ + union { + __le32 sd_rdev; + __le32 sd_blocks; /* number of blocks file uses */ + } __attribute__ ((__packed__)) u; + __le32 sd_first_direct_byte; /* first byte of file which is stored + in a direct item: except that if it + equals 1 it is a symlink and if it + equals ~(__u32)0 there is no + direct item. The existence of this + field really grates on me. Let's + replace it with a macro based on + sd_size and our tail suppression + policy. Someday. 
-Hans */ +} __attribute__ ((__packed__)); + +#define SD_V1_SIZE (sizeof(struct stat_data_v1)) +#define stat_data_v1(ih) (ih_version (ih) == KEY_FORMAT_3_5) +#define sd_v1_mode(sdp) (le16_to_cpu((sdp)->sd_mode)) +#define set_sd_v1_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v)) +#define sd_v1_nlink(sdp) (le16_to_cpu((sdp)->sd_nlink)) +#define set_sd_v1_nlink(sdp,v) ((sdp)->sd_nlink = cpu_to_le16(v)) +#define sd_v1_uid(sdp) (le16_to_cpu((sdp)->sd_uid)) +#define set_sd_v1_uid(sdp,v) ((sdp)->sd_uid = cpu_to_le16(v)) +#define sd_v1_gid(sdp) (le16_to_cpu((sdp)->sd_gid)) +#define set_sd_v1_gid(sdp,v) ((sdp)->sd_gid = cpu_to_le16(v)) +#define sd_v1_size(sdp) (le32_to_cpu((sdp)->sd_size)) +#define set_sd_v1_size(sdp,v) ((sdp)->sd_size = cpu_to_le32(v)) +#define sd_v1_atime(sdp) (le32_to_cpu((sdp)->sd_atime)) +#define set_sd_v1_atime(sdp,v) ((sdp)->sd_atime = cpu_to_le32(v)) +#define sd_v1_mtime(sdp) (le32_to_cpu((sdp)->sd_mtime)) +#define set_sd_v1_mtime(sdp,v) ((sdp)->sd_mtime = cpu_to_le32(v)) +#define sd_v1_ctime(sdp) (le32_to_cpu((sdp)->sd_ctime)) +#define set_sd_v1_ctime(sdp,v) ((sdp)->sd_ctime = cpu_to_le32(v)) +#define sd_v1_rdev(sdp) (le32_to_cpu((sdp)->u.sd_rdev)) +#define set_sd_v1_rdev(sdp,v) ((sdp)->u.sd_rdev = cpu_to_le32(v)) +#define sd_v1_blocks(sdp) (le32_to_cpu((sdp)->u.sd_blocks)) +#define set_sd_v1_blocks(sdp,v) ((sdp)->u.sd_blocks = cpu_to_le32(v)) +#define sd_v1_first_direct_byte(sdp) \ + (le32_to_cpu((sdp)->sd_first_direct_byte)) +#define set_sd_v1_first_direct_byte(sdp,v) \ + ((sdp)->sd_first_direct_byte = cpu_to_le32(v)) + +/* inode flags stored in sd_attrs (nee sd_reserved) */ + +/* we want common flags to have the same values as in ext2, + so chattr(1) will work without problems */ +#define REISERFS_IMMUTABLE_FL FS_IMMUTABLE_FL +#define REISERFS_APPEND_FL FS_APPEND_FL +#define REISERFS_SYNC_FL FS_SYNC_FL +#define REISERFS_NOATIME_FL FS_NOATIME_FL +#define REISERFS_NODUMP_FL FS_NODUMP_FL +#define REISERFS_SECRM_FL FS_SECRM_FL +#define REISERFS_UNRM_FL FS_UNRM_FL +#define REISERFS_COMPR_FL FS_COMPR_FL +#define REISERFS_NOTAIL_FL FS_NOTAIL_FL + +/* persistent flags that file inherits from the parent directory */ +#define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL | \ + REISERFS_SYNC_FL | \ + REISERFS_NOATIME_FL | \ + REISERFS_NODUMP_FL | \ + REISERFS_SECRM_FL | \ + REISERFS_COMPR_FL | \ + REISERFS_NOTAIL_FL ) + +/* Stat Data on disk (reiserfs version of UFS disk inode minus the + address blocks) */ +struct stat_data { + __le16 sd_mode; /* file type, permissions */ + __le16 sd_attrs; /* persistent inode flags */ + __le32 sd_nlink; /* number of hard links */ + __le64 sd_size; /* file size */ + __le32 sd_uid; /* owner */ + __le32 sd_gid; /* group */ + __le32 sd_atime; /* time of last access */ + __le32 sd_mtime; /* time file was last modified */ + __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */ + __le32 sd_blocks; + union { + __le32 sd_rdev; + __le32 sd_generation; + //__le32 sd_first_direct_byte; + /* first byte of file which is stored in a + direct item: except that if it equals 1 + it is a symlink and if it equals + ~(__u32)0 there is no direct item. The + existence of this field really grates + on me. Let's replace it with a macro + based on sd_size and our tail + suppression policy? 
*/ + } __attribute__ ((__packed__)) u; +} __attribute__ ((__packed__)); +// +// this is 44 bytes long +// +#define SD_SIZE (sizeof(struct stat_data)) +#define SD_V2_SIZE SD_SIZE +#define stat_data_v2(ih) (ih_version (ih) == KEY_FORMAT_3_6) +#define sd_v2_mode(sdp) (le16_to_cpu((sdp)->sd_mode)) +#define set_sd_v2_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v)) +/* sd_reserved */ +/* set_sd_reserved */ +#define sd_v2_nlink(sdp) (le32_to_cpu((sdp)->sd_nlink)) +#define set_sd_v2_nlink(sdp,v) ((sdp)->sd_nlink = cpu_to_le32(v)) +#define sd_v2_size(sdp) (le64_to_cpu((sdp)->sd_size)) +#define set_sd_v2_size(sdp,v) ((sdp)->sd_size = cpu_to_le64(v)) +#define sd_v2_uid(sdp) (le32_to_cpu((sdp)->sd_uid)) +#define set_sd_v2_uid(sdp,v) ((sdp)->sd_uid = cpu_to_le32(v)) +#define sd_v2_gid(sdp) (le32_to_cpu((sdp)->sd_gid)) +#define set_sd_v2_gid(sdp,v) ((sdp)->sd_gid = cpu_to_le32(v)) +#define sd_v2_atime(sdp) (le32_to_cpu((sdp)->sd_atime)) +#define set_sd_v2_atime(sdp,v) ((sdp)->sd_atime = cpu_to_le32(v)) +#define sd_v2_mtime(sdp) (le32_to_cpu((sdp)->sd_mtime)) +#define set_sd_v2_mtime(sdp,v) ((sdp)->sd_mtime = cpu_to_le32(v)) +#define sd_v2_ctime(sdp) (le32_to_cpu((sdp)->sd_ctime)) +#define set_sd_v2_ctime(sdp,v) ((sdp)->sd_ctime = cpu_to_le32(v)) +#define sd_v2_blocks(sdp) (le32_to_cpu((sdp)->sd_blocks)) +#define set_sd_v2_blocks(sdp,v) ((sdp)->sd_blocks = cpu_to_le32(v)) +#define sd_v2_rdev(sdp) (le32_to_cpu((sdp)->u.sd_rdev)) +#define set_sd_v2_rdev(sdp,v) ((sdp)->u.sd_rdev = cpu_to_le32(v)) +#define sd_v2_generation(sdp) (le32_to_cpu((sdp)->u.sd_generation)) +#define set_sd_v2_generation(sdp,v) ((sdp)->u.sd_generation = cpu_to_le32(v)) +#define sd_v2_attrs(sdp) (le16_to_cpu((sdp)->sd_attrs)) +#define set_sd_v2_attrs(sdp,v) ((sdp)->sd_attrs = cpu_to_le16(v)) + +/***************************************************************************/ +/* DIRECTORY STRUCTURE */ +/***************************************************************************/ +/* + Picture represents the structure of directory items + ________________________________________________ + | Array of | | | | | | + | directory |N-1| N-2 | .... | 1st |0th| + | entry headers | | | | | | + |_______________|___|_____|________|_______|___| + <---- directory entries ------> + + First directory item has k_offset component 1. We store "." and ".." + in one item, always, we never split "." and ".." into differing + items. This makes, among other things, the code for removing + directories simpler. */ +#define SD_OFFSET 0 +#define SD_UNIQUENESS 0 +#define DOT_OFFSET 1 +#define DOT_DOT_OFFSET 2 +#define DIRENTRY_UNIQUENESS 500 + +/* */ +#define FIRST_ITEM_OFFSET 1 + +/* + Q: How to get key of object pointed to by entry from entry? + + A: Each directory entry has its header. 
This header has deh_dir_id and deh_objectid fields, those are key + of object, entry points to */ + +/* NOT IMPLEMENTED: + Directory will someday contain stat data of object */ + +struct reiserfs_de_head { + __le32 deh_offset; /* third component of the directory entry key */ + __le32 deh_dir_id; /* objectid of the parent directory of the object, that is referenced + by directory entry */ + __le32 deh_objectid; /* objectid of the object, that is referenced by directory entry */ + __le16 deh_location; /* offset of name in the whole item */ + __le16 deh_state; /* whether 1) entry contains stat data (for future), and 2) whether + entry is hidden (unlinked) */ +} __attribute__ ((__packed__)); +#define DEH_SIZE sizeof(struct reiserfs_de_head) +#define deh_offset(p_deh) (le32_to_cpu((p_deh)->deh_offset)) +#define deh_dir_id(p_deh) (le32_to_cpu((p_deh)->deh_dir_id)) +#define deh_objectid(p_deh) (le32_to_cpu((p_deh)->deh_objectid)) +#define deh_location(p_deh) (le16_to_cpu((p_deh)->deh_location)) +#define deh_state(p_deh) (le16_to_cpu((p_deh)->deh_state)) + +#define put_deh_offset(p_deh,v) ((p_deh)->deh_offset = cpu_to_le32((v))) +#define put_deh_dir_id(p_deh,v) ((p_deh)->deh_dir_id = cpu_to_le32((v))) +#define put_deh_objectid(p_deh,v) ((p_deh)->deh_objectid = cpu_to_le32((v))) +#define put_deh_location(p_deh,v) ((p_deh)->deh_location = cpu_to_le16((v))) +#define put_deh_state(p_deh,v) ((p_deh)->deh_state = cpu_to_le16((v))) + +/* empty directory contains two entries "." and ".." and their headers */ +#define EMPTY_DIR_SIZE \ +(DEH_SIZE * 2 + ROUND_UP (strlen (".")) + ROUND_UP (strlen (".."))) + +/* old format directories have this size when empty */ +#define EMPTY_DIR_SIZE_V1 (DEH_SIZE * 2 + 3) + +#define DEH_Statdata 0 /* not used now */ +#define DEH_Visible 2 + +/* 64 bit systems (and the S/390) need to be aligned explicitly -jdm */ +#if BITS_PER_LONG == 64 || defined(__s390__) || defined(__hppa__) +# define ADDR_UNALIGNED_BITS (3) +#endif + +/* These are only used to manipulate deh_state. 
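Aside, not part of the patch: deh_state is a little-endian 16-bit field, so DEH_Statdata and DEH_Visible above are bit numbers within the little-endian value rather than within a host-order integer. A standalone sketch of that bit numbering, using invented helper names (le_set_bit/le_test_bit are not kernel functions):

#include <stdint.h>
#include <stdio.h>

#define DEH_STATDATA_BIT 0	/* mirrors DEH_Statdata (unused) */
#define DEH_VISIBLE_BIT  2	/* mirrors DEH_Visible */

/* Bit N of a little-endian field lives in byte N/8, bit N%8,
 * independent of host endianness. */
static void le_set_bit(uint8_t *field, unsigned int nr)
{
	field[nr / 8] |= (uint8_t)(1u << (nr % 8));
}

static int le_test_bit(const uint8_t *field, unsigned int nr)
{
	return (field[nr / 8] >> (nr % 8)) & 1;
}

int main(void)
{
	uint8_t deh_state[2] = { 0, 0 };	/* stands in for __le16 deh_state */

	le_set_bit(deh_state, DEH_VISIBLE_BIT);	/* cf. mark_de_visible() defined just below */
	printf("visible=%d with_sd=%d\n",
	       le_test_bit(deh_state, DEH_VISIBLE_BIT),
	       le_test_bit(deh_state, DEH_STATDATA_BIT));
	return 0;
}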
+ * Because of this, we'll use the ext2_ bit routines, + * since they are little endian */ +#ifdef ADDR_UNALIGNED_BITS + +# define aligned_address(addr) ((void *)((long)(addr) & ~((1UL << ADDR_UNALIGNED_BITS) - 1))) +# define unaligned_offset(addr) (((int)((long)(addr) & ((1 << ADDR_UNALIGNED_BITS) - 1))) << 3) + +# define set_bit_unaligned(nr, addr) \ + __test_and_set_bit_le((nr) + unaligned_offset(addr), aligned_address(addr)) +# define clear_bit_unaligned(nr, addr) \ + __test_and_clear_bit_le((nr) + unaligned_offset(addr), aligned_address(addr)) +# define test_bit_unaligned(nr, addr) \ + test_bit_le((nr) + unaligned_offset(addr), aligned_address(addr)) + +#else + +# define set_bit_unaligned(nr, addr) __test_and_set_bit_le(nr, addr) +# define clear_bit_unaligned(nr, addr) __test_and_clear_bit_le(nr, addr) +# define test_bit_unaligned(nr, addr) test_bit_le(nr, addr) + +#endif + +#define mark_de_with_sd(deh) set_bit_unaligned (DEH_Statdata, &((deh)->deh_state)) +#define mark_de_without_sd(deh) clear_bit_unaligned (DEH_Statdata, &((deh)->deh_state)) +#define mark_de_visible(deh) set_bit_unaligned (DEH_Visible, &((deh)->deh_state)) +#define mark_de_hidden(deh) clear_bit_unaligned (DEH_Visible, &((deh)->deh_state)) + +#define de_with_sd(deh) test_bit_unaligned (DEH_Statdata, &((deh)->deh_state)) +#define de_visible(deh) test_bit_unaligned (DEH_Visible, &((deh)->deh_state)) +#define de_hidden(deh) !test_bit_unaligned (DEH_Visible, &((deh)->deh_state)) + +extern void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid, + __le32 par_dirid, __le32 par_objid); +extern void make_empty_dir_item(char *body, __le32 dirid, __le32 objid, + __le32 par_dirid, __le32 par_objid); + +/* array of the entry headers */ + /* get item body */ +#define B_I_PITEM(bh,ih) ( (bh)->b_data + ih_location(ih) ) +#define B_I_DEH(bh,ih) ((struct reiserfs_de_head *)(B_I_PITEM(bh,ih))) + +/* length of the directory entry in directory item. This define + calculates length of i-th directory entry using directory entry + locations from dir entry head. When it calculates length of 0-th + directory entry, it uses length of whole item in place of entry + location of the non-existent following entry in the calculation. + See picture above.*/ +/* +#define I_DEH_N_ENTRY_LENGTH(ih,deh,i) \ +((i) ? (deh_location((deh)-1) - deh_location((deh))) : (ih_item_len((ih)) - deh_location((deh)))) +*/ +static inline int entry_length(const struct buffer_head *bh, + const struct item_head *ih, int pos_in_item) +{ + struct reiserfs_de_head *deh; + + deh = B_I_DEH(bh, ih) + pos_in_item; + if (pos_in_item) + return deh_location(deh - 1) - deh_location(deh); + + return ih_item_len(ih) - deh_location(deh); +} + +/* number of entries in the directory item, depends on ENTRY_COUNT being at the start of directory dynamic data. */ +#define I_ENTRY_COUNT(ih) (ih_entry_count((ih))) + +/* name by bh, ih and entry_num */ +#define B_I_E_NAME(bh,ih,entry_num) ((char *)(bh->b_data + ih_location(ih) + deh_location(B_I_DEH(bh,ih)+(entry_num)))) + +// two entries per block (at least) +#define REISERFS_MAX_NAME(block_size) 255 + +/* this structure is used for operations on directory entries. It is + not a disk structure. 
*/ +/* When reiserfs_find_entry or search_by_entry_key find directory + entry, they return filled reiserfs_dir_entry structure */ +struct reiserfs_dir_entry { + struct buffer_head *de_bh; + int de_item_num; + struct item_head *de_ih; + int de_entry_num; + struct reiserfs_de_head *de_deh; + int de_entrylen; + int de_namelen; + char *de_name; + unsigned long *de_gen_number_bit_string; + + __u32 de_dir_id; + __u32 de_objectid; + + struct cpu_key de_entry_key; +}; + +/* these defines are useful when a particular member of a reiserfs_dir_entry is needed */ + +/* pointer to file name, stored in entry */ +#define B_I_DEH_ENTRY_FILE_NAME(bh,ih,deh) (B_I_PITEM (bh, ih) + deh_location(deh)) + +/* length of name */ +#define I_DEH_N_ENTRY_FILE_NAME_LENGTH(ih,deh,entry_num) \ +(I_DEH_N_ENTRY_LENGTH (ih, deh, entry_num) - (de_with_sd (deh) ? SD_SIZE : 0)) + +/* hash value occupies bits from 7 up to 30 */ +#define GET_HASH_VALUE(offset) ((offset) & 0x7fffff80LL) +/* generation number occupies 7 bits starting from 0 up to 6 */ +#define GET_GENERATION_NUMBER(offset) ((offset) & 0x7fLL) +#define MAX_GENERATION_NUMBER 127 + +#define SET_GENERATION_NUMBER(offset,gen_number) (GET_HASH_VALUE(offset)|(gen_number)) + +/* + * Picture represents an internal node of the reiserfs tree + * ______________________________________________________ + * | | Array of | Array of | Free | + * |block | keys | pointers | space | + * | head | N | N+1 | | + * |______|_______________|___________________|___________| + */ + +/***************************************************************************/ +/* DISK CHILD */ +/***************************************************************************/ +/* Disk child pointer: The pointer from an internal node of the tree + to a node that is on disk. */ +struct disk_child { + __le32 dc_block_number; /* Disk child's block number. */ + __le16 dc_size; /* Disk child's used space. */ + __le16 dc_reserved; +}; + +#define DC_SIZE (sizeof(struct disk_child)) +#define dc_block_number(dc_p) (le32_to_cpu((dc_p)->dc_block_number)) +#define dc_size(dc_p) (le16_to_cpu((dc_p)->dc_size)) +#define put_dc_block_number(dc_p, val) do { (dc_p)->dc_block_number = cpu_to_le32(val); } while(0) +#define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0) + +/* Get disk child by buffer header and position in the tree node. */ +#define B_N_CHILD(bh, n_pos) ((struct disk_child *)\ +((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos))) + +/* Get disk child number by buffer header and position in the tree node. 
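Aside, not part of the patch: GET_HASH_VALUE, GET_GENERATION_NUMBER and SET_GENERATION_NUMBER earlier in this hunk describe how a directory entry's third key component packs the name hash into bits 7..30 and a collision-resolving generation number into bits 0..6. A minimal standalone sketch of that packing, with made-up names, assuming only what those macros state:

#include <stdint.h>
#include <stdio.h>

#define HASH_PART_MASK 0x7fffff80ULL	/* bits 7..30: name hash */
#define GEN_PART_MASK  0x7fULL		/* bits 0..6: generation, max 127 */

/* Same shape as SET_GENERATION_NUMBER(): keep the hash bits of the
 * offset and OR in a small generation number. */
static uint64_t pack_entry_offset(uint32_t name_hash, unsigned int gen)
{
	return ((uint64_t)name_hash & HASH_PART_MASK) | (gen & GEN_PART_MASK);
}

int main(void)
{
	uint64_t offset = pack_entry_offset(0x12345678u, 5);

	printf("entry offset : 0x%llx\n", (unsigned long long)offset);
	printf("hash value   : 0x%llx\n",
	       (unsigned long long)(offset & HASH_PART_MASK));
	printf("generation   : %llu\n",
	       (unsigned long long)(offset & GEN_PART_MASK));
	return 0;
}

Two names that hash to the same value share the hash part and differ only in the generation, which is why MAX_GENERATION_NUMBER (127) bounds the number of colliding names per hash value in a directory.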
*/ +#define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos))) +#define PUT_B_N_CHILD_NUM(bh, n_pos, val) \ + (put_dc_block_number(B_N_CHILD(bh, n_pos), val)) + + /* maximal value of field child_size in structure disk_child */ + /* child size is the combined size of all items and their headers */ +#define MAX_CHILD_SIZE(bh) ((int)( (bh)->b_size - BLKH_SIZE )) + +/* amount of used space in buffer (not including block head) */ +#define B_CHILD_SIZE(cur) (MAX_CHILD_SIZE(cur)-(B_FREE_SPACE(cur))) + +/* max and min number of keys in internal node */ +#define MAX_NR_KEY(bh) ( (MAX_CHILD_SIZE(bh)-DC_SIZE)/(KEY_SIZE+DC_SIZE) ) +#define MIN_NR_KEY(bh) (MAX_NR_KEY(bh)/2) + +/***************************************************************************/ +/* PATH STRUCTURES AND DEFINES */ +/***************************************************************************/ + +/* Search_by_key fills up the path from the root to the leaf as it descends the tree looking for the + key. It uses reiserfs_bread to try to find buffers in the cache given their block number. If it + does not find them in the cache it reads them from disk. For each node search_by_key finds using + reiserfs_bread it then uses bin_search to look through that node. bin_search will find the + position of the block_number of the next node if it is looking through an internal node. If it + is looking through a leaf node bin_search will find the position of the item which has key either + equal to given key, or which is the maximal key less than the given key. */ + +struct path_element { + struct buffer_head *pe_buffer; /* Pointer to the buffer at the path in the tree. */ + int pe_position; /* Position in the tree node which is placed in the */ + /* buffer above. */ +}; + +#define MAX_HEIGHT 5 /* maximal height of a tree. don't change this without changing JOURNAL_PER_BALANCE_CNT */ +#define EXTENDED_MAX_HEIGHT 7 /* Must be equals MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */ +#define FIRST_PATH_ELEMENT_OFFSET 2 /* Must be equal to at least 2. */ + +#define ILLEGAL_PATH_ELEMENT_OFFSET 1 /* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */ +#define MAX_FEB_SIZE 6 /* this MUST be MAX_HEIGHT + 1. See about FEB below */ + +/* We need to keep track of who the ancestors of nodes are. When we + perform a search we record which nodes were visited while + descending the tree looking for the node we searched for. This list + of nodes is called the path. This information is used while + performing balancing. Note that this path information may become + invalid, and this means we must check it when using it to see if it + is still valid. You'll need to read search_by_key and the comments + in it, especially about decrement_counters_in_path(), to understand + this structure. + +Paths make the code so much harder to work with and debug.... An +enormous number of bugs are due to them, and trying to write or modify +code that uses them just makes my head hurt. They are based on an +excessive effort to avoid disturbing the precious VFS code.:-( The +gods only know how we are going to SMP the code that uses them. +znodes are the way! */ + +#define PATH_READA 0x1 /* do read ahead */ +#define PATH_READA_BACK 0x2 /* read backwards */ + +struct treepath { + int path_length; /* Length of the array above. */ + int reada; + struct path_element path_elements[EXTENDED_MAX_HEIGHT]; /* Array of the path elements. 
*/ + int pos_in_item; +}; + +#define pos_in_item(path) ((path)->pos_in_item) + +#define INITIALIZE_PATH(var) \ +struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,} + +/* Get path element by path and path position. */ +#define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset)) + +/* Get buffer header at the path by path and path position. */ +#define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer) + +/* Get position in the element at the path by path and path position. */ +#define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position) + +#define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length)) + /* you know, to the person who didn't + write this the macro name does not + at first suggest what it does. + Maybe POSITION_FROM_PATH_END? Or + maybe we should just focus on + dumping paths... -Hans */ +#define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length)) + +#define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path)) + +/* in do_balance leaf has h == 0 in contrast with path structure, + where root has level == 0. That is why we need these defines */ +#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */ +#define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */ +#define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h)) +#define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */ + +#define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h)) + +#define get_last_bh(path) PATH_PLAST_BUFFER(path) +#define get_ih(path) PATH_PITEM_HEAD(path) +#define get_item_pos(path) PATH_LAST_POSITION(path) +#define get_item(path) ((void *)B_N_PITEM(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION (path))) +#define item_moved(ih,path) comp_items(ih, path) +#define path_changed(ih,path) comp_items (ih, path) + +/***************************************************************************/ +/* MISC */ +/***************************************************************************/ + +/* Size of pointer to the unformatted node. */ +#define UNFM_P_SIZE (sizeof(unp_t)) +#define UNFM_P_SHIFT 2 + +// in in-core inode key is stored on le form +#define INODE_PKEY(inode) ((struct reiserfs_key *)(REISERFS_I(inode)->i_key)) + +#define MAX_UL_INT 0xffffffff +#define MAX_INT 0x7ffffff +#define MAX_US_INT 0xffff + +// reiserfs version 2 has max offset 60 bits. 
Version 1 - 32 bit offset +#define U32_MAX (~(__u32)0) + +static inline loff_t max_reiserfs_offset(struct inode *inode) +{ + if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5) + return (loff_t) U32_MAX; + + return (loff_t) ((~(__u64) 0) >> 4); +} + +/*#define MAX_KEY_UNIQUENESS MAX_UL_INT*/ +#define MAX_KEY_OBJECTID MAX_UL_INT + +#define MAX_B_NUM MAX_UL_INT +#define MAX_FC_NUM MAX_US_INT + +/* the purpose is to detect overflow of an unsigned short */ +#define REISERFS_LINK_MAX (MAX_US_INT - 1000) + +/* The following defines are used in reiserfs_insert_item and reiserfs_append_item */ +#define REISERFS_KERNEL_MEM 0 /* reiserfs kernel memory mode */ +#define REISERFS_USER_MEM 1 /* reiserfs user memory mode */ + +#define fs_generation(s) (REISERFS_SB(s)->s_generation_counter) +#define get_generation(s) atomic_read (&fs_generation(s)) +#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen) +#define __fs_changed(gen,s) (gen != get_generation (s)) +#define fs_changed(gen,s) \ +({ \ + reiserfs_cond_resched(s); \ + __fs_changed(gen, s); \ +}) + +/***************************************************************************/ +/* FIXATE NODES */ +/***************************************************************************/ + +#define VI_TYPE_LEFT_MERGEABLE 1 +#define VI_TYPE_RIGHT_MERGEABLE 2 + +/* To make any changes in the tree we always first find node, that + contains item to be changed/deleted or place to insert a new + item. We call this node S. To do balancing we need to decide what + we will shift to left/right neighbor, or to a new node, where new + item will be etc. To make this analysis simpler we build virtual + node. Virtual node is an array of items, that will replace items of + node S. (For instance if we are going to delete an item, virtual + node does not contain it). Virtual node keeps information about + item sizes and types, mergeability of first and last items, sizes + of all entries in directory item. We use this array of items when + calculating what we can shift to neighbors and how many nodes we + have to have if we do not any shiftings, if we shift to left/right + neighbor or to both. 
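Aside, not part of the patch: max_reiserfs_offset() earlier in this hunk encodes the two key formats' limits — a 3.5 key stores a plain 32-bit offset, while a 3.6 key keeps the offset in the low 60 bits of a 64-bit word, the top 4 bits carrying the item type. A standalone re-computation of those two bounds:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* KEY_FORMAT_3_5: the offset is a plain 32-bit field */
	uint64_t max_v1 = (uint64_t)(~(uint32_t)0);

	/* KEY_FORMAT_3_6: 64-bit word, top 4 bits hold the type,
	 * so only 60 bits remain for the offset */
	uint64_t max_v2 = (~(uint64_t)0) >> 4;

	printf("3.5 max offset: %llu (2^32 - 1)\n",
	       (unsigned long long)max_v1);
	printf("3.6 max offset: %llu (2^60 - 1)\n",
	       (unsigned long long)max_v2);
	return 0;
}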
*/ +struct virtual_item { + int vi_index; // index in the array of item operations + unsigned short vi_type; // left/right mergeability + unsigned short vi_item_len; /* length of item that it will have after balancing */ + struct item_head *vi_ih; + const char *vi_item; // body of item (old or new) + const void *vi_new_data; // 0 always but paste mode + void *vi_uarea; // item specific area +}; + +struct virtual_node { + char *vn_free_ptr; /* this is a pointer to the free space in the buffer */ + unsigned short vn_nr_item; /* number of items in virtual node */ + short vn_size; /* size of node , that node would have if it has unlimited size and no balancing is performed */ + short vn_mode; /* mode of balancing (paste, insert, delete, cut) */ + short vn_affected_item_num; + short vn_pos_in_item; + struct item_head *vn_ins_ih; /* item header of inserted item, 0 for other modes */ + const void *vn_data; + struct virtual_item *vn_vi; /* array of items (including a new one, excluding item to be deleted) */ +}; + +/* used by directory items when creating virtual nodes */ +struct direntry_uarea { + int flags; + __u16 entry_count; + __u16 entry_sizes[1]; +} __attribute__ ((__packed__)); + +/***************************************************************************/ +/* TREE BALANCE */ +/***************************************************************************/ + +/* This temporary structure is used in tree balance algorithms, and + constructed as we go to the extent that its various parts are + needed. It contains arrays of nodes that can potentially be + involved in the balancing of node S, and parameters that define how + each of the nodes must be balanced. Note that in these algorithms + for balancing the worst case is to need to balance the current node + S and the left and right neighbors and all of their parents plus + create a new node. We implement S1 balancing for the leaf nodes + and S0 balancing for the internal nodes (S1 and S0 are defined in + our papers.)*/ + +#define MAX_FREE_BLOCK 7 /* size of the array of buffers to free at end of do_balance */ + +/* maximum number of FEB blocknrs on a single level */ +#define MAX_AMOUNT_NEEDED 2 + +/* someday somebody will prefix every field in this struct with tb_ */ +struct tree_balance { + int tb_mode; + int need_balance_dirty; + struct super_block *tb_sb; + struct reiserfs_transaction_handle *transaction_handle; + struct treepath *tb_path; + struct buffer_head *L[MAX_HEIGHT]; /* array of left neighbors of nodes in the path */ + struct buffer_head *R[MAX_HEIGHT]; /* array of right neighbors of nodes in the path */ + struct buffer_head *FL[MAX_HEIGHT]; /* array of fathers of the left neighbors */ + struct buffer_head *FR[MAX_HEIGHT]; /* array of fathers of the right neighbors */ + struct buffer_head *CFL[MAX_HEIGHT]; /* array of common parents of center node and its left neighbor */ + struct buffer_head *CFR[MAX_HEIGHT]; /* array of common parents of center node and its right neighbor */ + + struct buffer_head *FEB[MAX_FEB_SIZE]; /* array of empty buffers. Number of buffers in array equals + cur_blknum. */ + struct buffer_head *used[MAX_FEB_SIZE]; + struct buffer_head *thrown[MAX_FEB_SIZE]; + int lnum[MAX_HEIGHT]; /* array of number of items which must be + shifted to the left in order to balance the + current node; for leaves includes item that + will be partially shifted; for internal + nodes, it is the number of child pointers + rather than items. It includes the new item + being created. 
The code sometimes subtracts + one to get the number of wholly shifted + items for other purposes. */ + int rnum[MAX_HEIGHT]; /* substitute right for left in comment above */ + int lkey[MAX_HEIGHT]; /* array indexed by height h mapping the key delimiting L[h] and + S[h] to its item number within the node CFL[h] */ + int rkey[MAX_HEIGHT]; /* substitute r for l in comment above */ + int insert_size[MAX_HEIGHT]; /* the number of bytes by we are trying to add or remove from + S[h]. A negative value means removing. */ + int blknum[MAX_HEIGHT]; /* number of nodes that will replace node S[h] after + balancing on the level h of the tree. If 0 then S is + being deleted, if 1 then S is remaining and no new nodes + are being created, if 2 or 3 then 1 or 2 new nodes is + being created */ + + /* fields that are used only for balancing leaves of the tree */ + int cur_blknum; /* number of empty blocks having been already allocated */ + int s0num; /* number of items that fall into left most node when S[0] splits */ + int s1num; /* number of items that fall into first new node when S[0] splits */ + int s2num; /* number of items that fall into second new node when S[0] splits */ + int lbytes; /* number of bytes which can flow to the left neighbor from the left */ + /* most liquid item that cannot be shifted from S[0] entirely */ + /* if -1 then nothing will be partially shifted */ + int rbytes; /* number of bytes which will flow to the right neighbor from the right */ + /* most liquid item that cannot be shifted from S[0] entirely */ + /* if -1 then nothing will be partially shifted */ + int s1bytes; /* number of bytes which flow to the first new node when S[0] splits */ + /* note: if S[0] splits into 3 nodes, then items do not need to be cut */ + int s2bytes; + struct buffer_head *buf_to_free[MAX_FREE_BLOCK]; /* buffers which are to be freed after do_balance finishes by unfix_nodes */ + char *vn_buf; /* kmalloced memory. Used to create + virtual node and keep map of + dirtied bitmap blocks */ + int vn_buf_size; /* size of the vn_buf */ + struct virtual_node *tb_vn; /* VN starts after bitmap of bitmap blocks */ + + int fs_gen; /* saved value of `reiserfs_generation' counter + see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */ +#ifdef DISPLACE_NEW_PACKING_LOCALITIES + struct in_core_key key; /* key pointer, to pass to block allocator or + another low-level subsystem */ +#endif +}; + +/* These are modes of balancing */ + +/* When inserting an item. */ +#define M_INSERT 'i' +/* When inserting into (directories only) or appending onto an already + existent item. */ +#define M_PASTE 'p' +/* When deleting an item. */ +#define M_DELETE 'd' +/* When truncating an item or removing an entry from a (directory) item. */ +#define M_CUT 'c' + +/* used when balancing on leaf level skipped (in reiserfsck) */ +#define M_INTERNAL 'n' + +/* When further balancing is not needed, then do_balance does not need + to be called. */ +#define M_SKIP_BALANCING 's' +#define M_CONVERT 'v' + +/* modes of leaf_move_items */ +#define LEAF_FROM_S_TO_L 0 +#define LEAF_FROM_S_TO_R 1 +#define LEAF_FROM_R_TO_L 2 +#define LEAF_FROM_L_TO_R 3 +#define LEAF_FROM_S_TO_SNEW 4 + +#define FIRST_TO_LAST 0 +#define LAST_TO_FIRST 1 + +/* used in do_balance for passing parent of node information that has + been gotten from tb struct */ +struct buffer_info { + struct tree_balance *tb; + struct buffer_head *bi_bh; + struct buffer_head *bi_parent; + int bi_position; +}; + +static inline struct super_block *sb_from_tb(struct tree_balance *tb) +{ + return tb ? 
tb->tb_sb : NULL;
+}
+
+static inline struct super_block *sb_from_bi(struct buffer_info *bi)
+{
+ return bi ? sb_from_tb(bi->tb) : NULL;
+}
+
+/* there are 4 types of items: stat data, directory item, indirect, direct.
++-------------------+------------+--------------+------------+
+| | k_offset | k_uniqueness | mergeable? |
++-------------------+------------+--------------+------------+
+| stat data | 0 | 0 | no |
++-------------------+------------+--------------+------------+
+| 1st directory item| DOT_OFFSET |DIRENTRY_UNIQUENESS| no |
+| non 1st directory | hash value | | yes |
+| item | | | |
++-------------------+------------+--------------+------------+
+| indirect item | offset + 1 |TYPE_INDIRECT | if this is not the first indirect item of the object
++-------------------+------------+--------------+------------+
+| direct item | offset + 1 |TYPE_DIRECT | if this is not the first direct item of the object
++-------------------+------------+--------------+------------+
+*/
+
+struct item_operations {
+ int (*bytes_number) (struct item_head * ih, int block_size);
+ void (*decrement_key) (struct cpu_key *);
+ int (*is_left_mergeable) (struct reiserfs_key * ih,
+ unsigned long bsize);
+ void (*print_item) (struct item_head *, char *item);
+ void (*check_item) (struct item_head *, char *item);
+
+ int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
+ int is_affected, int insert_size);
+ int (*check_left) (struct virtual_item * vi, int free,
+ int start_skip, int end_skip);
+ int (*check_right) (struct virtual_item * vi, int free);
+ int (*part_size) (struct virtual_item * vi, int from, int to);
+ int (*unit_num) (struct virtual_item * vi);
+ void (*print_vi) (struct virtual_item * vi);
+};
+
+extern struct item_operations *item_ops[TYPE_ANY + 1];
+
+#define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
+#define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
+#define op_print_item(ih,item) item_ops[le_ih_k_type (ih)]->print_item (ih, item)
+#define op_check_item(ih,item) item_ops[le_ih_k_type (ih)]->check_item (ih, item)
+#define op_create_vi(vn,vi,is_affected,insert_size) item_ops[le_ih_k_type ((vi)->vi_ih)]->create_vi (vn,vi,is_affected,insert_size)
+#define op_check_left(vi,free,start_skip,end_skip) item_ops[(vi)->vi_index]->check_left (vi, free, start_skip, end_skip)
+#define op_check_right(vi,free) item_ops[(vi)->vi_index]->check_right (vi, free)
+#define op_part_size(vi,from,to) item_ops[(vi)->vi_index]->part_size (vi, from, to)
+#define op_unit_num(vi) item_ops[(vi)->vi_index]->unit_num (vi)
+#define op_print_vi(vi) item_ops[(vi)->vi_index]->print_vi (vi)
+
+#define COMP_SHORT_KEYS comp_short_keys
+
+/* number of blocks pointed to by the indirect item */
+#define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE)
+
+/* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
+#define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ?
(size) - ih_free_space(ih) : (size)) + +/* number of bytes contained by the direct item or the unformatted nodes the indirect item points to */ + +/* get the item header */ +#define B_N_PITEM_HEAD(bh,item_num) ( (struct item_head * )((bh)->b_data + BLKH_SIZE) + (item_num) ) + +/* get key */ +#define B_N_PDELIM_KEY(bh,item_num) ( (struct reiserfs_key * )((bh)->b_data + BLKH_SIZE) + (item_num) ) + +/* get the key */ +#define B_N_PKEY(bh,item_num) ( &(B_N_PITEM_HEAD(bh,item_num)->ih_key) ) + +/* get item body */ +#define B_N_PITEM(bh,item_num) ( (bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(item_num)))) + +/* get the stat data by the buffer header and the item order */ +#define B_N_STAT_DATA(bh,nr) \ +( (struct stat_data *)((bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(nr))) ) ) + + /* following defines use reiserfs buffer header and item header */ + +/* get stat-data */ +#define B_I_STAT_DATA(bh, ih) ( (struct stat_data * )((bh)->b_data + ih_location(ih)) ) + +// this is 3976 for size==4096 +#define MAX_DIRECT_ITEM_LEN(size) ((size) - BLKH_SIZE - 2*IH_SIZE - SD_SIZE - UNFM_P_SIZE) + +/* indirect items consist of entries which contain blocknrs, pos + indicates which entry, and B_I_POS_UNFM_POINTER resolves to the + blocknr contained by the entry pos points to */ +#define B_I_POS_UNFM_POINTER(bh,ih,pos) le32_to_cpu(*(((unp_t *)B_I_PITEM(bh,ih)) + (pos))) +#define PUT_B_I_POS_UNFM_POINTER(bh,ih,pos, val) do {*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)) = cpu_to_le32(val); } while (0) + +struct reiserfs_iget_args { + __u32 objectid; + __u32 dirid; +}; + +/***************************************************************************/ +/* FUNCTION DECLARATIONS */ +/***************************************************************************/ + +#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12) + +#define journal_trans_half(blocksize) \ + ((blocksize - sizeof (struct reiserfs_journal_desc) + sizeof (__u32) - 12) / sizeof (__u32)) + +/* journal.c see journal.c for all the comments here */ + +/* first block written in a commit. */ +struct reiserfs_journal_desc { + __le32 j_trans_id; /* id of commit */ + __le32 j_len; /* length of commit. len +1 is the commit block */ + __le32 j_mount_id; /* mount id of this trans */ + __le32 j_realblock[1]; /* real locations for each block */ +}; + +#define get_desc_trans_id(d) le32_to_cpu((d)->j_trans_id) +#define get_desc_trans_len(d) le32_to_cpu((d)->j_len) +#define get_desc_mount_id(d) le32_to_cpu((d)->j_mount_id) + +#define set_desc_trans_id(d,val) do { (d)->j_trans_id = cpu_to_le32 (val); } while (0) +#define set_desc_trans_len(d,val) do { (d)->j_len = cpu_to_le32 (val); } while (0) +#define set_desc_mount_id(d,val) do { (d)->j_mount_id = cpu_to_le32 (val); } while (0) + +/* last block written in a commit */ +struct reiserfs_journal_commit { + __le32 j_trans_id; /* must match j_trans_id from the desc block */ + __le32 j_len; /* ditto */ + __le32 j_realblock[1]; /* real locations for each block */ +}; + +#define get_commit_trans_id(c) le32_to_cpu((c)->j_trans_id) +#define get_commit_trans_len(c) le32_to_cpu((c)->j_len) +#define get_commit_mount_id(c) le32_to_cpu((c)->j_mount_id) + +#define set_commit_trans_id(c,val) do { (c)->j_trans_id = cpu_to_le32 (val); } while (0) +#define set_commit_trans_len(c,val) do { (c)->j_len = cpu_to_le32 (val); } while (0) + +/* this header block gets written whenever a transaction is considered fully flushed, and is more recent than the +** last fully flushed transaction. 
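Aside, not part of the patch: the "this is 3976 for size==4096" remark next to MAX_DIRECT_ITEM_LEN above is plain arithmetic. BLKH_SIZE (24) and SD_SIZE (44) follow from structures defined in this header; IH_SIZE (24) and UNFM_P_SIZE (4) are defined in a part of the header not shown in this hunk, so treat those two values as assumptions of the sketch:

#include <stdio.h>

int main(void)
{
	const int block_size = 4096;
	const int blkh_size = 24;	/* struct block_head: 4 x __le16 + 16-byte right delimiting key */
	const int ih_size = 24;		/* struct item_head, defined earlier in the header (assumed) */
	const int sd_size = 44;		/* new-format stat data */
	const int unfm_p_size = 4;	/* one unformatted-node pointer, a __le32 (assumed) */

	/* MAX_DIRECT_ITEM_LEN(size): room left in a leaf for one direct item
	 * once the block head, two item heads, a stat data item and one
	 * indirect pointer are accounted for */
	int max_direct = block_size - blkh_size - 2 * ih_size - sd_size - unfm_p_size;

	printf("MAX_DIRECT_ITEM_LEN(4096) = %d\n", max_direct);	/* prints 3976 */
	return 0;
}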
fully flushed means all the log blocks and all the real blocks are on disk, +** and this transaction does not need to be replayed. +*/ +struct reiserfs_journal_header { + __le32 j_last_flush_trans_id; /* id of last fully flushed transaction */ + __le32 j_first_unflushed_offset; /* offset in the log of where to start replay after a crash */ + __le32 j_mount_id; + /* 12 */ struct journal_params jh_journal; +}; + +/* biggest tunable defines are right here */ +#define JOURNAL_BLOCK_COUNT 8192 /* number of blocks in the journal */ +#define JOURNAL_TRANS_MAX_DEFAULT 1024 /* biggest possible single transaction, don't change for now (8/3/99) */ +#define JOURNAL_TRANS_MIN_DEFAULT 256 +#define JOURNAL_MAX_BATCH_DEFAULT 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */ +#define JOURNAL_MIN_RATIO 2 +#define JOURNAL_MAX_COMMIT_AGE 30 +#define JOURNAL_MAX_TRANS_AGE 30 +#define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9) +#define JOURNAL_BLOCKS_PER_OBJECT(sb) (JOURNAL_PER_BALANCE_CNT * 3 + \ + 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \ + REISERFS_QUOTA_TRANS_BLOCKS(sb))) + +#ifdef CONFIG_QUOTA +#define REISERFS_QUOTA_OPTS ((1 << REISERFS_USRQUOTA) | (1 << REISERFS_GRPQUOTA)) +/* We need to update data and inode (atime) */ +#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? 2 : 0) +/* 1 balancing, 1 bitmap, 1 data per write + stat data update */ +#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \ +(DQUOT_INIT_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_INIT_REWRITE+1) : 0) +/* same as with INIT */ +#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \ +(DQUOT_DEL_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_DEL_REWRITE+1) : 0) +#else +#define REISERFS_QUOTA_TRANS_BLOCKS(s) 0 +#define REISERFS_QUOTA_INIT_BLOCKS(s) 0 +#define REISERFS_QUOTA_DEL_BLOCKS(s) 0 +#endif + +/* both of these can be as low as 1, or as high as you want. The min is the +** number of 4k bitmap nodes preallocated on mount. New nodes are allocated +** as needed, and released when transactions are committed. On release, if +** the current number of nodes is > max, the node is freed, otherwise, +** it is put on a free list for faster use later. +*/ +#define REISERFS_MIN_BITMAP_NODES 10 +#define REISERFS_MAX_BITMAP_NODES 100 + +#define JBH_HASH_SHIFT 13 /* these are based on journal hash size of 8192 */ +#define JBH_HASH_MASK 8191 + +#define _jhashfn(sb,block) \ + (((unsigned long)sb>>L1_CACHE_SHIFT) ^ \ + (((block)<<(JBH_HASH_SHIFT - 6)) ^ ((block) >> 13) ^ ((block) << (JBH_HASH_SHIFT - 12)))) +#define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK]) + +// We need these to make journal.c code more readable +#define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize) +#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize) +#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize) + +enum reiserfs_bh_state_bits { + BH_JDirty = BH_PrivateStart, /* buffer is in current transaction */ + BH_JDirty_wait, + BH_JNew, /* disk block was taken off free list before + * being in a finished transaction, or + * written to disk. Can be reused immed. 
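Aside, not part of the patch: two of the journal constants above are just arithmetic. With MAX_HEIGHT = 5, JOURNAL_PER_BALANCE_CNT works out to 18, and journal_trans_half() for a 4 KB block is 1018 — the description block gives up 16 bytes of reiserfs_journal_desc header (with the one-element realblock array counted back in) plus the 12 trailing bytes that get_journal_desc_magic() points at. A standalone re-computation:

#include <stdio.h>

int main(void)
{
	const int max_height = 5;	/* MAX_HEIGHT */
	const int blocksize = 4096;

	/* JOURNAL_PER_BALANCE_CNT = 3 * (MAX_HEIGHT - 2) + 9 */
	int per_balance = 3 * (max_height - 2) + 9;

	/* same formula as journal_trans_half(): how many __le32 block
	 * numbers fit in a description/commit block */
	int desc_header = 16;	/* sizeof(struct reiserfs_journal_desc): 4 x __le32 */
	int trans_half = (blocksize - desc_header + 4 - 12) / 4;

	printf("JOURNAL_PER_BALANCE_CNT  = %d\n", per_balance);	/* 18 */
	printf("journal_trans_half(4096) = %d\n", trans_half);	/* 1018 */
	return 0;
}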
*/ + BH_JPrepared, + BH_JRestore_dirty, + BH_JTest, // debugging only will go away +}; + +BUFFER_FNS(JDirty, journaled); +TAS_BUFFER_FNS(JDirty, journaled); +BUFFER_FNS(JDirty_wait, journal_dirty); +TAS_BUFFER_FNS(JDirty_wait, journal_dirty); +BUFFER_FNS(JNew, journal_new); +TAS_BUFFER_FNS(JNew, journal_new); +BUFFER_FNS(JPrepared, journal_prepared); +TAS_BUFFER_FNS(JPrepared, journal_prepared); +BUFFER_FNS(JRestore_dirty, journal_restore_dirty); +TAS_BUFFER_FNS(JRestore_dirty, journal_restore_dirty); +BUFFER_FNS(JTest, journal_test); +TAS_BUFFER_FNS(JTest, journal_test); + +/* +** transaction handle which is passed around for all journal calls +*/ +struct reiserfs_transaction_handle { + struct super_block *t_super; /* super for this FS when journal_begin was + called. saves calls to reiserfs_get_super + also used by nested transactions to make + sure they are nesting on the right FS + _must_ be first in the handle + */ + int t_refcount; + int t_blocks_logged; /* number of blocks this writer has logged */ + int t_blocks_allocated; /* number of blocks this writer allocated */ + unsigned int t_trans_id; /* sanity check, equals the current trans id */ + void *t_handle_save; /* save existing current->journal_info */ + unsigned displace_new_blocks:1; /* if new block allocation occurres, that block + should be displaced from others */ + struct list_head t_list; +}; + +/* used to keep track of ordered and tail writes, attached to the buffer + * head through b_journal_head. + */ +struct reiserfs_jh { + struct reiserfs_journal_list *jl; + struct buffer_head *bh; + struct list_head list; +}; + +void reiserfs_free_jh(struct buffer_head *bh); +int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh); +int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh); +int journal_mark_dirty(struct reiserfs_transaction_handle *, + struct super_block *, struct buffer_head *bh); + +static inline int reiserfs_file_data_log(struct inode *inode) +{ + if (reiserfs_data_log(inode->i_sb) || + (REISERFS_I(inode)->i_flags & i_data_log)) + return 1; + return 0; +} + +static inline int reiserfs_transaction_running(struct super_block *s) +{ + struct reiserfs_transaction_handle *th = current->journal_info; + if (th && th->t_super == s) + return 1; + if (th && th->t_super == NULL) + BUG(); + return 0; +} + +static inline int reiserfs_transaction_free_space(struct reiserfs_transaction_handle *th) +{ + return th->t_blocks_allocated - th->t_blocks_logged; +} + +struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct + super_block + *, + int count); +int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *); +int reiserfs_commit_page(struct inode *inode, struct page *page, + unsigned from, unsigned to); +int reiserfs_flush_old_commits(struct super_block *); +int reiserfs_commit_for_inode(struct inode *); +int reiserfs_inode_needs_commit(struct inode *); +void reiserfs_update_inode_transaction(struct inode *); +void reiserfs_wait_on_write_block(struct super_block *s); +void reiserfs_block_writes(struct reiserfs_transaction_handle *th); +void reiserfs_allow_writes(struct super_block *s); +void reiserfs_check_lock_depth(struct super_block *s, char *caller); +int reiserfs_prepare_for_journal(struct super_block *, struct buffer_head *bh, + int wait); +void reiserfs_restore_prepared_buffer(struct super_block *, + struct buffer_head *bh); +int journal_init(struct super_block *, const char *j_dev_name, int old_format, + unsigned int); +int journal_release(struct 
reiserfs_transaction_handle *, struct super_block *); +int journal_release_error(struct reiserfs_transaction_handle *, + struct super_block *); +int journal_end(struct reiserfs_transaction_handle *, struct super_block *, + unsigned long); +int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *, + unsigned long); +int journal_mark_freed(struct reiserfs_transaction_handle *, + struct super_block *, b_blocknr_t blocknr); +int journal_transaction_should_end(struct reiserfs_transaction_handle *, int); +int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, + int bit_nr, int searchall, b_blocknr_t *next); +int journal_begin(struct reiserfs_transaction_handle *, + struct super_block *sb, unsigned long); +int journal_join_abort(struct reiserfs_transaction_handle *, + struct super_block *sb, unsigned long); +void reiserfs_abort_journal(struct super_block *sb, int errno); +void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...); +int reiserfs_allocate_list_bitmaps(struct super_block *s, + struct reiserfs_list_bitmap *, unsigned int); + +void add_save_link(struct reiserfs_transaction_handle *th, + struct inode *inode, int truncate); +int remove_save_link(struct inode *inode, int truncate); + +/* objectid.c */ +__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th); +void reiserfs_release_objectid(struct reiserfs_transaction_handle *th, + __u32 objectid_to_release); +int reiserfs_convert_objectid_map_v1(struct super_block *); + +/* stree.c */ +int B_IS_IN_TREE(const struct buffer_head *); +extern void copy_item_head(struct item_head *to, + const struct item_head *from); + +// first key is in cpu form, second - le +extern int comp_short_keys(const struct reiserfs_key *le_key, + const struct cpu_key *cpu_key); +extern void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from); + +// both are in le form +extern int comp_le_keys(const struct reiserfs_key *, + const struct reiserfs_key *); +extern int comp_short_le_keys(const struct reiserfs_key *, + const struct reiserfs_key *); + +// +// get key version from on disk key - kludge +// +static inline int le_key_version(const struct reiserfs_key *key) +{ + int type; + + type = offset_v2_k_type(&(key->u.k_offset_v2)); + if (type != TYPE_DIRECT && type != TYPE_INDIRECT + && type != TYPE_DIRENTRY) + return KEY_FORMAT_3_5; + + return KEY_FORMAT_3_6; + +} + +static inline void copy_key(struct reiserfs_key *to, + const struct reiserfs_key *from) +{ + memcpy(to, from, KEY_SIZE); +} + +int comp_items(const struct item_head *stored_ih, const struct treepath *path); +const struct reiserfs_key *get_rkey(const struct treepath *chk_path, + const struct super_block *sb); +int search_by_key(struct super_block *, const struct cpu_key *, + struct treepath *, int); +#define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL) +int search_for_position_by_key(struct super_block *sb, + const struct cpu_key *cpu_key, + struct treepath *search_path); +extern void decrement_bcount(struct buffer_head *bh); +void decrement_counters_in_path(struct treepath *search_path); +void pathrelse(struct treepath *search_path); +int reiserfs_check_path(struct treepath *p); +void pathrelse_and_restore(struct super_block *s, struct treepath *search_path); + +int reiserfs_insert_item(struct reiserfs_transaction_handle *th, + struct treepath *path, + const struct cpu_key *key, + struct item_head *ih, + struct inode *inode, const char *body); + +int reiserfs_paste_into_item(struct 
reiserfs_transaction_handle *th, + struct treepath *path, + const struct cpu_key *key, + struct inode *inode, + const char *body, int paste_size); + +int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, + struct treepath *path, + struct cpu_key *key, + struct inode *inode, + struct page *page, loff_t new_file_size); + +int reiserfs_delete_item(struct reiserfs_transaction_handle *th, + struct treepath *path, + const struct cpu_key *key, + struct inode *inode, struct buffer_head *un_bh); + +void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, + struct inode *inode, struct reiserfs_key *key); +int reiserfs_delete_object(struct reiserfs_transaction_handle *th, + struct inode *inode); +int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, + struct inode *inode, struct page *, + int update_timestamps); + +#define i_block_size(inode) ((inode)->i_sb->s_blocksize) +#define file_size(inode) ((inode)->i_size) +#define tail_size(inode) (file_size (inode) & (i_block_size (inode) - 1)) + +#define tail_has_to_be_packed(inode) (have_large_tails ((inode)->i_sb)?\ +!STORE_TAIL_IN_UNFM_S1(file_size (inode), tail_size(inode), inode->i_sb->s_blocksize):have_small_tails ((inode)->i_sb)?!STORE_TAIL_IN_UNFM_S2(file_size (inode), tail_size(inode), inode->i_sb->s_blocksize):0 ) + +void padd_item(char *item, int total_length, int length); + +/* inode.c */ +/* args for the create parameter of reiserfs_get_block */ +#define GET_BLOCK_NO_CREATE 0 /* don't create new blocks or convert tails */ +#define GET_BLOCK_CREATE 1 /* add anything you need to find block */ +#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */ +#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */ +#define GET_BLOCK_NO_IMUX 8 /* i_mutex is not held, don't preallocate */ +#define GET_BLOCK_NO_DANGLE 16 /* don't leave any transactions running */ + +void reiserfs_read_locked_inode(struct inode *inode, + struct reiserfs_iget_args *args); +int reiserfs_find_actor(struct inode *inode, void *p); +int reiserfs_init_locked_inode(struct inode *inode, void *p); +void reiserfs_evict_inode(struct inode *inode); +int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc); +int reiserfs_get_block(struct inode *inode, sector_t block, + struct buffer_head *bh_result, int create); +struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type); +struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type); +int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, + int connectable); + +int reiserfs_truncate_file(struct inode *, int update_timestamps); +void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset, + int type, int key_length); +void make_le_item_head(struct item_head *ih, const struct cpu_key *key, + int version, + loff_t offset, int type, int length, int entry_count); +struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key); + +struct reiserfs_security_handle; +int reiserfs_new_inode(struct reiserfs_transaction_handle *th, + struct inode *dir, umode_t mode, + const char *symname, loff_t i_size, + struct dentry *dentry, struct inode *inode, + struct reiserfs_security_handle *security); + +void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th, + struct inode *inode, loff_t size); + +static inline void reiserfs_update_sd(struct reiserfs_transaction_handle *th, + struct inode *inode) +{ + 
reiserfs_update_sd_size(th, inode, inode->i_size); +} + +void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode); +void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs); +int reiserfs_setattr(struct dentry *dentry, struct iattr *attr); + +int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len); + +/* namei.c */ +void set_de_name_and_namelen(struct reiserfs_dir_entry *de); +int search_by_entry_key(struct super_block *sb, const struct cpu_key *key, + struct treepath *path, struct reiserfs_dir_entry *de); +struct dentry *reiserfs_get_parent(struct dentry *); + +#ifdef CONFIG_REISERFS_PROC_INFO +int reiserfs_proc_info_init(struct super_block *sb); +int reiserfs_proc_info_done(struct super_block *sb); +int reiserfs_proc_info_global_init(void); +int reiserfs_proc_info_global_done(void); + +#define PROC_EXP( e ) e + +#define __PINFO( sb ) REISERFS_SB(sb) -> s_proc_info_data +#define PROC_INFO_MAX( sb, field, value ) \ + __PINFO( sb ).field = \ + max( REISERFS_SB( sb ) -> s_proc_info_data.field, value ) +#define PROC_INFO_INC( sb, field ) ( ++ ( __PINFO( sb ).field ) ) +#define PROC_INFO_ADD( sb, field, val ) ( __PINFO( sb ).field += ( val ) ) +#define PROC_INFO_BH_STAT( sb, bh, level ) \ + PROC_INFO_INC( sb, sbk_read_at[ ( level ) ] ); \ + PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \ + PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) ) +#else +static inline int reiserfs_proc_info_init(struct super_block *sb) +{ + return 0; +} + +static inline int reiserfs_proc_info_done(struct super_block *sb) +{ + return 0; +} + +static inline int reiserfs_proc_info_global_init(void) +{ + return 0; +} + +static inline int reiserfs_proc_info_global_done(void) +{ + return 0; +} + +#define PROC_EXP( e ) +#define VOID_V ( ( void ) 0 ) +#define PROC_INFO_MAX( sb, field, value ) VOID_V +#define PROC_INFO_INC( sb, field ) VOID_V +#define PROC_INFO_ADD( sb, field, val ) VOID_V +#define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V +#endif + +/* dir.c */ +extern const struct inode_operations reiserfs_dir_inode_operations; +extern const struct inode_operations reiserfs_symlink_inode_operations; +extern const struct inode_operations reiserfs_special_inode_operations; +extern const struct file_operations reiserfs_dir_operations; +int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *); + +/* tail_conversion.c */ +int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, + struct treepath *, struct buffer_head *, loff_t); +int indirect2direct(struct reiserfs_transaction_handle *, struct inode *, + struct page *, struct treepath *, const struct cpu_key *, + loff_t, char *); +void reiserfs_unmap_buffer(struct buffer_head *); + +/* file.c */ +extern const struct inode_operations reiserfs_file_inode_operations; +extern const struct file_operations reiserfs_file_operations; +extern const struct address_space_operations reiserfs_address_space_operations; + +/* fix_nodes.c */ + +int fix_nodes(int n_op_mode, struct tree_balance *tb, + struct item_head *ins_ih, const void *); +void unfix_nodes(struct tree_balance *); + +/* prints.c */ +void __reiserfs_panic(struct super_block *s, const char *id, + const char *function, const char *fmt, ...) + __attribute__ ((noreturn)); +#define reiserfs_panic(s, id, fmt, args...) \ + __reiserfs_panic(s, id, __func__, fmt, ##args) +void __reiserfs_error(struct super_block *s, const char *id, + const char *function, const char *fmt, ...); +#define reiserfs_error(s, id, fmt, args...) 
\ + __reiserfs_error(s, id, __func__, fmt, ##args) +void reiserfs_info(struct super_block *s, const char *fmt, ...); +void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...); +void print_indirect_item(struct buffer_head *bh, int item_num); +void store_print_tb(struct tree_balance *tb); +void print_cur_tb(char *mes); +void print_de(struct reiserfs_dir_entry *de); +void print_bi(struct buffer_info *bi, char *mes); +#define PRINT_LEAF_ITEMS 1 /* print all items */ +#define PRINT_DIRECTORY_ITEMS 2 /* print directory items */ +#define PRINT_DIRECT_ITEMS 4 /* print contents of direct items */ +void print_block(struct buffer_head *bh, ...); +void print_bmap(struct super_block *s, int silent); +void print_bmap_block(int i, char *data, int size, int silent); +/*void print_super_block (struct super_block * s, char * mes);*/ +void print_objectid_map(struct super_block *s); +void print_block_head(struct buffer_head *bh, char *mes); +void check_leaf(struct buffer_head *bh); +void check_internal(struct buffer_head *bh); +void print_statistics(struct super_block *s); +char *reiserfs_hashname(int code); + +/* lbalance.c */ +int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num, + int mov_bytes, struct buffer_head *Snew); +int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes); +int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes); +void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first, + int del_num, int del_bytes); +void leaf_insert_into_buf(struct buffer_info *bi, int before, + struct item_head *inserted_item_ih, + const char *inserted_item_body, int zeros_number); +void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, + int pos_in_item, int paste_size, const char *body, + int zeros_number); +void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, + int pos_in_item, int cut_size); +void leaf_paste_entries(struct buffer_info *bi, int item_num, int before, + int new_entry_count, struct reiserfs_de_head *new_dehs, + const char *records, int paste_size); +/* ibalance.c */ +int balance_internal(struct tree_balance *, int, int, struct item_head *, + struct buffer_head **); + +/* do_balance.c */ +void do_balance_mark_leaf_dirty(struct tree_balance *tb, + struct buffer_head *bh, int flag); +#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty +#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty + +void do_balance(struct tree_balance *tb, struct item_head *ih, + const char *body, int flag); +void reiserfs_invalidate_buffer(struct tree_balance *tb, + struct buffer_head *bh); + +int get_left_neighbor_position(struct tree_balance *tb, int h); +int get_right_neighbor_position(struct tree_balance *tb, int h); +void replace_key(struct tree_balance *tb, struct buffer_head *, int, + struct buffer_head *, int); +void make_empty_node(struct buffer_info *); +struct buffer_head *get_FEB(struct tree_balance *); + +/* bitmap.c */ + +/* structure contains hints for block allocator, and it is a container for + * arguments, such as node, search path, transaction_handle, etc. */ +struct __reiserfs_blocknr_hint { + struct inode *inode; /* inode passed to allocator, if we allocate unf. 
nodes */ + sector_t block; /* file offset, in blocks */ + struct in_core_key key; + struct treepath *path; /* search path, used by allocator to deternine search_start by + * various ways */ + struct reiserfs_transaction_handle *th; /* transaction handle is needed to log super blocks and + * bitmap blocks changes */ + b_blocknr_t beg, end; + b_blocknr_t search_start; /* a field used to transfer search start value (block number) + * between different block allocator procedures + * (determine_search_start() and others) */ + int prealloc_size; /* is set in determine_prealloc_size() function, used by underlayed + * function that do actual allocation */ + + unsigned formatted_node:1; /* the allocator uses different polices for getting disk space for + * formatted/unformatted blocks with/without preallocation */ + unsigned preallocate:1; +}; + +typedef struct __reiserfs_blocknr_hint reiserfs_blocknr_hint_t; + +int reiserfs_parse_alloc_options(struct super_block *, char *); +void reiserfs_init_alloc_options(struct super_block *s); + +/* + * given a directory, this will tell you what packing locality + * to use for a new object underneat it. The locality is returned + * in disk byte order (le). + */ +__le32 reiserfs_choose_packing(struct inode *dir); + +int reiserfs_init_bitmap_cache(struct super_block *sb); +void reiserfs_free_bitmap_cache(struct super_block *sb); +void reiserfs_cache_bitmap_metadata(struct super_block *sb, struct buffer_head *bh, struct reiserfs_bitmap_info *info); +struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb, unsigned int bitmap); +int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value); +void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *, + b_blocknr_t, int for_unformatted); +int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int, + int); +static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, + b_blocknr_t * new_blocknrs, + int amount_needed) +{ + reiserfs_blocknr_hint_t hint = { + .th = tb->transaction_handle, + .path = tb->tb_path, + .inode = NULL, + .key = tb->key, + .block = 0, + .formatted_node = 1 + }; + return reiserfs_allocate_blocknrs(&hint, new_blocknrs, amount_needed, + 0); +} + +static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle + *th, struct inode *inode, + b_blocknr_t * new_blocknrs, + struct treepath *path, + sector_t block) +{ + reiserfs_blocknr_hint_t hint = { + .th = th, + .path = path, + .inode = inode, + .block = block, + .formatted_node = 0, + .preallocate = 0 + }; + return reiserfs_allocate_blocknrs(&hint, new_blocknrs, 1, 0); +} + +#ifdef REISERFS_PREALLOCATE +static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle + *th, struct inode *inode, + b_blocknr_t * new_blocknrs, + struct treepath *path, + sector_t block) +{ + reiserfs_blocknr_hint_t hint = { + .th = th, + .path = path, + .inode = inode, + .block = block, + .formatted_node = 0, + .preallocate = 1 + }; + return reiserfs_allocate_blocknrs(&hint, new_blocknrs, 1, 0); +} + +void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th, + struct inode *inode); +void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th); +#endif + +/* hashes.c */ +__u32 keyed_hash(const signed char *msg, int len); +__u32 yura_hash(const signed char *msg, int len); +__u32 r5_hash(const signed char *msg, int len); + +#define reiserfs_set_le_bit __set_bit_le +#define reiserfs_test_and_set_le_bit __test_and_set_bit_le +#define 
reiserfs_clear_le_bit __clear_bit_le +#define reiserfs_test_and_clear_le_bit __test_and_clear_bit_le +#define reiserfs_test_le_bit test_bit_le +#define reiserfs_find_next_zero_le_bit find_next_zero_bit_le + +/* sometimes reiserfs_truncate may require to allocate few new blocks + to perform indirect2direct conversion. People probably used to + think, that truncate should work without problems on a filesystem + without free disk space. They may complain that they can not + truncate due to lack of free disk space. This spare space allows us + to not worry about it. 500 is probably too much, but it should be + absolutely safe */ +#define SPARE_SPACE 500 + +/* prototypes from ioctl.c */ +long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long reiserfs_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); +int reiserfs_unpack(struct inode *inode, struct file *filp); diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c index e1415ad93251..9a17f63c3fd7 100644 --- a/fs/reiserfs/resize.c +++ b/fs/reiserfs/resize.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include "reiserfs.h" #include int reiserfs_resize(struct super_block *s, unsigned long block_count_new) diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 313d39d639eb..f1b68afc268f 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -51,7 +51,7 @@ #include #include #include -#include +#include "reiserfs.h" #include #include diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index cf68a6ba0ec6..8b7616ef06d8 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include "reiserfs.h" #include "acl.h" #include "xattr.h" #include diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index d7f6e51bef2a..32f9a80d292f 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include "reiserfs.h" /* access to tail : when one is going to read tail it must make sure, that is not running. direct2indirect and indirect2direct can not run concurrently */ diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 61c9b5633e27..46fc1c20a6b1 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -33,7 +33,7 @@ * The xattrs themselves are protected by the xattr_sem. 
*/ -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h index ccd146bb0665..f59626c5d33b 100644 --- a/fs/reiserfs/xattr.h +++ b/fs/reiserfs/xattr.h @@ -2,7 +2,6 @@ #include #include #include -#include struct inode; struct dentry; diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index f09094057eaa..44474f9b990d 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -1,7 +1,7 @@ #include #include #include -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index 6104066e44c4..800a3cef6f62 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -1,4 +1,4 @@ -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c index f995b258c141..a0035719f66b 100644 --- a/fs/reiserfs/xattr_trusted.c +++ b/fs/reiserfs/xattr_trusted.c @@ -1,4 +1,4 @@ -#include +#include "reiserfs.h" #include #include #include diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c index 748978db6f03..8667491ae7c3 100644 --- a/fs/reiserfs/xattr_user.c +++ b/fs/reiserfs/xattr_user.c @@ -1,4 +1,4 @@ -#include +#include "reiserfs.h" #include #include #include -- cgit From 765fd6b23d8b1130bd9e2c995040b137faa0992d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 01:19:24 -0400 Subject: kill reiserfs_fs_{i,sb}.h Signed-off-by: Al Viro --- fs/reiserfs/reiserfs.h | 599 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 597 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index b3865c84f54c..445d768eea44 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -12,8 +12,6 @@ #include #include #include -#include -#include /* the 32 bit compat definitions with int argument */ #define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int) @@ -22,6 +20,603 @@ #define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION #define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION +struct reiserfs_journal_list; + +/** bitmasks for i_flags field in reiserfs-specific part of inode */ +typedef enum { + /** this says what format of key do all items (but stat data) of + an object have. If this is set, that format is 3.6 otherwise + - 3.5 */ + i_item_key_version_mask = 0x0001, + /** If this is unset, object has 3.5 stat data, otherwise, it has + 3.6 stat data with 64bit size, 32bit nlink etc. */ + i_stat_data_version_mask = 0x0002, + /** file might need tail packing on close */ + i_pack_on_close_mask = 0x0004, + /** don't pack tail of file */ + i_nopack_mask = 0x0008, + /** If those is set, "safe link" was created for this file during + truncate or unlink. Safe link is used to avoid leakage of disk + space on crash with some files open, but unlinked. */ + i_link_saved_unlink_mask = 0x0010, + i_link_saved_truncate_mask = 0x0020, + i_has_xattr_dir = 0x0040, + i_data_log = 0x0080, +} reiserfs_inode_flags; + +struct reiserfs_inode_info { + __u32 i_key[4]; /* key is still 4 32 bit integers */ + /** transient inode flags that are never stored on disk. Bitmasks + for this field are defined above. */ + __u32 i_flags; + + __u32 i_first_direct_byte; // offset of first byte stored in direct item. + + /* copy of persistent inode flags read from sd_attrs. 
*/ + __u32 i_attrs; + + int i_prealloc_block; /* first unused block of a sequence of unused blocks */ + int i_prealloc_count; /* length of that sequence */ + struct list_head i_prealloc_list; /* per-transaction list of inodes which + * have preallocated blocks */ + + unsigned new_packing_locality:1; /* new_packig_locality is created; new blocks + * for the contents of this directory should be + * displaced */ + + /* we use these for fsync or O_SYNC to decide which transaction + ** needs to be committed in order for this inode to be properly + ** flushed */ + unsigned int i_trans_id; + struct reiserfs_journal_list *i_jl; + atomic_t openers; + struct mutex tailpack; +#ifdef CONFIG_REISERFS_FS_XATTR + struct rw_semaphore i_xattr_sem; +#endif + struct inode vfs_inode; +}; + +typedef enum { + reiserfs_attrs_cleared = 0x00000001, +} reiserfs_super_block_flags; + +/* struct reiserfs_super_block accessors/mutators + * since this is a disk structure, it will always be in + * little endian format. */ +#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count)) +#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v)) +#define sb_free_blocks(sbp) (le32_to_cpu((sbp)->s_v1.s_free_blocks)) +#define set_sb_free_blocks(sbp,v) ((sbp)->s_v1.s_free_blocks = cpu_to_le32(v)) +#define sb_root_block(sbp) (le32_to_cpu((sbp)->s_v1.s_root_block)) +#define set_sb_root_block(sbp,v) ((sbp)->s_v1.s_root_block = cpu_to_le32(v)) + +#define sb_jp_journal_1st_block(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_1st_block)) +#define set_sb_jp_journal_1st_block(sbp,v) \ + ((sbp)->s_v1.s_journal.jp_journal_1st_block = cpu_to_le32(v)) +#define sb_jp_journal_dev(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_dev)) +#define set_sb_jp_journal_dev(sbp,v) \ + ((sbp)->s_v1.s_journal.jp_journal_dev = cpu_to_le32(v)) +#define sb_jp_journal_size(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_size)) +#define set_sb_jp_journal_size(sbp,v) \ + ((sbp)->s_v1.s_journal.jp_journal_size = cpu_to_le32(v)) +#define sb_jp_journal_trans_max(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_trans_max)) +#define set_sb_jp_journal_trans_max(sbp,v) \ + ((sbp)->s_v1.s_journal.jp_journal_trans_max = cpu_to_le32(v)) +#define sb_jp_journal_magic(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_magic)) +#define set_sb_jp_journal_magic(sbp,v) \ + ((sbp)->s_v1.s_journal.jp_journal_magic = cpu_to_le32(v)) +#define sb_jp_journal_max_batch(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_batch)) +#define set_sb_jp_journal_max_batch(sbp,v) \ + ((sbp)->s_v1.s_journal.jp_journal_max_batch = cpu_to_le32(v)) +#define sb_jp_jourmal_max_commit_age(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_commit_age)) +#define set_sb_jp_journal_max_commit_age(sbp,v) \ + ((sbp)->s_v1.s_journal.jp_journal_max_commit_age = cpu_to_le32(v)) + +#define sb_blocksize(sbp) (le16_to_cpu((sbp)->s_v1.s_blocksize)) +#define set_sb_blocksize(sbp,v) ((sbp)->s_v1.s_blocksize = cpu_to_le16(v)) +#define sb_oid_maxsize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_maxsize)) +#define set_sb_oid_maxsize(sbp,v) ((sbp)->s_v1.s_oid_maxsize = cpu_to_le16(v)) +#define sb_oid_cursize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_cursize)) +#define set_sb_oid_cursize(sbp,v) ((sbp)->s_v1.s_oid_cursize = cpu_to_le16(v)) +#define sb_umount_state(sbp) (le16_to_cpu((sbp)->s_v1.s_umount_state)) +#define set_sb_umount_state(sbp,v) ((sbp)->s_v1.s_umount_state = cpu_to_le16(v)) +#define sb_fs_state(sbp) (le16_to_cpu((sbp)->s_v1.s_fs_state)) +#define 
set_sb_fs_state(sbp,v) ((sbp)->s_v1.s_fs_state = cpu_to_le16(v)) +#define sb_hash_function_code(sbp) \ + (le32_to_cpu((sbp)->s_v1.s_hash_function_code)) +#define set_sb_hash_function_code(sbp,v) \ + ((sbp)->s_v1.s_hash_function_code = cpu_to_le32(v)) +#define sb_tree_height(sbp) (le16_to_cpu((sbp)->s_v1.s_tree_height)) +#define set_sb_tree_height(sbp,v) ((sbp)->s_v1.s_tree_height = cpu_to_le16(v)) +#define sb_bmap_nr(sbp) (le16_to_cpu((sbp)->s_v1.s_bmap_nr)) +#define set_sb_bmap_nr(sbp,v) ((sbp)->s_v1.s_bmap_nr = cpu_to_le16(v)) +#define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version)) +#define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v)) + +#define sb_mnt_count(sbp) (le16_to_cpu((sbp)->s_mnt_count)) +#define set_sb_mnt_count(sbp, v) ((sbp)->s_mnt_count = cpu_to_le16(v)) + +#define sb_reserved_for_journal(sbp) \ + (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal)) +#define set_sb_reserved_for_journal(sbp,v) \ + ((sbp)->s_v1.s_reserved_for_journal = cpu_to_le16(v)) + +/* LOGGING -- */ + +/* These all interelate for performance. +** +** If the journal block count is smaller than n transactions, you lose speed. +** I don't know what n is yet, I'm guessing 8-16. +** +** typical transaction size depends on the application, how often fsync is +** called, and how many metadata blocks you dirty in a 30 second period. +** The more small files (<16k) you use, the larger your transactions will +** be. +** +** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal +** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough +** to prevent wrapping before dirty meta blocks get to disk. +** +** If the batch max is smaller than the transaction max, you'll waste space at the end of the journal +** because journal_end sets the next transaction to start at 0 if the next transaction has any chance of wrapping. +** +** The large the batch max age, the better the speed, and the more meta data changes you'll lose after a crash. +** +*/ + +/* don't mess with these for a while */ + /* we have a node size define somewhere in reiserfs_fs.h. -Hans */ +#define JOURNAL_BLOCK_SIZE 4096 /* BUG gotta get rid of this */ +#define JOURNAL_MAX_CNODE 1500 /* max cnodes to allocate. */ +#define JOURNAL_HASH_SIZE 8192 +#define JOURNAL_NUM_BITMAPS 5 /* number of copies of the bitmaps to have floating. Must be >= 2 */ + +/* One of these for every block in every transaction +** Each one is in two hash tables. First, a hash of the current transaction, and after journal_end, a +** hash of all the in memory transactions. +** next and prev are used by the current transaction (journal_hash). +** hnext and hprev are used by journal_list_hash. If a block is in more than one transaction, the journal_list_hash +** links it in multiple times. This allows flush_journal_list to remove just the cnode belonging +** to a given transaction. 
+*/ +struct reiserfs_journal_cnode { + struct buffer_head *bh; /* real buffer head */ + struct super_block *sb; /* dev of real buffer head */ + __u32 blocknr; /* block number of real buffer head, == 0 when buffer on disk */ + unsigned long state; + struct reiserfs_journal_list *jlist; /* journal list this cnode lives in */ + struct reiserfs_journal_cnode *next; /* next in transaction list */ + struct reiserfs_journal_cnode *prev; /* prev in transaction list */ + struct reiserfs_journal_cnode *hprev; /* prev in hash list */ + struct reiserfs_journal_cnode *hnext; /* next in hash list */ +}; + +struct reiserfs_bitmap_node { + int id; + char *data; + struct list_head list; +}; + +struct reiserfs_list_bitmap { + struct reiserfs_journal_list *journal_list; + struct reiserfs_bitmap_node **bitmaps; +}; + +/* +** one of these for each transaction. The most important part here is the j_realblock. +** this list of cnodes is used to hash all the blocks in all the commits, to mark all the +** real buffer heads dirty once all the commits hit the disk, +** and to make sure every real block in a transaction is on disk before allowing the log area +** to be overwritten */ +struct reiserfs_journal_list { + unsigned long j_start; + unsigned long j_state; + unsigned long j_len; + atomic_t j_nonzerolen; + atomic_t j_commit_left; + atomic_t j_older_commits_done; /* all commits older than this on disk */ + struct mutex j_commit_mutex; + unsigned int j_trans_id; + time_t j_timestamp; + struct reiserfs_list_bitmap *j_list_bitmap; + struct buffer_head *j_commit_bh; /* commit buffer head */ + struct reiserfs_journal_cnode *j_realblock; + struct reiserfs_journal_cnode *j_freedlist; /* list of buffers that were freed during this trans. free each of these on flush */ + /* time ordered list of all active transactions */ + struct list_head j_list; + + /* time ordered list of all transactions we haven't tried to flush yet */ + struct list_head j_working_list; + + /* list of tail conversion targets in need of flush before commit */ + struct list_head j_tail_bh_list; + /* list of data=ordered buffers in need of flush before commit */ + struct list_head j_bh_list; + int j_refcount; +}; + +struct reiserfs_journal { + struct buffer_head **j_ap_blocks; /* journal blocks on disk */ + struct reiserfs_journal_cnode *j_last; /* newest journal block */ + struct reiserfs_journal_cnode *j_first; /* oldest journal block. start here for traverse */ + + struct block_device *j_dev_bd; + fmode_t j_dev_mode; + int j_1st_reserved_block; /* first block on s_dev of reserved area journal */ + + unsigned long j_state; + unsigned int j_trans_id; + unsigned long j_mount_id; + unsigned long j_start; /* start of current waiting commit (index into j_ap_blocks) */ + unsigned long j_len; /* length of current waiting commit */ + unsigned long j_len_alloc; /* number of buffers requested by journal_begin() */ + atomic_t j_wcount; /* count of writers for current commit */ + unsigned long j_bcount; /* batch count. 
allows turning X transactions into 1 */ + unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */ + unsigned j_last_flush_trans_id; /* last fully flushed journal timestamp */ + struct buffer_head *j_header_bh; + + time_t j_trans_start_time; /* time this transaction started */ + struct mutex j_mutex; + struct mutex j_flush_mutex; + wait_queue_head_t j_join_wait; /* wait for current transaction to finish before starting new one */ + atomic_t j_jlock; /* lock for j_join_wait */ + int j_list_bitmap_index; /* number of next list bitmap to use */ + int j_must_wait; /* no more journal begins allowed. MUST sleep on j_join_wait */ + int j_next_full_flush; /* next journal_end will flush all journal list */ + int j_next_async_flush; /* next journal_end will flush all async commits */ + + int j_cnode_used; /* number of cnodes on the used list */ + int j_cnode_free; /* number of cnodes on the free list */ + + unsigned int j_trans_max; /* max number of blocks in a transaction. */ + unsigned int j_max_batch; /* max number of blocks to batch into a trans */ + unsigned int j_max_commit_age; /* in seconds, how old can an async commit be */ + unsigned int j_max_trans_age; /* in seconds, how old can a transaction be */ + unsigned int j_default_max_commit_age; /* the default for the max commit age */ + + struct reiserfs_journal_cnode *j_cnode_free_list; + struct reiserfs_journal_cnode *j_cnode_free_orig; /* orig pointer returned from vmalloc */ + + struct reiserfs_journal_list *j_current_jl; + int j_free_bitmap_nodes; + int j_used_bitmap_nodes; + + int j_num_lists; /* total number of active transactions */ + int j_num_work_lists; /* number that need attention from kreiserfsd */ + + /* debugging to make sure things are flushed in order */ + unsigned int j_last_flush_id; + + /* debugging to make sure things are committed in order */ + unsigned int j_last_commit_id; + + struct list_head j_bitmap_nodes; + struct list_head j_dirty_buffers; + spinlock_t j_dirty_buffers_lock; /* protects j_dirty_buffers */ + + /* list of all active transactions */ + struct list_head j_journal_list; + /* lists that haven't been touched by writeback attempts */ + struct list_head j_working_list; + + struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */ + struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */ + struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all + the transactions */ + struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */ + int j_persistent_trans; + unsigned long j_max_trans_size; + unsigned long j_max_batch_size; + + int j_errno; + + /* when flushing ordered buffers, throttle new ordered writers */ + struct delayed_work j_work; + struct super_block *j_work_sb; + atomic_t j_async_throttle; +}; + +enum journal_state_bits { + J_WRITERS_BLOCKED = 1, /* set when new writers not allowed */ + J_WRITERS_QUEUED, /* set when log is full due to too many writers */ + J_ABORTED, /* set when log is aborted */ +}; + +#define JOURNAL_DESC_MAGIC "ReIsErLB" /* ick. 
magic string to find desc blocks in the journal */ + +typedef __u32(*hashf_t) (const signed char *, int); + +struct reiserfs_bitmap_info { + __u32 free_count; +}; + +struct proc_dir_entry; + +#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO ) +typedef unsigned long int stat_cnt_t; +typedef struct reiserfs_proc_info_data { + spinlock_t lock; + int exiting; + int max_hash_collisions; + + stat_cnt_t breads; + stat_cnt_t bread_miss; + stat_cnt_t search_by_key; + stat_cnt_t search_by_key_fs_changed; + stat_cnt_t search_by_key_restarted; + + stat_cnt_t insert_item_restarted; + stat_cnt_t paste_into_item_restarted; + stat_cnt_t cut_from_item_restarted; + stat_cnt_t delete_solid_item_restarted; + stat_cnt_t delete_item_restarted; + + stat_cnt_t leaked_oid; + stat_cnt_t leaves_removable; + + /* balances per level. Use explicit 5 as MAX_HEIGHT is not visible yet. */ + stat_cnt_t balance_at[5]; /* XXX */ + /* sbk == search_by_key */ + stat_cnt_t sbk_read_at[5]; /* XXX */ + stat_cnt_t sbk_fs_changed[5]; + stat_cnt_t sbk_restarted[5]; + stat_cnt_t items_at[5]; /* XXX */ + stat_cnt_t free_at[5]; /* XXX */ + stat_cnt_t can_node_be_removed[5]; /* XXX */ + long int lnum[5]; /* XXX */ + long int rnum[5]; /* XXX */ + long int lbytes[5]; /* XXX */ + long int rbytes[5]; /* XXX */ + stat_cnt_t get_neighbors[5]; + stat_cnt_t get_neighbors_restart[5]; + stat_cnt_t need_l_neighbor[5]; + stat_cnt_t need_r_neighbor[5]; + + stat_cnt_t free_block; + struct __scan_bitmap_stats { + stat_cnt_t call; + stat_cnt_t wait; + stat_cnt_t bmap; + stat_cnt_t retry; + stat_cnt_t in_journal_hint; + stat_cnt_t in_journal_nohint; + stat_cnt_t stolen; + } scan_bitmap; + struct __journal_stats { + stat_cnt_t in_journal; + stat_cnt_t in_journal_bitmap; + stat_cnt_t in_journal_reusable; + stat_cnt_t lock_journal; + stat_cnt_t lock_journal_wait; + stat_cnt_t journal_being; + stat_cnt_t journal_relock_writers; + stat_cnt_t journal_relock_wcount; + stat_cnt_t mark_dirty; + stat_cnt_t mark_dirty_already; + stat_cnt_t mark_dirty_notjournal; + stat_cnt_t restore_prepared; + stat_cnt_t prepare; + stat_cnt_t prepare_retry; + } journal; +} reiserfs_proc_info_data_t; +#else +typedef struct reiserfs_proc_info_data { +} reiserfs_proc_info_data_t; +#endif + +/* reiserfs union of in-core super block data */ +struct reiserfs_sb_info { + struct buffer_head *s_sbh; /* Buffer containing the super block */ + /* both the comment and the choice of + name are unclear for s_rs -Hans */ + struct reiserfs_super_block *s_rs; /* Pointer to the super block in the buffer */ + struct reiserfs_bitmap_info *s_ap_bitmap; + struct reiserfs_journal *s_journal; /* pointer to journal information */ + unsigned short s_mount_state; /* reiserfs state (valid, invalid) */ + + /* Serialize writers access, replace the old bkl */ + struct mutex lock; + /* Owner of the lock (can be recursive) */ + struct task_struct *lock_owner; + /* Depth of the lock, start from -1 like the bkl */ + int lock_depth; + + /* Comment? -Hans */ + void (*end_io_handler) (struct buffer_head *, int); + hashf_t s_hash_function; /* pointer to function which is used + to sort names in directory. 
Set on + mount */ + unsigned long s_mount_opt; /* reiserfs's mount options are set + here (currently - NOTAIL, NOLOG, + REPLAYONLY) */ + + struct { /* This is a structure that describes block allocator options */ + unsigned long bits; /* Bitfield for enable/disable kind of options */ + unsigned long large_file_size; /* size started from which we consider file to be a large one(in blocks) */ + int border; /* percentage of disk, border takes */ + int preallocmin; /* Minimal file size (in blocks) starting from which we do preallocations */ + int preallocsize; /* Number of blocks we try to prealloc when file + reaches preallocmin size (in blocks) or + prealloc_list is empty. */ + } s_alloc_options; + + /* Comment? -Hans */ + wait_queue_head_t s_wait; + /* To be obsoleted soon by per buffer seals.. -Hans */ + atomic_t s_generation_counter; // increased by one every time the + // tree gets re-balanced + unsigned long s_properties; /* File system properties. Currently holds + on-disk FS format */ + + /* session statistics */ + int s_disk_reads; + int s_disk_writes; + int s_fix_nodes; + int s_do_balance; + int s_unneeded_left_neighbor; + int s_good_search_by_key_reada; + int s_bmaps; + int s_bmaps_without_search; + int s_direct2indirect; + int s_indirect2direct; + /* set up when it's ok for reiserfs_read_inode2() to read from + disk inode with nlink==0. Currently this is only used during + finish_unfinished() processing at mount time */ + int s_is_unlinked_ok; + reiserfs_proc_info_data_t s_proc_info_data; + struct proc_dir_entry *procdir; + int reserved_blocks; /* amount of blocks reserved for further allocations */ + spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */ + struct dentry *priv_root; /* root of /.reiserfs_priv */ + struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */ + int j_errno; +#ifdef CONFIG_QUOTA + char *s_qf_names[MAXQUOTAS]; + int s_jquota_fmt; +#endif + char *s_jdev; /* Stored jdev for mount option showing */ +#ifdef CONFIG_REISERFS_CHECK + + struct tree_balance *cur_tb; /* + * Detects whether more than one + * copy of tb exists per superblock + * as a means of checking whether + * do_balance is executing concurrently + * against another tree reader/writer + * on a same mount point. + */ +#endif +}; + +/* Definitions of reiserfs on-disk properties: */ +#define REISERFS_3_5 0 +#define REISERFS_3_6 1 +#define REISERFS_OLD_FORMAT 2 + +enum reiserfs_mount_options { +/* Mount options */ + REISERFS_LARGETAIL, /* large tails will be created in a session */ + REISERFS_SMALLTAIL, /* small (for files less than block size) tails will be created in a session */ + REPLAYONLY, /* replay journal and return 0. Use by fsck */ + REISERFS_CONVERT, /* -o conv: causes conversion of old + format super block to the new + format. If not specified - old + partition will be dealt with in a + manner of 3.5.x */ + +/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting +** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option +** is not required. If the normal autodection code can't determine which +** hash to use (because both hashes had the same value for a file) +** use this option to force a specific hash. It won't allow you to override +** the existing hash on the FS, so if you have a tea hash disk, and mount +** with -o hash=rupasov, the mount will fail. 
+*/ + FORCE_TEA_HASH, /* try to force tea hash on mount */ + FORCE_RUPASOV_HASH, /* try to force rupasov hash on mount */ + FORCE_R5_HASH, /* try to force rupasov hash on mount */ + FORCE_HASH_DETECT, /* try to detect hash function on mount */ + + REISERFS_DATA_LOG, + REISERFS_DATA_ORDERED, + REISERFS_DATA_WRITEBACK, + +/* used for testing experimental features, makes benchmarking new + features with and without more convenient, should never be used by + users in any code shipped to users (ideally) */ + + REISERFS_NO_BORDER, + REISERFS_NO_UNHASHED_RELOCATION, + REISERFS_HASHED_RELOCATION, + REISERFS_ATTRS, + REISERFS_XATTRS_USER, + REISERFS_POSIXACL, + REISERFS_EXPOSE_PRIVROOT, + REISERFS_BARRIER_NONE, + REISERFS_BARRIER_FLUSH, + + /* Actions on error */ + REISERFS_ERROR_PANIC, + REISERFS_ERROR_RO, + REISERFS_ERROR_CONTINUE, + + REISERFS_USRQUOTA, /* User quota option specified */ + REISERFS_GRPQUOTA, /* Group quota option specified */ + + REISERFS_TEST1, + REISERFS_TEST2, + REISERFS_TEST3, + REISERFS_TEST4, + REISERFS_UNSUPPORTED_OPT, +}; + +#define reiserfs_r5_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_R5_HASH)) +#define reiserfs_rupasov_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_RUPASOV_HASH)) +#define reiserfs_tea_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_TEA_HASH)) +#define reiserfs_hash_detect(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_HASH_DETECT)) +#define reiserfs_no_border(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_NO_BORDER)) +#define reiserfs_no_unhashed_relocation(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_NO_UNHASHED_RELOCATION)) +#define reiserfs_hashed_relocation(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_HASHED_RELOCATION)) +#define reiserfs_test4(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TEST4)) + +#define have_large_tails(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_LARGETAIL)) +#define have_small_tails(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_SMALLTAIL)) +#define replay_only(s) (REISERFS_SB(s)->s_mount_opt & (1 << REPLAYONLY)) +#define reiserfs_attrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ATTRS)) +#define old_format_only(s) (REISERFS_SB(s)->s_properties & (1 << REISERFS_3_5)) +#define convert_reiserfs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_CONVERT)) +#define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG)) +#define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED)) +#define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) +#define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) +#define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) +#define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT)) +#define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) +#define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE)) +#define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH)) + +#define reiserfs_error_panic(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ERROR_PANIC)) +#define reiserfs_error_ro(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ERROR_RO)) + +void reiserfs_file_buffer(struct buffer_head *bh, int list); +extern struct file_system_type reiserfs_fs_type; +int reiserfs_resize(struct super_block *, unsigned long); + +#define CARRY_ON 0 +#define SCHEDULE_OCCURRED 1 + +#define 
SB_BUFFER_WITH_SB(s) (REISERFS_SB(s)->s_sbh) +#define SB_JOURNAL(s) (REISERFS_SB(s)->s_journal) +#define SB_JOURNAL_1st_RESERVED_BLOCK(s) (SB_JOURNAL(s)->j_1st_reserved_block) +#define SB_JOURNAL_LEN_FREE(s) (SB_JOURNAL(s)->j_journal_len_free) +#define SB_AP_BITMAP(s) (REISERFS_SB(s)->s_ap_bitmap) + +#define SB_DISK_JOURNAL_HEAD(s) (SB_JOURNAL(s)->j_header_bh->) + +/* A safe version of the "bdevname", which returns the "s_id" field of + * a superblock or else "Null superblock" if the super block is NULL. + */ +static inline char *reiserfs_bdevname(struct super_block *s) +{ + return (s == NULL) ? "Null superblock" : s->s_id; +} + +#define reiserfs_is_journal_aborted(journal) (unlikely (__reiserfs_is_journal_aborted (journal))) +static inline int __reiserfs_is_journal_aborted(struct reiserfs_journal + *journal) +{ + return test_bit(J_ABORTED, &journal->j_state); +} + /* * Locking primitives. The write lock is a per superblock * special mutex that has properties close to the Big Kernel Lock -- cgit From 2452992aa78315bf0f510620d22fae8ecb40a2eb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 02:13:52 -0400 Subject: make simple_pin_fs() pass MS_KERNMOUNT - it's a kernel-internal one Signed-off-by: Al Viro --- fs/libfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/libfs.c b/fs/libfs.c index 7c895a763a1e..722e0d5ba182 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -534,7 +534,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c spin_lock(&pin_fs_lock); if (unlikely(!*mount)) { spin_unlock(&pin_fs_lock); - mnt = vfs_kern_mount(type, 0, type->name, NULL); + mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL); if (IS_ERR(mnt)) return PTR_ERR(mnt); spin_lock(&pin_fs_lock); -- cgit From 2226a288fac462ebc98e40da007842f92a7e4799 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 02:37:05 -0400 Subject: 9p: make register_filesystem() the last failure exit Signed-off-by: Al Viro --- fs/9p/v9fs.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 1964f98e74be..b85efa773949 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -594,21 +594,21 @@ static int __init init_v9fs(void) int err; pr_info("Installing v9fs 9p2000 file system support\n"); /* TODO: Setup list of registered trasnport modules */ - err = register_filesystem(&v9fs_fs_type); - if (err < 0) { - pr_err("Failed to register filesystem\n"); - return err; - } err = v9fs_cache_register(); if (err < 0) { pr_err("Failed to register v9fs for caching\n"); - goto out_fs_unreg; + return err; } err = v9fs_sysfs_init(); if (err < 0) { pr_err("Failed to register with sysfs\n"); + goto out_cache; + } + err = register_filesystem(&v9fs_fs_type); + if (err < 0) { + pr_err("Failed to register filesystem\n"); goto out_sysfs_cleanup; } @@ -617,8 +617,8 @@ static int __init init_v9fs(void) out_sysfs_cleanup: v9fs_sysfs_cleanup(); -out_fs_unreg: - unregister_filesystem(&v9fs_fs_type); +out_cache: + v9fs_cache_unregister(); return err; } -- cgit From ca7068c41a8d48ef35d1a3bba422ebcedace8513 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 02:52:29 -0400 Subject: anon_inodes: move allocation of anon_inode into ->mount() Signed-off-by: Al Viro --- fs/anon_inodes.c | 109 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 56 insertions(+), 53 deletions(-) (limited to 'fs') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index f11e43ed907d..28d39fb84ae3 
100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -39,19 +39,6 @@ static const struct dentry_operations anon_inodefs_dentry_operations = { .d_dname = anon_inodefs_dname, }; -static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) -{ - return mount_pseudo(fs_type, "anon_inode:", NULL, - &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); -} - -static struct file_system_type anon_inode_fs_type = { - .name = "anon_inodefs", - .mount = anon_inodefs_mount, - .kill_sb = kill_anon_super, -}; - /* * nop .set_page_dirty method so that people can use .page_mkwrite on * anon inodes. @@ -65,6 +52,62 @@ static const struct address_space_operations anon_aops = { .set_page_dirty = anon_set_page_dirty, }; +/* + * A single inode exists for all anon_inode files. Contrary to pipes, + * anon_inode inodes have no associated per-instance data, so we need + * only allocate one of them. + */ +static struct inode *anon_inode_mkinode(struct super_block *s) +{ + struct inode *inode = new_inode_pseudo(s); + + if (!inode) + return ERR_PTR(-ENOMEM); + + inode->i_ino = get_next_ino(); + inode->i_fop = &anon_inode_fops; + + inode->i_mapping->a_ops = &anon_aops; + + /* + * Mark the inode dirty from the very beginning, + * that way it will never be moved to the dirty + * list because mark_inode_dirty() will think + * that it already _is_ on the dirty list. + */ + inode->i_state = I_DIRTY; + inode->i_mode = S_IRUSR | S_IWUSR; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); + inode->i_flags |= S_PRIVATE; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + return inode; +} + +static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + struct dentry *root; + root = mount_pseudo(fs_type, "anon_inode:", NULL, + &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); + if (!IS_ERR(root)) { + struct super_block *s = root->d_sb; + anon_inode_inode = anon_inode_mkinode(s); + if (IS_ERR(anon_inode_inode)) { + dput(root); + deactivate_locked_super(s); + root = ERR_CAST(anon_inode_inode); + } + } + return root; +} + +static struct file_system_type anon_inode_fs_type = { + .name = "anon_inodefs", + .mount = anon_inodefs_mount, + .kill_sb = kill_anon_super, +}; + /** * anon_inode_getfile - creates a new file instance by hooking it up to an * anonymous inode, and a dentry that describe the "class" @@ -180,38 +223,6 @@ err_put_unused_fd: } EXPORT_SYMBOL_GPL(anon_inode_getfd); -/* - * A single inode exists for all anon_inode files. Contrary to pipes, - * anon_inode inodes have no associated per-instance data, so we need - * only allocate one of them. - */ -static struct inode *anon_inode_mkinode(void) -{ - struct inode *inode = new_inode_pseudo(anon_inode_mnt->mnt_sb); - - if (!inode) - return ERR_PTR(-ENOMEM); - - inode->i_ino = get_next_ino(); - inode->i_fop = &anon_inode_fops; - - inode->i_mapping->a_ops = &anon_aops; - - /* - * Mark the inode dirty from the very beginning, - * that way it will never be moved to the dirty - * list because mark_inode_dirty() will think - * that it already _is_ on the dirty list. 
- */ - inode->i_state = I_DIRTY; - inode->i_mode = S_IRUSR | S_IWUSR; - inode->i_uid = current_fsuid(); - inode->i_gid = current_fsgid(); - inode->i_flags |= S_PRIVATE; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - return inode; -} - static int __init anon_inode_init(void) { int error; @@ -224,16 +235,8 @@ static int __init anon_inode_init(void) error = PTR_ERR(anon_inode_mnt); goto err_unregister_filesystem; } - anon_inode_inode = anon_inode_mkinode(); - if (IS_ERR(anon_inode_inode)) { - error = PTR_ERR(anon_inode_inode); - goto err_mntput; - } - return 0; -err_mntput: - kern_unmount(anon_inode_mnt); err_unregister_filesystem: unregister_filesystem(&anon_inode_fs_type); err_exit: -- cgit From 54bf586e1f51018ba7624c851e7aa14cee0548d2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 02:55:49 -0400 Subject: autofs: set things up *before* registering fs type it's not a serious race, but we really want misc device before anybody gets to mount this sucker. Signed-off-by: Al Viro --- fs/autofs4/init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/init.c b/fs/autofs4/init.c index c038727b4050..cddc74b9cdb2 100644 --- a/fs/autofs4/init.c +++ b/fs/autofs4/init.c @@ -31,11 +31,11 @@ static int __init init_autofs4_fs(void) { int err; + autofs_dev_ioctl_init(); + err = register_filesystem(&autofs_fs_type); if (err) - return err; - - autofs_dev_ioctl_init(); + autofs_dev_ioctl_exit(); return err; } -- cgit From 8fc3dc5a3a17aa2b353886422bd89420619af211 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 03:05:16 -0400 Subject: __register_binfmt() made void Just don't pass NULL to it - nobody does, anyway. Signed-off-by: Al Viro --- fs/binfmt_aout.c | 3 ++- fs/binfmt_elf.c | 3 ++- fs/binfmt_elf_fdpic.c | 3 ++- fs/binfmt_em86.c | 3 ++- fs/binfmt_flat.c | 3 ++- fs/binfmt_misc.c | 7 ++----- fs/binfmt_script.c | 3 ++- fs/binfmt_som.c | 3 ++- fs/exec.c | 6 ++---- 9 files changed, 18 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 1ff94054d35a..a543364ba29b 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -454,7 +454,8 @@ out: static int __init init_aout_binfmt(void) { - return register_binfmt(&aout_format); + register_binfmt(&aout_format); + return 0; } static void __exit exit_aout_binfmt(void) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 07d096c49920..f8ac4251877e 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2077,7 +2077,8 @@ out: static int __init init_elf_binfmt(void) { - return register_binfmt(&elf_format); + register_binfmt(&elf_format); + return 0; } static void __exit exit_elf_binfmt(void) diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 30745f459faf..e7afcb67a2d3 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -91,7 +91,8 @@ static struct linux_binfmt elf_fdpic_format = { static int __init init_elf_fdpic_binfmt(void) { - return register_binfmt(&elf_fdpic_format); + register_binfmt(&elf_fdpic_format); + return 0; } static void __exit exit_elf_fdpic_binfmt(void) diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c index b8e8b0acf9bd..2790c7e1912e 100644 --- a/fs/binfmt_em86.c +++ b/fs/binfmt_em86.c @@ -100,7 +100,8 @@ static struct linux_binfmt em86_format = { static int __init init_em86_binfmt(void) { - return register_binfmt(&em86_format); + register_binfmt(&em86_format); + return 0; } static void __exit exit_em86_binfmt(void) diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 
1bffbe0ed778..68affab88146 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -950,7 +950,8 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs) static int __init init_flat_binfmt(void) { - return register_binfmt(&flat_format); + register_binfmt(&flat_format); + return 0; } /****************************************************************************/ diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index a9198dfd5f85..1ffb60355cae 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -726,11 +726,8 @@ static struct file_system_type bm_fs_type = { static int __init init_misc_binfmt(void) { int err = register_filesystem(&bm_fs_type); - if (!err) { - err = insert_binfmt(&misc_format); - if (err) - unregister_filesystem(&bm_fs_type); - } + if (!err) + insert_binfmt(&misc_format); return err; } diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index 396a9884591f..d3b8c1f63155 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -105,7 +105,8 @@ static struct linux_binfmt script_format = { static int __init init_script_binfmt(void) { - return register_binfmt(&script_format); + register_binfmt(&script_format); + return 0; } static void __exit exit_script_binfmt(void) diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index cc8560f6c9b0..ec15972dd98a 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c @@ -289,7 +289,8 @@ static int load_som_library(struct file *f) static int __init init_som_binfmt(void) { - return register_binfmt(&som_format); + register_binfmt(&som_format); + return 0; } static void __exit exit_som_binfmt(void) diff --git a/fs/exec.c b/fs/exec.c index 153dee14fe55..2c5ae338773c 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -79,15 +79,13 @@ static atomic_t call_count = ATOMIC_INIT(1); static LIST_HEAD(formats); static DEFINE_RWLOCK(binfmt_lock); -int __register_binfmt(struct linux_binfmt * fmt, int insert) +void __register_binfmt(struct linux_binfmt * fmt, int insert) { - if (!fmt) - return -EINVAL; + BUG_ON(!fmt); write_lock(&binfmt_lock); insert ? list_add(&fmt->lh, &formats) : list_add_tail(&fmt->lh, &formats); write_unlock(&binfmt_lock); - return 0; } EXPORT_SYMBOL(__register_binfmt); -- cgit From 81d44ed159e3e81f7e62cee2d0fe68aae0c95e78 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 16:13:25 -0400 Subject: configfs: don't bother with checks for mkdir/rmdir/unlink/symlink in root just give root directory separate inode_operations without all those methods... 
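Why dropping the explicit root-directory checks is enough: the VFS only calls into a filesystem method when the corresponding inode_operations slot is non-NULL, and for a missing ->mkdir/->rmdir/->unlink/->symlink it fails the syscall itself with -EPERM, the same error the removed open-coded checks returned by hand. Below is a minimal sketch of that generic behaviour, paraphrased rather than quoted from fs/namei.c; vfs_mkdir_sketch() is an illustrative name, not a kernel symbol.

	/* Simplified sketch of the generic VFS check this commit relies on.
	 * With configfs_root_inode_operations providing only .lookup and
	 * .setattr, the root inode has no ->mkdir, so the VFS bails out
	 * with -EPERM before configfs code is ever reached. */
	int vfs_mkdir_sketch(struct inode *dir, struct dentry *dentry, umode_t mode)
	{
		if (!dir->i_op->mkdir)		/* slot left NULL for the configfs root */
			return -EPERM;		/* matches the removed explicit check */
		return dir->i_op->mkdir(dir, dentry, mode);
	}

The same reasoning covers rmdir, unlink and symlink in the root directory.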
Signed-off-by: Al Viro --- fs/configfs/configfs_internal.h | 1 + fs/configfs/dir.c | 13 +++++-------- fs/configfs/mount.c | 2 +- fs/configfs/symlink.c | 6 ------ 4 files changed, 7 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index ede857d20a04..1b7fdc0a6a09 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -89,6 +89,7 @@ extern const struct file_operations configfs_dir_operations; extern const struct file_operations configfs_file_operations; extern const struct file_operations bin_fops; extern const struct inode_operations configfs_dir_inode_operations; +extern const struct inode_operations configfs_root_inode_operations; extern const struct inode_operations configfs_symlink_inode_operations; extern const struct dentry_operations configfs_dentry_ops; diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 5ddd7ebd9dcd..b0fbcbeb03ee 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1183,11 +1183,6 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode struct module *subsys_owner = NULL, *new_item_owner = NULL; char *name; - if (dentry->d_parent == configfs_sb->s_root) { - ret = -EPERM; - goto out; - } - sd = dentry->d_parent->d_fsdata; /* @@ -1359,9 +1354,6 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) struct module *subsys_owner = NULL, *dead_item_owner = NULL; int ret; - if (dentry->d_parent == configfs_sb->s_root) - return -EPERM; - sd = dentry->d_fsdata; if (sd->s_type & CONFIGFS_USET_DEFAULT) return -EPERM; @@ -1459,6 +1451,11 @@ const struct inode_operations configfs_dir_inode_operations = { .setattr = configfs_setattr, }; +const struct inode_operations configfs_root_inode_operations = { + .lookup = configfs_lookup, + .setattr = configfs_setattr, +}; + #if 0 int configfs_rename_dir(struct config_item * item, const char *new_name) { diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index 07f60455f1c1..eb41adc28cfe 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -82,7 +82,7 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent) inode = configfs_new_inode(S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, &configfs_root); if (inode) { - inode->i_op = &configfs_dir_inode_operations; + inode->i_op = &configfs_root_inode_operations; inode->i_fop = &configfs_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ inc_nlink(inode); diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index 0f3eb41d9201..2817153d33c2 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -141,10 +141,6 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna struct config_item *target_item = NULL; struct config_item_type *type; - ret = -EPERM; /* What lack-of-symlink returns */ - if (dentry->d_parent == configfs_sb->s_root) - goto out; - sd = dentry->d_parent->d_fsdata; /* * Fake invisibility if dir belongs to a group/default groups hierarchy @@ -198,8 +194,6 @@ int configfs_unlink(struct inode *dir, struct dentry *dentry) if (!(sd->s_type & CONFIGFS_ITEM_LINK)) goto out; - BUG_ON(dentry->d_parent == configfs_sb->s_root); - sl = sd->s_element; parent_item = configfs_get_config_item(dentry->d_parent); -- cgit From b7c177fcd2022ca8572284deb8f9b6ab5730eafb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 16:24:54 -0400 Subject: configfs: kill configfs_sb Signed-off-by: Al Viro --- fs/configfs/configfs_internal.h | 3 +-- fs/configfs/dir.c | 29 +++++++++++++++++------------ fs/configfs/inode.c | 9 ++++----- fs/configfs/mount.c | 4 +--- fs/configfs/symlink.c | 6 +++--- 5 files changed, 26 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index 1b7fdc0a6a09..37121c2be110 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -63,7 +63,7 @@ extern struct kmem_cache *configfs_dir_cachep; extern int configfs_is_root(struct config_item *item); -extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *); +extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); extern int configfs_inode_init(void); extern void configfs_inode_exit(void); @@ -84,7 +84,6 @@ extern int configfs_pin_fs(void); extern void configfs_release_fs(void); extern struct rw_semaphore configfs_rename_sem; -extern struct super_block * configfs_sb; extern const struct file_operations configfs_dir_operations; extern const struct file_operations configfs_file_operations; extern const struct file_operations bin_fops; diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index b0fbcbeb03ee..54c59a7e37ce 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1079,6 +1079,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys, int ret; struct configfs_dirent *p, *root_sd, *subsys_sd = NULL; struct config_item *s_item = &subsys->su_group.cg_item; + struct dentry *root; /* * Pin the configfs filesystem. This means we can safely access @@ -1093,9 +1094,10 @@ int configfs_depend_item(struct configfs_subsystem *subsys, * subsystem is really registered, and so we need to lock out * configfs_[un]register_subsystem(). */ - mutex_lock(&configfs_sb->s_root->d_inode->i_mutex); + root = configfs_mount->mnt_root; + mutex_lock(&root->d_inode->i_mutex); - root_sd = configfs_sb->s_root->d_fsdata; + root_sd = root->d_fsdata; list_for_each_entry(p, &root_sd->s_children, s_sibling) { if (p->s_type & CONFIGFS_DIR) { @@ -1129,7 +1131,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys, out_unlock_dirent_lock: spin_unlock(&configfs_dirent_lock); out_unlock_fs: - mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); /* * If we succeeded, the fs is pinned via other methods. 
If not, @@ -1543,6 +1545,7 @@ static inline unsigned char dt_type(struct configfs_dirent *sd) static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir) { struct dentry *dentry = filp->f_path.dentry; + struct super_block *sb = dentry->d_sb; struct configfs_dirent * parent_sd = dentry->d_fsdata; struct configfs_dirent *cursor = filp->private_data; struct list_head *p, *q = &cursor->s_sibling; @@ -1605,7 +1608,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir ino = inode->i_ino; spin_unlock(&configfs_dirent_lock); if (!inode) - ino = iunique(configfs_sb, 2); + ino = iunique(sb, 2); if (filldir(dirent, name, len, filp->f_pos, ino, dt_type(next)) < 0) @@ -1677,6 +1680,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) struct config_group *group = &subsys->su_group; struct qstr name; struct dentry *dentry; + struct dentry *root; struct configfs_dirent *sd; err = configfs_pin_fs(); @@ -1686,18 +1690,18 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) if (!group->cg_item.ci_name) group->cg_item.ci_name = group->cg_item.ci_namebuf; - sd = configfs_sb->s_root->d_fsdata; + root = configfs_mount->mnt_root; + sd = root->d_fsdata; link_group(to_config_group(sd->s_element), group); - mutex_lock_nested(&configfs_sb->s_root->d_inode->i_mutex, - I_MUTEX_PARENT); + mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT); name.name = group->cg_item.ci_name; name.len = strlen(name.name); name.hash = full_name_hash(name.name, name.len); err = -ENOMEM; - dentry = d_alloc(configfs_sb->s_root, &name); + dentry = d_alloc(root, &name); if (dentry) { d_add(dentry, NULL); @@ -1714,7 +1718,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) } } - mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); if (err) { unlink_group(group); @@ -1728,13 +1732,14 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) { struct config_group *group = &subsys->su_group; struct dentry *dentry = group->cg_item.ci_dentry; + struct dentry *root = dentry->d_sb->s_root; - if (dentry->d_parent != configfs_sb->s_root) { + if (dentry->d_parent != root) { printk(KERN_ERR "configfs: Tried to unregister non-subsystem!\n"); return; } - mutex_lock_nested(&configfs_sb->s_root->d_inode->i_mutex, + mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); mutex_lock(&configfs_symlink_mutex); @@ -1751,7 +1756,7 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) d_delete(dentry); - mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); dput(dentry); diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 3ee36d418863..8cf21ef902fc 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -44,8 +44,6 @@ static struct lock_class_key default_group_class[MAX_LOCK_DEPTH]; #endif -extern struct super_block * configfs_sb; - static const struct address_space_operations configfs_aops = { .readpage = simple_readpage, .write_begin = simple_write_begin, @@ -132,9 +130,10 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr) inode->i_ctime = iattr->ia_ctime; } -struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent * sd) +struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd, + struct super_block *s) { - struct inode * inode = new_inode(configfs_sb); + struct inode * inode = new_inode(s); if 
(inode) { inode->i_ino = get_next_ino(); inode->i_mapping->a_ops = &configfs_aops; @@ -192,7 +191,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct ino if (dentry) { if (!dentry->d_inode) { struct configfs_dirent *sd = dentry->d_fsdata; - if ((inode = configfs_new_inode(mode, sd))) { + if ((inode = configfs_new_inode(mode, sd, dentry->d_sb))) { if (dentry->d_parent && dentry->d_parent->d_inode) { struct inode *p_inode = dentry->d_parent->d_inode; p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index eb41adc28cfe..cc829fc85d77 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -38,7 +38,6 @@ #define CONFIGFS_MAGIC 0x62656570 struct vfsmount * configfs_mount = NULL; -struct super_block * configfs_sb = NULL; struct kmem_cache *configfs_dir_cachep; static int configfs_mnt_count = 0; @@ -77,10 +76,9 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_magic = CONFIGFS_MAGIC; sb->s_op = &configfs_ops; sb->s_time_gran = 1; - configfs_sb = sb; inode = configfs_new_inode(S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, - &configfs_root); + &configfs_root, sb); if (inode) { inode->i_op = &configfs_root_inode_operations; inode->i_fop = &configfs_dir_operations; diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index 2817153d33c2..cc9f2546ea4a 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -110,13 +110,13 @@ out: static int get_target(const char *symname, struct path *path, - struct config_item **target) + struct config_item **target, struct super_block *sb) { int ret; ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, path); if (!ret) { - if (path->dentry->d_sb == configfs_sb) { + if (path->dentry->d_sb == sb) { *target = configfs_get_config_item(path->dentry); if (!*target) { ret = -ENOENT; @@ -158,7 +158,7 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna !type->ct_item_ops->allow_link) goto out_put; - ret = get_target(symname, &path, &target_item); + ret = get_target(symname, &path, &target_item, dentry->d_sb); if (ret) goto out_put; -- cgit From 16d13b59b5b85ebc91de6c889716fa6e7766237f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 16:41:55 -0400 Subject: configfs: sanitize configfs_create() Signed-off-by: Al Viro --- fs/configfs/inode.c | 55 ++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 8cf21ef902fc..0074362d9f7f 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -187,36 +187,35 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd, int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *)) { int error = 0; - struct inode * inode = NULL; - if (dentry) { - if (!dentry->d_inode) { - struct configfs_dirent *sd = dentry->d_fsdata; - if ((inode = configfs_new_inode(mode, sd, dentry->d_sb))) { - if (dentry->d_parent && dentry->d_parent->d_inode) { - struct inode *p_inode = dentry->d_parent->d_inode; - p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; - } - configfs_set_inode_lock_class(sd, inode); - goto Proceed; - } - else - error = -ENOMEM; - } else - error = -EEXIST; - } else - error = -ENOENT; - goto Done; + struct inode *inode = NULL; + struct configfs_dirent *sd; + struct inode *p_inode; + + if (!dentry) + return -ENOENT; + + if (dentry->d_inode) + return -EEXIST; - Proceed: - if (init) + sd = 
dentry->d_fsdata; + inode = configfs_new_inode(mode, sd, dentry->d_sb); + if (!inode) + return -ENOMEM; + + p_inode = dentry->d_parent->d_inode; + p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; + configfs_set_inode_lock_class(sd, inode); + + if (init) { error = init(inode); - if (!error) { - d_instantiate(dentry, inode); - if (S_ISDIR(mode) || S_ISLNK(mode)) - dget(dentry); /* pin link and directory dentries in core */ - } else - iput(inode); - Done: + if (error) { + iput(inode); + return error; + } + } + d_instantiate(dentry, inode); + if (S_ISDIR(mode) || S_ISLNK(mode)) + dget(dentry); /* pin link and directory dentries in core */ return error; } -- cgit From 0dd6c08a0042ed83037cf5c772d9066e33046427 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 16:49:20 -0400 Subject: configfs: configfs_create_dir() has parent dentry in dentry->d_parent no need to play sick games with parent item, internal mount, etc. Signed-off-by: Al Viro --- fs/configfs/dir.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 54c59a7e37ce..6c560e77965c 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -264,11 +264,13 @@ static int init_symlink(struct inode * inode) return 0; } -static int create_dir(struct config_item * k, struct dentry * p, - struct dentry * d) +static int create_dir(struct config_item *k, struct dentry *d) { int error; umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; + struct dentry *p = d->d_parent; + + BUG_ON(!k); error = configfs_dirent_exists(p->d_fsdata, d->d_name.name); if (!error) @@ -304,19 +306,7 @@ static int create_dir(struct config_item * k, struct dentry * p, static int configfs_create_dir(struct config_item * item, struct dentry *dentry) { - struct dentry * parent; - int error = 0; - - BUG_ON(!item); - - if (item->ci_parent) - parent = item->ci_parent->ci_dentry; - else if (configfs_mount) - parent = configfs_mount->mnt_root; - else - return -EFAULT; - - error = create_dir(item,parent,dentry); + int error = create_dir(item, dentry); if (!error) item->ci_dentry = dentry; return error; -- cgit From 2a152ad3a58508b06b9e0482e68117a79bbb27ce Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 16:53:29 -0400 Subject: make configfs_pin_fs() return root dentry on success ... 
and make configfs_mnt static Signed-off-by: Al Viro --- fs/configfs/configfs_internal.h | 3 +-- fs/configfs/dir.c | 14 ++++++-------- fs/configfs/mount.c | 7 ++++--- 3 files changed, 11 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index 37121c2be110..b5f0a3b91f18 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -58,7 +58,6 @@ struct configfs_dirent { extern struct mutex configfs_symlink_mutex; extern spinlock_t configfs_dirent_lock; -extern struct vfsmount * configfs_mount; extern struct kmem_cache *configfs_dir_cachep; extern int configfs_is_root(struct config_item *item); @@ -80,7 +79,7 @@ extern const unsigned char * configfs_get_name(struct configfs_dirent *sd); extern void configfs_drop_dentry(struct configfs_dirent *sd, struct dentry *parent); extern int configfs_setattr(struct dentry *dentry, struct iattr *iattr); -extern int configfs_pin_fs(void); +extern struct dentry *configfs_pin_fs(void); extern void configfs_release_fs(void); extern struct rw_semaphore configfs_rename_sem; diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 6c560e77965c..7e6c52d8a207 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1075,16 +1075,15 @@ int configfs_depend_item(struct configfs_subsystem *subsys, * Pin the configfs filesystem. This means we can safely access * the root of the configfs filesystem. */ - ret = configfs_pin_fs(); - if (ret) - return ret; + root = configfs_pin_fs(); + if (IS_ERR(root)) + return PTR_ERR(root); /* * Next, lock the root directory. We're going to check that the * subsystem is really registered, and so we need to lock out * configfs_[un]register_subsystem(). */ - root = configfs_mount->mnt_root; mutex_lock(&root->d_inode->i_mutex); root_sd = root->d_fsdata; @@ -1673,14 +1672,13 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) struct dentry *root; struct configfs_dirent *sd; - err = configfs_pin_fs(); - if (err) - return err; + root = configfs_pin_fs(); + if (IS_ERR(root)) + return PTR_ERR(root); if (!group->cg_item.ci_name) group->cg_item.ci_name = group->cg_item.ci_namebuf; - root = configfs_mount->mnt_root; sd = root->d_fsdata; link_group(to_config_group(sd->s_element), group); diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index cc829fc85d77..aee0a7ebbd8e 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -37,7 +37,7 @@ /* Random magic number */ #define CONFIGFS_MAGIC 0x62656570 -struct vfsmount * configfs_mount = NULL; +static struct vfsmount *configfs_mount = NULL; struct kmem_cache *configfs_dir_cachep; static int configfs_mnt_count = 0; @@ -115,10 +115,11 @@ static struct file_system_type configfs_fs_type = { .kill_sb = kill_litter_super, }; -int configfs_pin_fs(void) +struct dentry *configfs_pin_fs(void) { - return simple_pin_fs(&configfs_fs_type, &configfs_mount, + int err = simple_pin_fs(&configfs_fs_type, &configfs_mount, &configfs_mnt_count); + return err ? 
ERR_PTR(err) : configfs_mount->mnt_root; } void configfs_release_fs(void) -- cgit From 76bf09fcf76da1a2d777d9827d5a0a6a4cec0dcb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 18:14:34 -0400 Subject: jfs: mising cleanup on register_filesystem() failure Signed-off-by: Al Viro --- fs/jfs/super.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jfs/super.c b/fs/jfs/super.c index b3bb95504479..4a82950f412f 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -860,8 +860,14 @@ static int __init init_jfs_fs(void) jfs_proc_init(); #endif - return register_filesystem(&jfs_fs_type); + rc = register_filesystem(&jfs_fs_type); + if (!rc) + return 0; +#ifdef PROC_FS_JFS + jfs_proc_clean(); +#endif + kthread_stop(jfsSyncThread); kill_committask: for (i = 0; i < commit_threads; i++) kthread_stop(jfsCommitThread[i]); -- cgit From 03e897a1edb9604c299e1a7646ade0ca90fe1e02 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 18:16:24 -0400 Subject: logfs: missing cleanup on register_filesystem() failure Signed-off-by: Al Viro --- fs/logfs/super.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/logfs/super.c b/fs/logfs/super.c index 7de18c3021fe..97bca623d893 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c @@ -626,7 +626,10 @@ static int __init logfs_init(void) if (ret) goto out2; - return register_filesystem(&logfs_fs_type); + ret = register_filesystem(&logfs_fs_type); + if (!ret) + return 0; + logfs_destroy_inode_cache(); out2: logfs_compr_exit(); out1: -- cgit From f4c5499d2cbe54f0470764acfd0faf4e965aa7e9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 18:19:57 -0400 Subject: ntfs: forgets to unregister sysctls on register_filesystem() failure Signed-off-by: Al Viro --- fs/ntfs/super.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 22020d8b1ed2..3502a9e8f0a9 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -3159,6 +3159,8 @@ static int __init init_ntfs_fs(void) } printk(KERN_CRIT "NTFS: Failed to register NTFS filesystem driver!\n"); + /* Unregister the ntfs sysctls. 
*/ + ntfs_sysctl(0); sysctl_err_out: kmem_cache_destroy(ntfs_big_inode_cache); big_inode_err_out: -- cgit From 0794f569ec307dc25bbb12456ef75aa71f72f744 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 21:29:13 -0400 Subject: ecryptfs: make register_filesystem() the last potential failure exit Signed-off-by: Al Viro --- fs/ecryptfs/main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 6e0e017e6932..68954937a071 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -794,15 +794,10 @@ static int __init ecryptfs_init(void) "Failed to allocate one or more kmem_cache objects\n"); goto out; } - rc = register_filesystem(&ecryptfs_fs_type); - if (rc) { - printk(KERN_ERR "Failed to register filesystem\n"); - goto out_free_kmem_caches; - } rc = do_sysfs_registration(); if (rc) { printk(KERN_ERR "sysfs registration failed\n"); - goto out_unregister_filesystem; + goto out_free_kmem_caches; } rc = ecryptfs_init_kthread(); if (rc) { @@ -823,19 +818,24 @@ static int __init ecryptfs_init(void) "rc = [%d]\n", rc); goto out_release_messaging; } + rc = register_filesystem(&ecryptfs_fs_type); + if (rc) { + printk(KERN_ERR "Failed to register filesystem\n"); + goto out_destroy_crypto; + } if (ecryptfs_verbosity > 0) printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values " "will be written to the syslog!\n", ecryptfs_verbosity); goto out; +out_destroy_crypto: + ecryptfs_destroy_crypto(); out_release_messaging: ecryptfs_release_messaging(); out_destroy_kthread: ecryptfs_destroy_kthread(); out_do_sysfs_unregistration: do_sysfs_unregistration(); -out_unregister_filesystem: - unregister_filesystem(&ecryptfs_fs_type); out_free_kmem_caches: ecryptfs_free_kmem_caches(); out: -- cgit From 342827d7d19cb52b562bb3efeb4d4b672d008c35 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Mar 2012 22:03:58 -0400 Subject: ocfs2: fix leaks on failure exits in module_init Signed-off-by: Al Viro --- fs/ocfs2/super.c | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 337687c3e233..68f4541c2db9 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1624,21 +1624,17 @@ static int __init ocfs2_init(void) init_waitqueue_head(&ocfs2__ioend_wq[i]); status = init_ocfs2_uptodate_cache(); - if (status < 0) { - mlog_errno(status); - goto leave; - } + if (status < 0) + goto out1; status = ocfs2_initialize_mem_caches(); - if (status < 0) { - mlog_errno(status); - goto leave; - } + if (status < 0) + goto out2; ocfs2_wq = create_singlethread_workqueue("ocfs2_wq"); if (!ocfs2_wq) { status = -ENOMEM; - goto leave; + goto out3; } ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL); @@ -1650,17 +1646,23 @@ static int __init ocfs2_init(void) ocfs2_set_locking_protocol(); status = register_quota_format(&ocfs2_quota_format); -leave: - if (status < 0) { - ocfs2_free_mem_caches(); - exit_ocfs2_uptodate_cache(); - mlog_errno(status); - } + if (status < 0) + goto out4; + status = register_filesystem(&ocfs2_fs_type); + if (!status) + return 0; - if (status >= 0) { - return register_filesystem(&ocfs2_fs_type); - } else - return -1; + unregister_quota_format(&ocfs2_quota_format); +out4: + destroy_workqueue(ocfs2_wq); + debugfs_remove(ocfs2_debugfs_root); +out3: + ocfs2_free_mem_caches(); +out2: + exit_ocfs2_uptodate_cache(); +out1: + mlog_errno(status); + return status; } static void __exit ocfs2_exit(void) -- cgit 
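The jfs, logfs, ntfs, ecryptfs and ocfs2 fixes above all converge on the same shape: make register_filesystem() the last step of module init so every earlier failure can be unwound with plain gotos in reverse order, and unregister_filesystem() never has to appear on an error path. A minimal sketch of that shape, using made-up "foofs" names rather than code taken from any of the patches:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>

static struct kmem_cache *foofs_inode_cache;

static struct file_system_type foofs_fs_type = {
	.owner	= THIS_MODULE,
	.name	= "foofs",
	/* .mount and .kill_sb omitted from this sketch */
};

static int __init foofs_init(void)
{
	int err;

	/* Set up every internal resource first... */
	foofs_inode_cache = kmem_cache_create("foofs_inode_cache", 256, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
	if (!foofs_inode_cache)
		return -ENOMEM;

	/* ...and only then make the filesystem visible to the VFS. */
	err = register_filesystem(&foofs_fs_type);
	if (err)
		goto out_destroy_cache;
	return 0;

out_destroy_cache:
	kmem_cache_destroy(foofs_inode_cache);
	return err;
}
module_init(foofs_init);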
From e59cc473cc603d562f2c80c12c943ef2a8cde6b2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 7 Dec 2011 13:17:19 -0500 Subject: trim includes in inode.c Signed-off-by: Al Viro --- fs/inode.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 8b612813a6a7..9f4f5fecc096 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -2,29 +2,19 @@ * (C) 1997 Linus Torvalds * (C) 1999 Andrea Arcangeli (dynamic inode allocation) */ +#include #include #include -#include -#include -#include -#include -#include #include -#include -#include #include #include #include -#include #include #include #include #include -#include #include #include -#include -#include #include /* for inode_has_buffers */ #include #include "internal.h" -- cgit From 19e5109fef2c368ab3f8a5157270f87f4a7c0326 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 23 Feb 2012 22:29:17 -0500 Subject: take removal of PF_FORKNOEXEC to flush_old_exec() Signed-off-by: Al Viro --- fs/binfmt_aout.c | 1 - fs/binfmt_elf.c | 2 -- fs/binfmt_elf_fdpic.c | 3 --- fs/binfmt_flat.c | 1 - fs/binfmt_som.c | 1 - fs/exec.c | 2 +- 6 files changed, 1 insertion(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index a543364ba29b..4d5e6d26578c 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -267,7 +267,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) } install_exec_creds(bprm); - current->flags &= ~PF_FORKNOEXEC; if (N_MAGIC(ex) == OMAGIC) { unsigned long text_addr, map_size; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index f8ac4251877e..81878b78c9d4 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -712,7 +712,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) goto out_free_dentry; /* OK, This is the point of no return */ - current->flags &= ~PF_FORKNOEXEC; current->mm->def_flags = def_flags; /* Do this immediately, since STACK_TOP as used in setup_arg_pages @@ -934,7 +933,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ install_exec_creds(bprm); - current->flags &= ~PF_FORKNOEXEC; retval = create_elf_tables(bprm, &loc->elf_ex, load_addr, interp_load_addr); if (retval < 0) { diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index e7afcb67a2d3..c64bf5ee2df4 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -335,8 +335,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, current->mm->context.exec_fdpic_loadmap = 0; current->mm->context.interp_fdpic_loadmap = 0; - current->flags &= ~PF_FORKNOEXEC; - #ifdef CONFIG_MMU elf_fdpic_arch_lay_out_mm(&exec_params, &interp_params, @@ -414,7 +412,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, #endif install_exec_creds(bprm); - current->flags &= ~PF_FORKNOEXEC; if (create_elf_fdpic_tables(bprm, current->mm, &exec_params, &interp_params) < 0) goto error_kill; diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 68affab88146..04f61f0bdfde 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -902,7 +902,6 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs) libinfo.lib_list[j].start_data:UNLOADED_LIB; install_exec_creds(bprm); - current->flags &= ~PF_FORKNOEXEC; set_binfmt(&flat_format); diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index ec15972dd98a..e4fc746629a7 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c @@ -225,7 +225,6 @@ load_som_binary(struct linux_binprm * bprm, struct 
pt_regs * regs) goto out_free; /* OK, This is the point of no return */ - current->flags &= ~PF_FORKNOEXEC; current->personality = PER_HPUX; setup_new_exec(bprm); diff --git a/fs/exec.c b/fs/exec.c index 2c5ae338773c..60478a0e7a37 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1110,7 +1110,7 @@ int flush_old_exec(struct linux_binprm * bprm) bprm->mm = NULL; /* We're using it now */ set_fs(USER_DS); - current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD); + current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD); flush_thread(); current->personality &= ~bprm->per_clear; -- cgit From 8f82ecae53347ec65721e1c0683c3f26647e3c97 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 13 Feb 2012 20:57:12 -0500 Subject: qnx4: get rid of qnx4_bread/qnx4_getblk pointless, since the only caller will want the physical block number anyway; might as well call qnx4_block_map() and use sb_bread() Signed-off-by: Al Viro --- fs/qnx4/inode.c | 32 -------------------------------- fs/qnx4/namei.c | 5 +++-- fs/qnx4/qnx4.h | 2 -- 3 files changed, 3 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 14a0ba0f0c1c..7900a813d90f 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -52,38 +52,6 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data) return 0; } -static struct buffer_head *qnx4_getblk(struct inode *inode, int nr, - int create) -{ - struct buffer_head *result = NULL; - - if ( nr >= 0 ) - nr = qnx4_block_map( inode, nr ); - if (nr) { - result = sb_getblk(inode->i_sb, nr); - return result; - } - return NULL; -} - -struct buffer_head *qnx4_bread(struct inode *inode, int block, int create) -{ - struct buffer_head *bh; - - bh = qnx4_getblk(inode, block, create); - if (!bh || buffer_uptodate(bh)) { - return bh; - } - ll_rw_block(READ, 1, &bh); - wait_on_buffer(bh); - if (buffer_uptodate(bh)) { - return bh; - } - brelse(bh); - - return NULL; -} - static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create ) { unsigned long phys; diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c index e8eb8784ed30..a512c0b30e8e 100644 --- a/fs/qnx4/namei.c +++ b/fs/qnx4/namei.c @@ -68,7 +68,9 @@ static struct buffer_head *qnx4_find_entry(int len, struct inode *dir, block = offset = blkofs = 0; while (blkofs * QNX4_BLOCK_SIZE + offset < dir->i_size) { if (!bh) { - bh = qnx4_bread(dir, blkofs, 0); + block = qnx4_block_map(dir, blkofs); + if (block) + bh = sb_bread(dir->i_sb, block); if (!bh) { blkofs++; continue; @@ -76,7 +78,6 @@ static struct buffer_head *qnx4_find_entry(int len, struct inode *dir, } *res_dir = (struct qnx4_inode_entry *) (bh->b_data + offset); if (qnx4_match(len, name, bh, &offset)) { - block = qnx4_block_map( dir, blkofs ); *ino = block * QNX4_INODES_PER_BLOCK + (offset / QNX4_DIR_ENTRY_SIZE) - 1; return bh; diff --git a/fs/qnx4/qnx4.h b/fs/qnx4/qnx4.h index 33a60858203b..244d4620189b 100644 --- a/fs/qnx4/qnx4.h +++ b/fs/qnx4/qnx4.h @@ -27,8 +27,6 @@ extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, stru extern unsigned long qnx4_count_free_blocks(struct super_block *sb); extern unsigned long qnx4_block_map(struct inode *inode, long iblock); -extern struct buffer_head *qnx4_bread(struct inode *, int, int); - extern const struct inode_operations qnx4_dir_inode_operations; extern const struct file_operations qnx4_dir_operations; extern int qnx4_is_free(struct super_block *sb, long block); -- cgit From 7cd916f6ea86e8538b1e8136847dfb941813fe94 Mon Sep 17 00:00:00 2001 From: Al Viro Date: 
Tue, 14 Feb 2012 17:48:20 -0500 Subject: qnx4: new helper - try_extent() checking if an extent is the one we are looking for is done twice in qnx4_block_map(); gather that code into a helper function. Signed-off-by: Al Viro --- fs/qnx4/inode.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 7900a813d90f..552e994e3aa1 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -66,23 +66,31 @@ static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_h return 0; } +static inline u32 try_extent(qnx4_xtnt_t *extent, u32 *offset) +{ + u32 size = le32_to_cpu(extent->xtnt_size); + if (*offset < size) + return le32_to_cpu(extent->xtnt_blk) + *offset - 1; + *offset -= size; + return 0; +} + unsigned long qnx4_block_map( struct inode *inode, long iblock ) { int ix; - long offset, i_xblk; - unsigned long block = 0; + long i_xblk; struct buffer_head *bh = NULL; struct qnx4_xblk *xblk = NULL; struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode); u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts); + u32 offset = iblock; + u32 block = try_extent(&qnx4_inode->di_first_xtnt, &offset); - if ( iblock < le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size) ) { + if (block) { // iblock is in the first extent. This is easy. - block = le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_blk) + iblock - 1; } else { // iblock is beyond first extent. We have to follow the extent chain. i_xblk = le32_to_cpu(qnx4_inode->di_xblk); - offset = iblock - le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size); ix = 0; while ( --nxtnt > 0 ) { if ( ix == 0 ) { @@ -98,12 +106,11 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock ) return -EIO; } } - if ( offset < le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size) ) { + block = try_extent(&xblk->xblk_xtnts[ix], &offset); + if (block) { // got it! - block = le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_blk) + offset - 1; break; } - offset -= le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size); if ( ++ix >= xblk->xblk_num_xtnts ) { i_xblk = le32_to_cpu(xblk->xblk_next_xblk); ix = 0; -- cgit From f3922382ce930e76773fb06416a7a6081a8702ad Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 13 Mar 2012 12:10:34 -0400 Subject: hfsplus: initialise userflags The userflags field was being written to the filesystem without being initialised. Make sure it's clear, since otherwise files end up with garbage attributes. 
Signed-off-by: Matthew Garrett Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/hfsplus/inode.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 6643b242bdd7..82b69ee4dacc 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -193,6 +193,7 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, mutex_init(&hip->extents_lock); hip->extent_state = 0; hip->flags = 0; + hip->userflags = 0; set_bit(HFSPLUS_I_RSRC, &hip->flags); err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); @@ -400,6 +401,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) atomic_set(&hip->opencnt, 0); hip->extent_state = 0; hip->flags = 0; + hip->userflags = 0; memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); hip->alloc_blocks = 0; -- cgit From 0347b6e95ceeb648631f89a0ced001e90e4daaa9 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Thu, 2 Feb 2012 15:39:50 -0500 Subject: hfsplus: change finder_info to u32 The finder_info block in the hfsplus volume header is currently defined as an array of 8 bit values, but TN1150 defines it as being an array of 32 bit values. Fix for convenience. Signed-off-by: Matthew Garrett Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/hfsplus/hfsplus_raw.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h index 927cdd6d5bf5..921967e5abb1 100644 --- a/fs/hfsplus/hfsplus_raw.h +++ b/fs/hfsplus/hfsplus_raw.h @@ -117,7 +117,7 @@ struct hfsplus_vh { __be32 write_count; __be64 encodings_bmp; - u8 finder_info[32]; + u32 finder_info[8]; struct hfsplus_fork_raw alloc_file; struct hfsplus_fork_raw ext_file; -- cgit From a051f71ce97c53cde3ac64de64eb02d658d9308e Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 6 Feb 2012 15:14:40 -0500 Subject: hfsplus: add an ioctl to bless files Making an hfsplus partition bootable requires the ability to "bless" a file by putting its inode number in the volume header. Doing this from userspace on a mounted filesystem is impractical since the kernel will write back the original values on unmount. Add an ioctl to allow userspace to update the volume header information based on the target file. 
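Assuming the ioctl lands as defined in the hunks below (HFSPLUS_IOC_BLESS = _IO('h', 0x80), no argument payload, CAP_SYS_ADMIN required), userspace would bless a bootloader roughly as follows; this program is an illustrative sketch only, not part of the patch or of any shipped tool:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define HFSPLUS_IOC_BLESS _IO('h', 0x80)

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <bootloader on mounted hfsplus volume>\n",
			argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Writes the file's inode number (and its parent directory's) into
	 * the volume header finder_info slots; needs CAP_SYS_ADMIN. */
	if (ioctl(fd, HFSPLUS_IOC_BLESS, NULL) < 0) {
		perror("HFSPLUS_IOC_BLESS");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}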
Signed-off-by: Matthew Garrett Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/hfsplus/hfsplus_fs.h | 5 +++++ fs/hfsplus/ioctl.c | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) (limited to 'fs') diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 21a5b7fc6db4..4e75ac646fea 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -316,6 +316,11 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb) #define HFSPLUS_IOC_EXT2_SETFLAGS FS_IOC_SETFLAGS +/* + * hfs+-specific ioctl for making the filesystem bootable + */ +#define HFSPLUS_IOC_BLESS _IO('h', 0x80) + /* * Functions in any *.c used in other files */ diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c index f66c7655b3f7..c640ba57074b 100644 --- a/fs/hfsplus/ioctl.c +++ b/fs/hfsplus/ioctl.c @@ -20,6 +20,38 @@ #include #include "hfsplus_fs.h" +/* + * "Blessing" an HFS+ filesystem writes metadata to the superblock informing + * the platform firmware which file to boot from + */ +static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) +{ + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = dentry->d_inode; + struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); + struct hfsplus_vh *vh = sbi->s_vhdr; + struct hfsplus_vh *bvh = sbi->s_backup_vhdr; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + mutex_lock(&sbi->vh_mutex); + + /* Directory containing the bootable system */ + vh->finder_info[0] = bvh->finder_info[0] = + cpu_to_be32(parent_ino(dentry)); + + /* Bootloader */ + vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino); + + /* Per spec, the OS X system folder - same as finder_info[0] here */ + vh->finder_info[5] = bvh->finder_info[5] = + cpu_to_be32(parent_ino(dentry)); + + mutex_unlock(&sbi->vh_mutex); + return 0; +} + static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags) { struct inode *inode = file->f_path.dentry->d_inode; @@ -108,6 +140,8 @@ long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return hfsplus_ioctl_getflags(file, argp); case HFSPLUS_IOC_EXT2_SETFLAGS: return hfsplus_ioctl_setflags(file, argp); + case HFSPLUS_IOC_BLESS: + return hfsplus_ioctl_bless(file, argp); default: return -ENOTTY; } -- cgit From 88187398cc5fa6650f38b9dcd5464667f468888f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 20 Mar 2012 06:00:24 -0400 Subject: debugfs-related mode_t whack-a-mole all of those should be umode_t... Signed-off-by: Al Viro --- fs/debugfs/file.c | 2 +- fs/nfsd/fault_inject.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index ef023eef0464..21e93605161c 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -611,7 +611,7 @@ static const struct file_operations fops_regset32 = { * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling * code. 
*/ -struct dentry *debugfs_create_regset32(const char *name, mode_t mode, +struct dentry *debugfs_create_regset32(const char *name, umode_t mode, struct dentry *parent, struct debugfs_regset32 *regset) { diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c index ce7f0758d84c..9559ce468732 100644 --- a/fs/nfsd/fault_inject.c +++ b/fs/nfsd/fault_inject.c @@ -72,7 +72,7 @@ int nfsd_fault_inject_init(void) { unsigned int i; struct nfsd_fault_inject_op *op; - mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; + umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; debug_dir = debugfs_create_dir("nfsd", NULL); if (!debug_dir) -- cgit From 07c0c5d8b8c122b2f2df9ee574ac3083daefc981 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 20 Mar 2012 22:05:02 -0400 Subject: ext4: initialization of ext4_li_mtx needs to be done earlier Signed-off-by: Al Viro --- fs/ext4/super.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index d2baea7bcf30..933900909ed0 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5055,6 +5055,9 @@ static int __init ext4_init_fs(void) { int i, err; + ext4_li_info = NULL; + mutex_init(&ext4_li_mtx); + ext4_check_flag_values(); for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { @@ -5093,8 +5096,6 @@ static int __init ext4_init_fs(void) if (err) goto out; - ext4_li_info = NULL; - mutex_init(&ext4_li_mtx); return 0; out: unregister_as_ext2(); -- cgit From ffa94db6042e6fd014ae0bed8832ac707ef2afe9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 20 Mar 2012 09:22:00 -0400 Subject: SUNRPC/LOCKD: Fix build warnings when CONFIG_SUNRPC_DEBUG is undefined Stephen Rothwell reports: net/sunrpc/rpcb_clnt.c: In function 'rpcb_enc_mapping': net/sunrpc/rpcb_clnt.c:820:19: warning: unused variable 'task' [-Wunused-variable] net/sunrpc/rpcb_clnt.c: In function 'rpcb_dec_getport': net/sunrpc/rpcb_clnt.c:837:19: warning: unused variable 'task' [-Wunused-variable] net/sunrpc/rpcb_clnt.c: In function 'rpcb_dec_set': net/sunrpc/rpcb_clnt.c:860:19: warning: unused variable 'task' [-Wunused-variable] net/sunrpc/rpcb_clnt.c: In function 'rpcb_enc_getaddr': net/sunrpc/rpcb_clnt.c:892:19: warning: unused variable 'task' [-Wunused-variable] net/sunrpc/rpcb_clnt.c: In function 'rpcb_dec_getaddr': net/sunrpc/rpcb_clnt.c:914:19: warning: unused variable 'task' [-Wunused-variable] fs/lockd/svclock.c:49:20: warning: 'nlmdbg_cookie2a' declared 'static' but never defined [-Wunused-function] Reported-by: Stephen Rothwell Signed-off-by: Trond Myklebust --- fs/lockd/svclock.c | 59 +++++++++++++++++++++++++++--------------------------- 1 file changed, 29 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index f0179c3745d2..e46353f41a42 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -46,7 +46,6 @@ static void nlmsvc_remove_block(struct nlm_block *block); static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); static void nlmsvc_freegrantargs(struct nlm_rqst *call); static const struct rpc_call_ops nlmsvc_grant_ops; -static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie); /* * The list of blocked locks to retry @@ -54,6 +53,35 @@ static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie); static LIST_HEAD(nlm_blocked); static DEFINE_SPINLOCK(nlm_blocked_lock); +#ifdef LOCKD_DEBUG +static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) +{ + /* + * We can get away with a static buffer because we're only + * called with BKL held. 
+ */ + static char buf[2*NLM_MAXCOOKIELEN+1]; + unsigned int i, len = sizeof(buf); + char *p = buf; + + len--; /* allow for trailing \0 */ + if (len < 3) + return "???"; + for (i = 0 ; i < cookie->len ; i++) { + if (len < 2) { + strcpy(p-3, "..."); + break; + } + sprintf(p, "%02x", cookie->data[i]); + p += 2; + len -= 2; + } + *p = '\0'; + + return buf; +} +#endif + /* * Insert a blocked lock into the global list */ @@ -935,32 +963,3 @@ nlmsvc_retry_blocked(void) return timeout; } - -#ifdef RPC_DEBUG -static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) -{ - /* - * We can get away with a static buffer because we're only - * called with BKL held. - */ - static char buf[2*NLM_MAXCOOKIELEN+1]; - unsigned int i, len = sizeof(buf); - char *p = buf; - - len--; /* allow for trailing \0 */ - if (len < 3) - return "???"; - for (i = 0 ; i < cookie->len ; i++) { - if (len < 2) { - strcpy(p-3, "..."); - break; - } - sprintf(p, "%02x", cookie->data[i]); - p += 2; - len -= 2; - } - *p = '\0'; - - return buf; -} -#endif -- cgit From 6f00866ddd15724eb20eac4ddf6e2c6c1a6cfcdc Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 20 Mar 2012 14:12:46 -0400 Subject: NFS: Fix more NFS debug related build warnings Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 5 ++--- fs/nfs/fscache.c | 2 +- fs/nfs/pnfs.h | 8 +++++++- fs/nfs/pnfs_dev.c | 2 ++ 4 files changed, 12 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 61501346324e..9c94297bb70e 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -233,12 +233,11 @@ bl_read_pagelist(struct nfs_read_data *rdata) sector_t isect, extent_length = 0; struct parallel_io *par; loff_t f_offset = rdata->args.offset; - size_t count = rdata->args.count; struct page **pages = rdata->args.pages; int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT; - dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__, - rdata->npages, f_offset, count); + dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, + rdata->npages, f_offset, (unsigned int)rdata->args.count); par = alloc_parallel(rdata); if (!par) diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 419119c371bf..ae65c16b3670 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -327,7 +327,7 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_server *nfss = NFS_SERVER(inode); - struct fscache_cookie *old = nfsi->fscache; + NFS_IFDEBUG(struct fscache_cookie *old = nfsi->fscache); nfs_fscache_inode_lock(inode); if (nfsi->fscache) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 07802652f5a3..442ebf68eeec 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -228,7 +228,6 @@ struct nfs4_deviceid_node { atomic_t ref; }; -void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id); struct nfs4_deviceid_node *nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *); void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *); void nfs4_init_deviceid_node(struct nfs4_deviceid_node *, @@ -328,6 +327,13 @@ static inline int pnfs_return_layout(struct inode *ino) return 0; } +#ifdef NFS_DEBUG +void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id); +#else +static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id) +{ +} +#endif /* NFS_DEBUG */ #else /* 
CONFIG_NFS_V4_1 */ static inline void pnfs_destroy_all_layouts(struct nfs_client *clp) diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c index 6b4cd3849306..73f701f1f4d3 100644 --- a/fs/nfs/pnfs_dev.c +++ b/fs/nfs/pnfs_dev.c @@ -43,6 +43,7 @@ static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE]; static DEFINE_SPINLOCK(nfs4_deviceid_lock); +#ifdef NFS_DEBUG void nfs4_print_deviceid(const struct nfs4_deviceid *id) { @@ -52,6 +53,7 @@ nfs4_print_deviceid(const struct nfs4_deviceid *id) p[0], p[1], p[2], p[3]); } EXPORT_SYMBOL_GPL(nfs4_print_deviceid); +#endif static inline u32 nfs4_deviceid_hash(const struct nfs4_deviceid *id) -- cgit From c6cb80d00be42f30716ec817b963bcec094433b5 Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Mon, 19 Mar 2012 14:54:39 -0400 Subject: NFS: Remove nfs4_setup_sequence from generic write code This is an NFS v4 specific operation, so it belongs in the NFS v4 code and not the generic client. Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 4 ---- fs/nfs/nfs3proc.c | 6 ++++++ fs/nfs/nfs4proc.c | 11 +++++++++++ fs/nfs/proc.c | 6 ++++++ fs/nfs/write.c | 15 +-------------- 5 files changed, 24 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 1940f1a56a5f..c4bdaf15289a 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -554,9 +554,7 @@ static void nfs_direct_commit_release(void *calldata) } static const struct rpc_call_ops nfs_commit_direct_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_write_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_direct_commit_result, .rpc_release = nfs_direct_commit_release, }; @@ -696,9 +694,7 @@ out_unlock: } static const struct rpc_call_ops nfs_write_direct_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_write_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_direct_write_result, .rpc_release = nfs_direct_write_release, }; diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 91943953a370..3bcf722800f3 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -828,6 +828,11 @@ static void nfs3_proc_write_setup(struct nfs_write_data *data, struct rpc_messag msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE]; } +static void nfs3_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) +{ + rpc_call_start(task); +} + static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data) { if (nfs3_async_handle_jukebox(task, data->inode)) @@ -881,6 +886,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = { .read_setup = nfs3_proc_read_setup, .read_done = nfs3_read_done, .write_setup = nfs3_proc_write_setup, + .write_rpc_prepare = nfs3_proc_write_rpc_prepare, .write_done = nfs3_write_done, .commit_setup = nfs3_proc_commit_setup, .commit_done = nfs3_commit_done, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d41d97fb4cb9..dc910691acc0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3373,6 +3373,16 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); } +static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) +{ + if (nfs4_setup_sequence(NFS_SERVER(data->inode), + &data->args.seq_args, + &data->res.seq_res, + task)) + return; + rpc_call_start(task); +} + static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data) { struct inode *inode = data->inode; @@ -6449,6 +6459,7 @@ const struct 
nfs_rpc_ops nfs_v4_clientops = { .read_setup = nfs4_proc_read_setup, .read_done = nfs4_read_done, .write_setup = nfs4_proc_write_setup, + .write_rpc_prepare = nfs4_proc_write_rpc_prepare, .write_done = nfs4_write_done, .commit_setup = nfs4_proc_commit_setup, .commit_done = nfs4_commit_done, diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 0c672588fe5a..8069b41e7f2d 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -668,6 +668,11 @@ static void nfs_proc_write_setup(struct nfs_write_data *data, struct rpc_message msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE]; } +static void nfs_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) +{ + rpc_call_start(task); +} + static void nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg) { @@ -738,6 +743,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = { .read_setup = nfs_proc_read_setup, .read_done = nfs_read_done, .write_setup = nfs_proc_write_setup, + .write_rpc_prepare = nfs_proc_write_rpc_prepare, .write_done = nfs_write_done, .commit_setup = nfs_proc_commit_setup, .lock = nfs_proc_lock, diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bd93d40099f9..2c68818f68ac 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1172,23 +1172,14 @@ out: nfs_writedata_release(calldata); } -#if defined(CONFIG_NFS_V4_1) void nfs_write_prepare(struct rpc_task *task, void *calldata) { struct nfs_write_data *data = calldata; - - if (nfs4_setup_sequence(NFS_SERVER(data->inode), - &data->args.seq_args, - &data->res.seq_res, task)) - return; - rpc_call_start(task); + NFS_PROTO(data->inode)->write_rpc_prepare(task, data); } -#endif /* CONFIG_NFS_V4_1 */ static const struct rpc_call_ops nfs_write_partial_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_write_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_writeback_done_partial, .rpc_release = nfs_writeback_release_partial, }; @@ -1250,9 +1241,7 @@ remove_request: } static const struct rpc_call_ops nfs_write_full_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_write_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_writeback_done_full, .rpc_release = nfs_writeback_release_full, }; @@ -1544,9 +1533,7 @@ static void nfs_commit_release(void *calldata) } static const struct rpc_call_ops nfs_commit_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_write_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_commit_done, .rpc_release = nfs_commit_release, }; -- cgit From ea7c330362257c072791aeaf03bae2cebf9fb984 Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Mon, 19 Mar 2012 14:54:40 -0400 Subject: NFS: Remove nfs4_setup_sequence from generic read code This is an NFS v4 specific operation, so it belongs in the NFS v4 code and not the generic client. 
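This commit and the neighbouring write/unlink/rename ones all follow the same recipe: the generic client calls a per-version hook through the nfs_rpc_ops table, v2/v3 implement it as a bare rpc_call_start(), and only the v4 variant does the session-sequence setup, which is what lets the CONFIG_NFS_V4_1 #ifdefs disappear from common code. A self-contained toy (invented names, ordinary userspace C, not the kernel code) showing why the ops-table indirection removes the compile-time branching:

#include <stdio.h>

struct demo_rpc_ops {
	void (*read_rpc_prepare)(const char *label);
};

static void v3_read_rpc_prepare(const char *label)
{
	/* v2/v3: nothing version-specific to do, start the call at once */
	printf("%s: rpc_call_start()\n", label);
}

static void v4_read_rpc_prepare(const char *label)
{
	/* v4: set up the session sequence first, then start the call */
	printf("%s: nfs4_setup_sequence(); rpc_call_start()\n", label);
}

static const struct demo_rpc_ops v3_ops = { .read_rpc_prepare = v3_read_rpc_prepare };
static const struct demo_rpc_ops v4_ops = { .read_rpc_prepare = v4_read_rpc_prepare };

/* Generic code: one call site, no knowledge of protocol versions. */
static void generic_read_prepare(const struct demo_rpc_ops *ops, const char *label)
{
	ops->read_rpc_prepare(label);
}

int main(void)
{
	generic_read_prepare(&v3_ops, "nfs3");
	generic_read_prepare(&v4_ops, "nfs4");
	return 0;
}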
Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 2 -- fs/nfs/nfs3proc.c | 6 ++++++ fs/nfs/nfs4proc.c | 11 +++++++++++ fs/nfs/proc.c | 6 ++++++ fs/nfs/read.c | 13 +------------ 5 files changed, 24 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index c4bdaf15289a..9c7f66ac6cc2 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -265,9 +265,7 @@ static void nfs_direct_read_release(void *calldata) } static const struct rpc_call_ops nfs_read_direct_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_read_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_direct_read_result, .rpc_release = nfs_direct_read_release, }; diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 3bcf722800f3..9d9b239329dc 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -814,6 +814,11 @@ static void nfs3_proc_read_setup(struct nfs_read_data *data, struct rpc_message msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ]; } +static void nfs3_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) +{ + rpc_call_start(task); +} + static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data) { if (nfs3_async_handle_jukebox(task, data->inode)) @@ -884,6 +889,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = { .pathconf = nfs3_proc_pathconf, .decode_dirent = nfs3_decode_dirent, .read_setup = nfs3_proc_read_setup, + .read_rpc_prepare = nfs3_proc_read_rpc_prepare, .read_done = nfs3_read_done, .write_setup = nfs3_proc_write_setup, .write_rpc_prepare = nfs3_proc_write_rpc_prepare, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dc910691acc0..915385fcf532 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3299,6 +3299,16 @@ static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); } +static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) +{ + if (nfs4_setup_sequence(NFS_SERVER(data->inode), + &data->args.seq_args, + &data->res.seq_res, + task)) + return; + rpc_call_start(task); +} + /* Reset the the nfs_read_data to send the read to the MDS. 
*/ void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data) { @@ -6457,6 +6467,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .set_capabilities = nfs4_server_capabilities, .decode_dirent = nfs4_decode_dirent, .read_setup = nfs4_proc_read_setup, + .read_rpc_prepare = nfs4_proc_read_rpc_prepare, .read_done = nfs4_read_done, .write_setup = nfs4_proc_write_setup, .write_rpc_prepare = nfs4_proc_write_rpc_prepare, diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 8069b41e7f2d..a8df70742d00 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -651,6 +651,11 @@ static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message * msg->rpc_proc = &nfs_procedures[NFSPROC_READ]; } +static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) +{ + rpc_call_start(task); +} + static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data) { if (nfs_async_handle_expired_key(task)) @@ -741,6 +746,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = { .pathconf = nfs_proc_pathconf, .decode_dirent = nfs2_decode_dirent, .read_setup = nfs_proc_read_setup, + .read_rpc_prepare = nfs_proc_read_rpc_prepare, .read_done = nfs_read_done, .write_setup = nfs_proc_write_setup, .write_rpc_prepare = nfs_proc_write_rpc_prepare, diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 2662c0298dd0..cc1f758a7ee1 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -464,23 +464,14 @@ static void nfs_readpage_release_partial(void *calldata) nfs_readdata_release(calldata); } -#if defined(CONFIG_NFS_V4_1) void nfs_read_prepare(struct rpc_task *task, void *calldata) { struct nfs_read_data *data = calldata; - - if (nfs4_setup_sequence(NFS_SERVER(data->inode), - &data->args.seq_args, &data->res.seq_res, - task)) - return; - rpc_call_start(task); + NFS_PROTO(data->inode)->read_rpc_prepare(task, data); } -#endif /* CONFIG_NFS_V4_1 */ static const struct rpc_call_ops nfs_read_partial_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_read_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_readpage_result_partial, .rpc_release = nfs_readpage_release_partial, }; @@ -544,9 +535,7 @@ static void nfs_readpage_release_full(void *calldata) } static const struct rpc_call_ops nfs_read_full_ops = { -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_read_prepare, -#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs_readpage_result_full, .rpc_release = nfs_readpage_release_full, }; -- cgit From 34e137cc7e3b63c254875e59cd48dcbe6757fe6c Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Mon, 19 Mar 2012 14:54:41 -0400 Subject: NFS: Remove nfs4_setup_sequence from generic unlink code This is an NFS v4 specific operation, so it belongs in the NFS v4 code and not the generic client. 
Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/nfs3proc.c | 6 ++++++ fs/nfs/nfs4proc.c | 11 +++++++++++ fs/nfs/proc.c | 6 ++++++ fs/nfs/unlink.c | 20 +------------------- 4 files changed, 24 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 9d9b239329dc..7f3f957f677c 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -428,6 +428,11 @@ nfs3_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE]; } +static void nfs3_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) +{ + rpc_call_start(task); +} + static int nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir) { @@ -874,6 +879,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = { .create = nfs3_proc_create, .remove = nfs3_proc_remove, .unlink_setup = nfs3_proc_unlink_setup, + .unlink_rpc_prepare = nfs3_proc_unlink_rpc_prepare, .unlink_done = nfs3_proc_unlink_done, .rename = nfs3_proc_rename, .rename_setup = nfs3_proc_rename_setup, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 915385fcf532..9c247fa7915a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2779,6 +2779,16 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); } +static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) +{ + if (nfs4_setup_sequence(NFS_SERVER(data->dir), + &data->args.seq_args, + &data->res.seq_res, + task)) + return; + rpc_call_start(task); +} + static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) { struct nfs_removeres *res = task->tk_msg.rpc_resp; @@ -6451,6 +6461,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .create = nfs4_proc_create, .remove = nfs4_proc_remove, .unlink_setup = nfs4_proc_unlink_setup, + .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, .unlink_done = nfs4_proc_unlink_done, .rename = nfs4_proc_rename, .rename_setup = nfs4_proc_rename_setup, diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index a8df70742d00..528b9a2fae05 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -358,6 +358,11 @@ nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE]; } +static void nfs_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) +{ + rpc_call_start(task); +} + static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) { if (nfs_async_handle_expired_key(task)) @@ -731,6 +736,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = { .create = nfs_proc_create, .remove = nfs_proc_remove, .unlink_setup = nfs_proc_unlink_setup, + .unlink_rpc_prepare = nfs_proc_unlink_rpc_prepare, .unlink_done = nfs_proc_unlink_done, .rename = nfs_proc_rename, .rename_setup = nfs_proc_rename_setup, diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index fae71c9f5050..9c5a7980e244 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -20,15 +20,6 @@ #include "iostat.h" #include "delegation.h" -struct nfs_unlinkdata { - struct hlist_node list; - struct nfs_removeargs args; - struct nfs_removeres res; - struct inode *dir; - struct rpc_cred *cred; - struct nfs_fattr dir_attr; -}; - /** * nfs_free_unlinkdata - release data from a sillydelete operation. * @data: pointer to unlink structure. 
@@ -107,25 +98,16 @@ static void nfs_async_unlink_release(void *calldata) nfs_sb_deactive(sb); } -#if defined(CONFIG_NFS_V4_1) static void nfs_unlink_prepare(struct rpc_task *task, void *calldata) { struct nfs_unlinkdata *data = calldata; - struct nfs_server *server = NFS_SERVER(data->dir); - - if (nfs4_setup_sequence(server, &data->args.seq_args, - &data->res.seq_res, task)) - return; - rpc_call_start(task); + NFS_PROTO(data->dir)->unlink_rpc_prepare(task, data); } -#endif /* CONFIG_NFS_V4_1 */ static const struct rpc_call_ops nfs_unlink_ops = { .rpc_call_done = nfs_async_unlink_done, .rpc_release = nfs_async_unlink_release, -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_unlink_prepare, -#endif /* CONFIG_NFS_V4_1 */ }; static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data) -- cgit From c6bfa1a16377b42496ecc0490a33516c0e414e7b Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Mon, 19 Mar 2012 14:54:42 -0400 Subject: NFS: Remove nfs4_setup_sequence from generic rename code This is an NFS v4 specific operation, so it belongs in the NFS v4 code and not the generic client. Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/nfs3proc.c | 6 ++++++ fs/nfs/nfs4proc.c | 11 +++++++++++ fs/nfs/proc.c | 6 ++++++ fs/nfs/unlink.c | 23 +---------------------- 4 files changed, 24 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 7f3f957f677c..5242eae6711a 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -450,6 +450,11 @@ nfs3_proc_rename_setup(struct rpc_message *msg, struct inode *dir) msg->rpc_proc = &nfs3_procedures[NFS3PROC_RENAME]; } +static void nfs3_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) +{ + rpc_call_start(task); +} + static int nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir, struct inode *new_dir) @@ -883,6 +888,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = { .unlink_done = nfs3_proc_unlink_done, .rename = nfs3_proc_rename, .rename_setup = nfs3_proc_rename_setup, + .rename_rpc_prepare = nfs3_proc_rename_rpc_prepare, .rename_done = nfs3_proc_rename_done, .link = nfs3_proc_link, .symlink = nfs3_proc_symlink, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9c247fa7915a..b76dd0efae75 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2814,6 +2814,16 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); } +static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) +{ + if (nfs4_setup_sequence(NFS_SERVER(data->old_dir), + &data->args.seq_args, + &data->res.seq_res, + task)) + return; + rpc_call_start(task); +} + static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, struct inode *new_dir) { @@ -6465,6 +6475,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .unlink_done = nfs4_proc_unlink_done, .rename = nfs4_proc_rename, .rename_setup = nfs4_proc_rename_setup, + .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, .rename_done = nfs4_proc_rename_done, .link = nfs4_proc_link, .symlink = nfs4_proc_symlink, diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 528b9a2fae05..b63b6f4d14fb 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -377,6 +377,11 @@ nfs_proc_rename_setup(struct rpc_message *msg, struct inode *dir) msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME]; } +static void nfs_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata 
*data) +{ + rpc_call_start(task); +} + static int nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir, struct inode *new_dir) @@ -740,6 +745,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = { .unlink_done = nfs_proc_unlink_done, .rename = nfs_proc_rename, .rename_setup = nfs_proc_rename_setup, + .rename_rpc_prepare = nfs_proc_rename_rpc_prepare, .rename_done = nfs_proc_rename_done, .link = nfs_proc_link, .symlink = nfs_proc_symlink, diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 9c5a7980e244..3210a03342f9 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -323,18 +323,6 @@ nfs_cancel_async_unlink(struct dentry *dentry) spin_unlock(&dentry->d_lock); } -struct nfs_renamedata { - struct nfs_renameargs args; - struct nfs_renameres res; - struct rpc_cred *cred; - struct inode *old_dir; - struct dentry *old_dentry; - struct nfs_fattr old_fattr; - struct inode *new_dir; - struct dentry *new_dentry; - struct nfs_fattr new_fattr; -}; - /** * nfs_async_rename_done - Sillyrename post-processing * @task: rpc_task of the sillyrename @@ -385,25 +373,16 @@ static void nfs_async_rename_release(void *calldata) kfree(data); } -#if defined(CONFIG_NFS_V4_1) static void nfs_rename_prepare(struct rpc_task *task, void *calldata) { struct nfs_renamedata *data = calldata; - struct nfs_server *server = NFS_SERVER(data->old_dir); - - if (nfs4_setup_sequence(server, &data->args.seq_args, - &data->res.seq_res, task)) - return; - rpc_call_start(task); + NFS_PROTO(data->old_dir)->rename_rpc_prepare(task, data); } -#endif /* CONFIG_NFS_V4_1 */ static const struct rpc_call_ops nfs_rename_ops = { .rpc_call_done = nfs_async_rename_done, .rpc_release = nfs_async_rename_release, -#if defined(CONFIG_NFS_V4_1) .rpc_call_prepare = nfs_rename_prepare, -#endif /* CONFIG_NFS_V4_1 */ }; /** -- cgit From 18d98f6c04991dd3c12acf6f39cea40e9510640a Mon Sep 17 00:00:00 2001 From: Sachin Bhamare Date: Mon, 19 Mar 2012 20:47:58 -0700 Subject: pnfs-obj: autologin: Add support for protocol autologin The pnfs-objects protocol mandates that we autologin into devices not present in the system, according to information specified in the get_device_info returned from the server. The Protocol specifies two login hints. 1. An IP address:port combination 2. A string URI which is constructed as a URL with a protocol prefix followed by :// and a string as address. For each protocol prefix the string-address format might be different. We only support the second option. The first option is just redundant to the second one. NOTE: The Kernel part of autologin does not parse the URI string. It just channels it to a user-mode script. So any new login protocols should only update the user-mode script which is a part of the nfs-utils package, but the Kernel need not change. We implement the autologin by using the call_usermodehelper() API. (Thanks to Steve Dickson for pointing it out) So there is no running daemon needed, and/or special setup. We Add the osd_login_prog Kernel module parameters which defaults to: /sbin/osd_login Kernel try's to upcall the program specified in osd_login_prog. If the file is not found or the execution fails Kernel will disable any farther upcalls, by zeroing out osd_login_prog, Until Admin re-enables it by setting the osd_login_prog parameter to a proper program. 
Also add text about the osd_login program command line API to: Documentation/filesystems/nfs/pnfs.txt and documentation of the new osd_login_prog module parameter to: Documentation/kernel-parameters.txt TODO: Add timeout option in the case osd_login program gets stuck Signed-off-by: Sachin Bhamare Signed-off-by: Boaz Harrosh Signed-off-by: Trond Myklebust --- fs/nfs/objlayout/objio_osd.c | 9 +++ fs/nfs/objlayout/objlayout.c | 134 +++++++++++++++++++++++++++++++++++++++++++ fs/nfs/objlayout/objlayout.h | 2 + 3 files changed, 145 insertions(+) (limited to 'fs') diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 3a621a2fd321..4bff4a3dab46 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -137,6 +137,7 @@ static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay, struct objio_dev_ent *ode; struct osd_dev *od; struct osd_dev_info odi; + bool retry_flag = true; int err; ode = _dev_list_find(NFS_SERVER(pnfslay->plh_inode), d_id); @@ -171,10 +172,18 @@ static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay, goto out; } +retry_lookup: od = osduld_info_lookup(&odi); if (unlikely(IS_ERR(od))) { err = PTR_ERR(od); dprintk("%s: osduld_info_lookup => %d\n", __func__, err); + if (err == -ENODEV && retry_flag) { + err = objlayout_autologin(deviceaddr); + if (likely(!err)) { + retry_flag = false; + goto retry_lookup; + } + } goto out; } diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index 157c47e277e0..8d45f1c318ce 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c @@ -37,6 +37,9 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include +#include +#include #include #include "objlayout.h" @@ -651,3 +654,134 @@ void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr) __free_page(odi->page); kfree(odi); } + +enum { + OBJLAYOUT_MAX_URI_LEN = 256, OBJLAYOUT_MAX_OSDNAME_LEN = 64, + OBJLAYOUT_MAX_SYSID_HEX_LEN = OSD_SYSTEMID_LEN * 2 + 1, + OSD_LOGIN_UPCALL_PATHLEN = 256 +}; + +static char osd_login_prog[OSD_LOGIN_UPCALL_PATHLEN] = "/sbin/osd_login"; + +module_param_string(osd_login_prog, osd_login_prog, sizeof(osd_login_prog), + 0600); +MODULE_PARM_DESC(osd_login_prog, "Path to the osd_login upcall program"); + +struct __auto_login { + char uri[OBJLAYOUT_MAX_URI_LEN]; + char osdname[OBJLAYOUT_MAX_OSDNAME_LEN]; + char systemid_hex[OBJLAYOUT_MAX_SYSID_HEX_LEN]; +}; + +static int __objlayout_upcall(struct __auto_login *login) +{ + static char *envp[] = { "HOME=/", + "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", + NULL + }; + char *argv[8]; + int ret; + + if (unlikely(!osd_login_prog[0])) { + dprintk("%s: osd_login_prog is disabled\n", __func__); + return -EACCES; + } + + dprintk("%s uri: %s\n", __func__, login->uri); + dprintk("%s osdname %s\n", __func__, login->osdname); + dprintk("%s systemid_hex %s\n", __func__, login->systemid_hex); + + argv[0] = (char *)osd_login_prog; + argv[1] = "-u"; + argv[2] = login->uri; + argv[3] = "-o"; + argv[4] = login->osdname; + argv[5] = "-s"; + argv[6] = login->systemid_hex; + argv[7] = NULL; + + ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); + /* + * Disable the upcall mechanism if we're getting an ENOENT or + * EACCES error. The admin can re-enable it on the fly by using + * sysfs to set the objlayoutdriver.osd_login_prog module parameter once + * the problem has been fixed. 
+ */ + if (ret == -ENOENT || ret == -EACCES) { + printk(KERN_ERR "PNFS-OBJ: %s was not found please set " + "objlayoutdriver.osd_login_prog kernel parameter!\n", + osd_login_prog); + osd_login_prog[0] = '\0'; + } + dprintk("%s %s return value: %d\n", __func__, osd_login_prog, ret); + + return ret; +} + +/* Assume dest is all zeros */ +static void __copy_nfsS_and_zero_terminate(struct nfs4_string s, + char *dest, int max_len, + const char *var_name) +{ + if (!s.len) + return; + + if (s.len >= max_len) { + pr_warn_ratelimited( + "objlayout_autologin: %s: s.len(%d) >= max_len(%d)", + var_name, s.len, max_len); + s.len = max_len - 1; /* space for null terminator */ + } + + memcpy(dest, s.data, s.len); +} + +/* Assume sysid is all zeros */ +static void _sysid_2_hex(struct nfs4_string s, + char sysid[OBJLAYOUT_MAX_SYSID_HEX_LEN]) +{ + int i; + char *cur; + + if (!s.len) + return; + + if (s.len != OSD_SYSTEMID_LEN) { + pr_warn_ratelimited( + "objlayout_autologin: systemid_len(%d) != OSD_SYSTEMID_LEN", + s.len); + if (s.len > OSD_SYSTEMID_LEN) + s.len = OSD_SYSTEMID_LEN; + } + + cur = sysid; + for (i = 0; i < s.len; i++) + cur = hex_byte_pack(cur, s.data[i]); +} + +int objlayout_autologin(struct pnfs_osd_deviceaddr *deviceaddr) +{ + int rc; + struct __auto_login login; + + if (!deviceaddr->oda_targetaddr.ota_netaddr.r_addr.len) + return -ENODEV; + + memset(&login, 0, sizeof(login)); + __copy_nfsS_and_zero_terminate( + deviceaddr->oda_targetaddr.ota_netaddr.r_addr, + login.uri, sizeof(login.uri), "URI"); + + __copy_nfsS_and_zero_terminate( + deviceaddr->oda_osdname, + login.osdname, sizeof(login.osdname), "OSDNAME"); + + _sysid_2_hex(deviceaddr->oda_systemid, login.systemid_hex); + + rc = __objlayout_upcall(&login); + if (rc > 0) /* script returns positive values */ + rc = -ENODEV; + + return rc; +} diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h index 8ec34727ed21..880ba086be94 100644 --- a/fs/nfs/objlayout/objlayout.h +++ b/fs/nfs/objlayout/objlayout.h @@ -184,4 +184,6 @@ extern void objlayout_encode_layoutreturn( struct xdr_stream *, const struct nfs4_layoutreturn_args *); +extern int objlayout_autologin(struct pnfs_osd_deviceaddr *deviceaddr); + #endif /* _OBJLAYOUT_H */ -- cgit From 1b189b8889b7d8e0bddc2655d171c43cfd344157 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Wed, 21 Mar 2012 09:18:34 -0500 Subject: dlm: last element of dlm_local_addr[] never used The last element of dlm_local_addr[DLM_MAX_ADDR_COUNT] was not used because the loop ended at COUNT - 1. 
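A stand-alone illustration of that bound (the array size here is hypothetical, not the real DLM_MAX_ADDR_COUNT): a loop limited to count - 1 can never reach the final slot, while the corrected bound visits every configured address.

#include <stdio.h>

#define ADDR_COUNT 3    /* stand-in for DLM_MAX_ADDR_COUNT */

int main(void)
{
    int used_old = 0, used_new = 0, i;

    for (i = 0; i < ADDR_COUNT - 1; i++)    /* old bound: last slot skipped */
        used_old++;
    for (i = 0; i < ADDR_COUNT; i++)        /* fixed bound: all slots reachable */
        used_new++;

    printf("old loop reaches %d of %d slots, fixed loop reaches %d\n",
           used_old, ADDR_COUNT, used_new);
    return 0;
}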
Reported-by: Dan Carpenter Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 0b3109ee4257..ad607996def0 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1082,7 +1082,7 @@ static void init_local(void) int i; dlm_local_count = 0; - for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) { + for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) { if (dlm_our_addr(&sas, i)) break; -- cgit From 5a7c9eec9fde1da0e3adf0a4ddb64ff2a324a492 Mon Sep 17 00:00:00 2001 From: Vivek Trivedi Date: Thu, 15 Mar 2012 23:58:52 +0530 Subject: NFS: fix sb->s_id in nfs debug prints NFS bdi flush thread in ps output is printed like "flush-:" For example: $ ps aux | grep flush 2079 root 0 SW [flush-0:18] ^^^^ nfs_bdi_register() ==> bdi_register_dev() ==> bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev)); ^^^^^ However, NFS sb->s_id store major:minor number in hex: nfs_initialise_sb() ==> snprintf(sb->s_id, sizeof(sb->s_id), "%x:%x", MAJOR(sb->s_dev), MINOR(sb->s_dev)); ^^^^^ If we enable nfs debug prints using command: $ rpcdebug -m nfs -s all write to a file: $ dd if=/dev/zero of=/testfile.txt bs=32768 count=1 Without Patch: [ 2431.032000] NFS: 0 initiated write call (req 0:12/40, 32768 bytes @ offset 0) ^^^^ With Patch: [ 2431.032000] NFS: 0 initiated write call (req 0:18/40, 32768 bytes @ offset 0) ^^^^ We should store NFS "s->s_id" in decimal to avoid confusion between NFS flush thread name(in ps output) and NFS debug prints. Signed-off-by: Vivek Trivedi Signed-off-by: Namjae Jeon Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index aac403085be5..ccc4cdb1efe9 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2115,7 +2115,7 @@ static inline void nfs_initialise_sb(struct super_block *sb) /* We probably want something more informative here */ snprintf(sb->s_id, sizeof(sb->s_id), - "%x:%x", MAJOR(sb->s_dev), MINOR(sb->s_dev)); + "%u:%u", MAJOR(sb->s_dev), MINOR(sb->s_dev)); if (sb->s_blocksize == 0) sb->s_blocksize = nfs_block_bits(server->wsize, -- cgit From 1daaae8fa4afe3df78ca34e724ed7e8187e4eb32 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 21 Mar 2012 06:30:40 -0400 Subject: cifs: fix issue mounting of DFS ROOT when redirecting from one domain controller to the next This patch fixes an issue when cifs_mount receives a STATUS_BAD_NETWORK_NAME error during cifs_get_tcon but is able to continue after an DFS ROOT referral. In this case, the return code variable is not reset prior to trying to mount from the system referred to. Thus, is_path_accessible is not executed and the final DFS referral is not performed causing a mount error. Use case: In DNS, example.com resolves to the secondary AD server ad2.example.com Our primary domain controller is ad1.example.com and has a DFS redirection set up from \\ad1\share\Users to \\files\share\Users. Mounting \\example.com\share\Users fails. Regression introduced by commit 724d9f1. 
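The pattern generalizes beyond cifs, so a hypothetical stand-alone sketch may help: when a retry label is re-entered, any status variable still holding the previous attempt's error must be cleared, or later "if (rc == 0)" gates silently skip work. All names below are made up for illustration; this is not the cifs_mount code.

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-ins: the first target name is bad, the referral is good. */
static int lookup_share(const char *target)
{
    return target[0] == '\\' ? 0 : -ENOENT;
}

static int path_accessible(const char *target)
{
    (void)target;
    return 0;
}

int main(void)
{
    const char *target = "badname";     /* fails like STATUS_BAD_NETWORK_NAME */
    int referred = 0;
    int rc = 0;

try_mount_again:
    rc = 0;             /* the fix: drop the error kept from the failed attempt */

    if (!referred)      /* the referral pass reuses the session, so nothing */
        rc = lookup_share(target);      /* would overwrite a stale rc here */

    if (rc == -ENOENT && !referred) {
        target = "\\\\files\\share";    /* follow the DFS referral */
        referred = 1;
        goto try_mount_again;
    }

    if (rc == 0)        /* with a stale -ENOENT this check never passes... */
        rc = path_accessible(target);   /* ...and the mount reports failure */

    printf("mount of %s %s\n", target, rc ? "failed" : "succeeded");
    return rc ? 1 : 0;
}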
Cc: stable@vger.kernel.org Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 03f71fb40a8a..0ac595c8c262 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3368,7 +3368,7 @@ cifs_ra_pages(struct cifs_sb_info *cifs_sb) int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) { - int rc = 0; + int rc; int xid; struct cifs_ses *pSesInfo; struct cifs_tcon *tcon; @@ -3395,6 +3395,7 @@ try_mount_again: FreeXid(xid); } #endif + rc = 0; tcon = NULL; pSesInfo = NULL; srvTcp = NULL; -- cgit From fc40f9cf828908e91d9af820e9300a9d42fbbd72 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Fri, 17 Feb 2012 17:09:12 +0300 Subject: CIFS: Simplify inFlight logic by making it as unsigned integer and surround access with req_lock from server structure. Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifs_debug.c | 3 +-- fs/cifs/cifsglob.h | 21 ++++++++++++++++++++- fs/cifs/cifssmb.c | 6 +++--- fs/cifs/connect.c | 10 +++++----- fs/cifs/transport.c | 45 +++++++++++++++++++++++---------------------- 5 files changed, 52 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 24b3dfc05282..573b899b5a5d 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -171,8 +171,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) seq_printf(m, "TCP status: %d\n\tLocal Users To " "Server: %d SecMode: 0x%x Req On Wire: %d", server->tcpStatus, server->srv_count, - server->sec_mode, - atomic_read(&server->inFlight)); + server->sec_mode, in_flight(server)); #ifdef CONFIG_CIFS_STATS2 seq_printf(m, " In Send: %d In MaxReq Wait: %d", diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index d47d20aac670..fb78bc903887 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -250,7 +250,8 @@ struct TCP_Server_Info { bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ bool tcp_nodelay; - atomic_t inFlight; /* number of requests on the wire to server */ + unsigned int in_flight; /* number of requests on the wire to server */ + spinlock_t req_lock; /* protect the value above */ struct mutex srv_mutex; struct task_struct *tsk; char server_GUID[16]; @@ -303,6 +304,24 @@ struct TCP_Server_Info { #endif }; +static inline unsigned int +in_flight(struct TCP_Server_Info *server) +{ + unsigned int num; + spin_lock(&server->req_lock); + num = server->in_flight; + spin_unlock(&server->req_lock); + return num; +} + +static inline void +dec_in_flight(struct TCP_Server_Info *server) +{ + spin_lock(&server->req_lock); + server->in_flight--; + spin_unlock(&server->req_lock); +} + /* * Macros to allow the TCP_Server_Info->net field and related code to drop out * when CONFIG_NET_NS isn't set. 
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index cd66b76e3282..d7cbcfa21a0c 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -721,7 +721,7 @@ cifs_echo_callback(struct mid_q_entry *mid) struct TCP_Server_Info *server = mid->callback_data; DeleteMidQEntry(mid); - atomic_dec(&server->inFlight); + dec_in_flight(server); wake_up(&server->request_q); } @@ -1674,7 +1674,7 @@ cifs_readv_callback(struct mid_q_entry *mid) queue_work(system_nrt_wq, &rdata->work); DeleteMidQEntry(mid); - atomic_dec(&server->inFlight); + dec_in_flight(server); wake_up(&server->request_q); } @@ -2115,7 +2115,7 @@ cifs_writev_callback(struct mid_q_entry *mid) queue_work(system_nrt_wq, &wdata->work); DeleteMidQEntry(mid); - atomic_dec(&tcon->ses->server->inFlight); + dec_in_flight(tcon->ses->server); wake_up(&tcon->ses->server->request_q); } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 0ac595c8c262..ed91abcce8a9 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -643,14 +643,14 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) wake_up_all(&server->response_q); /* Check if we have blocked requests that need to free. */ - spin_lock(&GlobalMid_Lock); - if (atomic_read(&server->inFlight) >= server->maxReq) - atomic_set(&server->inFlight, server->maxReq - 1); + spin_lock(&server->req_lock); + if (server->in_flight >= server->maxReq) + server->in_flight = server->maxReq - 1; /* * We do not want to set the max_pending too low or we could end up * with the counter going negative. */ - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->req_lock); /* * Although there should not be any requests blocked on this queue it * can not hurt to be paranoid and try to wake up requests that may @@ -1905,7 +1905,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; - atomic_set(&tcp_ses->inFlight, 0); + tcp_ses->in_flight = 0; tcp_ses->maxReq = 1; /* enough to send negotiate request */ init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 99a27cfa6cd2..e2673aa34381 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -254,28 +254,29 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, return smb_sendv(server, &iov, 1); } -static int wait_for_free_request(struct TCP_Server_Info *server, - const int long_op) +static int +wait_for_free_request(struct TCP_Server_Info *server, const int long_op) { + spin_lock(&server->req_lock); + if (long_op == CIFS_ASYNC_OP) { /* oplock breaks must not be held up */ - atomic_inc(&server->inFlight); + server->in_flight++; + spin_unlock(&server->req_lock); return 0; } - spin_lock(&GlobalMid_Lock); while (1) { - if (atomic_read(&server->inFlight) >= server->maxReq) { - spin_unlock(&GlobalMid_Lock); + if (server->in_flight >= server->maxReq) { + spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); wait_event(server->request_q, - atomic_read(&server->inFlight) - < server->maxReq); + in_flight(server) < server->maxReq); cifs_num_waiters_dec(server); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->req_lock); } else { if (server->tcpStatus == CifsExiting) { - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->req_lock); return -ENOENT; } @@ -284,8 +285,8 @@ static int wait_for_free_request(struct TCP_Server_Info *server, /* update # of requests on the wire to server */ if (long_op 
!= CIFS_BLOCKING_OP) - atomic_inc(&server->inFlight); - spin_unlock(&GlobalMid_Lock); + server->in_flight++; + spin_unlock(&server->req_lock); break; } } @@ -359,7 +360,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, mid = AllocMidQEntry(hdr, server); if (mid == NULL) { mutex_unlock(&server->srv_mutex); - atomic_dec(&server->inFlight); + dec_in_flight(server); wake_up(&server->request_q); return -ENOMEM; } @@ -392,7 +393,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, return rc; out_err: delete_mid(mid); - atomic_dec(&server->inFlight); + dec_in_flight(server); wake_up(&server->request_q); return rc; } @@ -564,7 +565,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, mutex_unlock(&ses->server->srv_mutex); cifs_small_buf_release(in_buf); /* Update # of requests on wire to server */ - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; } @@ -601,7 +602,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, midQ->callback = DeleteMidQEntry; spin_unlock(&GlobalMid_Lock); cifs_small_buf_release(in_buf); - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; } @@ -612,7 +613,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, rc = cifs_sync_mid_result(midQ, ses->server); if (rc != 0) { - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; } @@ -637,7 +638,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, midQ->resp_buf = NULL; out: delete_mid(midQ); - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; @@ -688,7 +689,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, if (rc) { mutex_unlock(&ses->server->srv_mutex); /* Update # of requests on wire to server */ - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; } @@ -721,7 +722,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, /* no longer considered to be "in-flight" */ midQ->callback = DeleteMidQEntry; spin_unlock(&GlobalMid_Lock); - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; } @@ -730,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = cifs_sync_mid_result(midQ, ses->server); if (rc != 0) { - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; } @@ -747,7 +748,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = cifs_check_receive(midQ, ses->server, 0); out: delete_mid(midQ); - atomic_dec(&ses->server->inFlight); + dec_in_flight(ses->server); wake_up(&ses->server->request_q); return rc; -- cgit From 2d86dbc97094ea4cfc2204fdefd7d07685496189 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 6 Feb 2012 15:59:18 +0400 Subject: CIFS: Introduce credit-based flow control and send no more than credits value requests at once. For SMB/CIFS it's trivial: increment this value by receiving any message and decrement by sending one. 
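A compressed user-space analogue of that scheme (illustrative only, with a pthread mutex and condition variable standing in for req_lock and request_q; the names are invented and this is not the cifs code): senders draw a credit before putting a request on the wire, and a response hands credit back and wakes waiters.

#include <pthread.h>
#include <stdio.h>

struct server {
    pthread_mutex_t lock;   /* plays the role of req_lock */
    pthread_cond_t wait;    /* plays the role of request_q */
    int credits;            /* how many requests we may still send */
    unsigned int in_flight; /* requests sent but not yet answered */
};

static void take_credit(struct server *s)
{
    pthread_mutex_lock(&s->lock);
    while (s->credits <= 0)             /* no credit: wait for a response */
        pthread_cond_wait(&s->wait, &s->lock);
    s->credits--;
    s->in_flight++;
    pthread_mutex_unlock(&s->lock);
}

static void add_credits(struct server *s, int add)
{
    pthread_mutex_lock(&s->lock);
    s->credits += add;                  /* a response grants credit back */
    s->in_flight--;
    pthread_mutex_unlock(&s->lock);
    pthread_cond_broadcast(&s->wait);   /* wake anyone waiting to send */
}

int main(void)
{
    struct server s = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, 0 };

    take_credit(&s);    /* "send" a request */
    add_credits(&s, 1); /* "receive" its response */
    take_credit(&s);    /* the returned credit can be reused */
    printf("credits=%d in_flight=%u\n", s.credits, s.in_flight);
    return 0;
}

The later patches in this series refine the same idea (a separate credits field, killable waits, reserved slots), but the draw/refill pairing sketched here stays the same.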
Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 11 +++++++---- fs/cifs/cifsproto.h | 3 +++ fs/cifs/cifssmb.c | 13 +++++-------- fs/cifs/connect.c | 14 ++++++-------- fs/cifs/misc.c | 19 +++++++++++++++++++ fs/cifs/transport.c | 44 ++++++++++++++++++++------------------------ 6 files changed, 60 insertions(+), 44 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index fb78bc903887..d55de9684df9 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -250,8 +250,9 @@ struct TCP_Server_Info { bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ bool tcp_nodelay; + int credits; /* send no more requests at once */ unsigned int in_flight; /* number of requests on the wire to server */ - spinlock_t req_lock; /* protect the value above */ + spinlock_t req_lock; /* protect the two values above */ struct mutex srv_mutex; struct task_struct *tsk; char server_GUID[16]; @@ -314,12 +315,14 @@ in_flight(struct TCP_Server_Info *server) return num; } -static inline void -dec_in_flight(struct TCP_Server_Info *server) +static inline bool +has_credits(struct TCP_Server_Info *server) { + int num; spin_lock(&server->req_lock); - server->in_flight--; + num = server->credits; spin_unlock(&server->req_lock); + return num > 0; } /* diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 6f4e243e0f62..47a769e535b1 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -88,6 +88,9 @@ extern int SendReceiveBlockingLock(const unsigned int xid, struct smb_hdr *in_buf , struct smb_hdr *out_buf, int *bytes_returned); +extern void cifs_add_credits(struct TCP_Server_Info *server, + const unsigned int add); +extern void cifs_set_credits(struct TCP_Server_Info *server, const int val); extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); extern bool is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index d7cbcfa21a0c..70aac35c398f 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -461,7 +461,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) server->maxReq = min_t(unsigned int, le16_to_cpu(rsp->MaxMpxCount), cifs_max_pending); - server->oplocks = server->maxReq > 1 ? enable_oplocks : false; + cifs_set_credits(server, server->maxReq); server->maxBuf = le16_to_cpu(rsp->MaxBufSize); server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); /* even though we do not use raw we might as well set this @@ -569,7 +569,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) little endian */ server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount), cifs_max_pending); - server->oplocks = server->maxReq > 1 ? 
enable_oplocks : false; + cifs_set_credits(server, server->maxReq); /* probably no need to store and check maxvcs */ server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); @@ -721,8 +721,7 @@ cifs_echo_callback(struct mid_q_entry *mid) struct TCP_Server_Info *server = mid->callback_data; DeleteMidQEntry(mid); - dec_in_flight(server); - wake_up(&server->request_q); + cifs_add_credits(server, 1); } int @@ -1674,8 +1673,7 @@ cifs_readv_callback(struct mid_q_entry *mid) queue_work(system_nrt_wq, &rdata->work); DeleteMidQEntry(mid); - dec_in_flight(server); - wake_up(&server->request_q); + cifs_add_credits(server, 1); } /* cifs_async_readv - send an async write, and set up mid to handle result */ @@ -2115,8 +2113,7 @@ cifs_writev_callback(struct mid_q_entry *mid) queue_work(system_nrt_wq, &wdata->work); DeleteMidQEntry(mid); - dec_in_flight(tcon->ses->server); - wake_up(&tcon->ses->server->request_q); + cifs_add_credits(tcon->ses->server, 1); } /* cifs_async_writev - send an async write, and set up mid to handle result */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ed91abcce8a9..1d489010615b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -642,14 +642,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) spin_unlock(&GlobalMid_Lock); wake_up_all(&server->response_q); - /* Check if we have blocked requests that need to free. */ + /* check if we have blocked requests that need to free */ spin_lock(&server->req_lock); - if (server->in_flight >= server->maxReq) - server->in_flight = server->maxReq - 1; - /* - * We do not want to set the max_pending too low or we could end up - * with the counter going negative. - */ + if (server->credits <= 0) + server->credits = 1; spin_unlock(&server->req_lock); /* * Although there should not be any requests blocked on this queue it @@ -1906,7 +1902,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->noautotune = volume_info->noautotune; tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; tcp_ses->in_flight = 0; - tcp_ses->maxReq = 1; /* enough to send negotiate request */ + tcp_ses->credits = 1; init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); INIT_LIST_HEAD(&tcp_ses->pending_mid_q); @@ -3757,9 +3753,11 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) if (server->maxBuf != 0) return 0; + cifs_set_credits(server, 1); rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) { /* retry only once on 1st time connection */ + cifs_set_credits(server, 1); rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) rc = -EHOSTDOWN; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 703ef5c6fdb1..c273c12de98e 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -690,3 +690,22 @@ backup_cred(struct cifs_sb_info *cifs_sb) return false; } + +void +cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add) +{ + spin_lock(&server->req_lock); + server->credits += add; + server->in_flight--; + spin_unlock(&server->req_lock); + wake_up(&server->request_q); +} + +void +cifs_set_credits(struct TCP_Server_Info *server, const int val) +{ + spin_lock(&server->req_lock); + server->credits = val; + server->oplocks = val > 1 ? 
enable_oplocks : false; + spin_unlock(&server->req_lock); +} diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index e2673aa34381..e5202ddef2fb 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -262,16 +262,16 @@ wait_for_free_request(struct TCP_Server_Info *server, const int long_op) if (long_op == CIFS_ASYNC_OP) { /* oplock breaks must not be held up */ server->in_flight++; + server->credits--; spin_unlock(&server->req_lock); return 0; } while (1) { - if (server->in_flight >= server->maxReq) { + if (server->credits <= 0) { spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); - wait_event(server->request_q, - in_flight(server) < server->maxReq); + wait_event(server->request_q, has_credits(server)); cifs_num_waiters_dec(server); spin_lock(&server->req_lock); } else { @@ -280,12 +280,16 @@ wait_for_free_request(struct TCP_Server_Info *server, const int long_op) return -ENOENT; } - /* can not count locking commands against total - as they are allowed to block on server */ + /* + * Can not count locking commands against total + * as they are allowed to block on server. + */ /* update # of requests on the wire to server */ - if (long_op != CIFS_BLOCKING_OP) + if (long_op != CIFS_BLOCKING_OP) { + server->credits--; server->in_flight++; + } spin_unlock(&server->req_lock); break; } @@ -360,7 +364,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, mid = AllocMidQEntry(hdr, server); if (mid == NULL) { mutex_unlock(&server->srv_mutex); - dec_in_flight(server); + cifs_add_credits(server, 1); wake_up(&server->request_q); return -ENOMEM; } @@ -393,7 +397,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, return rc; out_err: delete_mid(mid); - dec_in_flight(server); + cifs_add_credits(server, 1); wake_up(&server->request_q); return rc; } @@ -565,8 +569,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, mutex_unlock(&ses->server->srv_mutex); cifs_small_buf_release(in_buf); /* Update # of requests on wire to server */ - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); + cifs_add_credits(ses->server, 1); return rc; } rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); @@ -602,8 +605,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, midQ->callback = DeleteMidQEntry; spin_unlock(&GlobalMid_Lock); cifs_small_buf_release(in_buf); - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); + cifs_add_credits(ses->server, 1); return rc; } spin_unlock(&GlobalMid_Lock); @@ -613,8 +615,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, rc = cifs_sync_mid_result(midQ, ses->server); if (rc != 0) { - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); + cifs_add_credits(ses->server, 1); return rc; } @@ -638,8 +639,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, midQ->resp_buf = NULL; out: delete_mid(midQ); - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); + cifs_add_credits(ses->server, 1); return rc; } @@ -689,8 +689,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, if (rc) { mutex_unlock(&ses->server->srv_mutex); /* Update # of requests on wire to server */ - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); + cifs_add_credits(ses->server, 1); return rc; } @@ -722,8 +721,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, /* no longer considered to be "in-flight" */ midQ->callback = DeleteMidQEntry; spin_unlock(&GlobalMid_Lock); - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); 
+ cifs_add_credits(ses->server, 1); return rc; } spin_unlock(&GlobalMid_Lock); @@ -731,8 +729,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = cifs_sync_mid_result(midQ, ses->server); if (rc != 0) { - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); + cifs_add_credits(ses->server, 1); return rc; } @@ -748,8 +745,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = cifs_check_receive(midQ, ses->server, 0); out: delete_mid(midQ); - dec_in_flight(ses->server); - wake_up(&ses->server->request_q); + cifs_add_credits(ses->server, 1); return rc; } -- cgit From 5bc594982f49220d33e927e3c9e028bf87b4745c Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 21 Feb 2012 19:56:08 +0300 Subject: CIFS: Make wait_for_free_request killable to let us kill the proccess if it hangs waiting for a credit when the session is down and echo is disabled. Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/transport.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index e5202ddef2fb..58b31da17fc5 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -257,6 +257,8 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, static int wait_for_free_request(struct TCP_Server_Info *server, const int long_op) { + int rc; + spin_lock(&server->req_lock); if (long_op == CIFS_ASYNC_OP) { @@ -271,8 +273,11 @@ wait_for_free_request(struct TCP_Server_Info *server, const int long_op) if (server->credits <= 0) { spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); - wait_event(server->request_q, has_credits(server)); + rc = wait_event_killable(server->request_q, + has_credits(server)); cifs_num_waiters_dec(server); + if (rc) + return rc; spin_lock(&server->req_lock); } else { if (server->tcpStatus == CifsExiting) { -- cgit From bc205ed19bdb56576b291830bc3f752aef5e3923 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Thu, 15 Mar 2012 13:22:27 +0300 Subject: CIFS: Prepare credits code for a slot reservation that is essential for CIFS/SMB/SMB2 oplock breaks and SMB2 echos. Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 14 ++++++++++++-- fs/cifs/transport.c | 22 ++++++++++++++-------- 2 files changed, 26 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index d55de9684df9..2309a67738bf 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -315,12 +315,22 @@ in_flight(struct TCP_Server_Info *server) return num; } +static inline int* +get_credits_field(struct TCP_Server_Info *server) +{ + /* + * This will change to switch statement when we reserve slots for echos + * and oplock breaks. 
+ */ + return &server->credits; +} + static inline bool -has_credits(struct TCP_Server_Info *server) +has_credits(struct TCP_Server_Info *server, int *credits) { int num; spin_lock(&server->req_lock); - num = server->credits; + num = *credits; spin_unlock(&server->req_lock); return num > 0; } diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 58b31da17fc5..310918b6fcb4 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -255,26 +255,26 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, } static int -wait_for_free_request(struct TCP_Server_Info *server, const int long_op) +wait_for_free_credits(struct TCP_Server_Info *server, const int optype, + int *credits) { int rc; spin_lock(&server->req_lock); - - if (long_op == CIFS_ASYNC_OP) { + if (optype == CIFS_ASYNC_OP) { /* oplock breaks must not be held up */ server->in_flight++; - server->credits--; + *credits -= 1; spin_unlock(&server->req_lock); return 0; } while (1) { - if (server->credits <= 0) { + if (*credits <= 0) { spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); rc = wait_event_killable(server->request_q, - has_credits(server)); + has_credits(server, credits)); cifs_num_waiters_dec(server); if (rc) return rc; @@ -291,8 +291,8 @@ wait_for_free_request(struct TCP_Server_Info *server, const int long_op) */ /* update # of requests on the wire to server */ - if (long_op != CIFS_BLOCKING_OP) { - server->credits--; + if (optype != CIFS_BLOCKING_OP) { + *credits -= 1; server->in_flight++; } spin_unlock(&server->req_lock); @@ -302,6 +302,12 @@ wait_for_free_request(struct TCP_Server_Info *server, const int long_op) return 0; } +static int +wait_for_free_request(struct TCP_Server_Info *server, const int optype) +{ + return wait_for_free_credits(server, optype, get_credits_field(server)); +} + static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, struct mid_q_entry **ppmidQ) { -- cgit From 6dae51a585008535858c29b489dbf90a913d511b Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 21 Feb 2012 16:50:23 +0300 Subject: CIFS: Delete echo_retries module parm It's the essential step before respecting MaxMpxCount value during negotiating because we will keep only one extra slot for sending echo requests. If there is no response during two echo intervals - reconnect the tcp session. Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/README | 6 +----- fs/cifs/cifsfs.c | 5 ----- fs/cifs/cifsglob.h | 3 --- fs/cifs/connect.c | 18 ++++++++++++++---- 4 files changed, 15 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/cifs/README b/fs/cifs/README index 895da1dc1550..b7d782bab797 100644 --- a/fs/cifs/README +++ b/fs/cifs/README @@ -753,10 +753,6 @@ module loading or during the runtime by using the interface i.e. echo "value" > /sys/module/cifs/parameters/ -1. echo_retries - The number of echo attempts before giving up and - reconnecting to the server. The default is 5. The value 0 - means never reconnect. - -2. enable_oplocks - Enable or disable oplocks. Oplocks are enabled by default. +1. enable_oplocks - Enable or disable oplocks. Oplocks are enabled by default. [Y/y/1]. To disable use any of [N/n/0]. diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 6ee1cb45ca0d..f2661610fcf3 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -77,11 +77,6 @@ unsigned int cifs_max_pending = CIFS_MAX_REQ; module_param(cifs_max_pending, int, 0444); MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. 
" "Default: 32767 Range: 2 to 32767."); -unsigned short echo_retries = 5; -module_param(echo_retries, ushort, 0644); -MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and " - "reconnecting server. Default: 5. 0 means " - "never reconnect."); module_param(enable_oplocks, bool, 0644); MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default:" "y/Y/1"); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 2309a67738bf..339ebe3ebc0d 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1038,9 +1038,6 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */ GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/ -/* reconnect after this many failed echo attempts */ -GLOBAL_EXTERN unsigned short echo_retries; - #ifdef CONFIG_CIFS_ACL GLOBAL_EXTERN struct rb_root uidtree; GLOBAL_EXTERN struct rb_root gidtree; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 1d489010615b..5560e1d5e54b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -373,12 +373,22 @@ allocate_buffers(struct TCP_Server_Info *server) static bool server_unresponsive(struct TCP_Server_Info *server) { - if (echo_retries > 0 && server->tcpStatus == CifsGood && - time_after(jiffies, server->lstrp + - (echo_retries * SMB_ECHO_INTERVAL))) { + /* + * We need to wait 2 echo intervals to make sure we handle such + * situations right: + * 1s client sends a normal SMB request + * 2s client gets a response + * 30s echo workqueue job pops, and decides we got a response recently + * and don't need to send another + * ... + * 65s kernel_recvmsg times out, and we see that we haven't gotten + * a response in >60s. + */ + if (server->tcpStatus == CifsGood && + time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) { cERROR(1, "Server %s has not responded in %d seconds. " "Reconnecting...", server->hostname, - (echo_retries * SMB_ECHO_INTERVAL / HZ)); + (2 * SMB_ECHO_INTERVAL) / HZ); cifs_reconnect(server); wake_up(&server->response_q); return true; -- cgit From 815465c4d724e851932843227b4b700d64216cf2 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 21 Mar 2012 06:27:54 -0400 Subject: cifs: clean up call to cifs_dfs_release_automount_timer() Take the #ifdef junk out of the code, and turn it into a noop macro when CONFIG_CIFS_DFS_UPCALL isn't defined. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 2 -- fs/cifs/cifsproto.h | 6 ++++++ 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f2661610fcf3..260025fd8c3b 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1177,9 +1177,7 @@ exit_cifs(void) cFYI(DBG2, "exit_cifs"); cifs_proc_clean(); cifs_fscache_unregister(); -#ifdef CONFIG_CIFS_DFS_UPCALL cifs_dfs_release_automount_timer(); -#endif #ifdef CONFIG_CIFS_ACL cifs_destroy_idmaptrees(); exit_cifs_idmap(); diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 47a769e535b1..503e73d8bdb7 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -171,7 +171,13 @@ extern struct smb_vol *cifs_get_volume_info(char *mount_data, const char *devname); extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *); extern void cifs_umount(struct cifs_sb_info *); + +#if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) extern void cifs_dfs_release_automount_timer(void); +#else /* ! 
IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */ +#define cifs_dfs_release_automount_timer() do { } while (0) +#endif /* ! IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */ + void cifs_proc_init(void); void cifs_proc_clean(void); -- cgit From 3dd933061d3a4f33fb6ba1616e88fa55a8b8cb9c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 21 Mar 2012 06:27:55 -0400 Subject: cifs: clean up ordering in exit_cifs ...ensure that we undo things in the reverse order from the way they were done. In truth, the ordering doesn't matter for a lot of these, but it's still better to do it that way to be sure. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 260025fd8c3b..cc098ccac611 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1175,8 +1175,7 @@ static void __exit exit_cifs(void) { cFYI(DBG2, "exit_cifs"); - cifs_proc_clean(); - cifs_fscache_unregister(); + unregister_filesystem(&cifs_fs_type); cifs_dfs_release_automount_timer(); #ifdef CONFIG_CIFS_ACL cifs_destroy_idmaptrees(); @@ -1185,10 +1184,11 @@ exit_cifs(void) #ifdef CONFIG_CIFS_UPCALL unregister_key_type(&cifs_spnego_key_type); #endif - unregister_filesystem(&cifs_fs_type); - cifs_destroy_inodecache(); - cifs_destroy_mids(); cifs_destroy_request_bufs(); + cifs_destroy_mids(); + cifs_destroy_inodecache(); + cifs_fscache_unregister(); + cifs_proc_clean(); } MODULE_AUTHOR("Steve French "); -- cgit From 1a5a9906d4e8d1976b701f889d8f35d54b928f25 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Wed, 21 Mar 2012 16:33:42 -0700 Subject: mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. 
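The shape of that fix can be sketched as a stand-alone user-space analogue (a gcc-style compiler barrier is assumed, all names are invented, and this is not the actual pmd helper): take one snapshot of the possibly-changing entry into a local, stop the compiler from re-reading the shared location, and run every check against the snapshot only.

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")    /* compiler barrier only */

static unsigned long shared_entry;      /* stands in for *pmd */

static int entry_is_none(unsigned long v) { return v == 0; }
static int entry_is_bad(unsigned long v)  { return (v & 1) == 0; } /* made-up validity bit */

static int none_or_clear_bad_snapshot(void)
{
    unsigned long val = shared_entry;   /* single read into the local stack */

    barrier();  /* keep the compiler from re-reading shared_entry below, so a
                 * concurrent writer cannot make the two checks disagree */

    if (entry_is_none(val))
        return 1;
    if (entry_is_bad(val)) {
        printf("bad entry %#lx\n", val);    /* reports the snapshot we tested */
        return 1;
    }
    return 0;   /* safe to descend to the next level */
}

int main(void)
{
    shared_entry = 0x1001;  /* hypothetical "good" entry: validity bit set */
    printf("skip this range: %d\n", none_or_clear_bad_snapshot());
    return 0;
}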
If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(¤t->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. 
| // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell Signed-off-by: Andrea Arcangeli Acked-by: Johannes Weiner Cc: Mel Gorman Cc: Hugh Dickins Cc: Dave Jones Acked-by: Larry Woodman Acked-by: Rik van Riel Cc: [2.6.38+] Cc: Mark Salter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 7dcd2a250495..3efa7253523e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } else { spin_unlock(&walk->mm->page_table_lock); } + + if (pmd_trans_unstable(pmd)) + return 0; /* * The mmap_sem held all the way back in m_start() is what * keeps khugepaged out of here and from collapsing things @@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, struct page *page; split_huge_page_pmd(walk->mm, pmd); + if (pmd_trans_unstable(pmd)) + return 0; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { @@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, int err = 0; split_huge_page_pmd(walk->mm, pmd); + if (pmd_trans_unstable(pmd)) + return 0; /* find the first VMA at or above 'addr' */ vma = find_vma(walk->mm, addr); @@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, spin_unlock(&walk->mm->page_table_lock); } + if (pmd_trans_unstable(pmd)) + return 0; orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { struct page *page = can_gather_numa_stats(*pte, md->vma, addr); -- cgit From 1de5b41cd3b2474c2770b825266d372073e1b28b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 21 Mar 2012 16:33:42 -0700 Subject: fs/namei.c: fix warnings on 32-bit i386 allnoconfig: fs/namei.c: In function 'has_zero': fs/namei.c:1617: warning: integer constant is too large for 'unsigned long' type fs/namei.c:1617: warning: integer constant is too large for 'unsigned long' type fs/namei.c: In function 'hash_name': 
fs/namei.c:1635: warning: integer constant is too large for 'unsigned long' type There must be a tidier way of doing this. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namei.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 20a4fcf001ec..561db47ae041 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1455,9 +1455,15 @@ done: } EXPORT_SYMBOL(full_name_hash); +#ifdef CONFIG_64BIT #define ONEBYTES 0x0101010101010101ul #define SLASHBYTES 0x2f2f2f2f2f2f2f2ful #define HIGHBITS 0x8080808080808080ul +#else +#define ONEBYTES 0x01010101ul +#define SLASHBYTES 0x2f2f2f2ful +#define HIGHBITS 0x80808080ul +#endif /* Return the high bit set in the first byte that is a zero */ static inline unsigned long has_zero(unsigned long a) -- cgit From 7904ac84244b59f536c2a5d1066a10f46df07b08 Mon Sep 17 00:00:00 2001 From: Earl Chew Date: Wed, 21 Mar 2012 16:33:43 -0700 Subject: seq_file: fix mishandling of consecutive pread() invocations. The following program illustrates the problem: char buf[8192]; int fd = open("/proc/self/maps", O_RDONLY); n = pread(fd, buf, sizeof(buf), 0); printf("%d\n", n); /* lseek(fd, 0, SEEK_CUR); */ /* Uncomment to work around */ n = pread(fd, buf, sizeof(buf), 0); printf("%d\n", n); The second printf() prints zero, but uncommenting the lseek() corrects its behaviour. To fix, make seq_read() mirror seq_lseek() when processing changes in *ppos. Restore m->version first, then if required traverse and update read_pos on success. Addresses https://bugzilla.kernel.org/show_bug.cgi?id=11856 Signed-off-by: Earl Chew Cc: Alexey Dobriyan Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/seq_file.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index 4023d6be939b..aa242dc99373 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -140,9 +140,21 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) mutex_lock(&m->lock); + /* + * seq_file->op->..m_start/m_stop/m_next may do special actions + * or optimisations based on the file->f_version, so we want to + * pass the file->f_version to those methods. + * + * seq_file->version is just copy of f_version, and seq_file + * methods can treat it simply as file version. + * It is copied in first and copied out after all operations. + * It is convenient to have it as part of structure to avoid the + * need of passing another argument to all the seq_file methods. + */ + m->version = file->f_version; + /* Don't assume *ppos is where we left it */ if (unlikely(*ppos != m->read_pos)) { - m->read_pos = *ppos; while ((err = traverse(m, *ppos)) == -EAGAIN) ; if (err) { @@ -152,21 +164,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) m->index = 0; m->count = 0; goto Done; + } else { + m->read_pos = *ppos; } } - /* - * seq_file->op->..m_start/m_stop/m_next may do special actions - * or optimisations based on the file->f_version, so we want to - * pass the file->f_version to those methods. - * - * seq_file->version is just copy of f_version, and seq_file - * methods can treat it simply as file version. - * It is copied in first and copied out after all operations. - * It is convenient to have it as part of structure to avoid the - * need of passing another argument to all the seq_file methods. 
- */ - m->version = file->f_version; /* grab buffer if we didn't have one */ if (!m->buf) { m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); -- cgit From 4bfc130d5afa28395288d1b57092906349604b41 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 21 Mar 2012 16:33:54 -0700 Subject: hugetlbfs: fix hugetlb_get_unmapped_area() Use/update cached_hole_size and free_area_cache properly to speedup finding of a free region. Signed-off-by: Xiao Guangrong Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Michal Hocko Cc: Hillf Danton Cc: Andrea Arcangeli Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 1e85a7ac0217..b7bc7868c7b5 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -154,10 +154,12 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return addr; } - start_addr = mm->free_area_cache; - - if (len <= mm->cached_hole_size) + if (len > mm->cached_hole_size) + start_addr = mm->free_area_cache; + else { start_addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; + } full_search: addr = ALIGN(start_addr, huge_page_size(h)); @@ -171,13 +173,18 @@ full_search: */ if (start_addr != TASK_UNMAPPED_BASE) { start_addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) + if (!vma || addr + len <= vma->vm_start) { + mm->free_area_cache = addr + len; return addr; + } + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; addr = ALIGN(vma->vm_end, huge_page_size(h)); } } -- cgit From 5aaabe831eb527e0d9284f0745d830a755f70393 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 21 Mar 2012 16:33:57 -0700 Subject: pagemap: avoid splitting thp when reading /proc/pid/pagemap Thp split is not necessary if we explicitly check whether pmds are mapping thps or not. This patch introduces this check and adds code to generate pagemap entries for pmds mapping thps, which results in less performance impact of pagemap on thp. Signed-off-by: Naoya Horiguchi Reviewed-by: Andi Kleen Reviewed-by: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: Wu Fengguang Cc: Andrea Arcangeli Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 48 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 3efa7253523e..95264c0ef308 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -608,6 +608,9 @@ struct pagemapread { u64 *buffer; }; +#define PAGEMAP_WALK_SIZE (PMD_SIZE) +#define PAGEMAP_WALK_MASK (PMD_MASK) + #define PM_ENTRY_BYTES sizeof(u64) #define PM_STATUS_BITS 3 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) @@ -666,6 +669,27 @@ static u64 pte_to_pagemap_entry(pte_t pte) return pme; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset) +{ + u64 pme = 0; + /* + * Currently pmd for thp is always present because thp can not be + * swapped-out, migrated, or HWPOISONed (split in such cases instead.) + * This if-check is just to prepare for future implementation. 
+ */ + if (pmd_present(pmd)) + pme = PM_PFRAME(pmd_pfn(pmd) + offset) + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; + return pme; +} +#else +static inline u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset) +{ + return 0; +} +#endif + static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { @@ -673,15 +697,37 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct pagemapread *pm = walk->private; pte_t *pte; int err = 0; + u64 pfn = PM_NOT_PRESENT; - split_huge_page_pmd(walk->mm, pmd); if (pmd_trans_unstable(pmd)) return 0; /* find the first VMA at or above 'addr' */ vma = find_vma(walk->mm, addr); + spin_lock(&walk->mm->page_table_lock); + if (pmd_trans_huge(*pmd)) { + if (pmd_trans_splitting(*pmd)) { + spin_unlock(&walk->mm->page_table_lock); + wait_split_huge_page(vma->anon_vma, pmd); + } else { + for (; addr != end; addr += PAGE_SIZE) { + unsigned long offset; + + offset = (addr & ~PAGEMAP_WALK_MASK) >> + PAGE_SHIFT; + pfn = thp_pmd_to_pagemap_entry(*pmd, offset); + err = add_to_pagemap(addr, pfn, pm); + if (err) + break; + } + spin_unlock(&walk->mm->page_table_lock); + return err; + } + } else { + spin_unlock(&walk->mm->page_table_lock); + } + for (; addr != end; addr += PAGE_SIZE) { - u64 pfn = PM_NOT_PRESENT; /* check to see if we've left 'vma' behind * and need a new, higher one */ @@ -764,8 +810,6 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, * determine which areas of memory are actually mapped and llseek to * skip over unmapped regions. */ -#define PAGEMAP_WALK_SIZE (PMD_SIZE) -#define PAGEMAP_WALK_MASK (PMD_MASK) static ssize_t pagemap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { -- cgit From 025c5b2451e42c9e8dfdecd6dc84956ce8f321b5 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 21 Mar 2012 16:33:57 -0700 Subject: thp: optimize away unnecessary page table locking Currently when we check if we can handle thp as it is or we need to split it into regular sized pages, we hold page table lock prior to check whether a given pmd is mapping thp or not. Because of this, when it's not "huge pmd" we suffer from unnecessary lock/unlock overhead. To remove it, this patch introduces a optimized check function and replace several similar logics with it. 
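The convention the new helper establishes can be sketched in user space as follows (hypothetical names, a pthread mutex in place of page_table_lock; the real helper additionally waits when the pmd is being split, as the removed open-coded blocks below show): on success it returns 1 with the lock still held, otherwise it drops the lock and returns 0, so callers no longer repeat the lock/check/unlock sequence.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry { int huge; long value; };

static int huge_lock(struct entry *e)
{
    pthread_mutex_lock(&table_lock);
    if (e->huge)
        return 1;                   /* caller must unlock when done */
    pthread_mutex_unlock(&table_lock);
    return 0;                       /* not huge: lock already dropped */
}

static void walk(struct entry *e)
{
    if (huge_lock(e) == 1) {
        printf("huge entry %ld handled under the lock\n", e->value);
        pthread_mutex_unlock(&table_lock);
        return;
    }
    printf("regular entry %ld handled on the normal path\n", e->value);
}

int main(void)
{
    struct entry a = { 1, 42 }, b = { 0, 7 };

    walk(&a);
    walk(&b);
    return 0;
}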
[akpm@linux-foundation.org: checkpatch fixes] Signed-off-by: Naoya Horiguchi Cc: David Rientjes Cc: Andi Kleen Cc: Wu Fengguang Cc: Andrea Arcangeli Cc: KOSAKI Motohiro Reviewed-by: KAMEZAWA Hiroyuki Cc: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 73 +++++++++++++++++++----------------------------------- 1 file changed, 25 insertions(+), 48 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 95264c0ef308..328843de6e9f 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -394,20 +394,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pte_t *pte; spinlock_t *ptl; - spin_lock(&walk->mm->page_table_lock); - if (pmd_trans_huge(*pmd)) { - if (pmd_trans_splitting(*pmd)) { - spin_unlock(&walk->mm->page_table_lock); - wait_split_huge_page(vma->anon_vma, pmd); - } else { - smaps_pte_entry(*(pte_t *)pmd, addr, - HPAGE_PMD_SIZE, walk); - spin_unlock(&walk->mm->page_table_lock); - mss->anonymous_thp += HPAGE_PMD_SIZE; - return 0; - } - } else { + if (pmd_trans_huge_lock(pmd, vma) == 1) { + smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk); spin_unlock(&walk->mm->page_table_lock); + mss->anonymous_thp += HPAGE_PMD_SIZE; + return 0; } if (pmd_trans_unstable(pmd)) @@ -705,26 +696,19 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, /* find the first VMA at or above 'addr' */ vma = find_vma(walk->mm, addr); spin_lock(&walk->mm->page_table_lock); - if (pmd_trans_huge(*pmd)) { - if (pmd_trans_splitting(*pmd)) { - spin_unlock(&walk->mm->page_table_lock); - wait_split_huge_page(vma->anon_vma, pmd); - } else { - for (; addr != end; addr += PAGE_SIZE) { - unsigned long offset; - - offset = (addr & ~PAGEMAP_WALK_MASK) >> - PAGE_SHIFT; - pfn = thp_pmd_to_pagemap_entry(*pmd, offset); - err = add_to_pagemap(addr, pfn, pm); - if (err) - break; - } - spin_unlock(&walk->mm->page_table_lock); - return err; + if (pmd_trans_huge_lock(pmd, vma) == 1) { + for (; addr != end; addr += PAGE_SIZE) { + unsigned long offset; + + offset = (addr & ~PAGEMAP_WALK_MASK) >> + PAGE_SHIFT; + pfn = thp_pmd_to_pagemap_entry(*pmd, offset); + err = add_to_pagemap(addr, pfn, pm); + if (err) + break; } - } else { spin_unlock(&walk->mm->page_table_lock); + return err; } for (; addr != end; addr += PAGE_SIZE) { @@ -992,24 +976,17 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, pte_t *pte; md = walk->private; - spin_lock(&walk->mm->page_table_lock); - if (pmd_trans_huge(*pmd)) { - if (pmd_trans_splitting(*pmd)) { - spin_unlock(&walk->mm->page_table_lock); - wait_split_huge_page(md->vma->anon_vma, pmd); - } else { - pte_t huge_pte = *(pte_t *)pmd; - struct page *page; - - page = can_gather_numa_stats(huge_pte, md->vma, addr); - if (page) - gather_stats(page, md, pte_dirty(huge_pte), - HPAGE_PMD_SIZE/PAGE_SIZE); - spin_unlock(&walk->mm->page_table_lock); - return 0; - } - } else { + + if (pmd_trans_huge_lock(pmd, md->vma) == 1) { + pte_t huge_pte = *(pte_t *)pmd; + struct page *page; + + page = can_gather_numa_stats(huge_pte, md->vma, addr); + if (page) + gather_stats(page, md, pte_dirty(huge_pte), + HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(&walk->mm->page_table_lock); + return 0; } if (pmd_trans_unstable(pmd)) -- cgit From e873c49fbfdd595481976b915850e682441bcbec Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 21 Mar 2012 16:33:58 -0700 Subject: pagemap: export KPF_THP This flag shows that a given page is a subpage of a transparent hugepage. 
It helps us debug and test the kernel by showing physical address of thp. Signed-off-by: Naoya Horiguchi Reviewed-by: Wu Fengguang Reviewed-by: KAMEZAWA Hiroyuki Acked-by: KOSAKI Motohiro Cc: David Rientjes Cc: Andi Kleen Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/page.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/proc/page.c b/fs/proc/page.c index 6d8e6a9e93ab..7fcd0d60a968 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -115,6 +115,8 @@ u64 stable_page_flags(struct page *page) u |= 1 << KPF_COMPOUND_TAIL; if (PageHuge(page)) u |= 1 << KPF_HUGE; + else if (PageTransCompound(page)) + u |= 1 << KPF_THP; /* * Caveats on high order pages: page->_count will only be set -- cgit From 092b50bacd1cdbffef2643b7a46f2a215407919c Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 21 Mar 2012 16:33:59 -0700 Subject: pagemap: introduce data structure for pagemap entry Currently a local variable of pagemap entry in pagemap_pte_range() is named pfn and typed with u64, but it's not correct (pfn should be unsigned long.) This patch introduces special type for pagemap entries and replaces code with it. Signed-off-by: Naoya Horiguchi Cc: David Rientjes Cc: Andi Kleen Cc: Wu Fengguang Cc: Andrea Arcangeli Cc: KOSAKI Motohiro Reviewed-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 69 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 38 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 328843de6e9f..c7e3a163295c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -594,9 +594,13 @@ const struct file_operations proc_clear_refs_operations = { .llseek = noop_llseek, }; +typedef struct { + u64 pme; +} pagemap_entry_t; + struct pagemapread { int pos, len; - u64 *buffer; + pagemap_entry_t *buffer; }; #define PAGEMAP_WALK_SIZE (PMD_SIZE) @@ -619,10 +623,15 @@ struct pagemapread { #define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) #define PM_END_OF_BUFFER 1 -static int add_to_pagemap(unsigned long addr, u64 pfn, +static inline pagemap_entry_t make_pme(u64 val) +{ + return (pagemap_entry_t) { .pme = val }; +} + +static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, struct pagemapread *pm) { - pm->buffer[pm->pos++] = pfn; + pm->buffer[pm->pos++] = *pme; if (pm->pos >= pm->len) return PM_END_OF_BUFFER; return 0; @@ -634,8 +643,10 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, struct pagemapread *pm = walk->private; unsigned long addr; int err = 0; + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); + for (addr = start; addr < end; addr += PAGE_SIZE) { - err = add_to_pagemap(addr, PM_NOT_PRESENT, pm); + err = add_to_pagemap(addr, &pme, pm); if (err) break; } @@ -648,36 +659,33 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte) return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT); } -static u64 pte_to_pagemap_entry(pte_t pte) +static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte) { - u64 pme = 0; if (is_swap_pte(pte)) - pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte)) - | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP; + *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte)) + | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP); else if (pte_present(pte)) - pme = PM_PFRAME(pte_pfn(pte)) - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; - return pme; + *pme = make_pme(PM_PFRAME(pte_pfn(pte)) + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static u64 
thp_pmd_to_pagemap_entry(pmd_t pmd, int offset) +static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, + pmd_t pmd, int offset) { - u64 pme = 0; /* * Currently pmd for thp is always present because thp can not be * swapped-out, migrated, or HWPOISONed (split in such cases instead.) * This if-check is just to prepare for future implementation. */ if (pmd_present(pmd)) - pme = PM_PFRAME(pmd_pfn(pmd) + offset) - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; - return pme; + *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); } #else -static inline u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset) +static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, + pmd_t pmd, int offset) { - return 0; } #endif @@ -688,7 +696,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct pagemapread *pm = walk->private; pte_t *pte; int err = 0; - u64 pfn = PM_NOT_PRESENT; + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); if (pmd_trans_unstable(pmd)) return 0; @@ -702,8 +710,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, offset = (addr & ~PAGEMAP_WALK_MASK) >> PAGE_SHIFT; - pfn = thp_pmd_to_pagemap_entry(*pmd, offset); - err = add_to_pagemap(addr, pfn, pm); + thp_pmd_to_pagemap_entry(&pme, *pmd, offset); + err = add_to_pagemap(addr, &pme, pm); if (err) break; } @@ -723,11 +731,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (vma && (vma->vm_start <= addr) && !is_vm_hugetlb_page(vma)) { pte = pte_offset_map(pmd, addr); - pfn = pte_to_pagemap_entry(*pte); + pte_to_pagemap_entry(&pme, *pte); /* unmap before userspace copy */ pte_unmap(pte); } - err = add_to_pagemap(addr, pfn, pm); + err = add_to_pagemap(addr, &pme, pm); if (err) return err; } @@ -738,13 +746,12 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } #ifdef CONFIG_HUGETLB_PAGE -static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset) +static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, + pte_t pte, int offset) { - u64 pme = 0; if (pte_present(pte)) - pme = PM_PFRAME(pte_pfn(pte) + offset) - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; - return pme; + *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); } /* This function walks within one hugetlb entry in the single call */ @@ -754,12 +761,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, { struct pagemapread *pm = walk->private; int err = 0; - u64 pfn; + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); for (; addr != end; addr += PAGE_SIZE) { int offset = (addr & ~hmask) >> PAGE_SHIFT; - pfn = huge_pte_to_pagemap_entry(*pte, offset); - err = add_to_pagemap(addr, pfn, pm); + huge_pte_to_pagemap_entry(&pme, *pte, offset); + err = add_to_pagemap(addr, &pme, pm); if (err) return err; } -- cgit From b76437579d1344b612cf1851ae610c636cec7db0 Mon Sep 17 00:00:00 2001 From: Siddhesh Poyarekar Date: Wed, 21 Mar 2012 16:34:04 -0700 Subject: procfs: mark thread stack correctly in proc//maps Stack for a new thread is mapped by userspace code and passed via sys_clone. This memory is currently seen as anonymous in /proc//maps, which makes it difficult to ascertain which mappings are being used for thread stacks. This patch uses the individual task stack pointers to determine which vmas are actually thread stacks. 
For a multithreaded program like the following: #include void *thread_main(void *foo) { while(1); } int main() { pthread_t t; pthread_create(&t, NULL, thread_main, NULL); pthread_join(t, NULL); } proc/PID/maps looks like the following: 00400000-00401000 r-xp 00000000 fd:0a 3671804 /home/siddhesh/a.out 00600000-00601000 rw-p 00000000 fd:0a 3671804 /home/siddhesh/a.out 019ef000-01a10000 rw-p 00000000 00:00 0 [heap] 7f8a44491000-7f8a44492000 ---p 00000000 00:00 0 7f8a44492000-7f8a44c92000 rw-p 00000000 00:00 0 7f8a44c92000-7f8a44e3d000 r-xp 00000000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a44e3d000-7f8a4503d000 ---p 001ab000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a4503d000-7f8a45041000 r--p 001ab000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a45041000-7f8a45043000 rw-p 001af000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a45043000-7f8a45048000 rw-p 00000000 00:00 0 7f8a45048000-7f8a4505f000 r-xp 00000000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4505f000-7f8a4525e000 ---p 00017000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4525e000-7f8a4525f000 r--p 00016000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4525f000-7f8a45260000 rw-p 00017000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a45260000-7f8a45264000 rw-p 00000000 00:00 0 7f8a45264000-7f8a45286000 r-xp 00000000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45457000-7f8a4545a000 rw-p 00000000 00:00 0 7f8a45484000-7f8a45485000 rw-p 00000000 00:00 0 7f8a45485000-7f8a45486000 r--p 00021000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45486000-7f8a45487000 rw-p 00022000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45487000-7f8a45488000 rw-p 00000000 00:00 0 7fff6273b000-7fff6275c000 rw-p 00000000 00:00 0 [stack] 7fff627ff000-7fff62800000 r-xp 00000000 00:00 0 [vdso] ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] Here, one could guess that 7f8a44492000-7f8a44c92000 is a stack since the earlier vma that has no permissions (7f8a44e3d000-7f8a4503d000) but that is not always a reliable way to find out which vma is a thread stack. Also, /proc/PID/maps and /proc/PID/task/TID/maps has the same content. With this patch in place, /proc/PID/task/TID/maps are treated as 'maps as the task would see it' and hence, only the vma that that task uses as stack is marked as [stack]. All other 'stack' vmas are marked as anonymous memory. /proc/PID/maps acts as a thread group level view, where all thread stack vmas are marked as [stack:TID] where TID is the process ID of the task that uses that vma as stack, while the process stack is marked as [stack]. 
So /proc/PID/maps will look like this: 00400000-00401000 r-xp 00000000 fd:0a 3671804 /home/siddhesh/a.out 00600000-00601000 rw-p 00000000 fd:0a 3671804 /home/siddhesh/a.out 019ef000-01a10000 rw-p 00000000 00:00 0 [heap] 7f8a44491000-7f8a44492000 ---p 00000000 00:00 0 7f8a44492000-7f8a44c92000 rw-p 00000000 00:00 0 [stack:1442] 7f8a44c92000-7f8a44e3d000 r-xp 00000000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a44e3d000-7f8a4503d000 ---p 001ab000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a4503d000-7f8a45041000 r--p 001ab000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a45041000-7f8a45043000 rw-p 001af000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a45043000-7f8a45048000 rw-p 00000000 00:00 0 7f8a45048000-7f8a4505f000 r-xp 00000000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4505f000-7f8a4525e000 ---p 00017000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4525e000-7f8a4525f000 r--p 00016000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4525f000-7f8a45260000 rw-p 00017000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a45260000-7f8a45264000 rw-p 00000000 00:00 0 7f8a45264000-7f8a45286000 r-xp 00000000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45457000-7f8a4545a000 rw-p 00000000 00:00 0 7f8a45484000-7f8a45485000 rw-p 00000000 00:00 0 7f8a45485000-7f8a45486000 r--p 00021000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45486000-7f8a45487000 rw-p 00022000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45487000-7f8a45488000 rw-p 00000000 00:00 0 7fff6273b000-7fff6275c000 rw-p 00000000 00:00 0 [stack] 7fff627ff000-7fff62800000 r-xp 00000000 00:00 0 [vdso] ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] Thus marking all vmas that are used as stacks by the threads in the thread group along with the process stack. The task level maps will however like this: 00400000-00401000 r-xp 00000000 fd:0a 3671804 /home/siddhesh/a.out 00600000-00601000 rw-p 00000000 fd:0a 3671804 /home/siddhesh/a.out 019ef000-01a10000 rw-p 00000000 00:00 0 [heap] 7f8a44491000-7f8a44492000 ---p 00000000 00:00 0 7f8a44492000-7f8a44c92000 rw-p 00000000 00:00 0 [stack] 7f8a44c92000-7f8a44e3d000 r-xp 00000000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a44e3d000-7f8a4503d000 ---p 001ab000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a4503d000-7f8a45041000 r--p 001ab000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a45041000-7f8a45043000 rw-p 001af000 fd:00 2097482 /lib64/libc-2.14.90.so 7f8a45043000-7f8a45048000 rw-p 00000000 00:00 0 7f8a45048000-7f8a4505f000 r-xp 00000000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4505f000-7f8a4525e000 ---p 00017000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4525e000-7f8a4525f000 r--p 00016000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a4525f000-7f8a45260000 rw-p 00017000 fd:00 2099938 /lib64/libpthread-2.14.90.so 7f8a45260000-7f8a45264000 rw-p 00000000 00:00 0 7f8a45264000-7f8a45286000 r-xp 00000000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45457000-7f8a4545a000 rw-p 00000000 00:00 0 7f8a45484000-7f8a45485000 rw-p 00000000 00:00 0 7f8a45485000-7f8a45486000 r--p 00021000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45486000-7f8a45487000 rw-p 00022000 fd:00 2097348 /lib64/ld-2.14.90.so 7f8a45487000-7f8a45488000 rw-p 00000000 00:00 0 7fff6273b000-7fff6275c000 rw-p 00000000 00:00 0 7fff627ff000-7fff62800000 r-xp 00000000 00:00 0 [vdso] ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] where only the vma that is being used as a stack by *that* task is marked as [stack]. Analogous changes have been made to /proc/PID/smaps, /proc/PID/numa_maps, /proc/PID/task/TID/smaps and /proc/PID/task/TID/numa_maps. 
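As a quick userspace check of the new naming (illustrative only, not from the patch), a thread can look up the address of one of its own stack variables: the /proc/self/task/TID/maps entry covering it should read [stack], while the thread-group-wide /proc/self/maps shows [stack:TID] for the same range.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Print the maps line (from the given file) covering address 'addr'. */
static void show_mapping_of(const char *path, unsigned long addr)
{
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f)) {
		unsigned long start, end;

		if (sscanf(line, "%lx-%lx", &start, &end) == 2 &&
		    addr >= start && addr < end)
			printf("%s: %s", path, line);
	}
	fclose(f);
}

static void *thread_main(void *arg)
{
	int on_my_stack;		/* lives in this thread's stack VMA */
	char path[64];

	snprintf(path, sizeof(path), "/proc/self/task/%ld/maps",
		 (long)syscall(SYS_gettid));
	show_mapping_of(path, (unsigned long)&on_my_stack);	/* [stack] */
	show_mapping_of("/proc/self/maps",
			(unsigned long)&on_my_stack);		/* [stack:TID] */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, thread_main, NULL);
	pthread_join(t, NULL);
	return 0;
}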
Relevant snippets from smaps and numa_maps: [siddhesh@localhost ~ ]$ pgrep a.out 1441 [siddhesh@localhost ~ ]$ cat /proc/1441/smaps | grep "\[stack" 7f8a44492000-7f8a44c92000 rw-p 00000000 00:00 0 [stack:1442] 7fff6273b000-7fff6275c000 rw-p 00000000 00:00 0 [stack] [siddhesh@localhost ~ ]$ cat /proc/1441/task/1442/smaps | grep "\[stack" 7f8a44492000-7f8a44c92000 rw-p 00000000 00:00 0 [stack] [siddhesh@localhost ~ ]$ cat /proc/1441/task/1441/smaps | grep "\[stack" 7fff6273b000-7fff6275c000 rw-p 00000000 00:00 0 [stack] [siddhesh@localhost ~ ]$ cat /proc/1441/numa_maps | grep "stack" 7f8a44492000 default stack:1442 anon=2 dirty=2 N0=2 7fff6273a000 default stack anon=3 dirty=3 N0=3 [siddhesh@localhost ~ ]$ cat /proc/1441/task/1442/numa_maps | grep "stack" 7f8a44492000 default stack anon=2 dirty=2 N0=2 [siddhesh@localhost ~ ]$ cat /proc/1441/task/1441/numa_maps | grep "stack" 7fff6273a000 default stack anon=3 dirty=3 N0=3 [akpm@linux-foundation.org: checkpatch fixes] [akpm@linux-foundation.org: fix build] Signed-off-by: Siddhesh Poyarekar Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Jamie Lokier Cc: Mike Frysinger Cc: Alexey Dobriyan Cc: Matt Mackall Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 12 +-- fs/proc/internal.h | 9 ++- fs/proc/task_mmu.c | 210 +++++++++++++++++++++++++++++++++++++++++---------- fs/proc/task_nommu.c | 69 ++++++++++++++--- 4 files changed, 239 insertions(+), 61 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index 965d4bde3a3b..3b42c1418f31 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2989,9 +2989,9 @@ static const struct pid_entry tgid_base_stuff[] = { INF("cmdline", S_IRUGO, proc_pid_cmdline), ONE("stat", S_IRUGO, proc_tgid_stat), ONE("statm", S_IRUGO, proc_pid_statm), - REG("maps", S_IRUGO, proc_maps_operations), + REG("maps", S_IRUGO, proc_pid_maps_operations), #ifdef CONFIG_NUMA - REG("numa_maps", S_IRUGO, proc_numa_maps_operations), + REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations), #endif REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), LNK("cwd", proc_cwd_link), @@ -3002,7 +3002,7 @@ static const struct pid_entry tgid_base_stuff[] = { REG("mountstats", S_IRUSR, proc_mountstats_operations), #ifdef CONFIG_PROC_PAGE_MONITOR REG("clear_refs", S_IWUSR, proc_clear_refs_operations), - REG("smaps", S_IRUGO, proc_smaps_operations), + REG("smaps", S_IRUGO, proc_pid_smaps_operations), REG("pagemap", S_IRUGO, proc_pagemap_operations), #endif #ifdef CONFIG_SECURITY @@ -3348,9 +3348,9 @@ static const struct pid_entry tid_base_stuff[] = { INF("cmdline", S_IRUGO, proc_pid_cmdline), ONE("stat", S_IRUGO, proc_tid_stat), ONE("statm", S_IRUGO, proc_pid_statm), - REG("maps", S_IRUGO, proc_maps_operations), + REG("maps", S_IRUGO, proc_tid_maps_operations), #ifdef CONFIG_NUMA - REG("numa_maps", S_IRUGO, proc_numa_maps_operations), + REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations), #endif REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), LNK("cwd", proc_cwd_link), @@ -3360,7 +3360,7 @@ static const struct pid_entry tid_base_stuff[] = { REG("mountinfo", S_IRUGO, proc_mountinfo_operations), #ifdef CONFIG_PROC_PAGE_MONITOR REG("clear_refs", S_IWUSR, proc_clear_refs_operations), - REG("smaps", S_IRUGO, proc_smaps_operations), + REG("smaps", S_IRUGO, proc_tid_smaps_operations), REG("pagemap", S_IRUGO, proc_pagemap_operations), #endif #ifdef CONFIG_SECURITY diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 292577531ad1..c44efe19798f 100644 --- a/fs/proc/internal.h +++ 
b/fs/proc/internal.h @@ -53,9 +53,12 @@ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task); extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); -extern const struct file_operations proc_maps_operations; -extern const struct file_operations proc_numa_maps_operations; -extern const struct file_operations proc_smaps_operations; +extern const struct file_operations proc_pid_maps_operations; +extern const struct file_operations proc_tid_maps_operations; +extern const struct file_operations proc_pid_numa_maps_operations; +extern const struct file_operations proc_tid_numa_maps_operations; +extern const struct file_operations proc_pid_smaps_operations; +extern const struct file_operations proc_tid_smaps_operations; extern const struct file_operations proc_clear_refs_operations; extern const struct file_operations proc_pagemap_operations; extern const struct file_operations proc_net_operations; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c7e3a163295c..9694cc283511 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -209,16 +209,20 @@ static int do_maps_open(struct inode *inode, struct file *file, return ret; } -static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) +static void +show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) { struct mm_struct *mm = vma->vm_mm; struct file *file = vma->vm_file; + struct proc_maps_private *priv = m->private; + struct task_struct *task = priv->task; vm_flags_t flags = vma->vm_flags; unsigned long ino = 0; unsigned long long pgoff = 0; unsigned long start, end; dev_t dev = 0; int len; + const char *name = NULL; if (file) { struct inode *inode = vma->vm_file->f_path.dentry->d_inode; @@ -252,36 +256,57 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) if (file) { pad_len_spaces(m, len); seq_path(m, &file->f_path, "\n"); - } else { - const char *name = arch_vma_name(vma); - if (!name) { - if (mm) { - if (vma->vm_start <= mm->brk && - vma->vm_end >= mm->start_brk) { - name = "[heap]"; - } else if (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack) { - name = "[stack]"; - } + goto done; + } + + name = arch_vma_name(vma); + if (!name) { + pid_t tid; + + if (!mm) { + name = "[vdso]"; + goto done; + } + + if (vma->vm_start <= mm->brk && + vma->vm_end >= mm->start_brk) { + name = "[heap]"; + goto done; + } + + tid = vm_is_stack(task, vma, is_pid); + + if (tid != 0) { + /* + * Thread stack in /proc/PID/task/TID/maps or + * the main process stack. 
+ */ + if (!is_pid || (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack)) { + name = "[stack]"; } else { - name = "[vdso]"; + /* Thread stack in /proc/PID/maps */ + pad_len_spaces(m, len); + seq_printf(m, "[stack:%d]", tid); } } - if (name) { - pad_len_spaces(m, len); - seq_puts(m, name); - } + } + +done: + if (name) { + pad_len_spaces(m, len); + seq_puts(m, name); } seq_putc(m, '\n'); } -static int show_map(struct seq_file *m, void *v) +static int show_map(struct seq_file *m, void *v, int is_pid) { struct vm_area_struct *vma = v; struct proc_maps_private *priv = m->private; struct task_struct *task = priv->task; - show_map_vma(m, vma); + show_map_vma(m, vma, is_pid); if (m->count < m->size) /* vma is copied successfully */ m->version = (vma != get_gate_vma(task->mm)) @@ -289,20 +314,49 @@ static int show_map(struct seq_file *m, void *v) return 0; } +static int show_pid_map(struct seq_file *m, void *v) +{ + return show_map(m, v, 1); +} + +static int show_tid_map(struct seq_file *m, void *v) +{ + return show_map(m, v, 0); +} + static const struct seq_operations proc_pid_maps_op = { .start = m_start, .next = m_next, .stop = m_stop, - .show = show_map + .show = show_pid_map +}; + +static const struct seq_operations proc_tid_maps_op = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = show_tid_map }; -static int maps_open(struct inode *inode, struct file *file) +static int pid_maps_open(struct inode *inode, struct file *file) { return do_maps_open(inode, file, &proc_pid_maps_op); } -const struct file_operations proc_maps_operations = { - .open = maps_open, +static int tid_maps_open(struct inode *inode, struct file *file) +{ + return do_maps_open(inode, file, &proc_tid_maps_op); +} + +const struct file_operations proc_pid_maps_operations = { + .open = pid_maps_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +const struct file_operations proc_tid_maps_operations = { + .open = tid_maps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, @@ -416,7 +470,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, return 0; } -static int show_smap(struct seq_file *m, void *v) +static int show_smap(struct seq_file *m, void *v, int is_pid) { struct proc_maps_private *priv = m->private; struct task_struct *task = priv->task; @@ -434,7 +488,7 @@ static int show_smap(struct seq_file *m, void *v) if (vma->vm_mm && !is_vm_hugetlb_page(vma)) walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); - show_map_vma(m, vma); + show_map_vma(m, vma, is_pid); seq_printf(m, "Size: %8lu kB\n" @@ -473,20 +527,49 @@ static int show_smap(struct seq_file *m, void *v) return 0; } +static int show_pid_smap(struct seq_file *m, void *v) +{ + return show_smap(m, v, 1); +} + +static int show_tid_smap(struct seq_file *m, void *v) +{ + return show_smap(m, v, 0); +} + static const struct seq_operations proc_pid_smaps_op = { .start = m_start, .next = m_next, .stop = m_stop, - .show = show_smap + .show = show_pid_smap +}; + +static const struct seq_operations proc_tid_smaps_op = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = show_tid_smap }; -static int smaps_open(struct inode *inode, struct file *file) +static int pid_smaps_open(struct inode *inode, struct file *file) { return do_maps_open(inode, file, &proc_pid_smaps_op); } -const struct file_operations proc_smaps_operations = { - .open = smaps_open, +static int tid_smaps_open(struct inode *inode, struct file *file) +{ + return 
do_maps_open(inode, file, &proc_tid_smaps_op); +} + +const struct file_operations proc_pid_smaps_operations = { + .open = pid_smaps_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +const struct file_operations proc_tid_smaps_operations = { + .open = tid_smaps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, @@ -1039,7 +1122,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, /* * Display pages allocated per node and memory policy via /proc. */ -static int show_numa_map(struct seq_file *m, void *v) +static int show_numa_map(struct seq_file *m, void *v, int is_pid) { struct numa_maps_private *numa_priv = m->private; struct proc_maps_private *proc_priv = &numa_priv->proc_maps; @@ -1076,9 +1159,19 @@ static int show_numa_map(struct seq_file *m, void *v) seq_path(m, &file->f_path, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_printf(m, " heap"); - } else if (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack) { - seq_printf(m, " stack"); + } else { + pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid); + if (tid != 0) { + /* + * Thread stack in /proc/PID/task/TID/maps or + * the main process stack. + */ + if (!is_pid || (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack)) + seq_printf(m, " stack"); + else + seq_printf(m, " stack:%d", tid); + } } if (is_vm_hugetlb_page(vma)) @@ -1121,21 +1214,39 @@ out: return 0; } +static int show_pid_numa_map(struct seq_file *m, void *v) +{ + return show_numa_map(m, v, 1); +} + +static int show_tid_numa_map(struct seq_file *m, void *v) +{ + return show_numa_map(m, v, 0); +} + static const struct seq_operations proc_pid_numa_maps_op = { - .start = m_start, - .next = m_next, - .stop = m_stop, - .show = show_numa_map, + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = show_pid_numa_map, }; -static int numa_maps_open(struct inode *inode, struct file *file) +static const struct seq_operations proc_tid_numa_maps_op = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = show_tid_numa_map, +}; + +static int numa_maps_open(struct inode *inode, struct file *file, + const struct seq_operations *ops) { struct numa_maps_private *priv; int ret = -ENOMEM; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv) { priv->proc_maps.pid = proc_pid(inode); - ret = seq_open(file, &proc_pid_numa_maps_op); + ret = seq_open(file, ops); if (!ret) { struct seq_file *m = file->private_data; m->private = priv; @@ -1146,8 +1257,25 @@ static int numa_maps_open(struct inode *inode, struct file *file) return ret; } -const struct file_operations proc_numa_maps_operations = { - .open = numa_maps_open, +static int pid_numa_maps_open(struct inode *inode, struct file *file) +{ + return numa_maps_open(inode, file, &proc_pid_numa_maps_op); +} + +static int tid_numa_maps_open(struct inode *inode, struct file *file) +{ + return numa_maps_open(inode, file, &proc_tid_numa_maps_op); +} + +const struct file_operations proc_pid_numa_maps_operations = { + .open = pid_numa_maps_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +const struct file_operations proc_tid_numa_maps_operations = { + .open = tid_numa_maps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 980de547c070..74fe164d1b23 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -134,9 +134,11 @@ static void 
pad_len_spaces(struct seq_file *m, int len) /* * display a single VMA to a sequenced file */ -static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) +static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, + int is_pid) { struct mm_struct *mm = vma->vm_mm; + struct proc_maps_private *priv = m->private; unsigned long ino = 0; struct file *file; dev_t dev = 0; @@ -168,10 +170,19 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) pad_len_spaces(m, len); seq_path(m, &file->f_path, ""); } else if (mm) { - if (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack) { + pid_t tid = vm_is_stack(priv->task, vma, is_pid); + + if (tid != 0) { pad_len_spaces(m, len); - seq_puts(m, "[stack]"); + /* + * Thread stack in /proc/PID/task/TID/maps or + * the main process stack. + */ + if (!is_pid || (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack)) + seq_printf(m, "[stack]"); + else + seq_printf(m, "[stack:%d]", tid); } } @@ -182,11 +193,22 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) /* * display mapping lines for a particular process's /proc/pid/maps */ -static int show_map(struct seq_file *m, void *_p) +static int show_map(struct seq_file *m, void *_p, int is_pid) { struct rb_node *p = _p; - return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb)); + return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb), + is_pid); +} + +static int show_pid_map(struct seq_file *m, void *_p) +{ + return show_map(m, _p, 1); +} + +static int show_tid_map(struct seq_file *m, void *_p) +{ + return show_map(m, _p, 0); } static void *m_start(struct seq_file *m, loff_t *pos) @@ -240,10 +262,18 @@ static const struct seq_operations proc_pid_maps_ops = { .start = m_start, .next = m_next, .stop = m_stop, - .show = show_map + .show = show_pid_map +}; + +static const struct seq_operations proc_tid_maps_ops = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = show_tid_map }; -static int maps_open(struct inode *inode, struct file *file) +static int maps_open(struct inode *inode, struct file *file, + const struct seq_operations *ops) { struct proc_maps_private *priv; int ret = -ENOMEM; @@ -251,7 +281,7 @@ static int maps_open(struct inode *inode, struct file *file) priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv) { priv->pid = proc_pid(inode); - ret = seq_open(file, &proc_pid_maps_ops); + ret = seq_open(file, ops); if (!ret) { struct seq_file *m = file->private_data; m->private = priv; @@ -262,8 +292,25 @@ static int maps_open(struct inode *inode, struct file *file) return ret; } -const struct file_operations proc_maps_operations = { - .open = maps_open, +static int pid_maps_open(struct inode *inode, struct file *file) +{ + return maps_open(inode, file, &proc_pid_maps_ops); +} + +static int tid_maps_open(struct inode *inode, struct file *file) +{ + return maps_open(inode, file, &proc_tid_maps_ops); +} + +const struct file_operations proc_pid_maps_operations = { + .open = pid_maps_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +const struct file_operations proc_tid_maps_operations = { + .open = tid_maps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, -- cgit From a05b0855fd15504972dba2358e5faa172a1e50ba Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 21 Mar 2012 16:34:08 -0700 Subject: hugetlbfs: avoid taking i_mutex from hugetlbfs_read() Taking i_mutex in hugetlbfs_read() can result 
in deadlock with mmap as explained below Thread A: read() on hugetlbfs hugetlbfs_read() called i_mutex grabbed hugetlbfs_read_actor() called __copy_to_user() called page fault is triggered Thread B, sharing address space with A: mmap() the same file ->mmap_sem is grabbed on task_B->mm->mmap_sem hugetlbfs_file_mmap() is called attempt to grab ->i_mutex and block waiting for A to give it up Thread A: pagefault handled blocked on attempt to grab task_A->mm->mmap_sem, which happens to be the same thing as task_B->mm->mmap_sem. Block waiting for B to give it up. AFAIU the i_mutex locking was added to hugetlbfs_read() as per http://lkml.indiana.edu/hypermail/linux/kernel/0707.2/3066.html to take care of the race between truncate and read. This patch fixes this by looking at page->mapping under lock_page() (find_lock_page()) to ensure that the inode didn't get truncated in the range during a parallel read. Ideally we can extend the patch to make sure we don't increase i_size in mmap. But that will break userspace, because applications will now have to use truncate(2) to increase i_size in hugetlbfs. Based on the original patch from Hillf Danton. Signed-off-by: Aneesh Kumar K.V Cc: Hillf Danton Cc: KAMEZAWA Hiroyuki Cc: Al Viro Cc: Hugh Dickins Cc: [everything after 2007 :)] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index b7bc7868c7b5..19654cfe780b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -245,17 +245,10 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, loff_t isize; ssize_t retval = 0; - mutex_lock(&inode->i_mutex); - /* validate length */ if (len == 0) goto out; - isize = i_size_read(inode); - if (!isize) - goto out; - - end_index = (isize - 1) >> huge_page_shift(h); for (;;) { struct page *page; unsigned long nr, ret; @@ -263,18 +256,21 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, /* nr is the maximum number of bytes to copy from this page */ nr = huge_page_size(h); + isize = i_size_read(inode); + if (!isize) + goto out; + end_index = (isize - 1) >> huge_page_shift(h); if (index >= end_index) { if (index > end_index) goto out; nr = ((isize - 1) & ~huge_page_mask(h)) + 1; - if (nr <= offset) { + if (nr <= offset) goto out; - } } nr = nr - offset; /* Find the page */ - page = find_get_page(mapping, index); + page = find_lock_page(mapping, index); if (unlikely(page == NULL)) { /* * We have a HOLE, zero out the user-buffer for the @@ -286,17 +282,18 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, else ra = 0; } else { + unlock_page(page); + /* * We have the page, copy it to user space buffer. 
*/ ra = hugetlbfs_read_actor(page, offset, buf, len, nr); ret = ra; + page_cache_release(page); } if (ra < 0) { if (retval == 0) retval = ra; - if (page) - page_cache_release(page); goto out; } @@ -306,16 +303,12 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, index += offset >> huge_page_shift(h); offset &= ~huge_page_mask(h); - if (page) - page_cache_release(page); - /* short read or no more work */ if ((ret != nr) || (len == 0)) break; } out: *ppos = ((loff_t)index << huge_page_shift(h)) + offset; - mutex_unlock(&inode->i_mutex); return retval; } -- cgit From a1d776ee3147cec2a54a645e92eb2e3e2f65a137 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 21 Mar 2012 16:34:12 -0700 Subject: hugetlb: cleanup hugetlb.h Make a couple of small cleanups to linux/include/hugetlb.h. The set_file_hugepages() function, which was not used anywhere is removed, and the hugetlbfs_config and hugetlbfs_inode_info structures with its HUGETLBFS_I helper function are moved into inode.c, the only place they were used. These structures are really linked to the hugetlbfs filesystem specifically not to hugepage mm handling in general, so they belong in the filesystem code not in a generally available header. It would be nice to move the hugetlbfs_sb_info (superblock) structure in there as well, but it's currently needed in a number of places via the hstate_vma() and hstate_inode(). Signed-off-by: David Gibson Cc: Hugh Dickins Cc: Paul Mackerras Cc: Andrew Barry Cc: Mel Gorman Cc: Minchan Kim Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 19654cfe780b..4fbd9fccd550 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -41,6 +41,25 @@ const struct file_operations hugetlbfs_file_operations; static const struct inode_operations hugetlbfs_dir_inode_operations; static const struct inode_operations hugetlbfs_inode_operations; +struct hugetlbfs_config { + uid_t uid; + gid_t gid; + umode_t mode; + long nr_blocks; + long nr_inodes; + struct hstate *hstate; +}; + +struct hugetlbfs_inode_info { + struct shared_policy policy; + struct inode vfs_inode; +}; + +static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) +{ + return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); +} + static struct backing_dev_info hugetlbfs_backing_dev_info = { .name = "hugetlbfs", .ra_pages = 0, /* No readahead */ -- cgit From 90481622d75715bfcb68501280a917dbfe516029 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 21 Mar 2012 16:34:12 -0700 Subject: hugepages: fix use after free bug in "quota" handling hugetlbfs_{get,put}_quota() are badly named. They don't interact with the general quota handling code, and they don't much resemble its behaviour. Rather than being about maintaining limits on on-disk block usage by particular users, they are instead about maintaining limits on in-memory page usage (including anonymous MAP_PRIVATE copied-on-write pages) associated with a particular hugetlbfs filesystem instance. Worse, they work by having callbacks to the hugetlbfs filesystem code from the low-level page handling code, in particular from free_huge_page(). 
This is a layering violation of itself, but more importantly, if the kernel does a get_user_pages() on hugepages (which can happen from KVM amongst others), then the free_huge_page() can be delayed until after the associated inode has already been freed. If an unmount occurs at the wrong time, even the hugetlbfs superblock where the "quota" limits are stored may have been freed. Andrew Barry proposed a patch to fix this by having hugepages, instead of storing a pointer to their address_space and reaching the superblock from there, had the hugepages store pointers directly to the superblock, bumping the reference count as appropriate to avoid it being freed. Andrew Morton rejected that version, however, on the grounds that it made the existing layering violation worse. This is a reworked version of Andrew's patch, which removes the extra, and some of the existing, layering violation. It works by introducing the concept of a hugepage "subpool" at the lower hugepage mm layer - that is a finite logical pool of hugepages to allocate from. hugetlbfs now creates a subpool for each filesystem instance with a page limit set, and a pointer to the subpool gets added to each allocated hugepage, instead of the address_space pointer used now. The subpool has its own lifetime and is only freed once all pages in it _and_ all other references to it (i.e. superblocks) are gone. subpools are optional - a NULL subpool pointer is taken by the code to mean that no subpool limits are in effect. Previous discussion of this bug found in: "Fix refcounting in hugetlbfs quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or http://marc.info/?l=linux-mm&m=126928970510627&w=1 v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to alloc_huge_page() - since it already takes the vma, it is not necessary. 
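The lifetime rule for the subpool (freed only once both the filesystem references and the in-flight pages are gone) can be sketched outside the kernel. This is a simplified userspace analog with invented names, not the mm implementation.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the hugepage subpool idea. */
struct subpool {
	pthread_mutex_t lock;
	long refs;		/* external references, e.g. the superblock */
	long max_pages;
	long used_pages;
};

static struct subpool *subpool_new(long max_pages)
{
	struct subpool *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return NULL;
	pthread_mutex_init(&sp->lock, NULL);
	sp->refs = 1;
	sp->max_pages = max_pages;
	return sp;
}

/* Called with the lock held; frees the pool only once nobody points at it
 * and no pages charge against it, otherwise just drops the lock. */
static void subpool_release_if_idle(struct subpool *sp)
{
	if (sp->refs == 0 && sp->used_pages == 0) {
		pthread_mutex_unlock(&sp->lock);
		pthread_mutex_destroy(&sp->lock);
		free(sp);
		return;
	}
	pthread_mutex_unlock(&sp->lock);
}

static void subpool_put_ref(struct subpool *sp)
{
	pthread_mutex_lock(&sp->lock);
	sp->refs--;
	subpool_release_if_idle(sp);
}

static int subpool_get_pages(struct subpool *sp, long delta)
{
	int ret = 0;

	pthread_mutex_lock(&sp->lock);
	if (sp->used_pages + delta <= sp->max_pages)
		sp->used_pages += delta;
	else
		ret = -1;	/* over the per-instance limit */
	pthread_mutex_unlock(&sp->lock);
	return ret;
}

static void subpool_put_pages(struct subpool *sp, long delta)
{
	pthread_mutex_lock(&sp->lock);
	sp->used_pages -= delta;
	subpool_release_if_idle(sp);
}

int main(void)
{
	struct subpool *sp = subpool_new(2);

	subpool_get_pages(sp, 2);	/* pages outstanding */
	subpool_put_ref(sp);		/* "unmount": pool must survive */
	subpool_put_pages(sp, 2);	/* last page gone: pool freed here */
	printf("pool torn down only after both refs and pages were dropped\n");
	return 0;
}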
Signed-off-by: Andrew Barry Signed-off-by: David Gibson Cc: Hugh Dickins Cc: Mel Gorman Cc: Minchan Kim Cc: Hillf Danton Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 54 ++++++++++++++++++++-------------------------------- 1 file changed, 21 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 4fbd9fccd550..7913e3252167 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -626,9 +626,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) spin_lock(&sbinfo->stat_lock); /* If no limits set, just report 0 for max/free/used * blocks, like simple_statfs() */ - if (sbinfo->max_blocks >= 0) { - buf->f_blocks = sbinfo->max_blocks; - buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; + if (sbinfo->spool) { + long free_pages; + + spin_lock(&sbinfo->spool->lock); + buf->f_blocks = sbinfo->spool->max_hpages; + free_pages = sbinfo->spool->max_hpages + - sbinfo->spool->used_hpages; + buf->f_bavail = buf->f_bfree = free_pages; + spin_unlock(&sbinfo->spool->lock); buf->f_files = sbinfo->max_inodes; buf->f_ffree = sbinfo->free_inodes; } @@ -644,6 +650,10 @@ static void hugetlbfs_put_super(struct super_block *sb) if (sbi) { sb->s_fs_info = NULL; + + if (sbi->spool) + hugepage_put_subpool(sbi->spool); + kfree(sbi); } } @@ -874,10 +884,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_fs_info = sbinfo; sbinfo->hstate = config.hstate; spin_lock_init(&sbinfo->stat_lock); - sbinfo->max_blocks = config.nr_blocks; - sbinfo->free_blocks = config.nr_blocks; sbinfo->max_inodes = config.nr_inodes; sbinfo->free_inodes = config.nr_inodes; + sbinfo->spool = NULL; + if (config.nr_blocks != -1) { + sbinfo->spool = hugepage_new_subpool(config.nr_blocks); + if (!sbinfo->spool) + goto out_free; + } sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = huge_page_size(config.hstate); sb->s_blocksize_bits = huge_page_shift(config.hstate); @@ -896,38 +910,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_root = root; return 0; out_free: + if (sbinfo->spool) + kfree(sbinfo->spool); kfree(sbinfo); return -ENOMEM; } -int hugetlb_get_quota(struct address_space *mapping, long delta) -{ - int ret = 0; - struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); - - if (sbinfo->free_blocks > -1) { - spin_lock(&sbinfo->stat_lock); - if (sbinfo->free_blocks - delta >= 0) - sbinfo->free_blocks -= delta; - else - ret = -ENOMEM; - spin_unlock(&sbinfo->stat_lock); - } - - return ret; -} - -void hugetlb_put_quota(struct address_space *mapping, long delta) -{ - struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); - - if (sbinfo->free_blocks > -1) { - spin_lock(&sbinfo->stat_lock); - sbinfo->free_blocks += delta; - spin_unlock(&sbinfo->stat_lock); - } -} - static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { -- cgit From 05af2e104a0c282dcd9303431e1360750ba76de6 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 21 Mar 2012 16:34:13 -0700 Subject: mm, counters: remove task argument to sync_mm_rss() and __sync_task_rss_stat() sync_mm_rss() can only be used for current to avoid race conditions in iterating and clearing its per-task counters. Remove the task argument for it and its helper function, __sync_task_rss_stat(), to avoid thinking it can be used safely for anything other than current. 
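The interface change is about making misuse impossible rather than changing behaviour. The same idea in a hedged userspace sketch (names invented): a per-thread delta cache that only its owner folds into the shared total, so the sync routine takes no task argument at all.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Shared total, updated only via each thread flushing its own cache. */
static atomic_long mm_rss_total;

/* Per-thread delta cache; only the owning thread reads or clears it. */
static _Thread_local long task_rss_delta;

static void task_touch_pages(long n)
{
	task_rss_delta += n;	/* cheap, no shared-cacheline traffic */
}

/*
 * Deliberately takes no "which thread" argument: it can only fold the
 * caller's cached delta into the shared counter, so there is no way to
 * race with another thread's private cache.
 */
static void sync_task_rss(void)
{
	atomic_fetch_add(&mm_rss_total, task_rss_delta);
	task_rss_delta = 0;
}

static void *worker(void *arg)
{
	task_touch_pages(128);
	sync_task_rss();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	task_touch_pages(64);
	sync_task_rss();
	printf("rss pages: %ld\n", atomic_load(&mm_rss_total));
	return 0;
}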
Signed-off-by: David Rientjes Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 3908544f5d18..6ed164d20d7d 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -824,7 +824,7 @@ static int exec_mmap(struct mm_struct *mm) /* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; - sync_mm_rss(tsk, old_mm); + sync_mm_rss(old_mm); mm_release(tsk, old_mm); if (old_mm) { -- cgit From 21a3c273f88c9cbbaf7e14505df0131d95c8f262 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 21 Mar 2012 16:34:13 -0700 Subject: mm, hugetlb: add thread name and pid to SHM_HUGETLB mlock rlimit warning Add the thread name and pid of the application that is allocating shm segments with MAP_HUGETLB without being a part of /proc/sys/vm/hugetlb_shm_group or having CAP_IPC_LOCK. This identifies the application so it may be fixed by avoiding using the deprecated exception (see Documentation/feature-removal-schedule.txt). Signed-off-by: David Rientjes Cc: Dave Jones Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 7913e3252167..79408159a001 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -953,7 +953,11 @@ struct file *hugetlb_file_setup(const char *name, size_t size, if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { *user = current_user(); if (user_shm_lock(size, *user)) { - printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n"); + task_lock(current); + printk_once(KERN_WARNING + "%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n", + current->comm, current->pid); + task_unlock(current); } else { *user = NULL; return ERR_PTR(-EPERM); -- cgit From 40716e29243de46720e5773797791466c28904ec Mon Sep 17 00:00:00 2001 From: Steven Truelove Date: Wed, 21 Mar 2012 16:34:14 -0700 Subject: hugetlbfs: fix alignment of huge page requests When calling shmget() with SHM_HUGETLB, shmget aligns the request size to PAGE_SIZE, but this is not sufficient. Modify hugetlb_file_setup() to align requests to the huge page size, and to accept an address argument so that all alignment checks can be performed in hugetlb_file_setup(), rather than in its callers. Change newseg() and mmap_pgoff() to match the new prototype and eliminate a now redundant alignment check. 
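As a sanity check on the arithmetic introduced below (an illustrative sketch that assumes a 2 MB huge page size): the request is grown by the offset of addr within its huge page and then rounded up to whole huge pages.

#include <stdio.h>

#define HUGE_SHIFT	21			/* assumed 2 MB huge pages */
#define HUGE_SIZE	(1UL << HUGE_SHIFT)
#define HUGE_MASK	(~(HUGE_SIZE - 1))
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Mirror of the reservation math: grow the request by addr's offset into
 * its huge page, then round the sum up to whole huge pages. */
static unsigned long huge_pages_needed(unsigned long addr, unsigned long size)
{
	size += addr & ~HUGE_MASK;
	return ALIGN_UP(size, HUGE_SIZE) >> HUGE_SHIFT;
}

int main(void)
{
	/* A 4 KB request at a 2 MB-aligned address needs one huge page. */
	printf("%lu\n", huge_pages_needed(0x40000000UL, 4096));	/* 1 */
	/* The same size straddling a huge page boundary needs two. */
	printf("%lu\n", huge_pages_needed(0x401ff000UL, 8192));	/* 2 */
	return 0;
}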
[akpm@linux-foundation.org: fix build] Signed-off-by: Steven Truelove Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 79408159a001..631329f3de63 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -935,8 +935,8 @@ static int can_do_hugetlb_shm(void) return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); } -struct file *hugetlb_file_setup(const char *name, size_t size, - vm_flags_t acctflag, +struct file *hugetlb_file_setup(const char *name, unsigned long addr, + size_t size, vm_flags_t acctflag, struct user_struct **user, int creat_flags) { int error = -ENOMEM; @@ -945,6 +945,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size, struct path path; struct dentry *root; struct qstr quick_string; + struct hstate *hstate; + unsigned long num_pages; *user = NULL; if (!hugetlbfs_vfsmount) @@ -978,10 +980,12 @@ struct file *hugetlb_file_setup(const char *name, size_t size, if (!inode) goto out_dentry; + hstate = hstate_inode(inode); + size += addr & ~huge_page_mask(hstate); + num_pages = ALIGN(size, huge_page_size(hstate)) >> + huge_page_shift(hstate); error = -ENOMEM; - if (hugetlb_reserve_pages(inode, 0, - size >> huge_page_shift(hstate_inode(inode)), NULL, - acctflag)) + if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag)) goto out_inode; d_instantiate(path.dentry, inode); -- cgit From d1d5e05ffdc110021ae7937802e88ae0d223dcdc Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Wed, 21 Mar 2012 16:34:15 -0700 Subject: hugetlbfs: return error code when initializing module Return an errno upon failure to create inode kmem cache, and unregister the FS upon failure to mount. [akpm@linux-foundation.org: remove unneeded test of `error'] Signed-off-by: Hillf Danton Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 631329f3de63..269163324b73 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1021,6 +1021,7 @@ static int __init init_hugetlbfs_fs(void) if (error) return error; + error = -ENOMEM; hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", sizeof(struct hugetlbfs_inode_info), 0, 0, init_once); @@ -1039,10 +1040,10 @@ static int __init init_hugetlbfs_fs(void) } error = PTR_ERR(vfsmount); + unregister_filesystem(&hugetlbfs_fs_type); out: - if (error) - kmem_cache_destroy(hugetlbfs_inode_cachep); + kmem_cache_destroy(hugetlbfs_inode_cachep); out2: bdi_destroy(&hugetlbfs_backing_dev_info); return error; -- cgit From 4e474a00d7ff746ed177ddae14fa8b2d4bad7a00 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 22 Mar 2012 14:42:22 -0700 Subject: sysctl: protect poll() in entries that may go away Protect code accessing ctl_table by grabbing the header with grab_header() and after releasing with sysctl_head_finish(). This is needed if poll() is called in entries created by modules: currently only hostname and domainname support poll(), but this bug may be triggered when/if modules use it and if user called poll() in a file that doesn't support it. 
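For reference, the userspace side of the hostname/domainname poll() support described above looks roughly like the sketch below (illustrative, not code from the patch): poll for an exceptional event, then rewind and re-read once it fires.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/proc/sys/kernel/hostname", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	if (fd < 0)
		return 1;
	/* Current value; the notification baseline was taken at open(). */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0)
		printf("hostname now: %.*s", (int)n, buf);

	/* Blocks until the hostname changes (e.g. via sethostname()). */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & (POLLPRI | POLLERR))) {
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0)
			printf("hostname changed to: %.*s", (int)n, buf);
	}
	close(fd);
	return 0;
}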
Dave Jones reported the following when using a syscall fuzzer while hibernating/resuming: RIP: 0010:[] [] proc_sys_poll+0x4e/0x90 RAX: 0000000000000145 RBX: ffff88020cab6940 RCX: 0000000000000000 RDX: ffffffff81233df0 RSI: 6b6b6b6b6b6b6b6b RDI: ffff88020cab6940 [ ... ] Code: 00 48 89 fb 48 89 f1 48 8b 40 30 4c 8b 60 e8 b8 45 01 00 00 49 83 7c 24 28 00 74 2e 49 8b 74 24 30 48 85 f6 74 24 48 85 c9 75 32 <8b> 16 b8 45 01 00 00 48 63 d2 49 39 d5 74 10 8b 06 48 98 48 89 If an entry goes away while we are polling() it, ctl_table may not exist anymore. Reported-by: Dave Jones Signed-off-by: Lucas De Marchi Cc: Al Viro Cc: Linus Torvalds Cc: Alexey Dobriyan Cc: stable@vger.kernel.org Signed-off-by: Andrew Morton Signed-off-by: Eric W. Biederman --- fs/proc/proc_sysctl.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index a7708b7c957f..47b474b572c1 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -525,20 +525,32 @@ static ssize_t proc_sys_write(struct file *filp, const char __user *buf, static int proc_sys_open(struct inode *inode, struct file *filp) { + struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; + /* sysctl was unregistered */ + if (IS_ERR(head)) + return PTR_ERR(head); + if (table->poll) filp->private_data = proc_sys_poll_event(table->poll); + sysctl_head_finish(head); + return 0; } static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) { struct inode *inode = filp->f_path.dentry->d_inode; + struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; - unsigned long event = (unsigned long)filp->private_data; unsigned int ret = DEFAULT_POLLMASK; + unsigned long event; + + /* sysctl was unregistered */ + if (IS_ERR(head)) + return POLLERR | POLLHUP; if (!table->proc_handler) goto out; @@ -546,6 +558,7 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) if (!table->poll) goto out; + event = (unsigned long)filp->private_data; poll_wait(filp, &table->poll->wait, wait); if (event != atomic_read(&table->poll->event)) { @@ -554,6 +567,8 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) } out: + sysctl_head_finish(head); + return ret; } -- cgit From f132c5be05e407a99cf582347a2ae0120acd3ad7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 22 Mar 2012 21:59:52 +0000 Subject: Fix full_name_hash() behaviour when length is a multiple of 8 We want it to match what hash_name() is doing, which means extra multiply by 9 in this case... 
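The change is easier to see with the per-word step isolated: each fully consumed word now folds in as (hash + word) * 9 where the old loop did hash * 9 + word, so a name whose length is a multiple of 8 picks up the extra multiply by 9 that hash_name() applies. A small hosted illustration over whole words only (not the kernel function itself):

#include <stdio.h>
#include <string.h>

/* Fold one full word into the running hash, two ways. */
static unsigned long fold_old(unsigned long h, unsigned long a)
{
	return h * 9 + a;		/* pre-fix ordering */
}

static unsigned long fold_new(unsigned long h, unsigned long a)
{
	return (h + a) * 9;		/* post-fix ordering, as in hash_name() */
}

int main(void)
{
	const char name[] = "0123456789abcdef";	/* 16 bytes: multiple of 8 */
	unsigned long a, old = 0, new = 0;
	size_t i;

	for (i = 0; i + sizeof(a) <= strlen(name); i += sizeof(a)) {
		memcpy(&a, name + i, sizeof(a));
		old = fold_old(old, a);
		new = fold_new(new, a);
	}
	printf("old ordering: %#lx\nnew ordering: %#lx\n", old, new);
	return 0;
}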
Reported-and-Tested-by: Konrad Rzeszutek Wilk Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index a94a7f9a03ea..bd313d680d34 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1439,10 +1439,10 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len) for (;;) { a = *(unsigned long *)name; - hash *= 9; if (len < sizeof(unsigned long)) break; hash += a; + hash *= 9; name += sizeof(unsigned long); len -= sizeof(unsigned long); if (!len) -- cgit From 1f1e6e523e43e312c0e0d38c09828d53e9f709fc Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 18 Mar 2012 21:23:05 -0700 Subject: fs: fix kernel-doc warnings in dcache.c Fix kernel-doc warnings in fs/dcache.c: Warning(fs/dcache.c:1743): No description found for parameter 'seqp' Warning(fs/dcache.c:1743): Excess function parameter 'seq' description in '__d_lookup_rcu' Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds --- fs/dcache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index e441941c834d..2b55bd0c1061 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1713,7 +1713,7 @@ EXPORT_SYMBOL(d_add_ci); * __d_lookup_rcu - search for a dentry (racy, store-free) * @parent: parent dentry * @name: qstr of name we wish to find - * @seq: returns d_seq value at the point where the dentry was found + * @seqp: returns d_seq value at the point where the dentry was found * @inode: returns dentry->d_inode when the inode was found valid. * Returns: dentry, or NULL * -- cgit From 989412bbd2835f1475d1528846693eddbac744c8 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 22 Mar 2012 15:58:27 -0700 Subject: vfs: tidy up fs/namei.c byte-repeat word constants In commit commit 1de5b41cd3b2 ("fs/namei.c: fix warnings on 32-bit") Andrew said that there must be a tidier way of doing this. This is that tidier way. Signed-off-by: Linus Torvalds --- fs/namei.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index bd313d680d34..99a34717b2b0 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1455,15 +1455,10 @@ done: } EXPORT_SYMBOL(full_name_hash); -#ifdef CONFIG_64BIT -#define ONEBYTES 0x0101010101010101ul -#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful -#define HIGHBITS 0x8080808080808080ul -#else -#define ONEBYTES 0x01010101ul -#define SLASHBYTES 0x2f2f2f2ful -#define HIGHBITS 0x80808080ul -#endif +#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) +#define ONEBYTES REPEAT_BYTE(0x01) +#define SLASHBYTES REPEAT_BYTE('/') +#define HIGHBITS REPEAT_BYTE(0x80) /* Return the high bit set in the first byte that is a zero */ static inline unsigned long has_zero(unsigned long a) -- cgit From f7493e5d9cc10ac97cf1f1579fdc14117460b40b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 22 Mar 2012 16:10:40 -0700 Subject: vfs: tidy up sparse warnings in fs/namei.c While doing the fs/namei.c cleanups, I ran sparse on it, and it pointed out other large integers and a couple of cases of us using '0' instead of the proper 'NULL'. Sparse still doesn't understand some of the conditional locking going on, but that's no excuse for not fixing up the trivial stuff. 
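The REPEAT_BYTE() constants from the previous namei.c clean-up are easy to poke at in a hosted build: ~0ul / 0xff spreads 0x01 across the word on either word size, and the classic zero-byte test flags a zero byte, which after XORing with SLASHBYTES means "this word contains a slash". An illustrative sketch (the has_zero() body here is the textbook trick, shown for demonstration rather than copied from the kernel):

#include <stdio.h>
#include <string.h>

#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
#define ONEBYTES	REPEAT_BYTE(0x01)
#define SLASHBYTES	REPEAT_BYTE('/')
#define HIGHBITS	REPEAT_BYTE(0x80)

/* Nonzero iff 'a' contains a zero byte; the lowest set bit marks the
 * first such byte on a little-endian machine. */
static unsigned long has_zero(unsigned long a)
{
	return ((a - ONEBYTES) & ~a) & HIGHBITS;
}

int main(void)
{
	unsigned long word;

	printf("ONEBYTES   = %#lx\n", ONEBYTES);
	printf("SLASHBYTES = %#lx\n", SLASHBYTES);
	printf("HIGHBITS   = %#lx\n", HIGHBITS);

	/* XOR against SLASHBYTES zeroes exactly the bytes equal to '/'. */
	memcpy(&word, "usr/bin/", sizeof(word));
	printf("contains '/': %s\n",
	       has_zero(word ^ SLASHBYTES) ? "yes" : "no");

	memcpy(&word, "usr_bin_", sizeof(word));
	printf("contains '/': %s\n",
	       has_zero(word ^ SLASHBYTES) ? "yes" : "no");
	return 0;
}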
Signed-off-by: Linus Torvalds --- fs/namei.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 99a34717b2b0..73ec863a9896 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -161,7 +161,7 @@ static char *getname_flags(const char __user *filename, int flags, int *empty) char *getname(const char __user * filename) { - return getname_flags(filename, 0, 0); + return getname_flags(filename, 0, NULL); } #ifdef CONFIG_AUDITSYSCALL @@ -1408,7 +1408,7 @@ static inline int can_lookup(struct inode *inode) */ static inline long count_masked_bytes(unsigned long mask) { - return mask*0x0001020304050608 >> 56; + return mask*0x0001020304050608ul >> 56; } static inline unsigned int fold_hash(unsigned long hash) @@ -1972,7 +1972,7 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags, int user_path_at(int dfd, const char __user *name, unsigned flags, struct path *path) { - return user_path_at_empty(dfd, name, flags, path, 0); + return user_path_at_empty(dfd, name, flags, path, NULL); } static int user_path_parent(int dfd, const char __user *path, -- cgit From b502bd1152472dc1b98c60434f23c23b280c7b94 Mon Sep 17 00:00:00 2001 From: Muthu Kumar Date: Fri, 23 Mar 2012 15:01:50 -0700 Subject: magic.h: move some FS magic numbers into magic.h - Move open-coded filesystem magic numbers into magic.h - Rearrange magic.h so that the filesystem-related constants are grouped together. Signed-off-by: Muthukumar R Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_misc.c | 3 ++- fs/block_dev.c | 3 ++- fs/pipe.c | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 1ffb60355cae..613aa0618235 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -699,7 +700,7 @@ static int bm_fill_super(struct super_block * sb, void * data, int silent) [3] = {"register", &bm_register_operations, S_IWUSR}, /* last one */ {""} }; - int err = simple_fill_super(sb, 0x42494e4d, bm_files); + int err = simple_fill_super(sb, BINFMTFS_MAGIC, bm_files); if (!err) sb->s_op = &s_ops; return err; diff --git a/fs/block_dev.c b/fs/block_dev.c index a9ff3000b83d..e08f6a20a5bb 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -506,7 +507,7 @@ static const struct super_operations bdev_sops = { static struct dentry *bd_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, 0x62646576); + return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); } static struct file_system_type bd_type = { diff --git a/fs/pipe.c b/fs/pipe.c index fe0502f9beb2..25feaa3faac0 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include -- cgit From 9710a78e55fe29fa2d2f1a9cbd1d399797585fd9 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Fri, 23 Mar 2012 15:01:53 -0700 Subject: fs/notify/notification.c: make subsys_initcall function static Signed-off-by: H Hartley Sweeten Cc: Eric Dumazet Cc: Mike Frysinger Cc: Arun Sharma Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/notify/notification.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 
ee188158a224..c887b1378f7e 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -447,7 +447,7 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, return event; } -__init int fsnotify_notification_init(void) +static __init int fsnotify_notification_init(void) { fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC); fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC); @@ -461,4 +461,3 @@ __init int fsnotify_notification_init(void) return 0; } subsys_initcall(fsnotify_notification_init); - -- cgit From 626cf236608505d376e4799adb4f7eb00a8594af Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 23 Mar 2012 15:02:27 -0700 Subject: poll: add poll_requested_events() and poll_does_not_wait() functions In some cases the poll() implementation in a driver has to do different things depending on the events the caller wants to poll for. An example is when a driver needs to start a DMA engine if the caller polls for POLLIN, but doesn't want to do that if POLLIN is not requested but instead only POLLOUT or POLLPRI is requested. This is something that can happen in the video4linux subsystem among others. Unfortunately, the current epoll/poll/select implementation doesn't provide that information reliably. The poll_table_struct does have it: it has a key field with the event mask. But once a poll() call matches one or more bits of that mask any following poll() calls are passed a NULL poll_table pointer. Also, the eventpoll implementation always left the key field at ~0 instead of using the requested events mask. This was changed in eventpoll.c so the key field now contains the actual events that should be polled for as set by the caller. The solution to the NULL poll_table pointer is to set the qproc field to NULL in poll_table once poll() matches the events, not the poll_table pointer itself. That way drivers can obtain the mask through a new poll_requested_events inline. The poll_table_struct can still be NULL since some kernel code calls it internally (netfs_state_poll() in ./drivers/staging/pohmelfs/netfs.h). In that case poll_requested_events() returns ~0 (i.e. all events). Very rarely drivers might want to know whether poll_wait will actually wait. If another earlier file descriptor in the set already matched the events the caller wanted to wait for, then the kernel will return from the select() call without waiting. This might be useful information in order to avoid doing expensive work. A new helper function poll_does_not_wait() is added that drivers can use to detect this situation. This is now used in sock_poll_wait() in include/net/sock.h. This was the only place in the kernel that needed this information. Drivers should no longer access any of the poll_table internals, but use the poll_requested_events() and poll_does_not_wait() access functions instead. In order to enforce that the poll_table fields are now prepended with an underscore and a comment was added warning against using them directly. This required a change in unix_dgram_poll() in unix/af_unix.c which used the key field to get the requested events. It's been replaced by a call to poll_requested_events(). For qproc it was especially important to change its name since the behavior of that field changes with this patch since this function pointer can now be NULL when that wasn't possible in the past. Any driver accessing the qproc or key fields directly will now fail to compile. 
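For driver authors the intended pattern looks roughly like the skeleton below: query the requested events through the new helper, start the expensive work only when the caller actually asked for readability, and never touch _key or _qproc directly. This is a hypothetical driver sketch; the foo_* device, helpers and wait queue are placeholders, not code from the patch.

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Hypothetical device; every foo_* name here is a placeholder. */
struct foo_dev {
	wait_queue_head_t read_wq;
};

/* Hooks a real driver would implement elsewhere. */
void foo_start_streaming(struct foo_dev *dev);
bool foo_read_ready(struct foo_dev *dev);
bool foo_write_ready(struct foo_dev *dev);

static unsigned int foo_poll(struct file *file, poll_table *wait)
{
	struct foo_dev *dev = file->private_data;
	unsigned long requested = poll_requested_events(wait);
	unsigned int mask = 0;

	/* Only kick the expensive capture path when reads were asked for. */
	if (requested & (POLLIN | POLLRDNORM))
		foo_start_streaming(dev);

	poll_wait(file, &dev->read_wq, wait);

	if (foo_read_ready(dev))
		mask |= POLLIN | POLLRDNORM;
	if (foo_write_ready(dev))
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}

A driver that wants to skip such setup when the core will not actually sleep on the table can additionally test poll_does_not_wait(wait).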
Some notes regarding the correctness of this patch: the driver's poll() function is called with a 'struct poll_table_struct *wait' argument. This pointer may or may not be NULL, drivers can never rely on it being one or the other as that depends on whether or not an earlier file descriptor in the select()'s fdset matched the requested events. There are only three things a driver can do with the wait argument: 1) obtain the key field: events = wait ? wait->key : ~0; This will still work although it should be replaced with the new poll_requested_events() function (which does exactly the same). This will now even work better, since wait is no longer set to NULL unnecessarily. 2) use the qproc callback. This could be deadly since qproc can now be NULL. Renaming qproc should prevent this from happening. There are no kernel drivers that actually access this callback directly, BTW. 3) test whether wait == NULL to determine whether poll would return without waiting. This is no longer sufficient as the correct test is now wait == NULL || wait->_qproc == NULL. However, the worst that can happen here is a slight performance hit in the case where wait != NULL and wait->_qproc == NULL. In that case the driver will assume that poll_wait() will actually add the fd to the set of waiting file descriptors. Of course, poll_wait() will not do that since it tests for wait->_qproc. This will not break anything, though. There is only one place in the whole kernel where this happens (sock_poll_wait() in include/net/sock.h) and that code will be replaced by a call to poll_does_not_wait() in the next patch. Note that even if wait->_qproc != NULL drivers cannot rely on poll_wait() actually waiting. The next file descriptor from the set might match the event mask and thus any possible waits will never happen. Signed-off-by: Hans Verkuil Reviewed-by: Jonathan Corbet Reviewed-by: Al Viro Cc: Davide Libenzi Signed-off-by: Hans de Goede Cc: Mauro Carvalho Chehab Cc: David Miller Cc: Eric Dumazet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 18 +++++++++++++++--- fs/select.c | 40 ++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4d9d3a45e356..ca300071e79c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -699,9 +699,12 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, void *priv) { struct epitem *epi, *tmp; + poll_table pt; + init_poll_funcptr(&pt, NULL); list_for_each_entry_safe(epi, tmp, head, rdllink) { - if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & + pt._key = epi->event.events; + if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) & epi->event.events) return POLLIN | POLLRDNORM; else { @@ -1097,6 +1100,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); + epq.pt._key = event->events; /* * Attach the item to the poll hooks and get current event bits. @@ -1191,6 +1195,9 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even { int pwake = 0; unsigned int revents; + poll_table pt; + + init_poll_funcptr(&pt, NULL); /* * Set the new event interest mask before calling f_op->poll(); @@ -1198,13 +1205,14 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even * f_op->poll() call and the new event set registering. 
*/ epi->event.events = event->events; + pt._key = event->events; epi->event.data = event->data; /* protected by mtx */ /* * Get current event bits. We can safely use the file* here because * its usage count has been increased by the caller of this function. */ - revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); + revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt); /* * If the item is "hot" and it is not registered inside the ready @@ -1239,6 +1247,9 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, unsigned int revents; struct epitem *epi; struct epoll_event __user *uevent; + poll_table pt; + + init_poll_funcptr(&pt, NULL); /* * We can loop without lock because we are passed a task private list. @@ -1251,7 +1262,8 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, list_del_init(&epi->rdllink); - revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & + pt._key = epi->event.events; + revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) & epi->event.events; /* diff --git a/fs/select.c b/fs/select.c index e782258d0de3..ecfd0b125ba2 100644 --- a/fs/select.c +++ b/fs/select.c @@ -223,7 +223,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, get_file(filp); entry->filp = filp; entry->wait_address = wait_address; - entry->key = p->key; + entry->key = p->_key; init_waitqueue_func_entry(&entry->wait, pollwake); entry->wait.private = pwq; add_wait_queue(wait_address, &entry->wait); @@ -386,13 +386,11 @@ get_max: static inline void wait_key_set(poll_table *wait, unsigned long in, unsigned long out, unsigned long bit) { - if (wait) { - wait->key = POLLEX_SET; - if (in & bit) - wait->key |= POLLIN_SET; - if (out & bit) - wait->key |= POLLOUT_SET; - } + wait->_key = POLLEX_SET; + if (in & bit) + wait->_key |= POLLIN_SET; + if (out & bit) + wait->_key |= POLLOUT_SET; } int do_select(int n, fd_set_bits *fds, struct timespec *end_time) @@ -414,7 +412,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) poll_initwait(&table); wait = &table.pt; if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { - wait = NULL; + wait->_qproc = NULL; timed_out = 1; } @@ -459,17 +457,17 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) if ((mask & POLLIN_SET) && (in & bit)) { res_in |= bit; retval++; - wait = NULL; + wait->_qproc = NULL; } if ((mask & POLLOUT_SET) && (out & bit)) { res_out |= bit; retval++; - wait = NULL; + wait->_qproc = NULL; } if ((mask & POLLEX_SET) && (ex & bit)) { res_ex |= bit; retval++; - wait = NULL; + wait->_qproc = NULL; } } } @@ -481,7 +479,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) *rexp = res_ex; cond_resched(); } - wait = NULL; + wait->_qproc = NULL; if (retval || timed_out || signal_pending(current)) break; if (table.error) { @@ -720,7 +718,7 @@ struct poll_list { * interested in events matching the pollfd->events mask, and the result * matching that mask is both recorded in pollfd->revents and returned. The * pwait poll_table will be used by the fd-provided poll handler for waiting, - * if non-NULL. + * if pwait->_qproc is non-NULL. 
*/ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) { @@ -738,9 +736,7 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) if (file != NULL) { mask = DEFAULT_POLLMASK; if (file->f_op && file->f_op->poll) { - if (pwait) - pwait->key = pollfd->events | - POLLERR | POLLHUP; + pwait->_key = pollfd->events|POLLERR|POLLHUP; mask = file->f_op->poll(file, pwait); } /* Mask out unneeded events. */ @@ -763,7 +759,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { - pt = NULL; + pt->_qproc = NULL; timed_out = 1; } @@ -781,22 +777,22 @@ static int do_poll(unsigned int nfds, struct poll_list *list, for (; pfd != pfd_end; pfd++) { /* * Fish for events. If we found one, record it - * and kill the poll_table, so we don't + * and kill poll_table->_qproc, so we don't * needlessly register any other waiters after * this. They'll get immediately deregistered * when we break out and return. */ if (do_pollfd(pfd, pt)) { count++; - pt = NULL; + pt->_qproc = NULL; } } } /* * All waiters have already been registered, so don't provide - * a poll_table to them on the next loop iteration. + * a poll_table->_qproc to them on the next loop iteration. */ - pt = NULL; + pt->_qproc = NULL; if (!count) { count = wait->error; if (signal_pending(current)) -- cgit From 02edc6fc4d5feb3357aa44ba521e5504b4ff0b60 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 23 Mar 2012 15:02:27 -0700 Subject: epoll: comment the funky #ifdef Looking for a bug in -rt, I stumbled across this code here from: commit 2dfa4eeab0fc ("epoll keyed wakeups: teach epoll about hints coming with the wakeup key"), specifically: #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) { unsigned long flags; spin_lock_irqsave_nested(&wqueue->lock, flags, subclass); wake_up_locked_poll(wqueue, events); spin_unlock_irqrestore(&wqueue->lock, flags); } #else static inline void ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) { wake_up_poll(wqueue, events); } #endif You change the function of ep_wake_up_nested() depending on whether CONFIG_DEBUG_LOCK_ALLOC is set or not. This looks awfully suspicious, and there's no comment to explain why. I initially thought that this was trying to fool lockdep, and hiding a real bug. Investigating it, I found the creation of wake_up_nested() (which no longer exists) but was created for the sole purpose of epoll and its strange wake ups, as explained in commit 0ccf831cbee9 ("lockdep: annotate epoll") Although the commit message says "annotate epoll" the change log is much better at explaining what is happening than what is in the actual code. Thus a comment is really necessary here. And to save the time of other developers from having to go trudging through the git logs trying to figure out why this code exists. I took parts of the change log and placed it into a comment above the affected code. This will make the description of what is happening more visible to new developers that have to look at this code for the first time. 
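The nesting scenario that the new comment documents can be reproduced from userspace in a few lines. The following is a minimal sketch of that setup, not code from the patch, with error handling trimmed to perror() calls.

#include <stdio.h>
#include <sys/epoll.h>
#include <sys/socket.h>

int main(void)
{
	int dfd = socket(AF_INET, SOCK_DGRAM, 0);
	int efd1 = epoll_create1(0);
	int efd2 = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	if (dfd < 0 || efd1 < 0 || efd2 < 0) {
		perror("setup");
		return 1;
	}

	/* efd1 watches the socket... */
	ev.data.fd = dfd;
	if (epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, &ev) < 0)
		perror("epoll_ctl efd1");

	/* ...and efd2 watches efd1, so a packet on dfd wakes efd1's wait
	 * queue, whose callback in turn wakes efd2: the nested wake_up()
	 * chain the lockdep annotation has to allow. */
	ev.data.fd = efd1;
	if (epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev) < 0)
		perror("epoll_ctl efd2");

	return 0;
}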
Signed-off-by: Steven Rostedt Cc: Davide Libenzi Cc: Peter Zijlstra Cc: Alan Cox Cc: Ingo Molnar Cc: David Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ca300071e79c..23c220774d1b 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -427,6 +427,31 @@ out_unlock: return error; } +/* + * As described in commit 0ccf831cb lockdep: annotate epoll + * the use of wait queues used by epoll is done in a very controlled + * manner. Wake ups can nest inside each other, but are never done + * with the same locking. For example: + * + * dfd = socket(...); + * efd1 = epoll_create(); + * efd2 = epoll_create(); + * epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...); + * epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...); + * + * When a packet arrives to the device underneath "dfd", the net code will + * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a + * callback wakeup entry on that queue, and the wake_up() performed by the + * "dfd" net code will end up in ep_poll_callback(). At this point epoll + * (efd1) notices that it may have some event ready, so it needs to wake up + * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake() + * that ends up in another wake_up(), after having checked about the + * recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to + * avoid stack blasting. + * + * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle + * this special case of epoll. + */ #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) -- cgit From da0503aae07f0410b6ff0a9e1d1d011701eb2758 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 23 Mar 2012 15:02:28 -0700 Subject: epoll: remove unneeded variable in reverse_path_check() We never use the length variable. Signed-off-by: Dan Carpenter Acked-by: Jason Baron Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 23c220774d1b..629e9ed99d0f 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1077,13 +1077,11 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests) */ static int reverse_path_check(void) { - int length = 0; int error = 0; struct file *current_file; /* let's call this for all tfiles */ list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) { - length++; path_count_init(); error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, reverse_path_check_proc, current_file, -- cgit From 41f0c02eacec9f984adb22e8fecda49e13b7eb13 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Fri, 23 Mar 2012 15:02:38 -0700 Subject: fat: clean up xlate_to_uni() xlate_to_uni() is called by vfat_build_slots() with sbi->nls_io as the final argument. nls_io can never be null at this point because the check is already being done in fat_fill_super() wherein the mount fails if it is null. 
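For context, the invariant being relied on is the usual one of validating a mount option once in fill_super() and failing the mount if it is missing. A simplified sketch of that pattern follows, with example_* names invented for illustration; the real code lives in fat_fill_super().

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nls.h>

/* Hypothetical per-superblock info, for illustration only. */
struct example_sb_info {
	char *iocharset;
	struct nls_table *nls_io;
};

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	struct example_sb_info *sbi = sb->s_fs_info;

	sbi->nls_io = load_nls(sbi->iocharset);
	if (!sbi->nls_io)
		return -EINVAL;	/* mount fails here, so nls_io is never
				 * NULL by the time name translation runs */
	return 0;
}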
Signed-off-by: Namjae Jeon Signed-off-by: Ravishankar N Acked-by: OGAWA Hirofumi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fat/namei_vfat.c | 83 +++++++++++++++++++++++------------------------------ 1 file changed, 36 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index a81eb2367d39..aae3b4e1057d 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -521,57 +521,46 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname, op = &outname[*outlen * sizeof(wchar_t)]; } else { - if (nls) { - for (i = 0, ip = name, op = outname, *outlen = 0; - i < len && *outlen <= FAT_LFN_LEN; - *outlen += 1) - { - if (escape && (*ip == ':')) { - if (i > len - 5) - return -EINVAL; - ec = 0; - for (k = 1; k < 5; k++) { - nc = ip[k]; - ec <<= 4; - if (nc >= '0' && nc <= '9') { - ec |= nc - '0'; - continue; - } - if (nc >= 'a' && nc <= 'f') { - ec |= nc - ('a' - 10); - continue; - } - if (nc >= 'A' && nc <= 'F') { - ec |= nc - ('A' - 10); - continue; - } - return -EINVAL; + for (i = 0, ip = name, op = outname, *outlen = 0; + i < len && *outlen <= FAT_LFN_LEN; + *outlen += 1) { + if (escape && (*ip == ':')) { + if (i > len - 5) + return -EINVAL; + ec = 0; + for (k = 1; k < 5; k++) { + nc = ip[k]; + ec <<= 4; + if (nc >= '0' && nc <= '9') { + ec |= nc - '0'; + continue; } - *op++ = ec & 0xFF; - *op++ = ec >> 8; - ip += 5; - i += 5; - } else { - if ((charlen = nls->char2uni(ip, len - i, (wchar_t *)op)) < 0) - return -EINVAL; - ip += charlen; - i += charlen; - op += 2; + if (nc >= 'a' && nc <= 'f') { + ec |= nc - ('a' - 10); + continue; + } + if (nc >= 'A' && nc <= 'F') { + ec |= nc - ('A' - 10); + continue; + } + return -EINVAL; } + *op++ = ec & 0xFF; + *op++ = ec >> 8; + ip += 5; + i += 5; + } else { + charlen = nls->char2uni(ip, len - i, + (wchar_t *)op); + if (charlen < 0) + return -EINVAL; + ip += charlen; + i += charlen; + op += 2; } - if (i < len) - return -ENAMETOOLONG; - } else { - for (i = 0, ip = name, op = outname, *outlen = 0; - i < len && *outlen <= FAT_LFN_LEN; - i++, *outlen += 1) - { - *op++ = *ip++; - *op++ = 0; - } - if (i < len) - return -ENAMETOOLONG; } + if (i < len) + return -ENAMETOOLONG; } *longlen = *outlen; -- cgit From d533df07c20c7b59b0559a3ac38fb45c81ffd6bb Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Fri, 23 Mar 2012 15:02:39 -0700 Subject: fat: fix bug in enforcing Long File Name length Since '*outlen' is initialized to zero, it is currently possible to create a filename of length (FAT_LFN_LEN + 1) when utf8 is not enabled. To enforce the FAT_LFN_LEN limit, we must perform one less iteration. 
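The off-by-one is easiest to see by counting loop iterations: with *outlen starting at zero, a "<=" bound admits FAT_LFN_LEN + 1 characters. A small standalone illustration, with the kernel's FAT_LFN_LEN value of 255 redefined locally:

#include <stdio.h>

#define FAT_LFN_LEN 255	/* kernel limit for VFAT long file names */

int main(void)
{
	int outlen, chars;

	/* Old condition: one iteration (one output character) too many. */
	for (outlen = 0, chars = 0; outlen <= FAT_LFN_LEN; outlen++)
		chars++;
	printf("outlen <= FAT_LFN_LEN allows %d characters\n", chars);	/* 256 */

	/* Fixed condition: stops at exactly FAT_LFN_LEN characters. */
	for (outlen = 0, chars = 0; outlen < FAT_LFN_LEN; outlen++)
		chars++;
	printf("outlen <  FAT_LFN_LEN allows %d characters\n", chars);	/* 255 */

	return 0;
}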
Signed-off-by: Namjae Jeon Signed-off-by: Ravishankar N Acked-by: OGAWA Hirofumi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fat/namei_vfat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index aae3b4e1057d..98ae804f5273 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -522,7 +522,7 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname, op = &outname[*outlen * sizeof(wchar_t)]; } else { for (i = 0, ip = name, op = outname, *outlen = 0; - i < len && *outlen <= FAT_LFN_LEN; + i < len && *outlen < FAT_LFN_LEN; *outlen += 1) { if (escape && (*ip == ':')) { if (i > len - 5) -- cgit From 909af768e88867016f427264ae39d27a57b6a8ed Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 23 Mar 2012 15:02:51 -0700 Subject: coredump: remove VM_ALWAYSDUMP flag The motivation for this patchset was that I was looking at a way for a qemu-kvm process, to exclude the guest memory from its core dump, which can be quite large. There are already a number of filter flags in /proc//coredump_filter, however, these allow one to specify 'types' of kernel memory, not specific address ranges (which is needed in this case). Since there are no more vma flags available, the first patch eliminates the need for the 'VM_ALWAYSDUMP' flag. The flag is used internally by the kernel to mark vdso and vsyscall pages. However, it is simple enough to check if a vma covers a vdso or vsyscall page without the need for this flag. The second patch then replaces the 'VM_ALWAYSDUMP' flag with a new 'VM_NODUMP' flag, which can be set by userspace using new madvise flags: 'MADV_DONTDUMP', and unset via 'MADV_DODUMP'. The core dump filters continue to work the same as before unless 'MADV_DONTDUMP' is set on the region. The qemu code which implements this features is at: http://people.redhat.com/~jbaron/qemu-dump/qemu-dump.patch In my testing the qemu core dump shrunk from 383MB -> 13MB with this patch. I also believe that the 'MADV_DONTDUMP' flag might be useful for security sensitive apps, which might want to select which areas are dumped. This patch: The VM_ALWAYSDUMP flag is currently used by the coredump code to indicate that a vma is part of a vsyscall or vdso section. However, we can determine if a vma is in one these sections by checking it against the gate_vma and checking for a non-NULL return value from arch_vma_name(). Thus, freeing a valuable vma bit. Signed-off-by: Jason Baron Acked-by: Roland McGrath Cc: Chris Metcalf Cc: Avi Kivity Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 81878b78c9d4..b64be5b5ac21 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1092,6 +1092,29 @@ out: * Jeremy Fitzhardinge */ +/* + * The purpose of always_dump_vma() is to make sure that special kernel mappings + * that are useful for post-mortem analysis are included in every core dump. + * In that way we ensure that the core dump is fully interpretable later + * without matching up the same kernel and hardware config to see what PC values + * meant. These special mappings include - vDSO, vsyscall, and other + * architecture specific mappings + */ +static bool always_dump_vma(struct vm_area_struct *vma) +{ + /* Any vsyscall mappings? 
*/ + if (vma == get_gate_vma(vma->vm_mm)) + return true; + /* + * arch_vma_name() returns non-NULL for special architecture mappings, + * such as vDSO sections. + */ + if (arch_vma_name(vma)) + return true; + + return false; +} + /* * Decide what to dump of a segment, part, all or none. */ @@ -1100,8 +1123,8 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, { #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) - /* The vma can be set up to tell us the answer directly. */ - if (vma->vm_flags & VM_ALWAYSDUMP) + /* always dump the vdso and vsyscall sections */ + if (always_dump_vma(vma)) goto whole; /* Hugetlb memory check */ -- cgit From accb61fe7bb0f5c2a4102239e4981650f9048519 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 23 Mar 2012 15:02:51 -0700 Subject: coredump: add VM_NODUMP, MADV_NODUMP, MADV_CLEAR_NODUMP Since we no longer need the VM_ALWAYSDUMP flag, let's use the freed bit for 'VM_NODUMP' flag. The idea is is to add a new madvise() flag: MADV_DONTDUMP, which can be set by applications to specifically request memory regions which should not dump core. The specific application I have in mind is qemu: we can add a flag there that wouldn't dump all of guest memory when qemu dumps core. This flag might also be useful for security sensitive apps that want to absolutely make sure that parts of memory are not dumped. To clear the flag use: MADV_DODUMP. [akpm@linux-foundation.org: s/MADV_NODUMP/MADV_DONTDUMP/, s/MADV_CLEAR_NODUMP/MADV_DODUMP/, per Roland] [akpm@linux-foundation.org: fix up the architectures which broke] Signed-off-by: Jason Baron Acked-by: Roland McGrath Cc: Chris Metcalf Cc: Avi Kivity Cc: Ralf Baechle Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: "James E.J. Bottomley" Cc: Helge Deller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index b64be5b5ac21..504b6eee50a9 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1127,6 +1127,9 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, if (always_dump_vma(vma)) goto whole; + if (vma->vm_flags & VM_NODUMP) + return 0; + /* Hugetlb memory check */ if (vma->vm_flags & VM_HUGETLB) { if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED)) -- cgit From b908243c549448fc0662f9cdd8d5cfe620fcdc31 Mon Sep 17 00:00:00 2001 From: Djalal Harouni Date: Fri, 23 Mar 2012 15:02:52 -0700 Subject: fs/proc/kcore.c: make get_sparsemem_vmemmap_info() static get_sparsemem_vmemmap_info() is only used inside fs/proc/kcore.c Signed-off-by: Djalal Harouni Reviewed-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/kcore.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index e5e69aff6c69..86c67eee439f 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -157,7 +157,8 @@ static int kcore_update_ram(void) #ifdef CONFIG_SPARSEMEM_VMEMMAP /* calculate vmemmap's address from given system ram pfn and register it */ -int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) +static int +get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) { unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT; unsigned long nr_pages = ent->size >> PAGE_SHIFT; @@ -189,7 +190,8 @@ int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) } #else -int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct 
list_head *head) +static int +get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) { return 1; } -- cgit From 59a32e2ce5eb809967cac4e718bc527beca83c59 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 23 Mar 2012 15:02:53 -0700 Subject: proc: speed up /proc/stat handling On a typical 16 cpus machine, "cat /proc/stat" gives more than 4096 bytes, and is slow : # strace -T -o /tmp/STRACE cat /proc/stat | wc -c 5826 # grep "cpu " /tmp/STRACE read(0, "cpu 1949310 19 2144714 12117253"..., 32768) = 5826 <0.001504> Thats partly because show_stat() must be called twice since initial buffer size is too small (4096 bytes for less than 32 possible cpus) Fix this by : 1) Taking into account nr_irqs in the initial buffer sizing. 2) Using ksize() to allow better filling of initial buffer. Signed-off-by: Eric Dumazet Cc: Glauber Costa Cc: Russell King - ARM Linux Cc: KAMEZAWA Hiroyuki Cc: Paul Turner Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/stat.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 121f77cfef76..ac446114cd48 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -157,11 +157,14 @@ static int show_stat(struct seq_file *p, void *v) static int stat_open(struct inode *inode, struct file *file) { - unsigned size = 4096 * (1 + num_possible_cpus() / 32); + unsigned size = 1024 + 128 * num_possible_cpus(); char *buf; struct seq_file *m; int res; + /* minimum size to display an interrupt count : 2 bytes */ + size += 2 * nr_irqs; + /* don't ask for more than the kmalloc() max size */ if (size > KMALLOC_MAX_SIZE) size = KMALLOC_MAX_SIZE; @@ -173,7 +176,7 @@ static int stat_open(struct inode *inode, struct file *file) if (!res) { m = file->private_data; m->buf = buf; - m->size = size; + m->size = ksize(buf); } else kfree(buf); return res; -- cgit From 1ac101a5d675aca2426c5cd460c73fb95acb8391 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Fri, 23 Mar 2012 15:02:54 -0700 Subject: procfs: add num_to_str() to speed up /proc/stat == stat_check.py num = 0 with open("/proc/stat") as f: while num < 1000 : data = f.read() f.seek(0, 0) num = num + 1 == perf shows 20.39% stat_check.py [kernel.kallsyms] [k] format_decode 13.41% stat_check.py [kernel.kallsyms] [k] number 12.61% stat_check.py [kernel.kallsyms] [k] vsnprintf 10.85% stat_check.py [kernel.kallsyms] [k] memcpy 4.85% stat_check.py [kernel.kallsyms] [k] radix_tree_lookup 4.43% stat_check.py [kernel.kallsyms] [k] seq_printf This patch removes most of calls to vsnprintf() by adding num_to_str() and seq_print_decimal_ull(), which prints decimal numbers without rich functions provided by printf(). On my 8cpu box. 
== Before patch == [root@bluextal test]# time ./stat_check.py real 0m0.150s user 0m0.026s sys 0m0.121s == After patch == [root@bluextal test]# time ./stat_check.py real 0m0.055s user 0m0.022s sys 0m0.030s [akpm@linux-foundation.org: remove incorrect comment, use less statck in num_to_str(), move comment from .h to .c, simplify seq_put_decimal_ull()] [andrea@betterlinux.com: avoid breaking the ABI in /proc/stat] Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Andrea Righi Cc: Eric Dumazet Cc: Glauber Costa Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Paul Turner Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/stat.c | 55 +++++++++++++++++++++++++++---------------------------- fs/seq_file.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/proc/stat.c b/fs/proc/stat.c index ac446114cd48..6a0c62d6e442 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -89,18 +89,19 @@ static int show_stat(struct seq_file *p, void *v) } sum += arch_irq_stat(); - seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu " - "%llu\n", - (unsigned long long)cputime64_to_clock_t(user), - (unsigned long long)cputime64_to_clock_t(nice), - (unsigned long long)cputime64_to_clock_t(system), - (unsigned long long)cputime64_to_clock_t(idle), - (unsigned long long)cputime64_to_clock_t(iowait), - (unsigned long long)cputime64_to_clock_t(irq), - (unsigned long long)cputime64_to_clock_t(softirq), - (unsigned long long)cputime64_to_clock_t(steal), - (unsigned long long)cputime64_to_clock_t(guest), - (unsigned long long)cputime64_to_clock_t(guest_nice)); + seq_puts(p, "cpu "); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); + seq_putc(p, '\n'); + for_each_online_cpu(i) { /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; @@ -113,26 +114,24 @@ static int show_stat(struct seq_file *p, void *v) steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; - seq_printf(p, - "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu " - "%llu\n", - i, - (unsigned long long)cputime64_to_clock_t(user), - (unsigned long long)cputime64_to_clock_t(nice), - (unsigned long long)cputime64_to_clock_t(system), - (unsigned long long)cputime64_to_clock_t(idle), - (unsigned long long)cputime64_to_clock_t(iowait), - (unsigned long long)cputime64_to_clock_t(irq), - (unsigned long long)cputime64_to_clock_t(softirq), - (unsigned long long)cputime64_to_clock_t(steal), - (unsigned long long)cputime64_to_clock_t(guest), - (unsigned long long)cputime64_to_clock_t(guest_nice)); + seq_printf(p, "cpu%d", i); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); + 
seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); + seq_putc(p, '\n'); } seq_printf(p, "intr %llu", (unsigned long long)sum); /* sum again ? it could be updated? */ for_each_irq_nr(j) - seq_printf(p, " %u", kstat_irqs(j)); + seq_put_decimal_ull(p, ' ', kstat_irqs(j)); seq_printf(p, "\nctxt %llu\n" @@ -149,7 +148,7 @@ static int show_stat(struct seq_file *p, void *v) seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); for (i = 0; i < NR_SOFTIRQS; i++) - seq_printf(p, " %u", per_softirq_sums[i]); + seq_put_decimal_ull(p, ' ', per_softirq_sums[i]); seq_putc(p, '\n'); return 0; diff --git a/fs/seq_file.c b/fs/seq_file.c index aa242dc99373..7d19816c4cc9 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -644,6 +644,39 @@ int seq_puts(struct seq_file *m, const char *s) } EXPORT_SYMBOL(seq_puts); +/* + * A helper routine for putting decimal numbers without rich format of printf(). + * only 'unsigned long long' is supported. + * This routine will put one byte delimiter + number into seq_file. + * This routine is very quick when you show lots of numbers. + * In usual cases, it will be better to use seq_printf(). It's easier to read. + */ +int seq_put_decimal_ull(struct seq_file *m, char delimiter, + unsigned long long num) +{ + int len; + + if (m->count + 2 >= m->size) /* we'll write 2 bytes at least */ + goto overflow; + + m->buf[m->count++] = delimiter; + + if (num < 10) { + m->buf[m->count++] = num + '0'; + return 0; + } + + len = num_to_str(m->buf + m->count, m->size - m->count, num); + if (!len) + goto overflow; + m->count += len; + return 0; +overflow: + m->count = m->size; + return -1; +} +EXPORT_SYMBOL(seq_put_decimal_ull); + /** * seq_write - write arbitrary data to buffer * @seq: seq_file identifying the buffer to which data should be written -- cgit From bda7bad62bc4c4e0783348e8db51abe094153c56 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Fri, 23 Mar 2012 15:02:54 -0700 Subject: procfs: speed up /proc/pid/stat, statm Process accounting applications as top, ps visit some files under /proc/. With seq_put_decimal_ull(), we can optimize /proc//stat and /proc//statm files. This patch adds - seq_put_decimal_ll() for signed values. - allow delimiter == 0. - convert seq_printf() to seq_put_decimal_ull/ll in /proc/stat, statm. Test result on a system with 2000+ procs. Before patch: [kamezawa@bluextal test]$ top -b -n 1 | wc -l 2223 [kamezawa@bluextal test]$ time top -b -n 1 > /dev/null real 0m0.675s user 0m0.044s sys 0m0.121s [kamezawa@bluextal test]$ time ps -elf > /dev/null real 0m0.236s user 0m0.056s sys 0m0.176s After patch: kamezawa@bluextal ~]$ time top -b -n 1 > /dev/null real 0m0.657s user 0m0.052s sys 0m0.100s [kamezawa@bluextal ~]$ time ps -elf > /dev/null real 0m0.198s user 0m0.050s sys 0m0.145s Considering top, ps tend to scan /proc periodically, this will reduce cpu consumption by top/ps to some extent. 
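The conversion pattern applied by this patch and the previous one is mechanical: replace a single printf-style format with one seq_put_decimal_ull() or seq_put_decimal_ll() call per number, passing the separator as the delimiter byte. A sketch against a made-up show() routine; example_stats_show() is illustrative, not from the patch.

#include <linux/seq_file.h>

static int example_stats_show(struct seq_file *m, void *v)
{
	unsigned long long reads = 123, writes = 456;
	long long delta = -7;

	/* Before: seq_printf(m, "io %llu %llu %lld\n", reads, writes, delta);
	 * After: the format string is gone, so no runtime format parsing. */
	seq_puts(m, "io");
	seq_put_decimal_ull(m, ' ', reads);
	seq_put_decimal_ull(m, ' ', writes);
	seq_put_decimal_ll(m, ' ', delta);	/* signed values take the _ll variant */
	seq_putc(m, '\n');
	return 0;
}

Passing 0 as the delimiter, as proc_pid_statm() now does for its first field, suppresses the leading separator.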
[akpm@linux-foundation.org: checkpatch fixes] Signed-off-by: KAMEZAWA Hiroyuki Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/array.c | 119 ++++++++++++++++++++++++++++++-------------------------- fs/seq_file.c | 21 +++++++++- 2 files changed, 84 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/proc/array.c b/fs/proc/array.c index c602b8d20f06..fbb53c249086 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -462,59 +462,56 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, /* convert nsec -> ticks */ start_time = nsec_to_clock_t(start_time); - seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \ -%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ -%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld %lu %lu %lu\n", - pid_nr_ns(pid, ns), - tcomm, - state, - ppid, - pgid, - sid, - tty_nr, - tty_pgrp, - task->flags, - min_flt, - cmin_flt, - maj_flt, - cmaj_flt, - cputime_to_clock_t(utime), - cputime_to_clock_t(stime), - cputime_to_clock_t(cutime), - cputime_to_clock_t(cstime), - priority, - nice, - num_threads, - start_time, - vsize, - mm ? get_mm_rss(mm) : 0, - rsslim, - mm ? (permitted ? mm->start_code : 1) : 0, - mm ? (permitted ? mm->end_code : 1) : 0, - (permitted && mm) ? mm->start_stack : 0, - esp, - eip, - /* The signal information here is obsolete. - * It must be decimal for Linux 2.0 compatibility. - * Use /proc/#/status for real-time signals. - */ - task->pending.signal.sig[0] & 0x7fffffffUL, - task->blocked.sig[0] & 0x7fffffffUL, - sigign .sig[0] & 0x7fffffffUL, - sigcatch .sig[0] & 0x7fffffffUL, - wchan, - 0UL, - 0UL, - task->exit_signal, - task_cpu(task), - task->rt_priority, - task->policy, - (unsigned long long)delayacct_blkio_ticks(task), - cputime_to_clock_t(gtime), - cputime_to_clock_t(cgtime), - (mm && permitted) ? mm->start_data : 0, - (mm && permitted) ? mm->end_data : 0, - (mm && permitted) ? mm->start_brk : 0); + seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state); + seq_put_decimal_ll(m, ' ', ppid); + seq_put_decimal_ll(m, ' ', pgid); + seq_put_decimal_ll(m, ' ', sid); + seq_put_decimal_ll(m, ' ', tty_nr); + seq_put_decimal_ll(m, ' ', tty_pgrp); + seq_put_decimal_ull(m, ' ', task->flags); + seq_put_decimal_ull(m, ' ', min_flt); + seq_put_decimal_ull(m, ' ', cmin_flt); + seq_put_decimal_ull(m, ' ', maj_flt); + seq_put_decimal_ull(m, ' ', cmaj_flt); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime)); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime)); + seq_put_decimal_ll(m, ' ', priority); + seq_put_decimal_ll(m, ' ', nice); + seq_put_decimal_ll(m, ' ', num_threads); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', start_time); + seq_put_decimal_ull(m, ' ', vsize); + seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0); + seq_put_decimal_ull(m, ' ', rsslim); + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0); + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0); + seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0); + seq_put_decimal_ull(m, ' ', esp); + seq_put_decimal_ull(m, ' ', eip); + /* The signal information here is obsolete. + * It must be decimal for Linux 2.0 compatibility. + * Use /proc/#/status for real-time signals. 
+ */ + seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', wchan); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ll(m, ' ', task->exit_signal); + seq_put_decimal_ll(m, ' ', task_cpu(task)); + seq_put_decimal_ull(m, ' ', task->rt_priority); + seq_put_decimal_ull(m, ' ', task->policy); + seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task)); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime)); + seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0); + seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0); + seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0); + seq_putc(m, '\n'); if (mm) mmput(mm); return 0; @@ -542,8 +539,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, size = task_statm(mm, &shared, &text, &data, &resident); mmput(mm); } - seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", - size, resident, shared, text, data); + /* + * For quick read, open code by putting numbers directly + * expected format is + * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", + * size, resident, shared, text, data); + */ + seq_put_decimal_ull(m, 0, size); + seq_put_decimal_ull(m, ' ', resident); + seq_put_decimal_ull(m, ' ', shared); + seq_put_decimal_ull(m, ' ', text); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', text); + seq_put_decimal_ull(m, ' ', 0); + seq_putc(m, '\n'); return 0; } diff --git a/fs/seq_file.c b/fs/seq_file.c index 7d19816c4cc9..55c293f7024d 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -659,7 +659,8 @@ int seq_put_decimal_ull(struct seq_file *m, char delimiter, if (m->count + 2 >= m->size) /* we'll write 2 bytes at least */ goto overflow; - m->buf[m->count++] = delimiter; + if (delimiter) + m->buf[m->count++] = delimiter; if (num < 10) { m->buf[m->count++] = num + '0'; @@ -677,6 +678,24 @@ overflow: } EXPORT_SYMBOL(seq_put_decimal_ull); +int seq_put_decimal_ll(struct seq_file *m, char delimiter, + long long num) +{ + if (num < 0) { + if (m->count + 3 >= m->size) { + m->count = m->size; + return -1; + } + if (delimiter) + m->buf[m->count++] = delimiter; + num = -num; + delimiter = '-'; + } + return seq_put_decimal_ull(m, delimiter, num); + +} +EXPORT_SYMBOL(seq_put_decimal_ll); + /** * seq_write - write arbitrary data to buffer * @seq: seq_file identifying the buffer to which data should be written -- cgit From 1b26c9b334044cff6d1d2698f2be41bc7d9a0864 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Fri, 23 Mar 2012 15:02:55 -0700 Subject: proc-ns: use d_set_d_op() API to set dentry ops in proc_ns_instantiate(). The namespace cleanup path leaks a dentry which holds a reference count on a network namespace. Keeping that network namespace from being freed when the last user goes away. Leaving things like vlan devices in the leaked network namespace. If you use ip netns add for much real work this problem becomes apparent pretty quickly. It light testing the problem hides because frequently you simply don't notice the leak. Use d_set_d_op() so that DCACHE_OP_* flags are set correctly. This issue exists back to 3.0. Acked-by: "Eric W. 
Biederman" Reported-by: Justin Pettit Signed-off-by: Pravin B Shelar Signed-off-by: Jesse Gross Cc: David Miller Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/namespaces.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 27da860115c6..3551f1f839eb 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -53,7 +53,7 @@ static struct dentry *proc_ns_instantiate(struct inode *dir, ei->ns_ops = ns_ops; ei->ns = ns; - dentry->d_op = &pid_dentry_operations; + d_set_d_op(dentry, &pid_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, NULL)) -- cgit From e075f59152890ffd7e3d704afc997dd686c8a781 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Fri, 23 Mar 2012 15:02:55 -0700 Subject: seq_file: add seq_set_overflow(), seq_overflow() It is undocumented but a seq_file's overflow state is indicated by m->count == m->size. Add seq_set_overflow() and seq_overflow() to set/check overflow status explicitly. Based on an idea from Eric Dumazet. [akpm@linux-foundation.org: tweak code comment] Signed-off-by: KAMEZAWA Hiroyuki Cc: Eric Dumazet Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/seq_file.c | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index 55c293f7024d..46cfb067fc3a 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -13,6 +13,22 @@ #include #include + +/* + * seq_files have a buffer which can may overflow. When this happens a larger + * buffer is reallocated and all the data will be printed again. + * The overflow state is true when m->count == m->size. + */ +static bool seq_overflow(struct seq_file *m) +{ + return m->count == m->size; +} + +static void seq_set_overflow(struct seq_file *m) +{ + m->count = m->size; +} + /** * seq_open - initialize sequential file * @file: file we initialize @@ -92,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset) error = 0; m->count = 0; } - if (m->count == m->size) + if (seq_overflow(m)) goto Eoverflow; if (pos + m->count > offset) { m->from = offset - pos; @@ -234,7 +250,7 @@ Fill: break; } err = m->op->show(m, p); - if (m->count == m->size || err) { + if (seq_overflow(m) || err) { m->count = offs; if (likely(err <= 0)) break; @@ -361,7 +377,7 @@ int seq_escape(struct seq_file *m, const char *s, const char *esc) *p++ = '0' + (c & 07); continue; } - m->count = m->size; + seq_set_overflow(m); return -1; } m->count = p - m->buf; @@ -383,7 +399,7 @@ int seq_printf(struct seq_file *m, const char *f, ...) 
return 0; } } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_printf); @@ -512,7 +528,7 @@ int seq_bitmap(struct seq_file *m, const unsigned long *bits, return 0; } } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_bitmap); @@ -528,7 +544,7 @@ int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, return 0; } } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_bitmap_list); @@ -639,7 +655,7 @@ int seq_puts(struct seq_file *m, const char *s) m->count += len; return 0; } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_puts); @@ -673,7 +689,7 @@ int seq_put_decimal_ull(struct seq_file *m, char delimiter, m->count += len; return 0; overflow: - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_put_decimal_ull); @@ -683,7 +699,7 @@ int seq_put_decimal_ll(struct seq_file *m, char delimiter, { if (num < 0) { if (m->count + 3 >= m->size) { - m->count = m->size; + seq_set_overflow(m); return -1; } if (delimiter) @@ -711,7 +727,7 @@ int seq_write(struct seq_file *seq, const void *data, size_t len) seq->count += len; return 0; } - seq->count = seq->size; + seq_set_overflow(seq); return -1; } EXPORT_SYMBOL(seq_write); -- cgit From 6308191f6f55d3629c7dbe72dfb856ad9fa560fd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 30 Mar 2012 18:26:36 +0200 Subject: tracing, sched, vfs: Fix 'old_pid' usage in trace_sched_process_exec() 1. TRACE_EVENT(sched_process_exec) forgets to actually use the old pid argument, it sets ->old_pid = p->pid. 2. search_binary_handler() uses the wrong pid number. tracepoint needs the global pid_t from the root namespace, while old_pid is the virtual pid number as it seen by the tracer/parent. With this patch we have two pid_t's in search_binary_handler(), not really nice. Perhaps we should switch to "struct pid*", but in this case it would be better to cleanup the current code first and move the "depth == 0" code outside. Signed-off-by: Oleg Nesterov Cc: David Smith Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Denys Vlasenko Link: http://lkml.kernel.org/r/20120330162636.GA4857@redhat.com Signed-off-by: Ingo Molnar --- fs/exec.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 23559c227d9c..644f6c4eb606 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1370,7 +1370,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; - pid_t old_pid; + pid_t old_pid, old_vpid; retval = security_bprm_check(bprm); if (retval) @@ -1381,8 +1381,9 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) return retval; /* Need to fetch pid before load_binary changes it */ + old_pid = current->pid; rcu_read_lock(); - old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); + old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); rcu_read_unlock(); retval = -ENOENT; @@ -1405,7 +1406,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) if (retval >= 0) { if (depth == 0) { trace_sched_process_exec(current, old_pid, bprm); - ptrace_event(PTRACE_EVENT_EXEC, old_pid); + ptrace_event(PTRACE_EVENT_EXEC, old_vpid); } put_binfmt(fmt); allow_write_access(bprm->file); -- cgit
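As a closing illustration of the two pid views this last patch separates, the sketch below shows the corresponding lookups side by side. example_report_exec() is a hypothetical helper; task_pid_nr(), task_pid_nr_ns() and task_active_pid_ns() are the real interfaces used by the fix.

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void example_report_exec(struct task_struct *task)
{
	pid_t global_pid, parent_view;

	/* Global pid number, valid in the initial pid namespace; this is
	 * what the tracepoint wants. */
	global_pid = task_pid_nr(task);

	/* The same task's pid as its parent (the potential tracer) sees it
	 * inside the parent's pid namespace; this is what ptrace_event()
	 * should report. */
	rcu_read_lock();
	parent_view = task_pid_nr_ns(task, task_active_pid_ns(task->parent));
	rcu_read_unlock();

	pr_info("exec: pid %d (global), %d (in parent's namespace)\n",
		global_pid, parent_view);
}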