fs/Kconfig | 20 fs/cifs/file.c | 2 fs/lockd/clntlock.c | 4 fs/lockd/clntproc.c | 182 +++- fs/lockd/host.c | 18 fs/lockd/svc4proc.c | 1 fs/lockd/svclock.c | 13 fs/lockd/svcproc.c | 1 fs/lockd/svcsubs.c | 2 fs/locks.c | 130 ++- fs/nfs/Makefile | 3 fs/nfs/callback.c | 325 ++++++++ fs/nfs/callback.h | 70 + fs/nfs/callback_proc.c | 85 ++ fs/nfs/callback_xdr.c | 481 ++++++++++++ fs/nfs/delegation.c | 320 ++++++++ fs/nfs/delegation.h | 56 + fs/nfs/dir.c | 125 +-- fs/nfs/direct.c | 47 - fs/nfs/file.c | 147 ++- fs/nfs/inode.c | 355 +++++---- fs/nfs/mount_clnt.c | 2 fs/nfs/nfs2xdr.c | 27 fs/nfs/nfs3proc.c | 74 - fs/nfs/nfs3xdr.c | 4 fs/nfs/nfs4proc.c | 1239 +++++++++++++++++++++++--------- fs/nfs/nfs4state.c | 371 +++++---- fs/nfs/nfs4xdr.c | 436 ++++++++--- fs/nfs/nfsroot.c | 6 fs/nfs/pagelist.c | 63 - fs/nfs/proc.c | 48 - fs/nfs/read.c | 62 - fs/nfs/unlink.c | 3 fs/nfs/write.c | 169 ++-- fs/nfsd/nfs4state.c | 13 include/linux/fs.h | 20 include/linux/lockd/lockd.h | 19 include/linux/nfs.h | 17 include/linux/nfs4.h | 5 include/linux/nfs_fs.h | 118 +-- include/linux/nfs_fs_i.h | 4 include/linux/nfs_fs_sb.h | 1 include/linux/nfs_page.h | 31 include/linux/nfs_xdr.h | 61 - include/linux/sunrpc/gss_asn1.h | 1 include/linux/sunrpc/gss_spkm3.h | 61 + include/linux/sunrpc/sched.h | 55 - include/linux/sunrpc/svc.h | 10 net/sunrpc/auth_gss/Makefile | 4 net/sunrpc/auth_gss/auth_gss.c | 2 net/sunrpc/auth_gss/gss_generic_token.c | 2 net/sunrpc/auth_gss/gss_krb5_unseal.c | 2 net/sunrpc/auth_gss/gss_spkm3_mech.c | 296 +++++++ net/sunrpc/auth_gss/gss_spkm3_seal.c | 132 +++ net/sunrpc/auth_gss/gss_spkm3_token.c | 266 ++++++ net/sunrpc/auth_gss/gss_spkm3_unseal.c | 128 +++ net/sunrpc/clnt.c | 30 net/sunrpc/sched.c | 519 +++---------- net/sunrpc/sunrpc_syms.c | 1 net/sunrpc/svc.c | 9 net/sunrpc/xprt.c | 2 61 files changed, 4996 insertions(+), 1704 deletions(-) diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/cifs/file.c linux-2.6.7-43-rpc_queue_lock/fs/cifs/file.c --- linux-2.6.7/fs/cifs/file.c 2004-07-02 18:43:20.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/cifs/file.c 2004-07-02 22:17:16.000000000 -0400 @@ -569,6 +569,8 @@ cifs_lock(struct file *file, int cmd, st netfid, length, pfLock->fl_start, numUnlock, numLock, lockType, wait_flag); + if (rc == 0 && (pfLock->fl_flags & FL_POSIX)) + posix_lock_file(file, pfLock); FreeXid(xid); return rc; } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/Kconfig linux-2.6.7-43-rpc_queue_lock/fs/Kconfig --- linux-2.6.7/fs/Kconfig 2004-07-02 18:43:44.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/Kconfig 2004-07-02 22:18:41.000000000 -0400 @@ -1382,8 +1382,8 @@ config NFS_V3 bool "Provide NFSv3 client support" depends on NFS_FS help - Say Y here if you want your NFS client to be able to speak the newer - version 3 of the NFS protocol. + Say Y here if you want your NFS client to be able to speak version + 3 of the NFS protocol. If unsure, say Y. @@ -1526,6 +1526,22 @@ config RPCSEC_GSS_KRB5 If unsure, say N. +config RPCSEC_GSS_SPKM3 + tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)" + depends on SUNRPC && EXPERIMENTAL + select SUNRPC_GSS + select CRYPTO + select CRYPTO_MD5 + select CRYPTO_DES + help + Provides for secure RPC calls by means of a gss-api + mechanism based on the SPKM3 public-key mechanism. + + Note: Requires an auxiliary userspace daemon which may be found on + http://www.citi.umich.edu/projects/nfsv4/ + + If unsure, say N. 
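The cifs_lock() hunk at the top of this patch shows the pattern the rest of the series applies to network locking: the server remains authoritative for conflicts, but a lock it grants is mirrored into the local VFS lock table so that F_GETLK and process-exit cleanup see it. A minimal sketch of that shape for a hypothetical filesystem ->lock() method follows, assuming the usual <linux/fs.h> declarations; examplefs_send_lock() is an invented RPC helper, not something in this patch.

/*
 * Sketch only: mirror a server-granted POSIX lock into the local
 * VFS lock table once the server has accepted it.
 */
static int examplefs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        int error;

        /* The server decides whether the lock conflicts. */
        error = examplefs_send_lock(filp, cmd, fl);
        if (error < 0)
                return error;
        /* Record the granted lock locally so the VFS stays in sync. */
        if (fl->fl_flags & FL_POSIX)
                error = posix_lock_file(filp, fl);
        return error;
}

The lockd client below does the equivalent with posix_lock_file_wait() once the NLM server answers NLM_LCK_GRANTED.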
+ config SMB_FS tristate "SMB file system support (to mount Windows shares etc.)" depends on INET diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/lockd/clntlock.c linux-2.6.7-43-rpc_queue_lock/fs/lockd/clntlock.c --- linux-2.6.7/fs/lockd/clntlock.c 2004-07-02 18:44:05.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/lockd/clntlock.c 2004-07-02 22:17:08.000000000 -0400 @@ -146,7 +146,7 @@ void nlmclnt_mark_reclaim(struct nlm_hos inode = fl->fl_file->f_dentry->d_inode; if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) continue; - if (fl->fl_u.nfs_fl.host != host) + if (fl->fl_u.nfs_fl.owner->host != host) continue; if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) continue; @@ -215,7 +215,7 @@ restart: inode = fl->fl_file->f_dentry->d_inode; if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) continue; - if (fl->fl_u.nfs_fl.host != host) + if (fl->fl_u.nfs_fl.owner->host != host) continue; if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM)) continue; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/lockd/clntproc.c linux-2.6.7-43-rpc_queue_lock/fs/lockd/clntproc.c --- linux-2.6.7/fs/lockd/clntproc.c 2004-07-02 18:43:21.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/lockd/clntproc.c 2004-07-02 22:17:16.000000000 -0400 @@ -27,6 +27,7 @@ static int nlmclnt_unlock(struct nlm_rqs static void nlmclnt_unlock_callback(struct rpc_task *); static void nlmclnt_cancel_callback(struct rpc_task *); static int nlm_stat_to_errno(u32 stat); +static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); /* * Cookie counter for NLM requests @@ -41,11 +42,83 @@ static inline void nlmclnt_next_cookie(s nlm_cookie++; } +static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner) +{ + atomic_inc(&lockowner->count); + return lockowner; +} + +static void nlm_put_lockowner(struct nlm_lockowner *lockowner) +{ + if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) + return; + list_del(&lockowner->list); + spin_unlock(&lockowner->host->h_lock); + nlm_release_host(lockowner->host); + kfree(lockowner); +} + +static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid) +{ + struct nlm_lockowner *lockowner; + list_for_each_entry(lockowner, &host->h_lockowners, list) { + if (lockowner->pid == pid) + return -EBUSY; + } + return 0; +} + +static inline uint32_t __nlm_alloc_pid(struct nlm_host *host) +{ + uint32_t res; + do { + res = host->h_pidcount++; + } while (nlm_pidbusy(host, res) < 0); + return res; +} + +static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) +{ + struct nlm_lockowner *lockowner; + list_for_each_entry(lockowner, &host->h_lockowners, list) { + if (lockowner->owner != owner) + continue; + return nlm_get_lockowner(lockowner); + } + return NULL; +} + +static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) +{ + struct nlm_lockowner *res, *new = NULL; + + spin_lock(&host->h_lock); + res = __nlm_find_lockowner(host, owner); + if (res == NULL) { + spin_unlock(&host->h_lock); + new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL); + spin_lock(&host->h_lock); + res = __nlm_find_lockowner(host, owner); + if (res == NULL && new != NULL) { + res = new; + atomic_set(&new->count, 1); + new->owner = owner; + new->pid = __nlm_alloc_pid(host); + new->host = nlm_get_host(host); + list_add(&new->list, &host->h_lockowners); + new = NULL; + } + } + spin_unlock(&host->h_lock); + if (new != NULL) + kfree(new); + return res; +} + /* * Initialize arguments for 
TEST/LOCK/UNLOCK/CANCEL calls */ -static inline void -nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) +static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_args *argp = &req->a_args; struct nlm_lock *lock = &argp->lock; @@ -60,6 +133,14 @@ nlmclnt_setlockargs(struct nlm_rqst *req locks_copy_lock(&lock->fl, fl); } +static void nlmclnt_release_lockargs(struct nlm_rqst *req) +{ + struct file_lock *fl = &req->a_args.lock.fl; + + if (fl->fl_ops && fl->fl_ops->fl_release_private) + fl->fl_ops->fl_release_private(fl); +} + /* * Initialize arguments for GRANTED call. The nlm_rqst structure * has been cleared already. @@ -77,8 +158,10 @@ nlmclnt_setgrantargs(struct nlm_rqst *ca if (lock->oh.len > NLMCLNT_OHSIZE) { void *data = kmalloc(lock->oh.len, GFP_KERNEL); - if (!data) + if (!data) { + nlmclnt_freegrantargs(call); return 0; + } call->a_args.lock.oh.data = (u8 *) data; } @@ -89,12 +172,15 @@ nlmclnt_setgrantargs(struct nlm_rqst *ca void nlmclnt_freegrantargs(struct nlm_rqst *call) { + struct file_lock *fl = &call->a_args.lock.fl; /* * Check whether we allocated memory for the owner. */ if (call->a_args.lock.oh.data != (u8 *) call->a_owner) { kfree(call->a_args.lock.oh.data); } + if (fl->fl_ops && fl->fl_ops->fl_release_private) + fl->fl_ops->fl_release_private(fl); } /* @@ -165,6 +251,8 @@ nlmclnt_proc(struct inode *inode, int cm } call->a_host = host; + nlmclnt_locks_init_private(fl, host); + /* Set up the argument struct */ nlmclnt_setlockargs(call, fl); @@ -179,9 +267,6 @@ nlmclnt_proc(struct inode *inode, int cm else status = -EINVAL; - if (status < 0 && (call->a_flags & RPC_TASK_ASYNC)) - kfree(call); - out_restore: spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = oldset; @@ -382,7 +467,9 @@ nlmclnt_test(struct nlm_rqst *req, struc { int status; - if ((status = nlmclnt_call(req, NLMPROC_TEST)) < 0) + status = nlmclnt_call(req, NLMPROC_TEST); + nlmclnt_release_lockargs(req); + if (status < 0) return status; status = req->a_res.status; @@ -391,10 +478,9 @@ nlmclnt_test(struct nlm_rqst *req, struc } if (status == NLM_LCK_DENIED) { /* * Report the conflicting lock back to the application. - * FIXME: Is it OK to report the pid back as well? 
*/ locks_copy_lock(fl, &req->a_res.lock.fl); - /* fl->fl_pid = 0; */ + fl->fl_pid = 0; } else { return nlm_stat_to_errno(req->a_res.status); } @@ -402,18 +488,36 @@ nlmclnt_test(struct nlm_rqst *req, struc return 0; } -static -void nlmclnt_insert_lock_callback(struct file_lock *fl) +static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) { - nlm_get_host(fl->fl_u.nfs_fl.host); + memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl)); + nlm_get_lockowner(new->fl_u.nfs_fl.owner); } -static -void nlmclnt_remove_lock_callback(struct file_lock *fl) + +static void nlmclnt_locks_release_private(struct file_lock *fl) { - if (fl->fl_u.nfs_fl.host) { - nlm_release_host(fl->fl_u.nfs_fl.host); - fl->fl_u.nfs_fl.host = NULL; - } + nlm_put_lockowner(fl->fl_u.nfs_fl.owner); + fl->fl_ops = NULL; +} + +static void nlmclnt_steal_locks(struct file_lock *fl, fl_owner_t owner) +{ + locks_remove_posix(fl->fl_file, owner); +} + +static struct file_lock_operations nlmclnt_lock_ops = { + .fl_copy_lock = nlmclnt_locks_copy_lock, + .fl_release_private = nlmclnt_locks_release_private, + .fl_steal_locks = nlmclnt_steal_locks, +}; + +static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) +{ + BUG_ON(fl->fl_ops != NULL); + fl->fl_u.nfs_fl.state = 0; + fl->fl_u.nfs_fl.flags = 0; + fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); + fl->fl_ops = &nlmclnt_lock_ops; } /* @@ -446,7 +550,8 @@ nlmclnt_lock(struct nlm_rqst *req, struc if (!host->h_monitored && nsm_monitor(host) < 0) { printk(KERN_NOTICE "lockd: failed to monitor %s\n", host->h_name); - return -ENOLCK; + status = -ENOLCK; + goto out; } do { @@ -456,18 +561,21 @@ nlmclnt_lock(struct nlm_rqst *req, struc status = nlmclnt_block(host, fl, &resp->status); } if (status < 0) - return status; + goto out; } while (resp->status == NLM_LCK_BLOCKED && req->a_args.block); if (resp->status == NLM_LCK_GRANTED) { fl->fl_u.nfs_fl.state = host->h_state; fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED; - fl->fl_u.nfs_fl.host = host; - fl->fl_insert = nlmclnt_insert_lock_callback; - fl->fl_remove = nlmclnt_remove_lock_callback; - } - - return nlm_stat_to_errno(resp->status); + fl->fl_flags |= FL_SLEEP; + if (posix_lock_file_wait(fl->fl_file, fl) < 0) + printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", + __FUNCTION__); + } + status = nlm_stat_to_errno(resp->status); +out: + nlmclnt_release_lockargs(req); + return status; } /* @@ -527,13 +635,24 @@ nlmclnt_unlock(struct nlm_rqst *req, str fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED; if (req->a_flags & RPC_TASK_ASYNC) { - return nlmclnt_async_call(req, NLMPROC_UNLOCK, + status = nlmclnt_async_call(req, NLMPROC_UNLOCK, nlmclnt_unlock_callback); + /* Hrmf... 
Do the unlock early since locks_remove_posix() + * really expects us to free the lock synchronously */ + posix_lock_file(fl->fl_file, fl); + if (status < 0) { + nlmclnt_release_lockargs(req); + kfree(req); + } + return status; } - if ((status = nlmclnt_call(req, NLMPROC_UNLOCK)) < 0) + status = nlmclnt_call(req, NLMPROC_UNLOCK); + nlmclnt_release_lockargs(req); + if (status < 0) return status; + posix_lock_file(fl->fl_file, fl); if (resp->status == NLM_LCK_GRANTED) return 0; @@ -564,9 +683,9 @@ nlmclnt_unlock_callback(struct rpc_task } if (status != NLM_LCK_GRANTED) printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); - die: nlm_release_host(req->a_host); + nlmclnt_release_lockargs(req); kfree(req); return; retry_rebind: @@ -605,8 +724,10 @@ nlmclnt_cancel(struct nlm_host *host, st status = nlmclnt_async_call(req, NLMPROC_CANCEL, nlmclnt_cancel_callback); - if (status < 0) + if (status < 0) { + nlmclnt_release_lockargs(req); kfree(req); + } spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = oldset; @@ -648,6 +769,7 @@ nlmclnt_cancel_callback(struct rpc_task die: nlm_release_host(req->a_host); + nlmclnt_release_lockargs(req); kfree(req); return; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/lockd/host.c linux-2.6.7-43-rpc_queue_lock/fs/lockd/host.c --- linux-2.6.7/fs/lockd/host.c 2004-07-02 18:43:43.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/lockd/host.c 2004-07-02 22:17:08.000000000 -0400 @@ -119,13 +119,15 @@ nlm_lookup_host(int server, struct socka init_MUTEX(&host->h_sema); host->h_nextrebind = jiffies + NLM_HOST_REBIND; host->h_expires = jiffies + NLM_HOST_EXPIRE; - host->h_count = 1; + atomic_set(&host->h_count, 1); init_waitqueue_head(&host->h_gracewait); host->h_state = 0; /* pseudo NSM state */ host->h_nsmstate = 0; /* real NSM state */ host->h_server = server; host->h_next = nlm_hosts[hash]; nlm_hosts[hash] = host; + INIT_LIST_HEAD(&host->h_lockowners); + spin_lock_init(&host->h_lock); if (++nrhosts > NLM_HOST_MAX) next_gc = 0; @@ -235,7 +237,7 @@ struct nlm_host * nlm_get_host(struct nl { if (host) { dprintk("lockd: get host %s\n", host->h_name); - host->h_count ++; + atomic_inc(&host->h_count); host->h_expires = jiffies + NLM_HOST_EXPIRE; } return host; @@ -246,9 +248,10 @@ struct nlm_host * nlm_get_host(struct nl */ void nlm_release_host(struct nlm_host *host) { - if (host && host->h_count) { + if (host != NULL) { dprintk("lockd: release host %s\n", host->h_name); - host->h_count --; + atomic_dec(&host->h_count); + BUG_ON(atomic_read(&host->h_count) < 0); } } @@ -283,7 +286,7 @@ nlm_shutdown_hosts(void) for (i = 0; i < NLM_HOST_NRHASH; i++) { for (host = nlm_hosts[i]; host; host = host->h_next) { dprintk(" %s (cnt %d use %d exp %ld)\n", - host->h_name, host->h_count, + host->h_name, atomic_read(&host->h_count), host->h_inuse, host->h_expires); } } @@ -314,10 +317,10 @@ nlm_gc_hosts(void) for (i = 0; i < NLM_HOST_NRHASH; i++) { q = &nlm_hosts[i]; while ((host = *q) != NULL) { - if (host->h_count || host->h_inuse + if (atomic_read(&host->h_count) || host->h_inuse || time_before(jiffies, host->h_expires)) { dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n", - host->h_name, host->h_count, + host->h_name, atomic_read(&host->h_count), host->h_inuse, host->h_expires); q = &host->h_next; continue; @@ -336,6 +339,7 @@ nlm_gc_hosts(void) rpc_destroy_client(host->h_rpcclnt); } } + BUG_ON(!list_empty(&host->h_lockowners)); kfree(host); nrhosts--; } diff -u --recursive --new-file --show-c-function 
linux-2.6.7/fs/lockd/svc4proc.c linux-2.6.7-43-rpc_queue_lock/fs/lockd/svc4proc.c --- linux-2.6.7/fs/lockd/svc4proc.c 2004-07-02 18:43:25.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/lockd/svc4proc.c 2004-07-02 22:17:04.000000000 -0400 @@ -55,6 +55,7 @@ nlm4svc_retrieve_args(struct svc_rqst *r /* Set up the missing parts of the file_lock structure */ lock->fl.fl_file = &file->f_file; lock->fl.fl_owner = (fl_owner_t) host; + lock->fl.fl_lmops = &nlmsvc_lock_operations; } return 0; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/lockd/svclock.c linux-2.6.7-43-rpc_queue_lock/fs/lockd/svclock.c --- linux-2.6.7/fs/lockd/svclock.c 2004-07-02 18:43:24.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/lockd/svclock.c 2004-07-02 22:17:12.000000000 -0400 @@ -42,7 +42,6 @@ static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); static int nlmsvc_remove_block(struct nlm_block *block); static void nlmsvc_grant_callback(struct rpc_task *task); -static void nlmsvc_notify_blocked(struct file_lock *); /* * The list of blocked locks to retry @@ -193,7 +192,7 @@ nlmsvc_create_block(struct svc_rqst *rqs goto failed_free; /* Set notifier function for VFS, and init args */ - block->b_call.a_args.lock.fl.fl_notify = nlmsvc_notify_blocked; + block->b_call.a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; block->b_call.a_args.cookie = *cookie; /* see above */ dprintk("lockd: created block %p...\n", block); @@ -479,6 +478,16 @@ nlmsvc_notify_blocked(struct file_lock * printk(KERN_WARNING "lockd: notification for unknown block!\n"); } +static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2) +{ + return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid; +} + +struct lock_manager_operations nlmsvc_lock_operations = { + .fl_compare_owner = nlmsvc_same_owner, + .fl_notify = nlmsvc_notify_blocked, +}; + /* * Try to claim a lock that was previously blocked. 
* diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/lockd/svcproc.c linux-2.6.7-43-rpc_queue_lock/fs/lockd/svcproc.c --- linux-2.6.7/fs/lockd/svcproc.c 2004-07-02 18:43:45.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/lockd/svcproc.c 2004-07-02 22:17:04.000000000 -0400 @@ -84,6 +84,7 @@ nlmsvc_retrieve_args(struct svc_rqst *rq /* Set up the missing parts of the file_lock structure */ lock->fl.fl_file = &file->f_file; lock->fl.fl_owner = (fl_owner_t) host; + lock->fl.fl_lmops = &nlmsvc_lock_operations; } return 0; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/lockd/svcsubs.c linux-2.6.7-43-rpc_queue_lock/fs/lockd/svcsubs.c --- linux-2.6.7/fs/lockd/svcsubs.c 2004-07-02 18:43:45.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/lockd/svcsubs.c 2004-07-02 22:18:32.000000000 -0400 @@ -67,7 +67,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, down(&nlm_file_sema); for (file = nlm_files[hash]; file; file = file->f_next) - if (!memcmp(&file->f_handle, f, sizeof(*f))) + if (!nfs_compare_fh(&file->f_handle, f)) goto found; dprintk("lockd: creating file for (%08x %08x %08x %08x %08x %08x)\n", diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/locks.c linux-2.6.7-43-rpc_queue_lock/fs/locks.c --- linux-2.6.7/fs/locks.c 2004-07-02 18:44:05.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/locks.c 2004-07-02 22:17:16.000000000 -0400 @@ -167,6 +167,12 @@ static inline void locks_free_lock(struc if (!list_empty(&fl->fl_link)) panic("Attempting to free lock on active lock list"); + if (fl->fl_ops && fl->fl_ops->fl_release_private) { + fl->fl_ops->fl_release_private(fl); + fl->fl_ops = NULL; + } + fl->fl_lmops = NULL; + kmem_cache_free(filelock_cache, fl); } @@ -183,9 +189,8 @@ void locks_init_lock(struct file_lock *f fl->fl_flags = 0; fl->fl_type = 0; fl->fl_start = fl->fl_end = 0; - fl->fl_notify = NULL; - fl->fl_insert = NULL; - fl->fl_remove = NULL; + fl->fl_ops = NULL; + fl->fl_lmops = NULL; } EXPORT_SYMBOL(locks_init_lock); @@ -217,10 +222,10 @@ void locks_copy_lock(struct file_lock *n new->fl_type = fl->fl_type; new->fl_start = fl->fl_start; new->fl_end = fl->fl_end; - new->fl_notify = fl->fl_notify; - new->fl_insert = fl->fl_insert; - new->fl_remove = fl->fl_remove; - new->fl_u = fl->fl_u; + new->fl_ops = fl->fl_ops; + new->fl_lmops = fl->fl_lmops; + if (fl->fl_ops && fl->fl_ops->fl_copy_lock) + fl->fl_ops->fl_copy_lock(new, fl); } EXPORT_SYMBOL(locks_copy_lock); @@ -321,9 +326,8 @@ static int flock_to_posix_lock(struct fi fl->fl_pid = current->tgid; fl->fl_file = filp; fl->fl_flags = FL_POSIX; - fl->fl_notify = NULL; - fl->fl_insert = NULL; - fl->fl_remove = NULL; + fl->fl_ops = NULL; + fl->fl_lmops = NULL; return assign_type(fl, l->l_type); } @@ -361,9 +365,8 @@ static int flock64_to_posix_lock(struct fl->fl_pid = current->tgid; fl->fl_file = filp; fl->fl_flags = FL_POSIX; - fl->fl_notify = NULL; - fl->fl_insert = NULL; - fl->fl_remove = NULL; + fl->fl_ops = NULL; + fl->fl_lmops = NULL; switch (l->l_type) { case F_RDLCK: @@ -397,9 +400,8 @@ static int lease_alloc(struct file *filp } fl->fl_start = 0; fl->fl_end = OFFSET_MAX; - fl->fl_notify = NULL; - fl->fl_insert = NULL; - fl->fl_remove = NULL; + fl->fl_ops = NULL; + fl->fl_lmops = NULL; *flp = fl; return 0; @@ -414,14 +416,15 @@ static inline int locks_overlap(struct f } /* - * Check whether two locks have the same owner. The apparently superfluous - * check for fl_pid enables us to distinguish between locks set by lockd. + * Check whether two locks have the same owner. 
*/ static inline int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) { - return (fl1->fl_owner == fl2->fl_owner) && - (fl1->fl_pid == fl2->fl_pid); + if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner) + return fl2->fl_lmops == fl1->fl_lmops && + fl1->fl_lmops->fl_compare_owner(fl1, fl2); + return fl1->fl_owner == fl2->fl_owner; } /* Remove waiter from blocker's block list. @@ -472,8 +475,8 @@ static void locks_wake_up_blocks(struct struct file_lock *waiter = list_entry(blocker->fl_block.next, struct file_lock, fl_block); __locks_delete_block(waiter); - if (waiter->fl_notify) - waiter->fl_notify(waiter); + if (waiter->fl_lmops && waiter->fl_lmops->fl_notify) + waiter->fl_lmops->fl_notify(waiter); else wake_up(&waiter->fl_wait); } @@ -490,8 +493,8 @@ static void locks_insert_lock(struct fil fl->fl_next = *pos; *pos = fl; - if (fl->fl_insert) - fl->fl_insert(fl); + if (fl->fl_ops && fl->fl_ops->fl_insert) + fl->fl_ops->fl_insert(fl); } /* @@ -514,8 +517,8 @@ static void locks_delete_lock(struct fil fl->fl_fasync = NULL; } - if (fl->fl_remove) - fl->fl_remove(fl); + if (fl->fl_ops && fl->fl_ops->fl_remove) + fl->fl_ops->fl_remove(fl); locks_wake_up_blocks(fl); locks_free_lock(fl); @@ -631,24 +634,15 @@ int posix_locks_deadlock(struct file_loc struct file_lock *block_fl) { struct list_head *tmp; - fl_owner_t caller_owner, blocked_owner; - unsigned int caller_pid, blocked_pid; - - caller_owner = caller_fl->fl_owner; - caller_pid = caller_fl->fl_pid; - blocked_owner = block_fl->fl_owner; - blocked_pid = block_fl->fl_pid; next_task: - if (caller_owner == blocked_owner && caller_pid == blocked_pid) + if (posix_same_owner(caller_fl, block_fl)) return 1; list_for_each(tmp, &blocked_list) { struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); - if ((fl->fl_owner == blocked_owner) - && (fl->fl_pid == blocked_pid)) { + if (posix_same_owner(fl, block_fl)) { fl = fl->fl_next; - blocked_owner = fl->fl_owner; - blocked_pid = fl->fl_pid; + block_fl = fl; goto next_task; } } @@ -912,6 +906,34 @@ int posix_lock_file(struct file *filp, s } /** + * posix_lock_file_wait - Apply a POSIX-style lock to a file + * @filp: The file to apply the lock to + * @fl: The lock to be applied + * + * Add a POSIX style lock to a file. + * We merge adjacent & overlapping locks whenever possible. 
+ * POSIX locks are sorted by owner task, then by starting address + */ +int posix_lock_file_wait(struct file *filp, struct file_lock *fl) +{ + int error; + might_sleep (); + for (;;) { + error = __posix_lock_file(filp->f_dentry->d_inode, fl); + if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) + break; + error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); + if (!error) + continue; + + locks_delete_block(fl); + break; + } + return error; +} +EXPORT_SYMBOL(posix_lock_file_wait); + +/** * locks_mandatory_locked - Check for an active lock * @inode: the file to check * @@ -988,6 +1010,8 @@ int locks_mandatory_area(int read_write, break; } + if (fl.fl_ops && fl.fl_ops->fl_release_private) + fl.fl_ops->fl_release_private(&fl); return error; } @@ -1422,7 +1446,6 @@ int fcntl_getlk(struct file *filp, struc error = -EFAULT; if (!copy_to_user(l, &flock, sizeof(flock))) error = 0; - out: return error; } @@ -1489,8 +1512,7 @@ int fcntl_setlk(struct file *filp, unsig if (filp->f_op && filp->f_op->lock != NULL) { error = filp->f_op->lock(filp, cmd, file_lock); - if (error < 0) - goto out; + goto out; } for (;;) { @@ -1624,8 +1646,7 @@ int fcntl_setlk64(struct file *filp, uns if (filp->f_op && filp->f_op->lock != NULL) { error = filp->f_op->lock(filp, cmd, file_lock); - if (error < 0) - goto out; + goto out; } for (;;) { @@ -1672,10 +1693,12 @@ void locks_remove_posix(struct file *fil lock.fl_owner = owner; lock.fl_pid = current->tgid; lock.fl_file = filp; + lock.fl_ops = NULL; + lock.fl_lmops = NULL; if (filp->f_op && filp->f_op->lock != NULL) { filp->f_op->lock(filp, F_SETLK, &lock); - /* Ignore any error -- we must remove the locks anyway */ + goto out; } /* Can't use posix_lock_file here; we need to remove it no matter @@ -1684,13 +1707,16 @@ void locks_remove_posix(struct file *fil lock_kernel(); while (*before != NULL) { struct file_lock *fl = *before; - if (IS_POSIX(fl) && (fl->fl_owner == owner)) { + if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) { locks_delete_lock(before); continue; } before = &fl->fl_next; } unlock_kernel(); +out: + if (lock.fl_ops && lock.fl_ops->fl_release_private) + lock.fl_ops->fl_release_private(&lock); } EXPORT_SYMBOL(locks_remove_posix); @@ -1985,12 +2011,18 @@ EXPORT_SYMBOL(lock_may_write); static inline void __steal_locks(struct file *file, fl_owner_t from) { struct inode *inode = file->f_dentry->d_inode; - struct file_lock *fl = inode->i_flock; + struct file_lock *fl; - while (fl) { - if (fl->fl_file == file && fl->fl_owner == from) +restart: + for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { + if (fl->fl_file == file && fl->fl_owner == from) { + if (fl->fl_ops && fl->fl_ops->fl_steal_locks) { + fl->fl_ops->fl_steal_locks(fl, from); + /* Some filesystems may just drop the lock */ + goto restart; + } fl->fl_owner = current->files; - fl = fl->fl_next; + } } } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/callback.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback.c --- linux-2.6.7/fs/nfs/callback.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback.c 2004-07-02 22:19:28.000000000 -0400 @@ -0,0 +1,325 @@ +/* + * linux/fs/nfs/callback.c + * + * Copyright (C) 2004 Trond Myklebust + * + * NFSv4 callback handling + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "callback.h" + +#define NFSDBG_FACILITY NFSDBG_CALLBACK + +struct nfs_callback_data { + unsigned int users; + struct svc_serv *serv; + pid_t pid; + struct completion started; + struct 
completion stopped; +}; + +static struct nfs_callback_data nfs_callback_info; +static DECLARE_MUTEX(nfs_callback_sema); +static struct svc_program nfs4_callback_program; + +unsigned short nfs_callback_tcpport; + +/* + * This is the callback kernel thread. + */ +static void nfs_callback_svc(struct svc_rqst *rqstp) +{ + struct svc_serv *serv = rqstp->rq_server; + int err; + + __module_get(THIS_MODULE); + lock_kernel(); + + nfs_callback_info.pid = current->pid; + daemonize("nfsv4-svc"); + /* Process request with signals blocked, but allow SIGKILL. */ + allow_signal(SIGKILL); + + complete(&nfs_callback_info.started); + + while (nfs_callback_info.users != 0 || !signalled()) { + /* + * Listen for a request on the socket + */ + err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT); + if (err == -EAGAIN || err == -EINTR) + continue; + if (err < 0) { + printk(KERN_WARNING + "%s: terminating on error %d\n", + __FUNCTION__, -err); + break; + } + dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__, + NIPQUAD(rqstp->rq_addr.sin_addr.s_addr)); + svc_process(serv, rqstp); + } + + nfs_callback_info.pid = 0; + complete(&nfs_callback_info.stopped); + unlock_kernel(); + module_put_and_exit(0); +} + +/* + * Bring up the server process if it is not already up. + */ +int nfs_callback_up(void) +{ + struct svc_serv *serv; + struct svc_sock *svsk; + int ret = 0; + + lock_kernel(); + down(&nfs_callback_sema); + if (nfs_callback_info.users++ || nfs_callback_info.pid != 0) + goto out; + init_completion(&nfs_callback_info.started); + init_completion(&nfs_callback_info.stopped); + serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE); + ret = -ENOMEM; + if (!serv) + goto out_err; + /* FIXME: We don't want to register this socket with the portmapper */ + ret = svc_makesock(serv, IPPROTO_TCP, 0); + if (ret < 0) + goto out_destroy; + if (!list_empty(&serv->sv_permsocks)) { + svsk = list_entry(serv->sv_permsocks.next, + struct svc_sock, sk_list); + nfs_callback_tcpport = ntohs(inet_sk(svsk->sk_sk)->sport); + dprintk ("Callback port = 0x%x\n", nfs_callback_tcpport); + } else + BUG(); + ret = svc_create_thread(nfs_callback_svc, serv); + if (ret < 0) + goto out_destroy; + nfs_callback_info.serv = serv; + wait_for_completion(&nfs_callback_info.started); +out: + up(&nfs_callback_sema); + unlock_kernel(); + return ret; +out_destroy: + svc_destroy(serv); +out_err: + nfs_callback_info.users--; + goto out; +} + +/* + * Kill the server process if it is not already up. 
+ */ +int nfs_callback_down(void) +{ + int ret = 0; + + lock_kernel(); + down(&nfs_callback_sema); + if (--nfs_callback_info.users || nfs_callback_info.pid == 0) + goto out; + kill_proc(nfs_callback_info.pid, SIGKILL, 1); + wait_for_completion(&nfs_callback_info.stopped); +out: + up(&nfs_callback_sema); + unlock_kernel(); + return ret; +} + +/* + * AUTH_NULL authentication + */ +static int nfs_callback_null_accept(struct svc_rqst *rqstp, u32 *authp) +{ + struct iovec *argv = &rqstp->rq_arg.head[0]; + struct iovec *resv = &rqstp->rq_res.head[0]; + + if (argv->iov_len < 3*4) + return SVC_GARBAGE; + + if (svc_getu32(argv) != 0) { + dprintk("svc: bad null cred\n"); + *authp = rpc_autherr_badcred; + return SVC_DENIED; + } + if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) { + dprintk("svc: bad null verf\n"); + *authp = rpc_autherr_badverf; + return SVC_DENIED; + } + + /* Signal that mapping to nobody uid/gid is required */ + rqstp->rq_cred.cr_uid = (uid_t) -1; + rqstp->rq_cred.cr_gid = (gid_t) -1; + rqstp->rq_cred.cr_group_info = groups_alloc(0); + if (rqstp->rq_cred.cr_group_info == NULL) + return SVC_DROP; /* kmalloc failure - client must retry */ + + /* Put NULL verifier */ + svc_putu32(resv, RPC_AUTH_NULL); + svc_putu32(resv, 0); + dprintk("%s: success, returning %d!\n", __FUNCTION__, SVC_OK); + return SVC_OK; +} + +static int nfs_callback_null_release(struct svc_rqst *rqstp) +{ + if (rqstp->rq_cred.cr_group_info) + put_group_info(rqstp->rq_cred.cr_group_info); + rqstp->rq_cred.cr_group_info = NULL; + return 0; /* don't drop */ +} + +static struct auth_ops nfs_callback_auth_null = { + .name = "null", + .flavour = RPC_AUTH_NULL, + .accept = nfs_callback_null_accept, + .release = nfs_callback_null_release, +}; + +/* + * AUTH_SYS authentication + */ +static int nfs_callback_unix_accept(struct svc_rqst *rqstp, u32 *authp) +{ + struct iovec *argv = &rqstp->rq_arg.head[0]; + struct iovec *resv = &rqstp->rq_res.head[0]; + struct svc_cred *cred = &rqstp->rq_cred; + u32 slen, i; + int len = argv->iov_len; + + dprintk("%s: start\n", __FUNCTION__); + cred->cr_group_info = NULL; + rqstp->rq_client = NULL; + if ((len -= 3*4) < 0) + return SVC_GARBAGE; + + /* Get length, time stamp and machine name */ + svc_getu32(argv); + svc_getu32(argv); + slen = XDR_QUADLEN(ntohl(svc_getu32(argv))); + if (slen > 64 || (len -= (slen + 3)*4) < 0) + goto badcred; + argv->iov_base = (void*)((u32*)argv->iov_base + slen); + argv->iov_len -= slen*4; + + cred->cr_uid = ntohl(svc_getu32(argv)); + cred->cr_gid = ntohl(svc_getu32(argv)); + slen = ntohl(svc_getu32(argv)); + if (slen > 16 || (len -= (slen + 2)*4) < 0) + goto badcred; + cred->cr_group_info = groups_alloc(slen); + if (cred->cr_group_info == NULL) + return SVC_DROP; + for (i = 0; i < slen; i++) + GROUP_AT(cred->cr_group_info, i) = ntohl(svc_getu32(argv)); + + if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) { + *authp = rpc_autherr_badverf; + return SVC_DENIED; + } + /* Put NULL verifier */ + svc_putu32(resv, RPC_AUTH_NULL); + svc_putu32(resv, 0); + dprintk("%s: success, returning %d!\n", __FUNCTION__, SVC_OK); + return SVC_OK; +badcred: + *authp = rpc_autherr_badcred; + return SVC_DENIED; +} + +static int nfs_callback_unix_release(struct svc_rqst *rqstp) +{ + if (rqstp->rq_cred.cr_group_info) + put_group_info(rqstp->rq_cred.cr_group_info); + rqstp->rq_cred.cr_group_info = NULL; + return 0; +} + +static struct auth_ops nfs_callback_auth_unix = { + .name = "unix", + .flavour = RPC_AUTH_UNIX, + .accept = nfs_callback_unix_accept, + 
.release = nfs_callback_unix_release, +}; + +/* + * Hook the authentication protocol + */ +static int nfs_callback_auth(struct svc_rqst *rqstp, u32 *authp) +{ + struct in_addr *addr = &rqstp->rq_addr.sin_addr; + struct nfs4_client *clp; + struct iovec *argv = &rqstp->rq_arg.head[0]; + int flavour; + int retval; + + /* Don't talk to strangers */ + clp = nfs4_find_client(addr); + if (clp == NULL) + return SVC_DROP; + dprintk("%s: %u.%u.%u.%u NFSv4 callback!\n", __FUNCTION__, NIPQUAD(addr)); + nfs4_put_client(clp); + flavour = ntohl(svc_getu32(argv)); + switch(flavour) { + case RPC_AUTH_NULL: + if (rqstp->rq_proc != CB_NULL) { + *authp = rpc_autherr_tooweak; + retval = SVC_DENIED; + break; + } + rqstp->rq_authop = &nfs_callback_auth_null; + retval = nfs_callback_null_accept(rqstp, authp); + break; + case RPC_AUTH_UNIX: + /* Eat the authentication flavour */ + rqstp->rq_authop = &nfs_callback_auth_unix; + retval = nfs_callback_unix_accept(rqstp, authp); + break; + default: + /* FIXME: need to add RPCSEC_GSS upcalls */ +#if 0 + svc_ungetu32(argv); + retval = svc_authenticate(rqstp, authp); +#else + *authp = rpc_autherr_rejectedcred; + retval = SVC_DENIED; +#endif + } + dprintk("%s: flavour %d returning error %d\n", __FUNCTION__, flavour, retval); + return retval; +} + +/* + * Define NFS4 callback program + */ +extern struct svc_version nfs4_callback_version1; + +static struct svc_version *nfs4_callback_version[] = { + [1] = &nfs4_callback_version1, +}; + +static struct svc_stat nfs4_callback_stats; + +static struct svc_program nfs4_callback_program = { + .pg_prog = NFS4_CALLBACK, /* RPC service number */ + .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */ + .pg_vers = nfs4_callback_version, /* version table */ + .pg_name = "NFSv4 callback", /* service name */ + .pg_class = "nfs", /* authentication class */ + .pg_stats = &nfs4_callback_stats, + .pg_authenticate = nfs_callback_auth, +}; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/callback.h linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback.h --- linux-2.6.7/fs/nfs/callback.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback.h 2004-07-02 22:19:28.000000000 -0400 @@ -0,0 +1,70 @@ +/* + * linux/fs/nfs/callback.h + * + * Copyright (C) 2004 Trond Myklebust + * + * NFSv4 callback definitions + */ +#ifndef __LINUX_FS_NFS_CALLBACK_H +#define __LINUX_FS_NFS_CALLBACK_H + +#define NFS4_CALLBACK 0x40000000 +#define NFS4_CALLBACK_XDRSIZE 2048 +#define NFS4_CALLBACK_BUFSIZE (1024 + NFS4_CALLBACK_XDRSIZE) + +enum nfs4_callback_procnum { + CB_NULL = 0, + CB_COMPOUND = 1, +}; + +enum nfs4_callback_opnum { + OP_CB_GETATTR = 3, + OP_CB_RECALL = 4, + OP_CB_ILLEGAL = 10044, +}; + +struct cb_compound_hdr_arg { + int taglen; + const char *tag; + unsigned int callback_ident; + unsigned nops; +}; + +struct cb_compound_hdr_res { + uint32_t *status; + int taglen; + const char *tag; + uint32_t *nops; +}; + +struct cb_getattrargs { + struct sockaddr_in *addr; + struct nfs_fh fh; + uint32_t bitmap[2]; +}; + +struct cb_getattrres { + uint32_t status; + uint32_t bitmap[2]; + uint64_t size; + uint64_t change_attr; + struct timespec ctime; + struct timespec mtime; +}; + +struct cb_recallargs { + struct sockaddr_in *addr; + struct nfs_fh fh; + nfs4_stateid stateid; + uint32_t truncate; +}; + +extern unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res); +extern unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy); + +extern int nfs_callback_up(void); 
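/*
 * Illustrative sketch, not part of the patch: the callback service is
 * reference counted (nfs_callback_info.users above), so a hypothetical
 * NFSv4 client setup path can simply pair nfs_callback_up() with a
 * matching nfs_callback_down() at teardown; the svc thread is created
 * for the first user and killed when the last user disappears.  The
 * helper name below is invented.
 */
static inline int example_nfs4_callback_start(void)
{
        int error = nfs_callback_up();

        if (error < 0)
                return error;
        /* nfs_callback_tcpport can now be advertised in SETCLIENTID. */
        return 0;
}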
+extern int nfs_callback_down(void); + +extern unsigned short nfs_callback_tcpport; + +#endif /* __LINUX_FS_NFS_CALLBACK_H */ diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/callback_proc.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback_proc.c --- linux-2.6.7/fs/nfs/callback_proc.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback_proc.c 2004-07-02 22:19:28.000000000 -0400 @@ -0,0 +1,85 @@ +/* + * linux/fs/nfs/callback_proc.c + * + * Copyright (C) 2004 Trond Myklebust + * + * NFSv4 callback procedures + */ +#include +#include +#include +#include "callback.h" +#include "delegation.h" + +#define NFSDBG_FACILITY NFSDBG_CALLBACK + +unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res) +{ + struct nfs4_client *clp; + struct nfs_delegation *delegation; + struct nfs_inode *nfsi; + struct inode *inode; + + res->bitmap[0] = res->bitmap[1] = 0; + res->status = htonl(NFS4ERR_BADHANDLE); + clp = nfs4_find_client(&args->addr->sin_addr); + if (clp == NULL) + goto out; + inode = nfs_delegation_find_inode(clp, &args->fh); + if (inode == NULL) + goto out_putclient; + nfsi = NFS_I(inode); + down_read(&nfsi->rwsem); + delegation = nfsi->delegation; + if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0) + goto out_iput; + res->size = i_size_read(inode); + res->change_attr = NFS_CHANGE_ATTR(inode); + res->ctime = inode->i_ctime; + res->mtime = inode->i_mtime; + res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) & + args->bitmap[0]; + res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) & + args->bitmap[1]; + res->status = 0; +out_iput: + up_read(&nfsi->rwsem); + iput(inode); +out_putclient: + nfs4_put_client(clp); +out: + dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res->status)); + return res->status; +} + +unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy) +{ + struct nfs4_client *clp; + struct inode *inode; + unsigned res; + + res = htonl(NFS4ERR_BADHANDLE); + clp = nfs4_find_client(&args->addr->sin_addr); + if (clp == NULL) + goto out; + inode = nfs_delegation_find_inode(clp, &args->fh); + if (inode == NULL) + goto out_putclient; + /* Set up a helper thread to actually return the delegation */ + switch(nfs_async_inode_return_delegation(inode, &args->stateid)) { + case 0: + res = 0; + break; + case -ENOENT: + res = htonl(NFS4ERR_BAD_STATEID); + break; + default: + res = htonl(NFS4ERR_RESOURCE); + } + iput(inode); +out_putclient: + nfs4_put_client(clp); +out: + dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res)); + return res; +} diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/callback_xdr.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback_xdr.c --- linux-2.6.7/fs/nfs/callback_xdr.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/callback_xdr.c 2004-07-02 22:19:28.000000000 -0400 @@ -0,0 +1,481 @@ +/* + * linux/fs/nfs/callback_xdr.c + * + * Copyright (C) 2004 Trond Myklebust + * + * NFSv4 callback encode/decode procedures + */ +#include +#include +#include +#include +#include +#include "callback.h" + +#define CB_OP_TAGLEN_MAXSZ (512) +#define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ) +#define CB_OP_GETATTR_BITMAP_MAXSZ (4) +#define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ + CB_OP_GETATTR_BITMAP_MAXSZ + \ + 2 + 2 + 3 + 3) +#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) + +#define NFSDBG_FACILITY NFSDBG_CALLBACK + +typedef unsigned (*callback_process_op_t)(void *, void *); 
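/*
 * Illustrative sketch, not part of the patch: the decode routines in
 * this file share one shape - pull the raw XDR words with read_buf()
 * (defined below), convert from big-endian, and turn a short buffer
 * into NFS4ERR_RESOURCE.  decode_uint64() is an invented example of
 * that pattern.
 */
static unsigned decode_uint64(struct xdr_stream *xdr, uint64_t *val)
{
        uint32_t *p = read_buf(xdr, 8);

        if (unlikely(p == NULL))
                return htonl(NFS4ERR_RESOURCE);
        xdr_decode_hyper(p, val);
        return 0;
}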
+typedef unsigned (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); +typedef unsigned (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); + + +struct callback_op { + callback_process_op_t process_op; + callback_decode_arg_t decode_args; + callback_encode_res_t encode_res; + long res_maxsize; +}; + +static struct callback_op callback_ops[]; + +static int nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp) +{ + return htonl(NFS4_OK); +} + +static int nfs4_decode_void(struct svc_rqst *rqstp, uint32_t *p, void *dummy) +{ + return xdr_argsize_check(rqstp, p); +} + +static int nfs4_encode_void(struct svc_rqst *rqstp, uint32_t *p, void *dummy) +{ + return xdr_ressize_check(rqstp, p); +} + +static uint32_t *read_buf(struct xdr_stream *xdr, int nbytes) +{ + uint32_t *p; + + p = xdr_inline_decode(xdr, nbytes); + if (unlikely(p == NULL)) + printk(KERN_WARNING "NFSv4 callback reply buffer overflowed!\n"); + return p; +} + +static unsigned decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str) +{ + uint32_t *p; + + p = read_buf(xdr, 4); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + *len = ntohl(*p); + + if (*len != 0) { + p = read_buf(xdr, *len); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + *str = (const char *)p; + } else + *str = NULL; + + return 0; +} + +static unsigned decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh) +{ + uint32_t *p; + + p = read_buf(xdr, 4); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + fh->size = ntohl(*p); + if (fh->size > NFS4_FHSIZE) + return htonl(NFS4ERR_BADHANDLE); + p = read_buf(xdr, fh->size); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + memcpy(&fh->data[0], p, fh->size); + memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size); + return 0; +} + +static unsigned decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap) +{ + uint32_t *p; + unsigned int attrlen; + + p = read_buf(xdr, 4); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + attrlen = ntohl(*p); + p = read_buf(xdr, attrlen << 2); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + if (likely(attrlen > 0)) + bitmap[0] = ntohl(*p++); + if (attrlen > 1) + bitmap[1] = ntohl(*p); + return 0; +} + +static unsigned decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) +{ + uint32_t *p; + + p = read_buf(xdr, 16); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + memcpy(stateid->data, p, 16); + return 0; +} + +static unsigned decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr) +{ + uint32_t *p; + unsigned int minor_version; + unsigned status; + + status = decode_string(xdr, &hdr->taglen, &hdr->tag); + if (unlikely(status != 0)) + return status; + /* We do not like overly long tags! */ + if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12 || hdr->taglen < 0) { + printk("NFSv4 CALLBACK %s: client sent tag of length %u\n", + __FUNCTION__, hdr->taglen); + return htonl(NFS4ERR_RESOURCE); + } + p = read_buf(xdr, 12); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + minor_version = ntohl(*p++); + /* Check minor version is zero. 
*/ + if (minor_version != 0) { + printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n", + __FUNCTION__, minor_version); + return htonl(NFS4ERR_MINOR_VERS_MISMATCH); + } + hdr->callback_ident = ntohl(*p++); + hdr->nops = ntohl(*p); + return 0; +} + +static unsigned decode_op_hdr(struct xdr_stream *xdr, unsigned int *op) +{ + uint32_t *p; + p = read_buf(xdr, 4); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + *op = ntohl(*p); + return 0; +} + +static unsigned decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args) +{ + unsigned status; + + status = decode_fh(xdr, &args->fh); + if (unlikely(status != 0)) + goto out; + args->addr = &rqstp->rq_addr; + status = decode_bitmap(xdr, args->bitmap); +out: + dprintk("%s: exit with status = %d\n", __FUNCTION__, status); + return status; +} + +static unsigned decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args) +{ + uint32_t *p; + unsigned status; + + args->addr = &rqstp->rq_addr; + status = decode_stateid(xdr, &args->stateid); + if (unlikely(status != 0)) + goto out; + p = read_buf(xdr, 4); + if (unlikely(p == NULL)) { + status = htonl(NFS4ERR_RESOURCE); + goto out; + } + args->truncate = ntohl(*p); + status = decode_fh(xdr, &args->fh); +out: + dprintk("%s: exit with status = %d\n", __FUNCTION__, status); + return 0; +} + +static unsigned encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) +{ + uint32_t *p; + + p = xdr_reserve_space(xdr, 4 + len); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + xdr_encode_opaque(p, str, len); + return 0; +} + +#define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) +#define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) +static unsigned encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, uint32_t **savep) +{ + uint32_t bm[2]; + uint32_t *p; + + bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0); + bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1); + if (bm[1] != 0) { + p = xdr_reserve_space(xdr, 16); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + *p++ = htonl(2); + *p++ = bm[0]; + *p++ = bm[1]; + } else if (bm[0] != 0) { + p = xdr_reserve_space(xdr, 12); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + *p++ = htonl(1); + *p++ = bm[0]; + } else { + p = xdr_reserve_space(xdr, 8); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + *p++ = htonl(0); + } + *savep = p; + return 0; +} + +static unsigned encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change) +{ + uint32_t *p; + + if (!(bitmap[0] & FATTR4_WORD0_CHANGE)) + return 0; + p = xdr_reserve_space(xdr, 8); + if (unlikely(p == 0)) + return htonl(NFS4ERR_RESOURCE); + p = xdr_encode_hyper(p, change); + return 0; +} + +static unsigned encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size) +{ + uint32_t *p; + + if (!(bitmap[0] & FATTR4_WORD0_SIZE)) + return 0; + p = xdr_reserve_space(xdr, 8); + if (unlikely(p == 0)) + return htonl(NFS4ERR_RESOURCE); + p = xdr_encode_hyper(p, size); + return 0; +} + +static unsigned encode_attr_time(struct xdr_stream *xdr, const struct timespec *time) +{ + uint32_t *p; + + p = xdr_reserve_space(xdr, 12); + if (unlikely(p == 0)) + return htonl(NFS4ERR_RESOURCE); + p = xdr_encode_hyper(p, time->tv_sec); + *p = htonl(time->tv_nsec); + return 0; +} + +static unsigned encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec 
*time) +{ + if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) + return 0; + return encode_attr_time(xdr,time); +} + +static unsigned encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time) +{ + if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) + return 0; + return encode_attr_time(xdr,time); +} + +static unsigned encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr) +{ + unsigned status; + + hdr->status = xdr_reserve_space(xdr, 4); + if (unlikely(hdr->status == NULL)) + return htonl(NFS4ERR_RESOURCE); + status = encode_string(xdr, hdr->taglen, hdr->tag); + if (unlikely(status != 0)) + return status; + hdr->nops = xdr_reserve_space(xdr, 4); + if (unlikely(hdr->nops == NULL)) + return htonl(NFS4ERR_RESOURCE); + return 0; +} + +static unsigned encode_op_hdr(struct xdr_stream *xdr, uint32_t op, uint32_t res) +{ + uint32_t *p; + + p = xdr_reserve_space(xdr, 8); + if (unlikely(p == NULL)) + return htonl(NFS4ERR_RESOURCE); + *p++ = htonl(op); + *p = htonl(res); + return 0; +} + +static unsigned encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res) +{ + uint32_t *savep; + unsigned status = res->status; + + if (unlikely(status != 0)) + goto out; + status = encode_attr_bitmap(xdr, res->bitmap, &savep); + if (unlikely(status != 0)) + goto out; + status = encode_attr_change(xdr, res->bitmap, res->change_attr); + if (unlikely(status != 0)) + goto out; + status = encode_attr_size(xdr, res->bitmap, res->size); + if (unlikely(status != 0)) + goto out; + status = encode_attr_ctime(xdr, res->bitmap, &res->ctime); + if (unlikely(status != 0)) + goto out; + status = encode_attr_mtime(xdr, res->bitmap, &res->mtime); + *savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1))); +out: + dprintk("%s: exit with status = %d\n", __FUNCTION__, status); + return status; +} + +static unsigned process_op(struct svc_rqst *rqstp, + struct xdr_stream *xdr_in, void *argp, + struct xdr_stream *xdr_out, void *resp) +{ + struct callback_op *op; + unsigned int op_nr; + unsigned int status = 0; + long maxlen; + unsigned res; + + dprintk("%s: start\n", __FUNCTION__); + status = decode_op_hdr(xdr_in, &op_nr); + if (unlikely(status != 0)) { + op_nr = OP_CB_ILLEGAL; + op = &callback_ops[0]; + } else if (unlikely(op_nr != OP_CB_GETATTR && op_nr != OP_CB_RECALL)) { + op_nr = OP_CB_ILLEGAL; + op = &callback_ops[0]; + status = htonl(NFS4ERR_OP_ILLEGAL); + } else + op = &callback_ops[op_nr]; + + maxlen = xdr_out->end - xdr_out->p; + if (maxlen > 0 && maxlen < PAGE_SIZE) { + if (likely(status == 0 && op->decode_args != NULL)) + status = op->decode_args(rqstp, xdr_in, argp); + if (likely(status == 0 && op->process_op != NULL)) + status = op->process_op(argp, resp); + } else + status = htonl(NFS4ERR_RESOURCE); + + res = encode_op_hdr(xdr_out, op_nr, status); + if (status == 0) + status = res; + if (op->encode_res != NULL && status == 0) + status = op->encode_res(rqstp, xdr_out, resp); + dprintk("%s: done, status = %d\n", __FUNCTION__, status); + return status; +} + +/* + * Decode, process and encode a COMPOUND + */ +static int nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp) +{ + struct cb_compound_hdr_arg hdr_arg; + struct cb_compound_hdr_res hdr_res; + struct xdr_stream xdr_in, xdr_out; + uint32_t *p; + unsigned int status; + unsigned int nops = 1; + + dprintk("%s: start\n", __FUNCTION__); + + xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); + + p = (uint32_t*)((char 
*)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); + rqstp->rq_res.head[0].iov_len = PAGE_SIZE; + xdr_init_encode(&xdr_out, &rqstp->rq_res, p); + + decode_compound_hdr_arg(&xdr_in, &hdr_arg); + hdr_res.taglen = hdr_arg.taglen; + hdr_res.tag = hdr_arg.tag; + encode_compound_hdr_res(&xdr_out, &hdr_res); + + for (;;) { + status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp); + if (status != 0) + break; + if (nops == hdr_arg.nops) + break; + nops++; + } + *hdr_res.status = status; + *hdr_res.nops = htonl(nops); + dprintk("%s: done, status = %u\n", __FUNCTION__, status); + return rpc_success; +} + +/* + * Define NFS4 callback COMPOUND ops. + */ +static struct callback_op callback_ops[] = { + [0] = { + .res_maxsize = CB_OP_HDR_RES_MAXSZ, + }, + [OP_CB_GETATTR] = { + .process_op = (callback_process_op_t)nfs4_callback_getattr, + .decode_args = (callback_decode_arg_t)decode_getattr_args, + .encode_res = (callback_encode_res_t)encode_getattr_res, + .res_maxsize = CB_OP_GETATTR_RES_MAXSZ, + }, + [OP_CB_RECALL] = { + .process_op = (callback_process_op_t)nfs4_callback_recall, + .decode_args = (callback_decode_arg_t)decode_recall_args, + .res_maxsize = CB_OP_RECALL_RES_MAXSZ, + } +}; + +/* + * Define NFS4 callback procedures + */ +static struct svc_procedure nfs4_callback_procedures1[] = { + [CB_NULL] = { + .pc_func = nfs4_callback_null, + .pc_decode = (kxdrproc_t)nfs4_decode_void, + .pc_encode = (kxdrproc_t)nfs4_encode_void, + .pc_xdrressize = 1, + }, + [CB_COMPOUND] = { + .pc_func = nfs4_callback_compound, + .pc_encode = (kxdrproc_t)nfs4_encode_void, + .pc_argsize = 256, + .pc_ressize = 256, + .pc_xdrressize = NFS4_CALLBACK_BUFSIZE, + } +}; + +struct svc_version nfs4_callback_version1 = { + .vs_vers = 1, + .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1), + .vs_proc = nfs4_callback_procedures1, + .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, + .vs_dispatch = NULL, +}; + diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/delegation.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/delegation.c --- linux-2.6.7/fs/nfs/delegation.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/delegation.c 2004-07-02 22:20:11.000000000 -0400 @@ -0,0 +1,320 @@ +/* + * linux/fs/nfs/delegation.c + * + * Copyright (C) 2004 Trond Myklebust + * + * NFS file delegation management + * + */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "delegation.h" + +static struct nfs_delegation *nfs_alloc_delegation(void) +{ + return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL); +} + +static void nfs_free_delegation(struct nfs_delegation *delegation) +{ + if (delegation->cred) + put_rpccred(delegation->cred); + kfree(delegation); +} + +static void nfs_delegation_claim_opens(struct inode *inode) +{ + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_open_context *ctx; + struct nfs4_state *state; + +again: + spin_lock(&inode->i_lock); + list_for_each_entry(ctx, &nfsi->open_files, list) { + state = ctx->state; + if (state == NULL) + continue; + if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) + continue; + get_nfs_open_context(ctx); + spin_unlock(&inode->i_lock); + if (nfs4_open_delegation_recall(ctx->dentry, state) < 0) + return; + put_nfs_open_context(ctx); + goto again; + } + spin_unlock(&inode->i_lock); +} + +/* + * Set up a delegation on an inode + */ +void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) +{ + struct nfs_delegation *delegation = 
NFS_I(inode)->delegation; + + if (delegation == NULL) + return; + memcpy(delegation->stateid.data, res->delegation.data, + sizeof(delegation->stateid.data)); + delegation->type = res->delegation_type; + delegation->maxsize = res->maxsize; + put_rpccred(cred); + delegation->cred = get_rpccred(cred); + delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM; + NFS_I(inode)->delegation_state = delegation->type; + smp_wmb(); +} + +/* + * Set up a delegation on an inode + */ +int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) +{ + struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state; + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_delegation *delegation; + int status = 0; + + delegation = nfs_alloc_delegation(); + if (delegation == NULL) + return -ENOMEM; + memcpy(delegation->stateid.data, res->delegation.data, + sizeof(delegation->stateid.data)); + delegation->type = res->delegation_type; + delegation->maxsize = res->maxsize; + delegation->cred = get_rpccred(cred); + delegation->inode = inode; + + spin_lock(&clp->cl_lock); + if (nfsi->delegation == NULL) { + list_add(&delegation->super_list, &clp->cl_delegations); + nfsi->delegation = delegation; + nfsi->delegation_state = delegation->type; + delegation = NULL; + } else { + if (memcmp(&delegation->stateid, &nfsi->delegation->stateid, + sizeof(delegation->stateid)) != 0 || + delegation->type != nfsi->delegation->type) { + printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n", + __FUNCTION__, NIPQUAD(clp->cl_addr)); + status = -EIO; + } + } + spin_unlock(&clp->cl_lock); + if (delegation != NULL) + kfree(delegation); + return status; +} + +static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation) +{ + int res = 0; + + __nfs_revalidate_inode(NFS_SERVER(inode), inode); + + res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid); + nfs_free_delegation(delegation); + return res; +} + +/* Sync all data to disk upon delegation return */ +static void nfs_msync_inode(struct inode *inode) +{ + down(&inode->i_sem); + filemap_fdatawrite(inode->i_mapping); + nfs_wb_all(inode); + filemap_fdatawait(inode->i_mapping); + up(&inode->i_sem); +} + +/* + * Basic procedure for returning a delegation to the server + */ +int nfs_inode_return_delegation(struct inode *inode) +{ + struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state; + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_delegation *delegation; + int res = 0; + + nfs_msync_inode(inode); + down_read(&clp->cl_sem); + /* Guard against new delegated open calls */ + down_write(&nfsi->rwsem); + spin_lock(&clp->cl_lock); + delegation = nfsi->delegation; + if (delegation != NULL) { + list_del_init(&delegation->super_list); + nfsi->delegation = NULL; + nfsi->delegation_state = 0; + } + spin_unlock(&clp->cl_lock); + nfs_delegation_claim_opens(inode); + up_write(&nfsi->rwsem); + up_read(&clp->cl_sem); + nfs_msync_inode(inode); + + if (delegation != NULL) + res = nfs_do_return_delegation(inode, delegation); + return res; +} + +/* + * Return all delegations associated to a super block + */ +void nfs_return_all_delegations(struct super_block *sb) +{ + struct nfs4_client *clp = NFS_SB(sb)->nfs4_state; + struct nfs_delegation *delegation; + struct inode *inode; + + if (clp == NULL) + return; +restart: + spin_lock(&clp->cl_lock); + list_for_each_entry(delegation, &clp->cl_delegations, super_list) { + if (delegation->inode->i_sb != sb) + continue; + inode = igrab(delegation->inode); + if (inode == NULL) 
+ continue; + spin_unlock(&clp->cl_lock); + nfs_inode_return_delegation(inode); + iput(inode); + goto restart; + } + spin_unlock(&clp->cl_lock); +} + +struct recall_threadargs { + struct inode *inode; + struct nfs4_client *clp; + const nfs4_stateid *stateid; + + struct completion started; + int result; +}; + +static int recall_thread(void *data) +{ + struct recall_threadargs *args = (struct recall_threadargs *)data; + struct inode *inode = igrab(args->inode); + struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state; + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_delegation *delegation; + + daemonize("nfsv4-delegreturn"); + + nfs_msync_inode(inode); + down_read(&clp->cl_sem); + down_write(&nfsi->rwsem); + spin_lock(&clp->cl_lock); + delegation = nfsi->delegation; + if (delegation != NULL && memcmp(delegation->stateid.data, + args->stateid->data, + sizeof(delegation->stateid.data)) == 0) { + list_del_init(&delegation->super_list); + nfsi->delegation = NULL; + nfsi->delegation_state = 0; + args->result = 0; + } else { + delegation = NULL; + args->result = -ENOENT; + } + spin_unlock(&clp->cl_lock); + complete(&args->started); + nfs_delegation_claim_opens(inode); + up_write(&nfsi->rwsem); + up_read(&clp->cl_sem); + nfs_msync_inode(inode); + + if (delegation != NULL) + nfs_do_return_delegation(inode, delegation); + iput(inode); + module_put_and_exit(0); +} + +/* + * Asynchronous delegation recall! + */ +int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid) +{ + struct recall_threadargs data = { + .inode = inode, + .stateid = stateid, + }; + int status; + + init_completion(&data.started); + __module_get(THIS_MODULE); + status = kernel_thread(recall_thread, &data, CLONE_KERNEL); + if (status < 0) + goto out_module_put; + wait_for_completion(&data.started); + return data.result; +out_module_put: + module_put(THIS_MODULE); + return status; +} + +/* + * Retrieve the inode associated with a delegation + */ +struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle) +{ + struct nfs_delegation *delegation; + struct inode *res = NULL; + spin_lock(&clp->cl_lock); + list_for_each_entry(delegation, &clp->cl_delegations, super_list) { + if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { + res = igrab(delegation->inode); + break; + } + } + spin_unlock(&clp->cl_lock); + return res; +} + +/* + * Mark all delegations as needing to be reclaimed + */ +void nfs_delegation_mark_reclaim(struct nfs4_client *clp) +{ + struct nfs_delegation *delegation; + spin_lock(&clp->cl_lock); + list_for_each_entry(delegation, &clp->cl_delegations, super_list) + delegation->flags |= NFS_DELEGATION_NEED_RECLAIM; + spin_unlock(&clp->cl_lock); +} + +/* + * Reap all unclaimed delegations after reboot recovery is done + */ +void nfs_delegation_reap_unclaimed(struct nfs4_client *clp) +{ + struct nfs_delegation *delegation, *n; + LIST_HEAD(head); + spin_lock(&clp->cl_lock); + list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) { + if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) + continue; + list_move(&delegation->super_list, &head); + NFS_I(delegation->inode)->delegation = NULL; + NFS_I(delegation->inode)->delegation_state = 0; + } + spin_unlock(&clp->cl_lock); + while(!list_empty(&head)) { + delegation = list_entry(head.next, struct nfs_delegation, super_list); + list_del(&delegation->super_list); + nfs_free_delegation(delegation); + } +} diff -u --recursive --new-file --show-c-function 
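The asynchronous recall path just above follows a common 2.6-era idiom: spawn a kernel thread with kernel_thread(), have the caller block only on a completion that signals the thread has claimed the work, and let the slow DELEGRETURN finish on its own while the thread pins the module. A minimal sketch of just that hand-off, with hypothetical names and nothing NFS-specific (illustration only, not part of the patch):

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct demo_args {
	struct completion started;
	int result;
};

static int demo_thread(void *data)
{
	struct demo_args *args = data;

	daemonize("demo-worker");
	args->result = 0;			/* claim the work */
	complete(&args->started);		/* unblock the caller */
	/* ... the slow part (for the patch, the DELEGRETURN RPC) runs here ... */
	module_put_and_exit(0);
}

static int demo_async_call(void)
{
	struct demo_args args = { .result = -EIO };
	int err;

	init_completion(&args.started);
	__module_get(THIS_MODULE);		/* released by the thread on exit */
	err = kernel_thread(demo_thread, &args, CLONE_KERNEL);
	if (err < 0) {
		module_put(THIS_MODULE);
		return err;
	}
	wait_for_completion(&args.started);	/* wait for the hand-off only */
	return args.result;
}

Signalling the completion before the long-running work is what lets nfs_async_inode_return_delegation() hand a prompt answer back to its caller (presumably the CB_RECALL handler in the new callback code) while the actual delegation return proceeds in the background.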
linux-2.6.7/fs/nfs/delegation.h linux-2.6.7-43-rpc_queue_lock/fs/nfs/delegation.h --- linux-2.6.7/fs/nfs/delegation.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/delegation.h 2004-07-02 22:20:11.000000000 -0400 @@ -0,0 +1,56 @@ +/* + * linux/fs/nfs/delegation.h + * + * Copyright (c) Trond Myklebust + * + * Definitions pertaining to NFS delegated files + */ +#ifndef FS_NFS_DELEGATION_H +#define FS_NFS_DELEGATION_H + +#if defined(CONFIG_NFS_V4) +/* + * NFSv4 delegation + */ +struct nfs_delegation { + struct list_head super_list; + struct rpc_cred *cred; + struct inode *inode; + nfs4_stateid stateid; + int type; +#define NFS_DELEGATION_NEED_RECLAIM 1 + long flags; + loff_t maxsize; +}; + +int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); +void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); +int nfs_inode_return_delegation(struct inode *inode); +int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); + +struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle); +void nfs_return_all_delegations(struct super_block *sb); + +void nfs_delegation_mark_reclaim(struct nfs4_client *clp); +void nfs_delegation_reap_unclaimed(struct nfs4_client *clp); + +/* NFSv4 delegation-related procedures */ +int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid); +int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state); + +static inline int nfs_have_delegation(struct inode *inode, int flags) +{ + flags &= FMODE_READ|FMODE_WRITE; + smp_rmb(); + if ((NFS_I(inode)->delegation_state & flags) == flags) + return 1; + return 0; +} +#else +static inline int nfs_have_delegation(struct inode *inode, int flags) +{ + return 0; +} +#endif + +#endif diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/dir.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/dir.c --- linux-2.6.7/fs/nfs/dir.c 2004-07-02 18:43:40.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/dir.c 2004-07-02 22:20:11.000000000 -0400 @@ -32,6 +32,8 @@ #include #include +#include "delegation.h" + #define NFS_PARANOIA 1 /* #define NFS_DEBUG_VERBOSE 1 */ @@ -610,7 +612,7 @@ static int nfs_lookup_revalidate(struct verifier = nfs_save_change_attribute(dir); error = nfs_cached_lookup(dir, dentry, &fhandle, &fattr); if (!error) { - if (memcmp(NFS_FH(inode), &fhandle, sizeof(struct nfs_fh))!= 0) + if (nfs_compare_fh(NFS_FH(inode), &fhandle)) goto out_bad; if (nfs_lookup_verify_inode(inode, isopen)) goto out_zap_parent; @@ -623,7 +625,7 @@ static int nfs_lookup_revalidate(struct error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr); if (error) goto out_bad; - if (memcmp(NFS_FH(inode), &fhandle, sizeof(struct nfs_fh))!= 0) + if (nfs_compare_fh(NFS_FH(inode), &fhandle)) goto out_bad; if ((error = nfs_refresh_inode(inode, &fattr)) != 0) goto out_bad; @@ -850,22 +852,22 @@ static int nfs_open_revalidate(struct de unsigned long verifier; int openflags, ret = 0; - /* NFS only supports OPEN for regular files */ - if (inode && !S_ISREG(inode->i_mode)) - goto no_open; parent = dget_parent(dentry); dir = parent->d_inode; if (!is_atomic_open(dir, nd)) goto no_open; + /* We can't create new files in nfs_open_revalidate(), so we + * optimize away revalidation of negative dentries. 
+ */ + if (inode == NULL) + goto out; + /* NFS only supports OPEN on regular files */ + if (!S_ISREG(inode->i_mode)) + goto no_open; openflags = nd->intent.open.flags; - if (openflags & O_CREAT) { - /* If this is a negative dentry, just drop it */ - if (!inode) - goto out; - /* If this is exclusive open, just revalidate */ - if (openflags & O_EXCL) - goto no_open; - } + /* We cannot do exclusive creation on a positive dentry */ + if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) + goto no_open; /* We can't create new files, or truncate existing ones here */ openflags &= ~(O_CREAT|O_TRUNC); @@ -887,6 +889,8 @@ out: return ret; no_open: dput(parent); + if (inode != NULL && nfs_have_delegation(inode, FMODE_READ)) + return 1; return nfs_lookup_revalidate(dentry, nd); } #endif /* CONFIG_NFSV4 */ @@ -1299,19 +1303,6 @@ nfs_symlink(struct inode *dir, struct de dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name, symname); - error = -ENAMETOOLONG; - switch (NFS_PROTO(dir)->version) { - case 2: - if (strlen(symname) > NFS2_MAXPATHLEN) - goto out; - break; - case 3: - if (strlen(symname) > NFS3_MAXPATHLEN) - goto out; - default: - break; - } - #ifdef NFS_PARANOIA if (dentry->d_inode) printk("nfs_proc_symlink: %s/%s not negative!\n", @@ -1341,8 +1332,6 @@ dentry->d_parent->d_name.name, dentry->d d_drop(dentry); } unlock_kernel(); - -out: return error; } @@ -1498,10 +1487,56 @@ out: return error; } -int -nfs_permission(struct inode *inode, int mask, struct nameidata *nd) +int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res) +{ + struct nfs_access_entry *cache = &NFS_I(inode)->cache_access; + + if (cache->cred != cred + || time_after(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode)) + || (NFS_FLAGS(inode) & NFS_INO_INVALID_ATTR)) + return -ENOENT; + memcpy(res, cache, sizeof(*res)); + return 0; +} + +void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set) +{ + struct nfs_access_entry *cache = &NFS_I(inode)->cache_access; + + if (cache->cred != set->cred) { + if (cache->cred) + put_rpccred(cache->cred); + cache->cred = get_rpccred(set->cred); + } + cache->jiffies = set->jiffies; + cache->mask = set->mask; +} + +static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask) +{ + struct nfs_access_entry cache; + int status; + + status = nfs_access_get_cached(inode, cred, &cache); + if (status == 0) + goto out; + + /* Be clever: ask server to check for all possible rights */ + cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ; + cache.cred = cred; + cache.jiffies = jiffies; + status = NFS_PROTO(inode)->access(inode, &cache); + if (status != 0) + return status; + nfs_access_add_cache(inode, &cache); +out: + if ((cache.mask & mask) == mask) + return 0; + return -EACCES; +} + +int nfs_permission(struct inode *inode, int mask, struct nameidata *nd) { - struct nfs_access_cache *cache = &NFS_I(inode)->cache_access; struct rpc_cred *cred; int mode = inode->i_mode; int res; @@ -1542,24 +1577,7 @@ nfs_permission(struct inode *inode, int goto out_notsup; cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0); - if (cache->cred == cred - && time_before(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode)) - && !(NFS_FLAGS(inode) & NFS_INO_INVALID_ATTR)) { - if (!(res = cache->err)) { - /* Is the mask a subset of an accepted mask? */ - if ((cache->mask & mask) == mask) - goto out; - } else { - /* ...or is it a superset of a rejected mask? 
*/ - if ((cache->mask & mask) == cache->mask) - goto out; - } - } - - res = NFS_PROTO(inode)->access(inode, cred, mask); - if (!res || res == -EACCES) - goto add_cache; -out: + res = nfs_do_access(inode, cred, mask); put_rpccred(cred); unlock_kernel(); return res; @@ -1568,15 +1586,6 @@ out_notsup: res = vfs_permission(inode, mask); unlock_kernel(); return res; -add_cache: - cache->jiffies = jiffies; - if (cache->cred) - put_rpccred(cache->cred); - cache->cred = cred; - cache->mask = mask; - cache->err = res; - unlock_kernel(); - return res; } /* diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/direct.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/direct.c --- linux-2.6.7/fs/nfs/direct.c 2004-07-02 18:43:59.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/direct.c 2004-07-02 22:19:18.000000000 -0400 @@ -110,7 +110,7 @@ nfs_free_user_pages(struct page **pages, * nfs_direct_read_seg - Read in one iov segment. Generate separate * read RPCs for each "rsize" bytes. * @inode: target inode - * @file: target file (may be NULL) + * @ctx: target file open context * user_addr: starting address of this segment of user's buffer * count: size of this segment * file_offset: offset in file to begin the operation @@ -118,7 +118,7 @@ nfs_free_user_pages(struct page **pages, * nr_pages: size of pages array */ static int -nfs_direct_read_seg(struct inode *inode, struct file *file, +nfs_direct_read_seg(struct inode *inode, struct nfs_open_context *ctx, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, int nr_pages) { @@ -127,9 +127,10 @@ nfs_direct_read_seg(struct inode *inode, int curpage = 0; struct nfs_read_data rdata = { .inode = inode, + .cred = ctx->cred, .args = { .fh = NFS_FH(inode), - .lockowner = current->files, + .context = ctx, }, .res = { .fattr = &rdata.fattr, @@ -151,7 +152,7 @@ nfs_direct_read_seg(struct inode *inode, user_addr + tot_bytes, rdata.args.pgbase, curpage); lock_kernel(); - result = NFS_PROTO(inode)->read(&rdata, file); + result = NFS_PROTO(inode)->read(&rdata); unlock_kernel(); if (result <= 0) { @@ -183,7 +184,7 @@ nfs_direct_read_seg(struct inode *inode, * nfs_direct_read - For each iov segment, map the user's buffer * then generate read RPCs. * @inode: target inode - * @file: target file (may be NULL) + * @ctx: target file open context * @iov: array of vectors that define I/O buffer * file_offset: offset in file to begin the operation * nr_segs: size of iovec array @@ -193,7 +194,7 @@ nfs_direct_read_seg(struct inode *inode, * server. */ static ssize_t -nfs_direct_read(struct inode *inode, struct file *file, +nfs_direct_read(struct inode *inode, struct nfs_open_context *ctx, const struct iovec *iov, loff_t file_offset, unsigned long nr_segs) { @@ -216,7 +217,7 @@ nfs_direct_read(struct inode *inode, str return page_count; } - result = nfs_direct_read_seg(inode, file, user_addr, size, + result = nfs_direct_read_seg(inode, ctx, user_addr, size, file_offset, pages, page_count); nfs_free_user_pages(pages, page_count, 1); @@ -239,7 +240,7 @@ nfs_direct_read(struct inode *inode, str * nfs_direct_write_seg - Write out one iov segment. Generate separate * write RPCs for each "wsize" bytes, then commit. 
* @inode: target inode - * @file: target file (may be NULL) + * @ctx: target file open context * user_addr: starting address of this segment of user's buffer * count: size of this segment * file_offset: offset in file to begin the operation @@ -247,7 +248,7 @@ nfs_direct_read(struct inode *inode, str * nr_pages: size of pages array */ static int -nfs_direct_write_seg(struct inode *inode, struct file *file, +nfs_direct_write_seg(struct inode *inode, struct nfs_open_context *ctx, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, int nr_pages) { @@ -257,9 +258,10 @@ nfs_direct_write_seg(struct inode *inode struct nfs_writeverf first_verf; struct nfs_write_data wdata = { .inode = inode, + .cred = ctx->cred, .args = { .fh = NFS_FH(inode), - .lockowner = current->files, + .context = ctx, }, .res = { .fattr = &wdata.fattr, @@ -290,7 +292,7 @@ retry: user_addr + tot_bytes, wdata.args.pgbase, curpage); lock_kernel(); - result = NFS_PROTO(inode)->write(&wdata, file); + result = NFS_PROTO(inode)->write(&wdata); unlock_kernel(); if (result <= 0) { @@ -325,7 +327,7 @@ retry: wdata.args.offset = file_offset; lock_kernel(); - result = NFS_PROTO(inode)->commit(&wdata, file); + result = NFS_PROTO(inode)->commit(&wdata); unlock_kernel(); if (result < 0 || memcmp(&first_verf.verifier, @@ -349,7 +351,7 @@ sync_retry: * nfs_direct_write - For each iov segment, map the user's buffer * then generate write and commit RPCs. * @inode: target inode - * @file: target file (may be NULL) + * @ctx: target file open context * @iov: array of vectors that define I/O buffer * file_offset: offset in file to begin the operation * nr_segs: size of iovec array @@ -358,8 +360,7 @@ sync_retry: * that non-direct readers might access, so they will pick up these * writes immediately. 
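The direct I/O hunks above stop threading a struct file (and current->files as the implicit lock owner) through every read and write, and instead pass a reference-counted per-open context carrying the credential and lock owner. A minimal sketch of that kind of object, with hypothetical names; the real nfs_open_context defined later in this patch also carries the dentry, the NFSv4 state and an error field:

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sunrpc/auth.h>

struct demo_open_ctx {
	atomic_t count;
	struct rpc_cred *cred;		/* credential captured at open() time */
	fl_owner_t lockowner;		/* lock owner, e.g. current->files */
};

static struct demo_open_ctx *demo_ctx_alloc(struct rpc_cred *cred)
{
	struct demo_open_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx != NULL) {
		atomic_set(&ctx->count, 1);
		ctx->cred = get_rpccred(cred);
		ctx->lockowner = current->files;
	}
	return ctx;
}

static struct demo_open_ctx *demo_ctx_get(struct demo_open_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static void demo_ctx_put(struct demo_open_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count)) {
		put_rpccred(ctx->cred);
		kfree(ctx);
	}
}

Keeping the credential in a counted context means RPCs that complete after the last close still have something valid to put in rpc_message.rpc_cred, which is presumably why the old per-inode mm_cred field can go away in the inode.c changes below.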
*/ -static ssize_t -nfs_direct_write(struct inode *inode, struct file *file, +static int nfs_direct_write(struct inode *inode, struct nfs_open_context *ctx, const struct iovec *iov, loff_t file_offset, unsigned long nr_segs) { @@ -382,7 +383,7 @@ nfs_direct_write(struct inode *inode, st return page_count; } - result = nfs_direct_write_seg(inode, file, user_addr, size, + result = nfs_direct_write_seg(inode, ctx, user_addr, size, file_offset, pages, page_count); nfs_free_user_pages(pages, page_count, 0); @@ -414,6 +415,7 @@ nfs_direct_IO(int rw, struct kiocb *iocb { ssize_t result = -EINVAL; struct file *file = iocb->ki_filp; + struct nfs_open_context *ctx; struct dentry *dentry = file->f_dentry; struct inode *inode = dentry->d_inode; @@ -423,19 +425,20 @@ nfs_direct_IO(int rw, struct kiocb *iocb if (!is_sync_kiocb(iocb)) return result; + ctx = (struct nfs_open_context *)file->private_data; switch (rw) { case READ: dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n", dentry->d_name.name, file_offset, nr_segs); - result = nfs_direct_read(inode, file, iov, + result = nfs_direct_read(inode, ctx, iov, file_offset, nr_segs); break; case WRITE: dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n", dentry->d_name.name, file_offset, nr_segs); - result = nfs_direct_write(inode, file, iov, + result = nfs_direct_write(inode, ctx, iov, file_offset, nr_segs); break; default: @@ -471,6 +474,8 @@ nfs_file_direct_read(struct kiocb *iocb, ssize_t retval = -EINVAL; loff_t *ppos = &iocb->ki_pos; struct file *file = iocb->ki_filp; + struct nfs_open_context *ctx = + (struct nfs_open_context *) file->private_data; struct dentry *dentry = file->f_dentry; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; @@ -502,7 +507,7 @@ nfs_file_direct_read(struct kiocb *iocb, goto out; } - retval = nfs_direct_read(inode, file, &iov, pos, 1); + retval = nfs_direct_read(inode, ctx, &iov, pos, 1); if (retval > 0) *ppos = pos + retval; @@ -542,6 +547,8 @@ nfs_file_direct_write(struct kiocb *iocb loff_t *ppos = &iocb->ki_pos; unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur; struct file *file = iocb->ki_filp; + struct nfs_open_context *ctx = + (struct nfs_open_context *) file->private_data; struct dentry *dentry = file->f_dentry; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; @@ -589,7 +596,7 @@ nfs_file_direct_write(struct kiocb *iocb goto out; } - retval = nfs_direct_write(inode, file, &iov, pos, 1); + retval = nfs_direct_write(inode, ctx, &iov, pos, 1); if (mapping->nrpages) invalidate_inode_pages2(mapping); if (retval > 0) diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/file.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/file.c --- linux-2.6.7/fs/nfs/file.c 2004-07-02 18:43:45.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/file.c 2004-07-02 22:20:11.000000000 -0400 @@ -31,6 +31,8 @@ #include #include +#include "delegation.h" + #define NFSDBG_FACILITY NFSDBG_FILE static long nfs_file_fcntl(int fd, unsigned int cmd, @@ -127,6 +129,7 @@ nfs_file_release(struct inode *inode, st static int nfs_file_flush(struct file *file) { + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; struct inode *inode = file->f_dentry->d_inode; int status; @@ -138,9 +141,9 @@ nfs_file_flush(struct file *file) /* Ensure that data+attribute caches are up to date after close() */ status = nfs_wb_all(inode); if (!status) { - status = file->f_error; - file->f_error = 0; - if (!status) + status = ctx->error; + ctx->error = 
0; + if (!status && !nfs_have_delegation(inode, FMODE_READ)) __nfs_revalidate_inode(NFS_SERVER(inode), inode); } unlock_kernel(); @@ -211,6 +214,7 @@ nfs_file_mmap(struct file * file, struct static int nfs_fsync(struct file *file, struct dentry *dentry, int datasync) { + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; struct inode *inode = dentry->d_inode; int status; @@ -219,8 +223,8 @@ nfs_fsync(struct file *file, struct dent lock_kernel(); status = nfs_wb_all(inode); if (!status) { - status = file->f_error; - file->f_error = 0; + status = ctx->error; + ctx->error = 0; } unlock_kernel(); return status; @@ -302,6 +306,90 @@ out_swapfile: goto out; } +static int do_getlk(struct file *filp, int cmd, struct file_lock *fl) +{ + struct inode *inode = filp->f_mapping->host; + int status; + + lock_kernel(); + status = NFS_PROTO(inode)->lock(filp, cmd, fl); + unlock_kernel(); + return status; +} + +static int do_unlk(struct file *filp, int cmd, struct file_lock *fl) +{ + struct inode *inode = filp->f_mapping->host; + sigset_t oldset; + int status; + + rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset); + /* + * Flush all pending writes before doing anything + * with locks.. + */ + filemap_fdatawrite(filp->f_mapping); + down(&inode->i_sem); + nfs_wb_all(inode); + up(&inode->i_sem); + filemap_fdatawait(filp->f_mapping); + + /* NOTE: special case + * If we're signalled while cleaning up locks on process exit, we + * still need to complete the unlock. + */ + lock_kernel(); + status = NFS_PROTO(inode)->lock(filp, cmd, fl); + rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset); + return status; +} + +static int do_setlk(struct file *filp, int cmd, struct file_lock *fl) +{ + struct inode *inode = filp->f_mapping->host; + int status; + + /* + * Flush all pending writes before doing anything + * with locks.. + */ + status = filemap_fdatawrite(filp->f_mapping); + if (status == 0) { + down(&inode->i_sem); + status = nfs_wb_all(inode); + up(&inode->i_sem); + if (status == 0) + status = filemap_fdatawait(filp->f_mapping); + } + if (status < 0) + return status; + + lock_kernel(); + status = NFS_PROTO(inode)->lock(filp, cmd, fl); + /* If we were signalled we still need to ensure that + * we clean up any state on the server. We therefore + * record the lock call as having succeeded in order to + * ensure that locks_remove_posix() cleans it out when + * the process exits. + */ + if (status == -EINTR || status == -ERESTARTSYS) + posix_lock_file(filp, fl); + unlock_kernel(); + if (status < 0) + return status; + /* + * Make sure we clear the cache whenever we try to get the lock. + * This makes locking act as a cache coherency point. 
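do_setlk() flushes dirty pages before the LOCK call and invalidates the cache afterwards, so from user space the usual NFS advisory-locking contract holds: data read while holding an fcntl lock reflects writes other clients made under their own locks. A small user-space illustration of the pattern this is meant to support (plain POSIX, nothing NFS-specific, illustration only):

#define _XOPEN_SOURCE 500
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

/* Read a record that other clients update under the same lock. */
static ssize_t read_shared_record(int fd, char *buf, size_t len, off_t pos)
{
	struct flock fl = {
		.l_type   = F_RDLCK,
		.l_whence = SEEK_SET,
		.l_start  = pos,
		.l_len    = len,
	};
	ssize_t n;

	if (fcntl(fd, F_SETLKW, &fl) == -1)	/* taking the lock is the coherency point */
		return -1;
	n = pread(fd, buf, len, pos);		/* now reflects other clients' locked writes */
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);		/* releasing flushes anything we wrote */
	return n;
}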
+ */ + filemap_fdatawrite(filp->f_mapping); + down(&inode->i_sem); + nfs_wb_all(inode); /* we may have slept */ + up(&inode->i_sem); + filemap_fdatawait(filp->f_mapping); + nfs_zap_caches(inode); + return 0; +} + /* * Lock a (portion of) a file */ @@ -309,8 +397,6 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl) { struct inode * inode = filp->f_mapping->host; - int status = 0; - int status2; dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n", inode->i_sb->s_id, inode->i_ino, @@ -328,8 +414,8 @@ nfs_lock(struct file *filp, int cmd, str /* Fake OK code if mounted without NLM support */ if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) { if (IS_GETLK(cmd)) - status = LOCK_USE_CLNT; - goto out_ok; + return LOCK_USE_CLNT; + return 0; } } @@ -343,42 +429,9 @@ nfs_lock(struct file *filp, int cmd, str if (!fl->fl_owner || !(fl->fl_flags & FL_POSIX)) return -ENOLCK; - /* - * Flush all pending writes before doing anything - * with locks.. - */ - status = filemap_fdatawrite(filp->f_mapping); - down(&inode->i_sem); - status2 = nfs_wb_all(inode); - if (!status) - status = status2; - up(&inode->i_sem); - status2 = filemap_fdatawait(filp->f_mapping); - if (!status) - status = status2; - if (status < 0) - return status; - - lock_kernel(); - status = NFS_PROTO(inode)->lock(filp, cmd, fl); - unlock_kernel(); - if (status < 0) - return status; - - status = 0; - - /* - * Make sure we clear the cache whenever we try to get the lock. - * This makes locking act as a cache coherency point. - */ - out_ok: - if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { - filemap_fdatawrite(filp->f_mapping); - down(&inode->i_sem); - nfs_wb_all(inode); /* we may have slept */ - up(&inode->i_sem); - filemap_fdatawait(filp->f_mapping); - nfs_zap_caches(inode); - } - return status; + if (IS_GETLK(cmd)) + return do_getlk(filp, cmd, fl); + if (fl->fl_type == F_UNLCK) + return do_unlk(filp, cmd, fl); + return do_setlk(filp, cmd, fl); } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/inode.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/inode.c --- linux-2.6.7/fs/nfs/inode.c 2004-07-02 18:43:54.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/inode.c 2004-07-02 22:20:11.000000000 -0400 @@ -39,6 +39,8 @@ #include #include +#include "delegation.h" + #define NFSDBG_FACILITY NFSDBG_VFS #define NFS_PARANOIA 1 @@ -57,7 +59,6 @@ static struct inode *nfs_alloc_inode(str static void nfs_destroy_inode(struct inode *); static void nfs_write_inode(struct inode *,int); static void nfs_delete_inode(struct inode *); -static void nfs_put_super(struct super_block *); static void nfs_clear_inode(struct inode *); static void nfs_umount_begin(struct super_block *); static int nfs_statfs(struct super_block *, struct kstatfs *); @@ -68,7 +69,6 @@ static struct super_operations nfs_sops .destroy_inode = nfs_destroy_inode, .write_inode = nfs_write_inode, .delete_inode = nfs_delete_inode, - .put_super = nfs_put_super, .statfs = nfs_statfs, .clear_inode = nfs_clear_inode, .umount_begin = nfs_umount_begin, @@ -123,8 +123,9 @@ nfs_delete_inode(struct inode * inode) { dprintk("NFS: delete_inode(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino); + nfs_wb_all(inode); /* - * The following can never actually happen... + * The following should never happen... 
*/ if (nfs_have_writebacks(inode)) { printk(KERN_ERR "nfs_delete_inode: inode %ld has pending RPC requests\n", inode->i_ino); @@ -141,10 +142,10 @@ static void nfs_clear_inode(struct inode *inode) { struct nfs_inode *nfsi = NFS_I(inode); - struct rpc_cred *cred = nfsi->mm_cred; + struct rpc_cred *cred; - if (cred) - put_rpccred(cred); + nfs_wb_all(inode); + BUG_ON (!list_empty(&nfsi->open_files)); cred = nfsi->cache_access.cred; if (cred) put_rpccred(cred); @@ -152,27 +153,6 @@ nfs_clear_inode(struct inode *inode) } void -nfs_put_super(struct super_block *sb) -{ - struct nfs_server *server = NFS_SB(sb); - - nfs4_renewd_prepare_shutdown(server); - - if (server->client != NULL) - rpc_shutdown_client(server->client); - if (server->client_sys != NULL) - rpc_shutdown_client(server->client_sys); - - if (!(server->flags & NFS_MOUNT_NONLM)) - lockd_down(); /* release rpc.lockd */ - rpciod_down(); /* release rpciod */ - - destroy_nfsv4_state(server); - - kfree(server->hostname); -} - -void nfs_umount_begin(struct super_block *sb) { struct nfs_server *server = NFS_SB(sb); @@ -293,14 +273,6 @@ nfs_sb_init(struct super_block *sb, rpc_ server->rsize = nfs_block_size(fsinfo.rtpref, NULL); if (server->wsize == 0) server->wsize = nfs_block_size(fsinfo.wtpref, NULL); - if (sb->s_blocksize == 0) { - if (fsinfo.wtmult == 0) { - sb->s_blocksize = 512; - sb->s_blocksize_bits = 9; - } else - sb->s_blocksize = nfs_block_bits(fsinfo.wtmult, - &sb->s_blocksize_bits); - } if (fsinfo.rtmax >= 512 && server->rsize > fsinfo.rtmax) server->rsize = nfs_block_size(fsinfo.rtmax, NULL); @@ -319,6 +291,11 @@ nfs_sb_init(struct super_block *sb, rpc_ server->wsize = server->wpages << PAGE_CACHE_SHIFT; } + if (sb->s_blocksize == 0) + sb->s_blocksize = nfs_block_bits(server->wsize, + &sb->s_blocksize_bits); + server->wtmult = nfs_block_bits(fsinfo.wtmult, NULL); + server->dtsize = nfs_block_size(fsinfo.dtpref, NULL); if (server->dtsize > PAGE_CACHE_SIZE) server->dtsize = PAGE_CACHE_SIZE; @@ -405,7 +382,7 @@ static int nfs_fill_super(struct super_block *sb, struct nfs_mount_data *data, int silent) { struct nfs_server *server; - int err = -EIO; + int err = -ENOMEM; rpc_authflavor_t authflavor; server = NFS_SB(sb); @@ -424,25 +401,30 @@ nfs_fill_super(struct super_block *sb, s server->acdirmin = data->acdirmin*HZ; server->acdirmax = data->acdirmax*HZ; + /* Start lockd here, before we might error out */ + if (!(server->flags & NFS_MOUNT_NONLM)) + lockd_up(); + server->namelen = data->namlen; server->hostname = kmalloc(strlen(data->hostname) + 1, GFP_KERNEL); if (!server->hostname) - goto out_fail; + return err; strcpy(server->hostname, data->hostname); /* Check NFS protocol revision and initialize RPC op vector * and file handle pool. 
*/ + err = -EIO; if (server->flags & NFS_MOUNT_VER3) { #ifdef CONFIG_NFS_V3 server->rpc_ops = &nfs_v3_clientops; server->caps |= NFS_CAP_READDIRPLUS; if (data->version < 4) { printk(KERN_NOTICE "NFS: NFSv3 not supported by mount program.\n"); - goto out_fail; + return err; } #else printk(KERN_NOTICE "NFS: NFSv3 not supported.\n"); - goto out_fail; + return err; #endif } else { server->rpc_ops = &nfs_v2_clientops; @@ -457,30 +439,19 @@ nfs_fill_super(struct super_block *sb, s /* Create RPC client handles */ server->client = nfs_create_client(server, data); if (server->client == NULL) - goto out_fail; + return err; /* RFC 2623, sec 2.3.2 */ if (authflavor != RPC_AUTH_UNIX) { server->client_sys = rpc_clone_client(server->client); if (server->client_sys == NULL) - goto out_shutdown; + return err; if (!rpcauth_create(RPC_AUTH_UNIX, server->client_sys)) - goto out_shutdown; + return err; } else { atomic_inc(&server->client->cl_count); server->client_sys = server->client; } - /* Fire up rpciod if not yet running */ - if (rpciod_up() != 0) { - printk(KERN_WARNING "NFS: couldn't start rpciod!\n"); - goto out_shutdown; - } - - sb->s_op = &nfs_sops; - err = nfs_sb_init(sb, authflavor); - if (err != 0) - goto out_noinit; - if (server->flags & NFS_MOUNT_VER3) { if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) server->namelen = NFS3_MAXNAMLEN; @@ -489,21 +460,8 @@ nfs_fill_super(struct super_block *sb, s server->namelen = NFS2_MAXNAMLEN; } - /* Check whether to start the lockd process */ - if (!(server->flags & NFS_MOUNT_NONLM)) - lockd_up(); - return 0; -out_noinit: - rpciod_down(); -out_shutdown: - if (server->client) - rpc_shutdown_client(server->client); - if (server->client_sys) - rpc_shutdown_client(server->client_sys); -out_fail: - if (server->hostname) - kfree(server->hostname); - return err; + sb->s_op = &nfs_sops; + return nfs_sb_init(sb, authflavor); } static int @@ -526,6 +484,7 @@ nfs_statfs(struct super_block *sb, struc if (error < 0) goto out_err; + buf->f_frsize = server->wtmult; buf->f_bsize = sb->s_blocksize; blockbits = sb->s_blocksize_bits; blockres = (1 << blockbits) - 1; @@ -642,7 +601,7 @@ nfs_find_actor(struct inode *inode, void if (NFS_FILEID(inode) != fattr->fileid) return 0; - if (memcmp(NFS_FH(inode), fh, sizeof(struct nfs_fh)) != 0) + if (nfs_compare_fh(NFS_FH(inode), fh)) return 0; if (is_bad_inode(inode)) return 0; @@ -653,11 +612,10 @@ static int nfs_init_locked(struct inode *inode, void *opaque) { struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque; - struct nfs_fh *fh = desc->fh; struct nfs_fattr *fattr = desc->fattr; NFS_FILEID(inode) = fattr->fileid; - memcpy(NFS_FH(inode), fh, sizeof(struct nfs_fh)); + nfs_copy_fh(NFS_FH(inode), desc->fh); return 0; } @@ -859,53 +817,114 @@ int nfs_getattr(struct vfsmount *mnt, st return err; } +struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred) +{ + struct nfs_open_context *ctx; + + ctx = (struct nfs_open_context *)kmalloc(sizeof(*ctx), GFP_KERNEL); + if (ctx != NULL) { + atomic_set(&ctx->count, 1); + ctx->dentry = dget(dentry); + ctx->cred = get_rpccred(cred); + ctx->state = NULL; + ctx->lockowner = current->files; + ctx->error = 0; + init_waitqueue_head(&ctx->waitq); + } + return ctx; +} + +struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx) +{ + if (ctx != NULL) + atomic_inc(&ctx->count); + return ctx; +} + +void put_nfs_open_context(struct nfs_open_context *ctx) +{ + if (atomic_dec_and_test(&ctx->count)) { + if (ctx->state != NULL) + 
nfs4_close_state(ctx->state, ctx->mode); + if (ctx->cred != NULL) + put_rpccred(ctx->cred); + dput(ctx->dentry); + kfree(ctx); + } +} + /* * Ensure that mmap has a recent RPC credential for use when writing out * shared pages */ -void -nfs_set_mmcred(struct inode *inode, struct rpc_cred *cred) +void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx) { - struct rpc_cred **p = &NFS_I(inode)->mm_cred, - *oldcred = *p; + struct inode *inode = filp->f_dentry->d_inode; + struct nfs_inode *nfsi = NFS_I(inode); - *p = get_rpccred(cred); - if (oldcred) - put_rpccred(oldcred); + filp->private_data = get_nfs_open_context(ctx); + spin_lock(&inode->i_lock); + list_add(&ctx->list, &nfsi->open_files); + spin_unlock(&inode->i_lock); +} + +struct nfs_open_context *nfs_find_open_context(struct inode *inode, int mode) +{ + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_open_context *pos, *ctx = NULL; + + spin_lock(&inode->i_lock); + list_for_each_entry(pos, &nfsi->open_files, list) { + if ((pos->mode & mode) == mode) { + ctx = get_nfs_open_context(pos); + break; + } + } + spin_unlock(&inode->i_lock); + return ctx; +} + +void nfs_file_clear_open_context(struct file *filp) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct nfs_open_context *ctx = (struct nfs_open_context *)filp->private_data; + + if (ctx) { + filp->private_data = NULL; + spin_lock(&inode->i_lock); + list_del(&ctx->list); + spin_unlock(&inode->i_lock); + put_nfs_open_context(ctx); + } } /* - * These are probably going to contain hooks for - * allocating and releasing RPC credentials for - * the file. I'll have to think about Tronds patch - * a bit more.. + * These allocate and release file read/write context information. */ int nfs_open(struct inode *inode, struct file *filp) { - struct rpc_auth *auth; + struct nfs_open_context *ctx; struct rpc_cred *cred; - auth = NFS_CLIENT(inode)->cl_auth; - cred = rpcauth_lookupcred(auth, 0); - filp->private_data = cred; - if ((filp->f_mode & FMODE_WRITE) != 0) { - nfs_set_mmcred(inode, cred); + if ((cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0)) == NULL) + return -ENOMEM; + ctx = alloc_nfs_open_context(filp->f_dentry, cred); + put_rpccred(cred); + if (ctx == NULL) + return -ENOMEM; + ctx->mode = filp->f_mode; + nfs_file_set_open_context(filp, ctx); + put_nfs_open_context(ctx); + if ((filp->f_mode & FMODE_WRITE) != 0) nfs_begin_data_update(inode); - } return 0; } int nfs_release(struct inode *inode, struct file *filp) { - struct rpc_cred *cred; - - lock_kernel(); if ((filp->f_mode & FMODE_WRITE) != 0) nfs_end_data_update(inode); - cred = nfs_file_cred(filp); - if (cred) - put_rpccred(cred); - unlock_kernel(); + nfs_file_clear_open_context(filp); return 0; } @@ -1002,6 +1021,30 @@ out: return status; } +int nfs_attribute_timeout(struct inode *inode) +{ + struct nfs_inode *nfsi = NFS_I(inode); + + if (nfs_have_delegation(inode, FMODE_READ)) + return 0; + return time_after(jiffies, nfsi->read_cache_jiffies+nfsi->attrtimeo); +} + +/** + * nfs_revalidate_inode - Revalidate the inode attributes + * @server - pointer to nfs_server struct + * @inode - pointer to inode struct + * + * Updates inode attribute information by retrieving the data from the server. + */ +int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) +{ + if (!(NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA)) + && !nfs_attribute_timeout(inode)) + return NFS_STALE(inode) ? 
-ESTALE : 0; + return __nfs_revalidate_inode(server, inode); +} + /** * nfs_begin_data_update * @inode - pointer to inode @@ -1023,11 +1066,13 @@ void nfs_end_data_update(struct inode *i { struct nfs_inode *nfsi = NFS_I(inode); - /* Mark the attribute cache for revalidation */ - nfsi->flags |= NFS_INO_INVALID_ATTR; - /* Directories and symlinks: invalidate page cache too */ - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - nfsi->flags |= NFS_INO_INVALID_DATA; + if (!nfs_have_delegation(inode, FMODE_READ)) { + /* Mark the attribute cache for revalidation */ + nfsi->flags |= NFS_INO_INVALID_ATTR; + /* Directories and symlinks: invalidate page cache too */ + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + nfsi->flags |= NFS_INO_INVALID_DATA; + } nfsi->cache_change_attribute ++; atomic_dec(&nfsi->data_updates); } @@ -1068,6 +1113,10 @@ int nfs_refresh_inode(struct inode *inod loff_t cur_size, new_isize; int data_unstable; + /* Do we hold a delegation? */ + if (nfs_have_delegation(inode, FMODE_READ)) + return 0; + /* Are we in the process of updating data on the server? */ data_unstable = nfs_caches_unstable(inode); @@ -1265,7 +1314,8 @@ static int nfs_update_inode(struct inode if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) invalid &= ~NFS_INO_INVALID_DATA; - nfsi->flags |= invalid; + if (!nfs_have_delegation(inode, FMODE_READ)) + nfsi->flags |= invalid; return 0; out_changed: @@ -1305,7 +1355,7 @@ static int nfs_compare_super(struct supe return 0; if (old->addr.sin_port != server->addr.sin_port) return 0; - return !memcmp(&old->fh, &server->fh, sizeof(struct nfs_fh)); + return !nfs_compare_fh(&old->fh, &server->fh); } static struct super_block *nfs_get_sb(struct file_system_type *fs_type, @@ -1330,9 +1380,7 @@ static struct super_block *nfs_get_sb(st init_nfsv4_state(server); root = &server->fh; - memcpy(root, &data->root, sizeof(*root)); - if (root->size < sizeof(root->data)) - memset(root->data+root->size, 0, sizeof(root->data)-root->size); + nfs_copy_fh(root, (struct nfs_fh *) &data->root); if (data->version != NFS_MOUNT_VERSION) { printk("nfs warning: mount version %s than kernel\n", @@ -1343,7 +1391,6 @@ static struct super_block *nfs_get_sb(st data->bsize = 0; if (data->version < 4) { data->flags &= ~NFS_MOUNT_VER3; - memset(root, 0, sizeof(*root)); root->size = NFS2_FHSIZE; memcpy(root->data, data->old_root.data, NFS2_FHSIZE); } @@ -1373,6 +1420,13 @@ static struct super_block *nfs_get_sb(st s->s_flags = flags; + /* Fire up rpciod if not yet running */ + if (rpciod_up() != 0) { + printk(KERN_WARNING "NFS: couldn't start rpciod!\n"); + kfree(server); + return ERR_PTR(-EIO); + } + error = nfs_fill_super(s, data, flags & MS_VERBOSE ? 
1 : 0); if (error) { up_write(&s->s_umount); @@ -1386,7 +1440,25 @@ static struct super_block *nfs_get_sb(st static void nfs_kill_super(struct super_block *s) { struct nfs_server *server = NFS_SB(s); + kill_anon_super(s); + + nfs4_renewd_prepare_shutdown(server); + + if (server->client) + rpc_shutdown_client(server->client); + if (server->client_sys) + rpc_shutdown_client(server->client_sys); + + if (!(server->flags & NFS_MOUNT_NONLM)) + lockd_down(); /* release rpc.lockd */ + + rpciod_down(); /* release rpciod */ + + destroy_nfsv4_state(server); + + if (server->hostname) + kfree(server->hostname); kfree(server); } @@ -1402,12 +1474,12 @@ static struct file_system_type nfs_fs_ty static void nfs4_clear_inode(struct inode *); + static struct super_operations nfs4_sops = { .alloc_inode = nfs_alloc_inode, .destroy_inode = nfs_destroy_inode, .write_inode = nfs_write_inode, .delete_inode = nfs_delete_inode, - .put_super = nfs_put_super, .statfs = nfs_statfs, .clear_inode = nfs4_clear_inode, .umount_begin = nfs_umount_begin, @@ -1423,6 +1495,12 @@ static void nfs4_clear_inode(struct inod { struct nfs_inode *nfsi = NFS_I(inode); + /* If we are holding a delegation, return it! */ + if (nfsi->delegation != NULL) + nfs_inode_return_delegation(inode); + /* First call standard NFS clear_inode() code */ + nfs_clear_inode(inode); + /* Now clear out any remaining state */ while (!list_empty(&nfsi->open_states)) { struct nfs4_state *state; @@ -1437,8 +1515,6 @@ static void nfs4_clear_inode(struct inod BUG_ON(atomic_read(&state->count) != 1); nfs4_close_state(state, state->state); } - /* Now call standard NFS clear_inode() code */ - nfs_clear_inode(inode); } @@ -1498,7 +1574,7 @@ static int nfs4_fill_super(struct super_ clp = nfs4_get_client(&server->addr.sin_addr); if (!clp) { printk(KERN_WARNING "NFS: failed to create NFS4 client.\n"); - goto out_fail; + return -EIO; } /* Now create transport and client */ @@ -1536,8 +1612,13 @@ static int nfs4_fill_super(struct super_ memcpy(clp->cl_ipaddr, server->ip_addr, sizeof(clp->cl_ipaddr)); nfs_idmap_new(clp); } - if (list_empty(&clp->cl_superblocks)) - clear_bit(NFS4CLNT_OK, &clp->cl_state); + if (list_empty(&clp->cl_superblocks)) { + err = nfs4_init_client(clp); + if (err != 0) { + up_write(&clp->cl_sem); + goto out_fail; + } + } list_add_tail(&server->nfs4_siblings, &clp->cl_superblocks); clnt = rpc_clone_client(clp->cl_rpcclient); if (!IS_ERR(clnt)) @@ -1547,13 +1628,12 @@ static int nfs4_fill_super(struct super_ if (IS_ERR(clnt)) { printk(KERN_WARNING "NFS: cannot create RPC client.\n"); - err = PTR_ERR(clnt); - goto out_remove_list; + return PTR_ERR(clnt); } err = -ENOMEM; if (server->nfs4_state->cl_idmap == NULL) { printk(KERN_WARNING "NFS: failed to create idmapper.\n"); - goto out_shutdown; + return err; } clnt->cl_intr = (server->flags & NFS4_MOUNT_INTR) ? 
1 : 0; @@ -1563,28 +1643,14 @@ static int nfs4_fill_super(struct super_ if (clnt->cl_auth->au_flavor != authflavour) { if (rpcauth_create(authflavour, clnt) == NULL) { printk(KERN_WARNING "NFS: couldn't create credcache!\n"); - goto out_shutdown; + return err; } } - /* Fire up rpciod if not yet running */ - if (rpciod_up() != 0) { - printk(KERN_WARNING "NFS: couldn't start rpciod!\n"); - goto out_shutdown; - } - sb->s_op = &nfs4_sops; err = nfs_sb_init(sb, authflavour); if (err == 0) return 0; - rpciod_down(); -out_shutdown: - rpc_shutdown_client(server->client); -out_remove_list: - down_write(&server->nfs4_state->cl_sem); - list_del_init(&server->nfs4_siblings); - up_write(&server->nfs4_state->cl_sem); - destroy_nfsv4_state(server); out_fail: if (clp) nfs4_put_client(clp); @@ -1690,6 +1756,13 @@ static struct super_block *nfs4_get_sb(s s->s_flags = flags; + /* Fire up rpciod if not yet running */ + if (rpciod_up() != 0) { + printk(KERN_WARNING "NFS: couldn't start rpciod!\n"); + s = ERR_PTR(-EIO); + goto out_free; + } + error = nfs4_fill_super(s, data, flags & MS_VERBOSE ? 1 : 0); if (error) { up_write(&s->s_umount); @@ -1709,22 +1782,31 @@ out_free: return s; } +static void nfs4_kill_super(struct super_block *sb) +{ + nfs_return_all_delegations(sb); + nfs_kill_super(sb); +} + static struct file_system_type nfs4_fs_type = { .owner = THIS_MODULE, .name = "nfs4", .get_sb = nfs4_get_sb, - .kill_sb = nfs_kill_super, + .kill_sb = nfs4_kill_super, .fs_flags = FS_ODD_RENAME|FS_REVAL_DOT|FS_BINARY_MOUNTDATA, }; -#define nfs4_zero_state(nfsi) \ +#define nfs4_init_once(nfsi) \ do { \ INIT_LIST_HEAD(&(nfsi)->open_states); \ + nfsi->delegation = NULL; \ + nfsi->delegation_state = 0; \ + init_rwsem(&nfsi->rwsem); \ } while(0) #define register_nfs4fs() register_filesystem(&nfs4_fs_type) #define unregister_nfs4fs() unregister_filesystem(&nfs4_fs_type) #else -#define nfs4_zero_state(nfsi) \ +#define nfs4_init_once(nfsi) \ do { } while (0) #define register_nfs4fs() (0) #define unregister_nfs4fs() @@ -1746,8 +1828,6 @@ static struct inode *nfs_alloc_inode(str if (!nfsi) return NULL; nfsi->flags = 0; - nfsi->mm_cred = NULL; - nfs4_zero_state(nfsi); return &nfsi->vfs_inode; } @@ -1763,14 +1843,17 @@ static void init_once(void * foo, kmem_c if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { inode_init_once(&nfsi->vfs_inode); + nfsi->req_lock = SPIN_LOCK_UNLOCKED; INIT_LIST_HEAD(&nfsi->dirty); INIT_LIST_HEAD(&nfsi->commit); + INIT_LIST_HEAD(&nfsi->open_files); INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); atomic_set(&nfsi->data_updates, 0); nfsi->ndirty = 0; nfsi->ncommit = 0; nfsi->npages = 0; init_waitqueue_head(&nfsi->nfs_i_wait); + nfs4_init_once(nfsi); } } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/Makefile linux-2.6.7-43-rpc_queue_lock/fs/nfs/Makefile --- linux-2.6.7/fs/nfs/Makefile 2004-07-02 18:43:23.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/Makefile 2004-07-02 22:19:28.000000000 -0400 @@ -9,6 +9,7 @@ nfs-y := dir.o file.o inode.o nfs2xdr nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \ - idmap.o + delegation.o idmap.o \ + callback.o callback_xdr.o callback_proc.o nfs-$(CONFIG_NFS_DIRECTIO) += direct.o nfs-objs := $(nfs-y) diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/mount_clnt.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/mount_clnt.c --- linux-2.6.7/fs/nfs/mount_clnt.c 2004-07-02 
18:43:22.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/mount_clnt.c 2004-07-02 22:18:36.000000000 -0400 @@ -108,7 +108,6 @@ xdr_decode_fhstatus(struct rpc_rqst *req { struct nfs_fh *fh = res->fh; - memset((void *)fh, 0, sizeof(*fh)); if ((res->status = ntohl(*p++)) == 0) { fh->size = NFS2_FHSIZE; memcpy(fh->data, p, NFS2_FHSIZE); @@ -121,7 +120,6 @@ xdr_decode_fhstatus3(struct rpc_rqst *re { struct nfs_fh *fh = res->fh; - memset((void *)fh, 0, sizeof(*fh)); if ((res->status = ntohl(*p++)) == 0) { int size = ntohl(*p++); if (size <= NFS3_FHSIZE) { diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/nfs2xdr.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs2xdr.c --- linux-2.6.7/fs/nfs/nfs2xdr.c 2004-07-02 18:43:25.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs2xdr.c 2004-07-02 22:18:47.000000000 -0400 @@ -77,8 +77,6 @@ xdr_encode_fhandle(u32 *p, struct nfs_fh static inline u32 * xdr_decode_fhandle(u32 *p, struct nfs_fh *fhandle) { - /* Zero handle first to allow comparisons */ - memset(fhandle, 0, sizeof(*fhandle)); /* NFSv2 handles have a fixed length */ fhandle->size = NFS2_FHSIZE; memcpy(fhandle->data, p, NFS2_FHSIZE); @@ -95,6 +93,23 @@ xdr_encode_time(u32 *p, struct timespec } static inline u32* +xdr_encode_current_server_time(u32 *p, struct timespec *timep) +{ + /* + * Passing the invalid value useconds=1000000 is a + * Sun convention for "set to current server time". + * It's needed to make permissions checks for the + * "touch" program across v2 mounts to Solaris and + * Irix boxes work correctly. See description of + * sattr in section 6.1 of "NFS Illustrated" by + * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5 + */ + *p++ = htonl(timep->tv_sec); + *p++ = htonl(1000000); + return p; +} + +static inline u32* xdr_decode_time(u32 *p, struct timespec *timep) { timep->tv_sec = ntohl(*p++); @@ -142,15 +157,19 @@ xdr_encode_sattr(u32 *p, struct iattr *a SATTR(p, attr, ATTR_GID, ia_gid); SATTR(p, attr, ATTR_SIZE, ia_size); - if (attr->ia_valid & (ATTR_ATIME|ATTR_ATIME_SET)) { + if (attr->ia_valid & ATTR_ATIME_SET) { p = xdr_encode_time(p, &attr->ia_atime); + } else if (attr->ia_valid & ATTR_ATIME) { + p = xdr_encode_current_server_time(p, &attr->ia_atime); } else { *p++ = ~(u32) 0; *p++ = ~(u32) 0; } - if (attr->ia_valid & (ATTR_MTIME|ATTR_MTIME_SET)) { + if (attr->ia_valid & ATTR_MTIME_SET) { p = xdr_encode_time(p, &attr->ia_mtime); + } else if (attr->ia_valid & ATTR_MTIME) { + p = xdr_encode_current_server_time(p, &attr->ia_mtime); } else { *p++ = ~(u32) 0; *p++ = ~(u32) 0; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/nfs3proc.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs3proc.c --- linux-2.6.7/fs/nfs/nfs3proc.c 2004-07-02 18:43:40.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs3proc.c 2004-07-02 22:19:13.000000000 -0400 @@ -68,18 +68,6 @@ nfs3_async_handle_jukebox(struct rpc_tas return 1; } -static struct rpc_cred * -nfs_cred(struct inode *inode, struct file *filp) -{ - struct rpc_cred *cred = NULL; - - if (filp) - cred = (struct rpc_cred *)filp->private_data; - if (!cred) - cred = NFS_I(inode)->mm_cred; - return cred; -} - /* * Bare-bones access to getattr: this is for nfs_read_super. 
*/ @@ -164,8 +152,7 @@ nfs3_proc_lookup(struct inode *dir, stru return status; } -static int -nfs3_proc_access(struct inode *inode, struct rpc_cred *cred, int mode) +static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry) { struct nfs_fattr fattr; struct nfs3_accessargs arg = { @@ -178,9 +165,10 @@ nfs3_proc_access(struct inode *inode, st .rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS], .rpc_argp = &arg, .rpc_resp = &res, - .rpc_cred = cred + .rpc_cred = entry->cred }; - int status; + int mode = entry->mask; + int status; dprintk("NFS call access\n"); fattr.valid = 0; @@ -200,10 +188,16 @@ nfs3_proc_access(struct inode *inode, st } status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs_refresh_inode(inode, &fattr); - dprintk("NFS reply access\n"); - - if (status == 0 && (arg.access & res.access) != arg.access) - status = -EACCES; + if (status == 0) { + entry->mask = 0; + if (res.access & NFS3_ACCESS_READ) + entry->mask |= MAY_READ; + if (res.access & (NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND | NFS3_ACCESS_DELETE)) + entry->mask |= MAY_WRITE; + if (res.access & (NFS3_ACCESS_LOOKUP|NFS3_ACCESS_EXECUTE)) + entry->mask |= MAY_EXEC; + } + dprintk("NFS reply access, status = %d\n", status); return status; } @@ -227,8 +221,7 @@ nfs3_proc_readlink(struct inode *inode, return status; } -static int -nfs3_proc_read(struct nfs_read_data *rdata, struct file *filp) +static int nfs3_proc_read(struct nfs_read_data *rdata) { int flags = rdata->flags; struct inode * inode = rdata->inode; @@ -237,13 +230,13 @@ nfs3_proc_read(struct nfs_read_data *rda .rpc_proc = &nfs3_procedures[NFS3PROC_READ], .rpc_argp = &rdata->args, .rpc_resp = &rdata->res, + .rpc_cred = rdata->cred, }; int status; dprintk("NFS call read %d @ %Ld\n", rdata->args.count, (long long) rdata->args.offset); fattr->valid = 0; - msg.rpc_cred = nfs_cred(inode, filp); status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); if (status >= 0) nfs_refresh_inode(inode, fattr); @@ -251,8 +244,7 @@ nfs3_proc_read(struct nfs_read_data *rda return status; } -static int -nfs3_proc_write(struct nfs_write_data *wdata, struct file *filp) +static int nfs3_proc_write(struct nfs_write_data *wdata) { int rpcflags = wdata->flags; struct inode * inode = wdata->inode; @@ -261,13 +253,13 @@ nfs3_proc_write(struct nfs_write_data *w .rpc_proc = &nfs3_procedures[NFS3PROC_WRITE], .rpc_argp = &wdata->args, .rpc_resp = &wdata->res, + .rpc_cred = wdata->cred, }; int status; dprintk("NFS call write %d @ %Ld\n", wdata->args.count, (long long) wdata->args.offset); fattr->valid = 0; - msg.rpc_cred = nfs_cred(inode, filp); status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags); if (status >= 0) nfs_refresh_inode(inode, fattr); @@ -275,8 +267,7 @@ nfs3_proc_write(struct nfs_write_data *w return status < 0? 
status : wdata->res.count; } -static int -nfs3_proc_commit(struct nfs_write_data *cdata, struct file *filp) +static int nfs3_proc_commit(struct nfs_write_data *cdata) { struct inode * inode = cdata->inode; struct nfs_fattr * fattr = cdata->res.fattr; @@ -284,13 +275,13 @@ nfs3_proc_commit(struct nfs_write_data * .rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT], .rpc_argp = &cdata->args, .rpc_resp = &cdata->res, + .rpc_cred = cdata->cred, }; int status; dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, (long long) cdata->args.offset); fattr->valid = 0; - msg.rpc_cred = nfs_cred(inode, filp); status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); if (status >= 0) nfs_refresh_inode(inode, fattr); @@ -534,6 +525,8 @@ nfs3_proc_symlink(struct inode *dir, str }; int status; + if (path->len > NFS3_MAXPATHLEN) + return -ENAMETOOLONG; dprintk("NFS call symlink %s -> %s\n", name->name, path->name); dir_attr.valid = 0; fattr->valid = 0; @@ -832,27 +825,6 @@ nfs3_proc_commit_setup(struct nfs_write_ rpc_call_setup(task, &msg, 0); } -/* - * Set up the nfspage struct with the right credentials - */ -void -nfs3_request_init(struct nfs_page *req, struct file *filp) -{ - req->wb_cred = get_rpccred(nfs_cred(req->wb_inode, filp)); -} - -static int -nfs3_request_compatible(struct nfs_page *req, struct file *filp, struct page *page) -{ - if (req->wb_file != filp) - return 0; - if (req->wb_page != page) - return 0; - if (req->wb_cred != nfs_file_cred(filp)) - return 0; - return 1; -} - static int nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl) { @@ -892,7 +864,5 @@ struct nfs_rpc_ops nfs_v3_clientops = { .commit_setup = nfs3_proc_commit_setup, .file_open = nfs_open, .file_release = nfs_release, - .request_init = nfs3_request_init, - .request_compatible = nfs3_request_compatible, .lock = nfs3_proc_lock, }; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/nfs3xdr.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs3xdr.c --- linux-2.6.7/fs/nfs/nfs3xdr.c 2004-07-02 18:44:16.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs3xdr.c 2004-07-02 22:18:37.000000000 -0400 @@ -109,10 +109,6 @@ xdr_encode_fhandle(u32 *p, struct nfs_fh static inline u32 * xdr_decode_fhandle(u32 *p, struct nfs_fh *fh) { - /* - * Zero all nonused bytes - */ - memset((u8 *)fh, 0, sizeof(*fh)); if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) { memcpy(fh->data, p, fh->size); return p + XDR_QUADLEN(fh->size); diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/nfs4proc.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs4proc.c --- linux-2.6.7/fs/nfs/nfs4proc.c 2004-07-02 18:43:51.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs4proc.c 2004-07-02 22:20:03.000000000 -0400 @@ -47,12 +47,16 @@ #include #include +#include "delegation.h" + #define NFSDBG_FACILITY NFSDBG_PROC -#define NFS4_POLL_RETRY_TIME (15*HZ) +#define NFS4_POLL_RETRY_MIN (1*HZ) +#define NFS4_POLL_RETRY_MAX (15*HZ) static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *); +static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry); extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus); extern struct rpc_procinfo nfs4_procedures[]; @@ -189,53 +193,296 @@ static void update_changeattr(struct ino * reclaim state on the server after a reboot. 
* Assumes caller is holding the sp->so_sem */ -int -nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) +static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) { struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(inode); - struct nfs_fattr fattr = { - .valid = 0, - }; - struct nfs_open_reclaimargs o_arg = { + struct nfs_delegation *delegation = NFS_I(inode)->delegation; + struct nfs_openargs o_arg = { .fh = NFS_FH(inode), .seqid = sp->so_seqid, .id = sp->so_id, - .share_access = state->state, + .open_flags = state->state, .clientid = server->nfs4_state->cl_clientid, .claim = NFS4_OPEN_CLAIM_PREVIOUS, .bitmask = server->attr_bitmask, }; struct nfs_openres o_res = { - .f_attr = &fattr, .server = server, /* Grrr */ }; struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_RECLAIM], + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR], .rpc_argp = &o_arg, .rpc_resp = &o_res, .rpc_cred = sp->so_cred, }; int status; + if (delegation != NULL) { + if (!(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) { + memcpy(&state->stateid, &delegation->stateid, + sizeof(state->stateid)); + set_bit(NFS_DELEGATED_STATE, &state->flags); + return 0; + } + o_arg.u.delegation_type = delegation->type; + } status = rpc_call_sync(server->client, &msg, 0); nfs4_increment_seqid(status, sp); - if (status == 0) + if (status == 0) { memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid)); - /* Update the inode attributes */ - nfs_refresh_inode(inode, &fattr); + if (o_res.delegation_type != 0) { + nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res); + /* Did the server issue an immediate delegation recall? */ + if (o_res.do_recall) + nfs_async_inode_return_delegation(inode, &o_res.stateid); + } + } + clear_bit(NFS_DELEGATED_STATE, &state->flags); + /* Ensure we update the inode attributes */ + NFS_CACHEINV(inode); return status; } +int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) +{ + struct nfs_server *server = NFS_SERVER(state->inode); + struct nfs4_exception exception = { }; + int err; + do { + err = _nfs4_open_reclaim(sp, state); + switch (err) { + case 0: + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + return err; + } + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) +{ + struct nfs4_state_owner *sp = state->owner; + struct inode *inode = dentry->d_inode; + struct nfs_server *server = NFS_SERVER(inode); + struct dentry *parent = dget_parent(dentry); + struct nfs_openargs arg = { + .fh = NFS_FH(parent->d_inode), + .clientid = server->nfs4_state->cl_clientid, + .name = &dentry->d_name, + .id = sp->so_id, + .server = server, + .bitmask = server->attr_bitmask, + .claim = NFS4_OPEN_CLAIM_DELEGATE_CUR, + }; + struct nfs_openres res = { + .server = server, + }; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR], + .rpc_argp = &arg, + .rpc_resp = &res, + .rpc_cred = sp->so_cred, + }; + int status = 0; + + down(&sp->so_sema); + if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) + goto out; + if (state->state == 0) + goto out; + arg.seqid = sp->so_seqid; + arg.open_flags = state->state; + memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data)); + status = rpc_call_sync(server->client, &msg, 0); + nfs4_increment_seqid(status, sp); + if (status 
>= 0) { + memcpy(state->stateid.data, res.stateid.data, + sizeof(state->stateid.data)); + clear_bit(NFS_DELEGATED_STATE, &state->flags); + } +out: + up(&sp->so_sema); + dput(parent); + return status; +} + +int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) +{ + struct nfs4_exception exception = { }; + struct nfs_server *server = NFS_SERVER(dentry->d_inode); + int err; + do { + err = _nfs4_open_delegation_recall(dentry, state); + switch (err) { + case 0: + return err; + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + /* Don't recall a delegation if it was lost */ + nfs4_schedule_state_recovery(server->nfs4_state); + return err; + } + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid) +{ + struct nfs_open_confirmargs arg = { + .fh = fh, + .seqid = sp->so_seqid, + .stateid = *stateid, + }; + struct nfs_open_confirmres res; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], + .rpc_argp = &arg, + .rpc_resp = &res, + .rpc_cred = sp->so_cred, + }; + int status; + + status = rpc_call_sync(clnt, &msg, 0); + nfs4_increment_seqid(status, sp); + if (status >= 0) + memcpy(stateid, &res.stateid, sizeof(*stateid)); + return status; +} + +static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int mask) +{ + struct nfs_access_entry cache; + int status; + + status = nfs_access_get_cached(inode, cred, &cache); + if (status == 0) + goto out; + + /* Be clever: ask server to check for all possible rights */ + cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ; + cache.cred = cred; + cache.jiffies = jiffies; + status = _nfs4_proc_access(inode, &cache); + if (status != 0) + return status; + nfs_access_add_cache(inode, &cache); +out: + if ((cache.mask & mask) == mask) + return 0; + return -EACCES; +} + +/* + * Returns an nfs4_state + an extra reference to the inode + */ +int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred, struct nfs4_state **res) +{ + struct nfs_delegation *delegation; + struct nfs_server *server = NFS_SERVER(inode); + struct nfs4_client *clp = server->nfs4_state; + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs4_state_owner *sp = NULL; + struct nfs4_state *state = NULL; + int open_flags = flags & (FMODE_READ|FMODE_WRITE); + int mask = 0; + int err; + + /* Protect against reboot recovery - NOTE ORDER! 
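Throughout the rewritten nfs4proc.c the patch splits each operation into a bare _nfs4_*() worker that performs one RPC attempt and a public wrapper that loops on nfs4_handle_exception() until the exception state says to stop. A sketch of that wrapper shape, using a made-up operation name and assuming struct nfs4_exception and nfs4_handle_exception() as they appear in the surrounding hunks:

static int _nfs4_do_frobnicate(struct nfs_server *server);	/* one RPC attempt */

static int nfs4_do_frobnicate(struct nfs_server *server)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_do_frobnicate(server);
		if (err == 0)
			break;
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

nfs4_open_reclaim(), nfs4_open_delegation_recall() and nfs4_open_delegated() above all follow this template, differing only in which errors they return immediately instead of retrying.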
*/ + down_read(&clp->cl_sem); + /* Protect against delegation recall */ + down_read(&nfsi->rwsem); + delegation = NFS_I(inode)->delegation; + err = -ENOENT; + if (delegation == NULL || (delegation->type & open_flags) != open_flags) + goto out_err; + err = -ENOMEM; + if (!(sp = nfs4_get_state_owner(server, cred))) { + dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__); + goto out_err; + } + down(&sp->so_sema); + state = nfs4_get_open_state(inode, sp); + if (state == NULL) + goto out_err; + + err = -ENOENT; + if ((state->state & open_flags) == open_flags) { + spin_lock(&inode->i_lock); + if (open_flags & FMODE_READ) + state->nreaders++; + if (open_flags & FMODE_WRITE) + state->nwriters++; + spin_unlock(&inode->i_lock); + goto out_ok; + } else if (state->state != 0) + goto out_err; + + lock_kernel(); + err = _nfs4_do_access(inode, cred, mask); + unlock_kernel(); + if (err != 0) + goto out_err; + spin_lock(&inode->i_lock); + memcpy(state->stateid.data, delegation->stateid.data, + sizeof(state->stateid.data)); + state->state |= open_flags; + if (open_flags & FMODE_READ) + state->nreaders++; + if (open_flags & FMODE_WRITE) + state->nwriters++; + set_bit(NFS_DELEGATED_STATE, &state->flags); + spin_unlock(&inode->i_lock); +out_ok: + up(&sp->so_sema); + nfs4_put_state_owner(sp); + up_read(&nfsi->rwsem); + up_read(&clp->cl_sem); + igrab(inode); + *res = state; + return 0; +out_err: + if (sp != NULL) { + if (state != NULL) + nfs4_put_open_state(state); + up(&sp->so_sema); + nfs4_put_state_owner(sp); + } + up_read(&nfsi->rwsem); + up_read(&clp->cl_sem); + return err; +} + +static struct nfs4_state *nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred) +{ + struct nfs4_exception exception = { }; + struct nfs4_state *res; + int err; + + do { + err = _nfs4_open_delegated(inode, flags, cred, &res); + if (err == 0) + break; + res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(inode), + err, &exception)); + } while (exception.retry); + return res; +} + /* * Returns an nfs4_state + an referenced inode */ -struct nfs4_state * -nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr, struct rpc_cred *cred) +static int _nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) { struct nfs4_state_owner *sp; struct nfs4_state *state = NULL; struct nfs_server *server = NFS_SERVER(dir); + struct nfs4_client *clp = server->nfs4_state; struct inode *inode = NULL; int status; struct nfs_fattr f_attr = { @@ -243,12 +490,11 @@ nfs4_do_open(struct inode *dir, struct q }; struct nfs_openargs o_arg = { .fh = NFS_FH(dir), - .share_access = flags & (FMODE_READ|FMODE_WRITE), - .opentype = (flags & O_CREAT) ? NFS4_OPEN_CREATE : NFS4_OPEN_NOCREATE, - .createmode = (flags & O_EXCL) ? 
NFS4_CREATE_EXCLUSIVE : NFS4_CREATE_UNCHECKED, + .open_flags = flags, .name = name, .server = server, .bitmask = server->attr_bitmask, + .claim = NFS4_OPEN_CLAIM_NULL, }; struct nfs_openres o_res = { .f_attr = &f_attr, @@ -261,60 +507,44 @@ nfs4_do_open(struct inode *dir, struct q .rpc_cred = cred, }; -retry: + /* Protect against reboot recovery conflicts */ + down_read(&clp->cl_sem); status = -ENOMEM; - if (!(sp = nfs4_get_state_owner(NFS_SERVER(dir), cred))) { + if (!(sp = nfs4_get_state_owner(server, cred))) { dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); - goto out; + goto out_err; } - if (o_arg.createmode & NFS4_CREATE_EXCLUSIVE){ + if (flags & O_EXCL) { u32 *p = (u32 *) o_arg.u.verifier.data; p[0] = jiffies; p[1] = current->pid; - } else if (o_arg.createmode == NFS4_CREATE_UNCHECKED) { + } else o_arg.u.attrs = sattr; - } /* Serialization for the sequence id */ down(&sp->so_sema); o_arg.seqid = sp->so_seqid; o_arg.id = sp->so_id; - o_arg.clientid = NFS_SERVER(dir)->nfs4_state->cl_clientid, + o_arg.clientid = clp->cl_clientid; status = rpc_call_sync(server->client, &msg, 0); nfs4_increment_seqid(status, sp); if (status) - goto out_up; + goto out_err; update_changeattr(dir, &o_res.cinfo); + if(o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) { + status = _nfs4_proc_open_confirm(server->client, &o_res.fh, sp, &o_res.stateid); + if (status) + goto out_err; + } status = -ENOMEM; inode = nfs_fhget(dir->i_sb, &o_res.fh, &f_attr); if (!inode) - goto out_up; + goto out_err; state = nfs4_get_open_state(inode, sp); if (!state) - goto out_up; - - if(o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) { - struct nfs_open_confirmargs oc_arg = { - .fh = &o_res.fh, - .seqid = sp->so_seqid, - }; - struct nfs_open_confirmres oc_res; - struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], - .rpc_argp = &oc_arg, - .rpc_resp = &oc_res, - .rpc_cred = cred, - }; - - memcpy(&oc_arg.stateid, &o_res.stateid, sizeof(oc_arg.stateid)); - status = rpc_call_sync(server->client, &msg, 0); - nfs4_increment_seqid(status, sp); - if (status) - goto out_up; - memcpy(&state->stateid, &oc_res.stateid, sizeof(state->stateid)); - } else - memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid)); + goto out_err; + memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid)); spin_lock(&inode->i_lock); if (flags & FMODE_READ) state->nreaders++; @@ -322,47 +552,62 @@ retry: state->nwriters++; state->state |= flags & (FMODE_READ|FMODE_WRITE); spin_unlock(&inode->i_lock); - + if (o_res.delegation_type != 0) + nfs_inode_set_delegation(inode, cred, &o_res); up(&sp->so_sema); nfs4_put_state_owner(sp); - return state; - -out_up: - up(&sp->so_sema); - nfs4_put_state_owner(sp); - if (state) { - nfs4_put_open_state(state); - state = NULL; - } - if (inode) { + up_read(&clp->cl_sem); + *res = state; + return 0; +out_err: + if (sp != NULL) { + if (state != NULL) + nfs4_put_open_state(state); + up(&sp->so_sema); + nfs4_put_state_owner(sp); + } + /* Note: clp->cl_sem must be released before nfs4_put_open_state()! */ + up_read(&clp->cl_sem); + if (inode != NULL) iput(inode); - inode = NULL; - } - /* NOTE: BAD_SEQID means the server and client disagree about the - * book-keeping w.r.t. state-changing operations - * (OPEN/CLOSE/LOCK/LOCKU...) - * It is actually a sign of a bug on the client or on the server. 
- * - * If we receive a BAD_SEQID error in the particular case of - * doing an OPEN, we assume that nfs4_increment_seqid() will - * have unhashed the old state_owner for us, and that we can - * therefore safely retry using a new one. We should still warn - * the user though... - */ - if (status == -NFS4ERR_BAD_SEQID) { - printk(KERN_WARNING "NFS: v4 server returned a bad sequence-id error!\n"); - goto retry; - } - status = nfs4_handle_error(server, status); - if (!status) - goto retry; - BUG_ON(status < -1000 || status > 0); -out: - return ERR_PTR(status); + *res = NULL; + return status; } -int -nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr, + +struct nfs4_state *nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr, struct rpc_cred *cred) +{ + struct nfs4_exception exception = { }; + struct nfs4_state *res; + int status; + + do { + status = _nfs4_do_open(dir, name, flags, sattr, cred, &res); + if (status == 0) + break; + /* NOTE: BAD_SEQID means the server and client disagree about the + * book-keeping w.r.t. state-changing operations + * (OPEN/CLOSE/LOCK/LOCKU...) + * It is actually a sign of a bug on the client or on the server. + * + * If we receive a BAD_SEQID error in the particular case of + * doing an OPEN, we assume that nfs4_increment_seqid() will + * have unhashed the old state_owner for us, and that we can + * therefore safely retry using a new one. We should still warn + * the user though... + */ + if (status == -NFS4ERR_BAD_SEQID) { + printk(KERN_WARNING "NFS: v4 server returned a bad sequence-id error!\n"); + exception.retry = 1; + continue; + } + res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), + status, &exception)); + } while (exception.retry); + return res; +} + +static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr, struct nfs_fh *fhandle, struct iattr *sattr, struct nfs4_state *state) { @@ -381,9 +626,7 @@ nfs4_do_setattr(struct nfs_server *serve .rpc_argp = &arg, .rpc_resp = &res, }; - int status; -retry: fattr->valid = 0; if (sattr->ia_valid & ATTR_SIZE) @@ -391,13 +634,22 @@ retry: else memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid)); - status = rpc_call_sync(server->client, &msg, 0); - if (status) { - status = nfs4_handle_error(server, status); - if (!status) - goto retry; - } - return status; + return rpc_call_sync(server->client, &msg, 0); +} + +int nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr, + struct nfs_fh *fhandle, struct iattr *sattr, + struct nfs4_state *state) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(server, + _nfs4_do_setattr(server, fattr, fhandle, sattr, + state), + &exception); + } while (exception.retry); + return err; } /* @@ -411,8 +663,7 @@ retry: * * NOTE: Caller must be holding the sp->so_owner semaphore! 
*/ -int -nfs4_do_close(struct inode *inode, struct nfs4_state *state) +static int _nfs4_do_close(struct inode *inode, struct nfs4_state *state) { struct nfs4_state_owner *sp = state->owner; int status = 0; @@ -426,6 +677,8 @@ nfs4_do_close(struct inode *inode, struc .rpc_resp = &res, }; + if (test_bit(NFS_DELEGATED_STATE, &state->flags)) + return 0; memcpy(&arg.stateid, &state->stateid, sizeof(arg.stateid)); /* Serialization for the sequence id */ arg.seqid = sp->so_seqid, @@ -441,15 +694,34 @@ nfs4_do_close(struct inode *inode, struc return status; } -int -nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode) +int nfs4_do_close(struct inode *inode, struct nfs4_state *state) +{ + struct nfs_server *server = NFS_SERVER(state->inode); + struct nfs4_exception exception = { }; + int err; + do { + err = _nfs4_do_close(inode, state); + switch (err) { + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + nfs4_schedule_state_recovery(server->nfs4_state); + case 0: + state->state = 0; + return 0; + } + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode) { struct nfs4_state_owner *sp = state->owner; int status = 0; struct nfs_closeargs arg = { .fh = NFS_FH(inode), .seqid = sp->so_seqid, - .share_access = mode, + .open_flags = mode, }; struct nfs_closeres res; struct rpc_message msg = { @@ -458,6 +730,8 @@ nfs4_do_downgrade(struct inode *inode, s .rpc_resp = &res, }; + if (test_bit(NFS_DELEGATED_STATE, &state->flags)) + return 0; memcpy(&arg.stateid, &state->stateid, sizeof(arg.stateid)); status = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0); nfs4_increment_seqid(status, sp); @@ -467,6 +741,26 @@ nfs4_do_downgrade(struct inode *inode, s return status; } +int nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode) +{ + struct nfs_server *server = NFS_SERVER(state->inode); + struct nfs4_exception exception = { }; + int err; + do { + err = _nfs4_do_downgrade(inode, state, mode); + switch (err) { + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + nfs4_schedule_state_recovery(server->nfs4_state); + case 0: + state->state = mode; + return 0; + } + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); + return err; +} + struct inode * nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { @@ -500,7 +794,9 @@ nfs4_open_revalidate(struct inode *dir, struct inode *inode; cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0); - state = nfs4_do_open(dir, &dentry->d_name, openflags, NULL, cred); + state = nfs4_open_delegated(dentry->d_inode, openflags, cred); + if (IS_ERR(state)) + state = nfs4_do_open(dir, &dentry->d_name, openflags, NULL, cred); put_rpccred(cred); if (state == ERR_PTR(-ENOENT) && dentry->d_inode == 0) return 1; @@ -518,7 +814,7 @@ nfs4_open_revalidate(struct inode *dir, } -static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) +static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { struct nfs4_server_caps_res res = {}; struct rpc_message msg = { @@ -542,7 +838,19 @@ static int nfs4_server_capabilities(stru return status; } -static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, +static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = 
nfs4_handle_exception(server, + _nfs4_server_capabilities(server, fhandle), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { struct nfs_fattr * fattr = info->fattr; @@ -563,6 +871,19 @@ static int nfs4_lookup_root(struct nfs_s return rpc_call_sync(server->client, &msg, 0); } +static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_fsinfo *info) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(server, + _nfs4_lookup_root(server, fhandle, info), + &exception); + } while (exception.retry); + return err; +} + static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { @@ -597,6 +918,8 @@ static int nfs4_proc_get_root(struct nfs p = server->mnt_path; for (;;) { + struct nfs4_exception exception = { }; + while (*p == '/') p++; if (!*p) @@ -606,9 +929,13 @@ static int nfs4_proc_get_root(struct nfs p++; q.len = p - q.name; - fattr->valid = 0; - status = rpc_call_sync(server->client, &msg, 0); - if (!status) + do { + fattr->valid = 0; + status = nfs4_handle_exception(server, + rpc_call_sync(server->client, &msg, 0), + &exception); + } while (exception.retry); + if (status == 0) continue; if (status == -ENOENT) { printk(KERN_NOTICE "NFS: mount path %s does not exist!\n", server->mnt_path); @@ -621,10 +948,10 @@ static int nfs4_proc_get_root(struct nfs if (status == 0) status = nfs4_do_fsinfo(server, fhandle, info); out: - return nfs4_map_errors(status); + return status; } -static int nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr) +static int _nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_getattr_arg args = { @@ -642,8 +969,19 @@ static int nfs4_proc_getattr(struct inod }; fattr->valid = 0; + return rpc_call_sync(NFS_CLIENT(inode), &msg, 0); +} - return nfs4_map_errors(rpc_call_sync(NFS_CLIENT(inode), &msg, 0)); +static int nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), + _nfs4_proc_getattr(inode, fattr), + &exception); + } while (exception.retry); + return err; } /* @@ -678,9 +1016,13 @@ nfs4_proc_setattr(struct dentry *dentry, if (size_change) { struct rpc_cred *cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0); state = nfs4_find_state(inode, cred, FMODE_WRITE); - if (!state) { - state = nfs4_do_open(dentry->d_parent->d_inode, - &dentry->d_name, FMODE_WRITE, NULL, cred); + if (state == NULL) { + state = nfs4_open_delegated(dentry->d_inode, + FMODE_WRITE, cred); + if (IS_ERR(state)) + state = nfs4_do_open(dentry->d_parent->d_inode, + &dentry->d_name, FMODE_WRITE, + NULL, cred); need_iput = 1; } put_rpccred(cred); @@ -705,7 +1047,7 @@ out: return status; } -static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, +static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { int status; @@ -731,12 +1073,23 @@ static int nfs4_proc_lookup(struct inode dprintk("NFS call lookup %s\n", name->name); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); dprintk("NFS reply lookup: %d\n", status); - return nfs4_map_errors(status); + return status; } -static int nfs4_proc_access(struct inode *inode, struct rpc_cred *cred, int mode) +static int nfs4_proc_lookup(struct 
inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), + _nfs4_proc_lookup(dir, name, fhandle, fattr), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) { - int status; struct nfs4_accessargs args = { .fh = NFS_FH(inode), }; @@ -745,8 +1098,10 @@ static int nfs4_proc_access(struct inode .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], .rpc_argp = &args, .rpc_resp = &res, - .rpc_cred = cred, + .rpc_cred = entry->cred, }; + int mode = entry->mask; + int status; /* * Determine which access bits we want to ask for... @@ -758,8 +1113,7 @@ static int nfs4_proc_access(struct inode args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; if (mode & MAY_EXEC) args.access |= NFS4_ACCESS_LOOKUP; - } - else { + } else { if (mode & MAY_WRITE) args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; if (mode & MAY_EXEC) @@ -767,13 +1121,27 @@ static int nfs4_proc_access(struct inode } status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); if (!status) { - if (args.access != res.supported) { - printk(KERN_NOTICE "NFS: server didn't support all access bits!\n"); - status = -ENOTSUPP; - } else if ((args.access & res.access) != args.access) - status = -EACCES; + entry->mask = 0; + if (res.access & NFS4_ACCESS_READ) + entry->mask |= MAY_READ; + if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE)) + entry->mask |= MAY_WRITE; + if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE)) + entry->mask |= MAY_EXEC; } - return nfs4_map_errors(status); + return status; +} + +static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), + _nfs4_proc_access(inode, entry), + &exception); + } while (exception.retry); + return err; } /* @@ -800,7 +1168,7 @@ static int nfs4_proc_access(struct inode * Both of these changes to the XDR layer would in fact be quite * minor, but I decided to leave them for a subsequent patch. 
*/ -static int nfs4_proc_readlink(struct inode *inode, struct page *page) +static int _nfs4_proc_readlink(struct inode *inode, struct page *page) { struct nfs4_readlink args = { .fh = NFS_FH(inode), @@ -813,11 +1181,22 @@ static int nfs4_proc_readlink(struct ino .rpc_resp = NULL, }; - return nfs4_map_errors(rpc_call_sync(NFS_CLIENT(inode), &msg, 0)); + return rpc_call_sync(NFS_CLIENT(inode), &msg, 0); } -static int -nfs4_proc_read(struct nfs_read_data *rdata, struct file *filp) +static int nfs4_proc_readlink(struct inode *inode, struct page *page) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), + _nfs4_proc_readlink(inode, page), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_read(struct nfs_read_data *rdata) { int flags = rdata->flags; struct inode *inode = rdata->inode; @@ -827,6 +1206,7 @@ nfs4_proc_read(struct nfs_read_data *rda .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ], .rpc_argp = &rdata->args, .rpc_resp = &rdata->res, + .rpc_cred = rdata->cred, }; unsigned long timestamp = jiffies; int status; @@ -834,29 +1214,27 @@ nfs4_proc_read(struct nfs_read_data *rda dprintk("NFS call read %d @ %Ld\n", rdata->args.count, (long long) rdata->args.offset); - /* - * Try first to use O_RDONLY, then O_RDWR stateid. - */ - if (filp) { - struct nfs4_state *state; - state = (struct nfs4_state *)filp->private_data; - rdata->args.state = state; - msg.rpc_cred = state->owner->so_cred; - } else { - rdata->args.state = NULL; - msg.rpc_cred = NFS_I(inode)->mm_cred; - } - fattr->valid = 0; status = rpc_call_sync(server->client, &msg, flags); if (!status) renew_lease(server, timestamp); dprintk("NFS reply read: %d\n", status); - return nfs4_map_errors(status); + return status; } -static int -nfs4_proc_write(struct nfs_write_data *wdata, struct file *filp) +static int nfs4_proc_read(struct nfs_read_data *rdata) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(rdata->inode), + _nfs4_proc_read(rdata), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_write(struct nfs_write_data *wdata) { int rpcflags = wdata->flags; struct inode *inode = wdata->inode; @@ -866,33 +1244,32 @@ nfs4_proc_write(struct nfs_write_data *w .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE], .rpc_argp = &wdata->args, .rpc_resp = &wdata->res, + .rpc_cred = wdata->cred, }; int status; dprintk("NFS call write %d @ %Ld\n", wdata->args.count, (long long) wdata->args.offset); - /* - * Try first to use O_WRONLY, then O_RDWR stateid. 
- */ - if (filp) { - struct nfs4_state *state; - state = (struct nfs4_state *)filp->private_data; - wdata->args.state = state; - msg.rpc_cred = state->owner->so_cred; - } else { - wdata->args.state = NULL; - msg.rpc_cred = NFS_I(inode)->mm_cred; - } - fattr->valid = 0; status = rpc_call_sync(server->client, &msg, rpcflags); dprintk("NFS reply write: %d\n", status); - return nfs4_map_errors(status); + return status; } -static int -nfs4_proc_commit(struct nfs_write_data *cdata, struct file *filp) +static int nfs4_proc_write(struct nfs_write_data *wdata) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(wdata->inode), + _nfs4_proc_write(wdata), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_commit(struct nfs_write_data *cdata) { struct inode *inode = cdata->inode; struct nfs_fattr *fattr = cdata->res.fattr; @@ -901,24 +1278,29 @@ nfs4_proc_commit(struct nfs_write_data * .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], .rpc_argp = &cdata->args, .rpc_resp = &cdata->res, + .rpc_cred = cdata->cred, }; int status; dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, (long long) cdata->args.offset); - /* - * Try first to use O_WRONLY, then O_RDWR stateid. - */ - if (filp) - msg.rpc_cred = ((struct nfs4_state *)filp->private_data)->owner->so_cred; - else - msg.rpc_cred = NFS_I(inode)->mm_cred; - fattr->valid = 0; status = rpc_call_sync(server->client, &msg, 0); dprintk("NFS reply commit: %d\n", status); - return nfs4_map_errors(status); + return status; +} + +static int nfs4_proc_commit(struct nfs_write_data *cdata) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(cdata->inode), + _nfs4_proc_commit(cdata), + &exception); + } while (exception.retry); + return err; } /* @@ -965,7 +1347,7 @@ nfs4_proc_create(struct inode *dir, stru return inode; } -static int nfs4_proc_remove(struct inode *dir, struct qstr *name) +static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) { struct nfs4_remove_arg args = { .fh = NFS_FH(dir), @@ -982,7 +1364,19 @@ static int nfs4_proc_remove(struct inode status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (status == 0) update_changeattr(dir, &res); - return nfs4_map_errors(status); + return status; +} + +static int nfs4_proc_remove(struct inode *dir, struct qstr *name) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), + _nfs4_proc_remove(dir, name), + &exception); + } while (exception.retry); + return err; } struct unlink_desc { @@ -1023,7 +1417,7 @@ static int nfs4_proc_unlink_done(struct return 0; } -static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, +static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, struct inode *new_dir, struct qstr *new_name) { struct nfs4_rename_arg arg = { @@ -1046,10 +1440,24 @@ static int nfs4_proc_rename(struct inode update_changeattr(old_dir, &res.old_cinfo); update_changeattr(new_dir, &res.new_cinfo); } - return nfs4_map_errors(status); + return status; } -static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) +static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, + struct inode *new_dir, struct qstr *new_name) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(old_dir), + _nfs4_proc_rename(old_dir, old_name, + new_dir, new_name), + &exception); + } while (exception.retry); + 
return err; +} + +static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) { struct nfs4_link_arg arg = { .fh = NFS_FH(inode), @@ -1068,10 +1476,22 @@ static int nfs4_proc_link(struct inode * if (!status) update_changeattr(dir, &cinfo); - return nfs4_map_errors(status); + return status; } -static int nfs4_proc_symlink(struct inode *dir, struct qstr *name, +static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), + _nfs4_proc_link(inode, dir, name), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path, struct iattr *sattr, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { @@ -1090,22 +1510,39 @@ static int nfs4_proc_symlink(struct inod .fattr = fattr, }; struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE], + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK], .rpc_argp = &arg, .rpc_resp = &res, }; int status; + if (path->len > NFS4_MAXPATHLEN) + return -ENAMETOOLONG; arg.u.symlink = path; fattr->valid = 0; status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (!status) update_changeattr(dir, &res.dir_cinfo); - return nfs4_map_errors(status); + return status; } -static int nfs4_proc_mkdir(struct inode *dir, struct qstr *name, +static int nfs4_proc_symlink(struct inode *dir, struct qstr *name, + struct qstr *path, struct iattr *sattr, struct nfs_fh *fhandle, + struct nfs_fattr *fattr) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), + _nfs4_proc_symlink(dir, name, path, sattr, + fhandle, fattr), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_mkdir(struct inode *dir, struct qstr *name, struct iattr *sattr, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { @@ -1135,10 +1572,25 @@ static int nfs4_proc_mkdir(struct inode status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (!status) update_changeattr(dir, &res.dir_cinfo); - return nfs4_map_errors(status); + return status; } -static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, +static int nfs4_proc_mkdir(struct inode *dir, struct qstr *name, + struct iattr *sattr, struct nfs_fh *fhandle, + struct nfs_fattr *fattr) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), + _nfs4_proc_mkdir(dir, name, sattr, + fhandle, fattr), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page *page, unsigned int count, int plus) { struct inode *dir = dentry->d_inode; @@ -1164,10 +1616,24 @@ static int nfs4_proc_readdir(struct dent if (status == 0) memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); unlock_kernel(); - return nfs4_map_errors(status); + return status; } -static int nfs4_proc_mknod(struct inode *dir, struct qstr *name, +static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, + u64 cookie, struct page *page, unsigned int count, int plus) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), + _nfs4_proc_readdir(dentry, cred, cookie, + page, count, plus), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_mknod(struct inode *dir, struct qstr *name, 
struct iattr *sattr, dev_t rdev, struct nfs_fh *fh, struct nfs_fattr *fattr) { @@ -1214,10 +1680,25 @@ static int nfs4_proc_mknod(struct inode status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (!status) update_changeattr(dir, &res.dir_cinfo); - return nfs4_map_errors(status); + return status; +} + +static int nfs4_proc_mknod(struct inode *dir, struct qstr *name, + struct iattr *sattr, dev_t rdev, struct nfs_fh *fh, + struct nfs_fattr *fattr) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), + _nfs4_proc_mknod(dir, name, sattr, rdev, + fh, fattr), + &exception); + } while (exception.retry); + return err; } -static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, +static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) { struct nfs4_statfs_arg args = { @@ -1231,10 +1712,22 @@ static int nfs4_proc_statfs(struct nfs_s }; fsstat->fattr->valid = 0; - return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0)); + return rpc_call_sync(server->client, &msg, 0); +} + +static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(server, + _nfs4_proc_statfs(server, fhandle, fsstat), + &exception); + } while (exception.retry); + return err; } -static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, +static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) { struct nfs4_fsinfo_arg args = { @@ -1247,16 +1740,29 @@ static int nfs4_do_fsinfo(struct nfs_ser .rpc_resp = fsinfo, }; - return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0)); + return rpc_call_sync(server->client, &msg, 0); +} + +static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = nfs4_handle_exception(server, + _nfs4_do_fsinfo(server, fhandle, fsinfo), + &exception); + } while (exception.retry); + return err; } static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) { fsinfo->fattr->valid = 0; - return nfs4_map_errors(nfs4_do_fsinfo(server, fhandle, fsinfo)); + return nfs4_do_fsinfo(server, fhandle, fsinfo); } -static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, +static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_pathconf *pathconf) { struct nfs4_pathconf_arg args = { @@ -1276,7 +1782,21 @@ static int nfs4_proc_pathconf(struct nfs } pathconf->fattr->valid = 0; - return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0)); + return rpc_call_sync(server->client, &msg, 0); +} + +static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_pathconf *pathconf) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = nfs4_handle_exception(server, + _nfs4_proc_pathconf(server, fhandle, pathconf), + &exception); + } while (exception.retry); + return err; } static void @@ -1467,8 +1987,10 @@ static int nfs4_proc_file_open(struct inode *inode, struct file *filp) { struct dentry *dentry = filp->f_dentry; - struct nfs4_state *state; + struct nfs_open_context *ctx; + struct nfs4_state *state = NULL; struct rpc_cred *cred; + int status = -ENOMEM; dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n", 
(int)dentry->d_parent->d_name.len, @@ -1478,21 +2000,28 @@ nfs4_proc_file_open(struct inode *inode, /* Find our open stateid */ cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0); - state = nfs4_find_state(inode, cred, filp->f_mode); + if (unlikely(cred == NULL)) + return -ENOMEM; + ctx = alloc_nfs_open_context(dentry, cred); put_rpccred(cred); - if (state == NULL) { - printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__); - return -EIO; /* ERACE actually */ - } + if (unlikely(ctx == NULL)) + return -ENOMEM; + status = -EIO; /* ERACE actually */ + state = nfs4_find_state(inode, cred, filp->f_mode); + if (unlikely(state == NULL)) + goto no_state; + ctx->state = state; nfs4_close_state(state, filp->f_mode); - if (filp->f_mode & FMODE_WRITE) { - lock_kernel(); - nfs_set_mmcred(inode, state->owner->so_cred); + ctx->mode = filp->f_mode; + nfs_file_set_open_context(filp, ctx); + put_nfs_open_context(ctx); + if (filp->f_mode & FMODE_WRITE) nfs_begin_data_update(inode); - unlock_kernel(); - } - filp->private_data = state; return 0; +no_state: + printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__); + put_nfs_open_context(ctx); + return status; } /* @@ -1501,37 +2030,12 @@ nfs4_proc_file_open(struct inode *inode, static int nfs4_proc_file_release(struct inode *inode, struct file *filp) { - struct nfs4_state *state = (struct nfs4_state *)filp->private_data; - - if (state) - nfs4_close_state(state, filp->f_mode); - if (filp->f_mode & FMODE_WRITE) { - lock_kernel(); + if (filp->f_mode & FMODE_WRITE) nfs_end_data_update(inode); - unlock_kernel(); - } + nfs_file_clear_open_context(filp); return 0; } -/* - * Set up the nfspage struct with the right state info and credentials - */ -static void -nfs4_request_init(struct nfs_page *req, struct file *filp) -{ - struct nfs4_state *state; - - if (!filp) { - req->wb_cred = get_rpccred(NFS_I(req->wb_inode)->mm_cred); - req->wb_state = NULL; - return; - } - state = (struct nfs4_state *)filp->private_data; - req->wb_state = state; - req->wb_cred = get_rpccred(state->owner->so_cred); - req->wb_lockowner = current->files; -} - static int nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server) { @@ -1545,11 +2049,13 @@ nfs4_async_handle_error(struct rpc_task case -NFS4ERR_EXPIRED: rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL); nfs4_schedule_state_recovery(clp); + if (test_bit(NFS4CLNT_OK, &clp->cl_state)) + rpc_wake_up_task(task); task->tk_status = 0; return -EAGAIN; case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: - rpc_delay(task, NFS4_POLL_RETRY_TIME); + rpc_delay(task, NFS4_POLL_RETRY_MAX); task->tk_status = 0; return -EAGAIN; case -NFS4ERR_OLD_STATEID: @@ -1560,12 +2066,11 @@ nfs4_async_handle_error(struct rpc_task return 0; } -int -nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp) +int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp) { DEFINE_WAIT(wait); sigset_t oldset; - int interruptible, res; + int interruptible, res = 0; might_sleep(); @@ -1573,101 +2078,85 @@ nfs4_wait_clnt_recover(struct rpc_clnt * interruptible = TASK_UNINTERRUPTIBLE; if (clnt->cl_intr) interruptible = TASK_INTERRUPTIBLE; - do { - res = 0; - prepare_to_wait(&clp->cl_waitq, &wait, interruptible); - nfs4_schedule_state_recovery(clp); - if (test_bit(NFS4CLNT_OK, &clp->cl_state) && - !test_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state)) - break; - if (clnt->cl_intr && signalled()) { - res = -ERESTARTSYS; - break; - } + prepare_to_wait(&clp->cl_waitq, &wait, interruptible); + 
nfs4_schedule_state_recovery(clp); + if (clnt->cl_intr && signalled()) + res = -ERESTARTSYS; + else if (!test_bit(NFS4CLNT_OK, &clp->cl_state)) schedule(); - } while(!test_bit(NFS4CLNT_OK, &clp->cl_state)); finish_wait(&clp->cl_waitq, &wait); rpc_clnt_sigunmask(clnt, &oldset); return res; } -static int -nfs4_delay(struct rpc_clnt *clnt) +static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) { sigset_t oldset; int res = 0; might_sleep(); + if (*timeout <= 0) + *timeout = NFS4_POLL_RETRY_MIN; + if (*timeout > NFS4_POLL_RETRY_MAX) + *timeout = NFS4_POLL_RETRY_MAX; rpc_clnt_sigmask(clnt, &oldset); if (clnt->cl_intr) { set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(NFS4_POLL_RETRY_TIME); + schedule_timeout(*timeout); if (signalled()) res = -ERESTARTSYS; } else { set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(NFS4_POLL_RETRY_TIME); + schedule_timeout(*timeout); } rpc_clnt_sigunmask(clnt, &oldset); + *timeout <<= 1; return res; } /* This is the error handling routine for processes that are allowed * to sleep. */ -int -nfs4_handle_error(struct nfs_server *server, int errorcode) +int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) { struct nfs4_client *clp = server->nfs4_state; int ret = errorcode; + exception->retry = 0; switch(errorcode) { + case 0: + return 0; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: ret = nfs4_wait_clnt_recover(server->client, clp); + if (ret == 0) + exception->retry = 1; break; case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: - ret = nfs4_delay(server->client); + ret = nfs4_delay(server->client, &exception->timeout); + if (ret == 0) + exception->retry = 1; break; case -NFS4ERR_OLD_STATEID: - ret = 0; + if (ret == 0) + exception->retry = 1; } /* We failed to handle the error */ return nfs4_map_errors(ret); } - -static int -nfs4_request_compatible(struct nfs_page *req, struct file *filp, struct page *page) -{ - struct nfs4_state *state = NULL; - struct rpc_cred *cred = NULL; - - if (req->wb_file != filp) - return 0; - if (req->wb_page != page) - return 0; - state = (struct nfs4_state *)filp->private_data; - if (req->wb_state != state) - return 0; - if (req->wb_lockowner != current->files) - return 0; - cred = state->owner->so_cred; - if (req->wb_cred != cred) - return 0; - return 1; -} - -int -nfs4_proc_setclientid(struct nfs4_client *clp, - u32 program, unsigned short port) +int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short port) { - u32 *p; - struct nfs4_setclientid setclientid; - struct timespec tv; + static nfs4_verifier sc_verifier; + static int initialized; + + struct nfs4_setclientid setclientid = { + .sc_verifier = &sc_verifier, + .sc_prog = program, + }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], .rpc_argp = &setclientid, @@ -1675,15 +2164,24 @@ nfs4_proc_setclientid(struct nfs4_client .rpc_cred = clp->cl_cred, }; - tv = CURRENT_TIME; - p = (u32*)setclientid.sc_verifier.data; - *p++ = (u32)tv.tv_sec; - *p = (u32)tv.tv_nsec; - setclientid.sc_name = clp->cl_ipaddr; - sprintf(setclientid.sc_netid, "tcp"); - sprintf(setclientid.sc_uaddr, "%s.%d.%d", clp->cl_ipaddr, port >> 8, port & 255); - setclientid.sc_prog = htonl(program); - setclientid.sc_cb_ident = 0; + if (!initialized) { + struct timespec boot_time; + u32 *p; + + initialized = 1; + boot_time = CURRENT_TIME; + p = (u32*)sc_verifier.data; + *p++ = htonl((u32)boot_time.tv_sec); + *p = htonl((u32)boot_time.tv_nsec); + } + 
setclientid.sc_name_len = scnprintf(setclientid.sc_name, + sizeof(setclientid.sc_name), "%s/%u.%u.%u.%u", + clp->cl_ipaddr, NIPQUAD(clp->cl_addr.s_addr)); + setclientid.sc_netid_len = scnprintf(setclientid.sc_netid, + sizeof(setclientid.sc_netid), "tcp"); + setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, + sizeof(setclientid.sc_uaddr), "%s.%d.%d", + clp->cl_ipaddr, port >> 8, port & 255); return rpc_call_sync(clp->cl_rpcclient, &msg, 0); } @@ -1712,6 +2210,40 @@ nfs4_proc_setclientid_confirm(struct nfs return status; } +static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid) +{ + struct nfs4_delegreturnargs args = { + .fhandle = NFS_FH(inode), + .stateid = stateid, + }; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], + .rpc_argp = &args, + .rpc_cred = cred, + }; + + return rpc_call_sync(NFS_CLIENT(inode), &msg, 0); +} + +int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid) +{ + struct nfs_server *server = NFS_SERVER(inode); + struct nfs4_exception exception = { }; + int err; + do { + err = _nfs4_proc_delegreturn(inode, cred, stateid); + switch (err) { + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + nfs4_schedule_state_recovery(server->nfs4_state); + case 0: + return 0; + } + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); + return err; +} + #define NFS4_LOCK_MINTIMEOUT (1 * HZ) #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) @@ -1753,8 +2285,7 @@ nfs4_lck_length(struct file_lock *reques return request->fl_end - request->fl_start + 1; } -int -nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) +static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) { struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(inode); @@ -1778,6 +2309,7 @@ nfs4_proc_getlk(struct nfs4_state *state struct nfs4_lock_state *lsp; int status; + down_read(&clp->cl_sem); nlo.clientid = clp->cl_clientid; down(&state->lock_sema); lsp = nfs4_find_lock_state(state, request->fl_owner); @@ -1811,14 +2343,28 @@ nfs4_proc_getlk(struct nfs4_state *state if (lsp) nfs4_put_lock_state(lsp); up(&state->lock_sema); - return nfs4_map_errors(status); + up_read(&clp->cl_sem); + return status; } -int -nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) +static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = nfs4_handle_exception(NFS_SERVER(state->inode), + _nfs4_proc_getlk(state, cmd, request), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) { struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(inode); + struct nfs4_client *clp = server->nfs4_state; struct nfs_lockargs arg = { .fh = NFS_FH(inode), .type = nfs4_lck_type(cmd, request), @@ -1838,29 +2384,48 @@ nfs4_proc_unlck(struct nfs4_state *state struct nfs_locku_opargs luargs; int status = 0; + down_read(&clp->cl_sem); down(&state->lock_sema); lsp = nfs4_find_lock_state(state, request->fl_owner); if (!lsp) goto out; - luargs.seqid = lsp->ls_seqid; - memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid)); - arg.u.locku = &luargs; - status = rpc_call_sync(server->client, &msg, 0); - nfs4_increment_lock_seqid(status, lsp); + /* We might have lost the locks! 
*/ + if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) { + luargs.seqid = lsp->ls_seqid; + memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid)); + arg.u.locku = &luargs; + status = rpc_call_sync(server->client, &msg, 0); + nfs4_increment_lock_seqid(status, lsp); + } if (status == 0) { memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(lsp->ls_stateid)); - nfs4_notify_unlck(inode, request, lsp); + nfs4_notify_unlck(state, request, lsp); } nfs4_put_lock_state(lsp); out: up(&state->lock_sema); - return nfs4_map_errors(status); + if (status == 0) + posix_lock_file(request->fl_file, request); + up_read(&clp->cl_sem); + return status; } -static int -nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) +static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = nfs4_handle_exception(NFS_SERVER(state->inode), + _nfs4_proc_unlck(state, cmd, request), + &exception); + } while (exception.retry); + return err; +} + +static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim) { struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(inode); @@ -1881,23 +2446,22 @@ nfs4_proc_setlk(struct nfs4_state *state .rpc_cred = state->owner->so_cred, }; struct nfs_lock_opargs largs = { + .reclaim = reclaim, .new_lock_owner = 0, }; int status; - down(&state->lock_sema); - lsp = nfs4_find_lock_state(state, request->fl_owner); - if (lsp == NULL) { + lsp = nfs4_get_lock_state(state, request->fl_owner); + if (lsp == NULL) + return -ENOMEM; + if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) { struct nfs4_state_owner *owner = state->owner; struct nfs_open_to_lock otl = { .lock_owner = { .clientid = server->nfs4_state->cl_clientid, }, }; - status = -ENOMEM; - lsp = nfs4_alloc_lock_state(state, request->fl_owner); - if (!lsp) - goto out; + otl.lock_seqid = lsp->ls_seqid; otl.lock_owner.id = lsp->ls_id; memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid)); @@ -1926,25 +2490,62 @@ nfs4_proc_setlk(struct nfs4_state *state /* save the returned stateid. */ if (status == 0) { memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid)); - nfs4_notify_setlk(inode, request, lsp); + if (!reclaim) + nfs4_notify_setlk(state, request, lsp); } else if (status == -NFS4ERR_DENIED) status = -EAGAIN; nfs4_put_lock_state(lsp); -out: + return status; +} + +int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) +{ + return _nfs4_do_setlk(state, F_SETLK64, request, 1); +} + +static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) +{ + struct nfs4_client *clp = state->owner->so_client; + int status; + + down_read(&clp->cl_sem); + down(&state->lock_sema); + status = _nfs4_do_setlk(state, cmd, request, 0); up(&state->lock_sema); - return nfs4_map_errors(status); + if (status == 0) { + /* Note: we always want to sleep here! 
*/ + request->fl_flags |= FL_SLEEP; + if (posix_lock_file_wait(request->fl_file, request) < 0) + printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__); + } + up_read(&clp->cl_sem); + return status; +} + +static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = nfs4_handle_exception(NFS_SERVER(state->inode), + _nfs4_proc_setlk(state, cmd, request), + &exception); + } while (exception.retry); + return err; } static int nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) { + struct nfs_open_context *ctx; struct nfs4_state *state; unsigned long timeout = NFS4_LOCK_MINTIMEOUT; int status; /* verify open state */ - state = (struct nfs4_state *)filp->private_data; - BUG_ON(!state); + ctx = (struct nfs_open_context *)filp->private_data; + state = ctx->state; if (request->fl_start < 0 || request->fl_end < 0) return -EINVAL; @@ -2004,8 +2605,6 @@ struct nfs_rpc_ops nfs_v4_clientops = { .commit_setup = nfs4_proc_commit_setup, .file_open = nfs4_proc_file_open, .file_release = nfs4_proc_file_release, - .request_init = nfs4_request_init, - .request_compatible = nfs4_request_compatible, .lock = nfs4_proc_lock, }; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/nfs4state.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs4state.c --- linux-2.6.7/fs/nfs/nfs4state.c 2004-07-02 18:43:21.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs4state.c 2004-07-02 22:20:16.000000000 -0400 @@ -40,11 +40,15 @@ #include #include +#include #include #include #include #include +#include "callback.h" +#include "delegation.h" + #define OPENOWNER_POOL_SIZE 8 static spinlock_t state_spinlock = SPIN_LOCK_UNLOCKED; @@ -93,21 +97,26 @@ nfs4_alloc_client(struct in_addr *addr) { struct nfs4_client *clp; - if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL))) { - memset(clp, 0, sizeof(*clp)); - memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr)); - init_rwsem(&clp->cl_sem); - INIT_LIST_HEAD(&clp->cl_state_owners); - INIT_LIST_HEAD(&clp->cl_unused); - spin_lock_init(&clp->cl_lock); - atomic_set(&clp->cl_count, 1); - INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp); - INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); - INIT_LIST_HEAD(&clp->cl_superblocks); - init_waitqueue_head(&clp->cl_waitq); - rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client"); - clp->cl_state = 1 << NFS4CLNT_NEW; + if (nfs_callback_up() < 0) + return NULL; + if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) { + nfs_callback_down(); + return NULL; } + memset(clp, 0, sizeof(*clp)); + memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr)); + init_rwsem(&clp->cl_sem); + INIT_LIST_HEAD(&clp->cl_delegations); + INIT_LIST_HEAD(&clp->cl_state_owners); + INIT_LIST_HEAD(&clp->cl_unused); + spin_lock_init(&clp->cl_lock); + atomic_set(&clp->cl_count, 1); + INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp); + INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); + INIT_LIST_HEAD(&clp->cl_superblocks); + init_waitqueue_head(&clp->cl_waitq); + rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client"); + clp->cl_state = 1 << NFS4CLNT_OK; return clp; } @@ -130,25 +139,52 @@ nfs4_free_client(struct nfs4_client *clp if (clp->cl_rpcclient) rpc_shutdown_client(clp->cl_rpcclient); kfree(clp); + nfs_callback_down(); +} + +static struct nfs4_client *__nfs4_find_client(struct in_addr *addr) +{ + struct nfs4_client *clp; + list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) { + if (memcmp(&clp->cl_addr, addr, 
sizeof(clp->cl_addr)) == 0) { + atomic_inc(&clp->cl_count); + return clp; + } + } + return NULL; +} + +struct nfs4_client *nfs4_find_client(struct in_addr *addr) +{ + struct nfs4_client *clp; + spin_lock(&state_spinlock); + clp = __nfs4_find_client(addr); + spin_unlock(&state_spinlock); + return clp; } struct nfs4_client * nfs4_get_client(struct in_addr *addr) { - struct nfs4_client *new, *clp = NULL; + struct nfs4_client *clp, *new = NULL; - new = nfs4_alloc_client(addr); spin_lock(&state_spinlock); - list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) { - if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) - goto found; + for (;;) { + clp = __nfs4_find_client(addr); + if (clp != NULL) + break; + clp = new; + if (clp != NULL) { + list_add(&clp->cl_servers, &nfs4_clientid_list); + new = NULL; + break; + } + spin_unlock(&state_spinlock); + new = nfs4_alloc_client(addr); + spin_lock(&state_spinlock); + if (new == NULL) + break; } - if (new) - list_add(&new->cl_servers, &nfs4_clientid_list); - spin_unlock(&state_spinlock); - return new; -found: - atomic_inc(&clp->cl_count); spin_unlock(&state_spinlock); if (new) nfs4_free_client(new); @@ -169,6 +205,16 @@ nfs4_put_client(struct nfs4_client *clp) nfs4_free_client(clp); } +int nfs4_init_client(struct nfs4_client *clp) +{ + int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport); + if (status == 0) + status = nfs4_proc_setclientid_confirm(clp); + if (status == 0) + nfs4_schedule_state_renewal(clp); + return status; +} + u32 nfs4_alloc_lockowner_id(struct nfs4_client *clp) { @@ -185,7 +231,6 @@ nfs4_client_grab_unused(struct nfs4_clie atomic_inc(&sp->so_count); sp->so_cred = cred; list_move(&sp->so_list, &clp->cl_state_owners); - sp->so_generation = clp->cl_generation; clp->cl_nunused--; } return sp; @@ -224,6 +269,7 @@ nfs4_alloc_state_owner(void) init_MUTEX(&sp->so_sema); sp->so_seqid = 0; /* arbitrary */ INIT_LIST_HEAD(&sp->so_states); + INIT_LIST_HEAD(&sp->so_delegations); atomic_set(&sp->so_count, 1); return sp; } @@ -237,8 +283,11 @@ nfs4_unhash_state_owner(struct nfs4_stat spin_unlock(&clp->cl_lock); } -struct nfs4_state_owner * -nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred) +/* + * Note: must be called with clp->cl_sem held in order to prevent races + * with reboot recovery! + */ +struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred) { struct nfs4_client *clp = server->nfs4_state; struct nfs4_state_owner *sp, *new; @@ -254,23 +303,23 @@ nfs4_get_state_owner(struct nfs_server * new->so_client = clp; new->so_id = nfs4_alloc_lockowner_id(clp); new->so_cred = cred; - new->so_generation = clp->cl_generation; sp = new; new = NULL; } spin_unlock(&clp->cl_lock); if (new) kfree(new); - if (sp) { - if (!test_bit(NFS4CLNT_OK, &clp->cl_state)) - nfs4_wait_clnt_recover(server->client, clp); - } else - put_rpccred(cred); - return sp; + if (sp != NULL) + return sp; + put_rpccred(cred); + return NULL; } -void -nfs4_put_state_owner(struct nfs4_state_owner *sp) +/* + * Must be called with clp->cl_sem held in order to avoid races + * with state recovery... 
+ */ +void nfs4_put_state_owner(struct nfs4_state_owner *sp) { struct nfs4_client *clp = sp->so_client; struct rpc_cred *cred = sp->so_cred; @@ -330,8 +379,6 @@ __nfs4_find_state(struct inode *inode, s continue; if ((state->state & mode) != mode) continue; - /* Add the state to the head of the inode's list */ - list_move(&state->inode_states, &nfsi->open_states); atomic_inc(&state->count); if (mode & FMODE_READ) state->nreaders++; @@ -353,8 +400,6 @@ __nfs4_find_state_byowner(struct inode * if (state->nreaders == 0 && state->nwriters == 0) continue; if (state->owner == owner) { - /* Add the state to the head of the inode's list */ - list_move(&state->inode_states, &nfsi->open_states); atomic_inc(&state->count); return state; } @@ -411,51 +456,40 @@ out: return state; } -static void -__nfs4_put_open_state(struct nfs4_state *state) +/* + * Beware! Caller must be holding exactly one + * reference to clp->cl_sem and owner->so_sema! + */ +void nfs4_put_open_state(struct nfs4_state *state) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; - int status = 0; - if (!atomic_dec_and_lock(&state->count, &inode->i_lock)) { - up(&owner->so_sema); + if (!atomic_dec_and_lock(&state->count, &inode->i_lock)) return; - } if (!list_empty(&state->inode_states)) list_del(&state->inode_states); spin_unlock(&inode->i_lock); list_del(&state->open_states); - if (state->state != 0) { - do { - status = nfs4_do_close(inode, state); - if (!status) - break; - up(&owner->so_sema); - status = nfs4_handle_error(NFS_SERVER(inode), status); - down(&owner->so_sema); - } while (!status); - } - up(&owner->so_sema); + BUG_ON (state->state != 0); nfs4_free_open_state(state); nfs4_put_state_owner(owner); } -void -nfs4_put_open_state(struct nfs4_state *state) -{ - down(&state->owner->so_sema); - __nfs4_put_open_state(state); -} - -void -nfs4_close_state(struct nfs4_state *state, mode_t mode) +/* + * Beware! Caller must be holding no references to clp->cl_sem! + * or owner->so_sema! 
+ */ +void nfs4_close_state(struct nfs4_state *state, mode_t mode) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; + struct nfs4_client *clp = owner->so_client; int newstate; int status = 0; + atomic_inc(&owner->so_count); + down_read(&clp->cl_sem); down(&owner->so_sema); /* Protect against nfs4_find_state() */ spin_lock(&inode->i_lock); @@ -466,29 +500,24 @@ nfs4_close_state(struct nfs4_state *stat if (state->nwriters == 0 && state->nreaders == 0) list_del_init(&state->inode_states); spin_unlock(&inode->i_lock); - do { - newstate = 0; - if (state->state == 0) - break; + newstate = 0; + if (state->state != 0) { if (state->nreaders) newstate |= FMODE_READ; if (state->nwriters) newstate |= FMODE_WRITE; if (state->state == newstate) - break; + goto out; if (newstate != 0) status = nfs4_do_downgrade(inode, state, newstate); else status = nfs4_do_close(inode, state); - if (!status) { - state->state = newstate; - break; - } - up(&owner->so_sema); - status = nfs4_handle_error(NFS_SERVER(inode), status); - down(&owner->so_sema); - } while (!status); - __nfs4_put_open_state(state); + } +out: + nfs4_put_open_state(state); + up(&owner->so_sema); + nfs4_put_state_owner(owner); + up_read(&clp->cl_sem); } /* @@ -524,8 +553,7 @@ nfs4_find_lock_state(struct nfs4_state * * * The caller must be holding state->lock_sema */ -struct nfs4_lock_state * -nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) +static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) { struct nfs4_lock_state *lsp; struct nfs4_client *clp = state->owner->so_client; @@ -533,12 +561,12 @@ nfs4_alloc_lock_state(struct nfs4_state lsp = kmalloc(sizeof(*lsp), GFP_KERNEL); if (lsp == NULL) return NULL; + lsp->ls_flags = 0; lsp->ls_seqid = 0; /* arbitrary */ lsp->ls_id = -1; memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data)); atomic_set(&lsp->ls_count, 1); lsp->ls_owner = fl_owner; - lsp->ls_parent = state; INIT_LIST_HEAD(&lsp->ls_locks); spin_lock(&clp->cl_lock); lsp->ls_id = nfs4_alloc_lockowner_id(clp); @@ -547,6 +575,22 @@ nfs4_alloc_lock_state(struct nfs4_state } /* + * Return a compatible lock_state. If no initialized lock_state structure + * exists, return an uninitialized one. + * + * The caller must be holding state->lock_sema and clp->cl_sem + */ +struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) +{ + struct nfs4_lock_state * lsp; + + lsp = nfs4_find_lock_state(state, owner); + if (lsp == NULL) + lsp = nfs4_alloc_lock_state(state, owner); + return lsp; +} + +/* * Byte-range lock aware utility to initialize the stateid of read/write * requests. */ @@ -567,13 +611,14 @@ nfs4_copy_stateid(nfs4_stateid *dst, str } /* -* Called with state->lock_sema held. +* Called with state->lock_sema and clp->cl_sem held. */ -void -nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp) +void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp) { - if (status == NFS_OK || seqid_mutating_err(-status)) + if (status == NFS_OK || seqid_mutating_err(-status)) { lsp->ls_seqid++; + lsp->ls_flags |= NFS_LOCK_INITIALIZED; + } } /* @@ -597,13 +642,11 @@ nfs4_check_unlock(struct file_lock *fl, /* * Post an initialized lock_state on the state->lock_states list. 
*/ -void -nfs4_notify_setlk(struct inode *inode, struct file_lock *request, struct nfs4_lock_state *lsp) +void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp) { - struct nfs4_state *state = lsp->ls_parent; - if (!list_empty(&lsp->ls_locks)) return; + atomic_inc(&lsp->ls_count); write_lock(&state->state_lock); list_add(&lsp->ls_locks, &state->lock_states); set_bit(LK_STATE_IN_USE, &state->flags); @@ -620,9 +663,9 @@ nfs4_notify_setlk(struct inode *inode, s * */ void -nfs4_notify_unlck(struct inode *inode, struct file_lock *request, struct nfs4_lock_state *lsp) +nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp) { - struct nfs4_state *state = lsp->ls_parent; + struct inode *inode = state->inode; struct file_lock *fl; for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { @@ -640,6 +683,7 @@ nfs4_notify_unlck(struct inode *inode, s if (list_empty(&state->lock_states)) clear_bit(LK_STATE_IN_USE, &state->flags); write_unlock(&state->state_lock); + nfs4_put_lock_state(lsp); } /* @@ -651,20 +695,18 @@ nfs4_put_lock_state(struct nfs4_lock_sta { if (!atomic_dec_and_test(&lsp->ls_count)) return; - if (!list_empty(&lsp->ls_locks)) - return; + BUG_ON (!list_empty(&lsp->ls_locks)); kfree(lsp); } /* -* Called with sp->so_sema held. +* Called with sp->so_sema and clp->cl_sem held. * * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or * failed with a seqid incrementing error - * see comments nfs_fs.h:seqid_mutating_error() */ -void -nfs4_increment_seqid(int status, struct nfs4_state_owner *sp) +void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp) { if (status == NFS_OK || seqid_mutating_err(-status)) sp->so_seqid++; @@ -693,21 +735,14 @@ nfs4_recover_state(void *data) init_completion(&args.complete); - down_read(&clp->cl_sem); - if (test_and_set_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state)) - goto out_failed; if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0) goto out_failed_clear; wait_for_completion(&args.complete); return; out_failed_clear: - smp_mb__before_clear_bit(); - clear_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state); - smp_mb__after_clear_bit(); + set_bit(NFS4CLNT_OK, &clp->cl_state); wake_up_all(&clp->cl_waitq); rpc_wake_up(&clp->cl_rpcwaitq); -out_failed: - up_read(&clp->cl_sem); } /* @@ -718,24 +753,66 @@ nfs4_schedule_state_recovery(struct nfs4 { if (!clp) return; - smp_mb__before_clear_bit(); - clear_bit(NFS4CLNT_OK, &clp->cl_state); - smp_mb__after_clear_bit(); - schedule_work(&clp->cl_recoverd); + if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state)) + schedule_work(&clp->cl_recoverd); } -static int -nfs4_reclaim_open_state(struct nfs4_state_owner *sp) +static int nfs4_reclaim_locks(struct nfs4_state *state) +{ + struct inode *inode = state->inode; + struct file_lock *fl; + int status = 0; + + for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) { + if (!(fl->fl_flags & FL_POSIX)) + continue; + if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state) + continue; + status = nfs4_lock_reclaim(state, fl); + if (status >= 0) + continue; + switch (status) { + default: + printk(KERN_ERR "%s: unhandled error %d. 
Zeroing state\n", + __FUNCTION__, status); + case -NFS4ERR_EXPIRED: + case -NFS4ERR_NO_GRACE: + case -NFS4ERR_RECLAIM_BAD: + case -NFS4ERR_RECLAIM_CONFLICT: + /* kill_proc(fl->fl_owner, SIGLOST, 1); */ + break; + case -NFS4ERR_STALE_CLIENTID: + goto out_err; + } + } + return 0; +out_err: + return status; +} + +static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp) { struct nfs4_state *state; + struct nfs4_lock_state *lock; int status = 0; list_for_each_entry(state, &sp->so_states, open_states) { if (state->state == 0) continue; status = nfs4_open_reclaim(sp, state); - if (status >= 0) + list_for_each_entry(lock, &state->lock_states, ls_locks) + lock->ls_flags &= ~NFS_LOCK_INITIALIZED; + if (status >= 0) { + status = nfs4_reclaim_locks(state); + if (status < 0) + goto out_err; + list_for_each_entry(lock, &state->lock_states, ls_locks) { + if (!(lock->ls_flags & NFS_LOCK_INITIALIZED)) + printk("%s: Lock reclaim failed!\n", + __FUNCTION__); + } continue; + } switch (status) { default: printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n", @@ -762,75 +839,55 @@ out_err: return status; } -static int -reclaimer(void *ptr) +static int reclaimer(void *ptr) { struct reclaimer_args *args = (struct reclaimer_args *)ptr; struct nfs4_client *clp = args->clp; struct nfs4_state_owner *sp; - int generation; int status; daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr)); allow_signal(SIGKILL); + atomic_inc(&clp->cl_count); complete(&args->complete); + /* Ensure exclusive access to NFSv4 state */ + lock_kernel(); + down_write(&clp->cl_sem); /* Are there any NFS mounts out there? */ if (list_empty(&clp->cl_superblocks)) goto out; - if (!test_bit(NFS4CLNT_NEW, &clp->cl_state)) { - status = nfs4_proc_renew(clp); - if (status == 0) { - set_bit(NFS4CLNT_OK, &clp->cl_state); - goto out; - } - } - status = nfs4_proc_setclientid(clp, 0, 0); - if (status) - goto out_error; - status = nfs4_proc_setclientid_confirm(clp); +restart_loop: + status = nfs4_proc_renew(clp); + if (status == 0) + goto out; + status = nfs4_init_client(clp); if (status) goto out_error; - generation = ++(clp->cl_generation); - clear_bit(NFS4CLNT_NEW, &clp->cl_state); - set_bit(NFS4CLNT_OK, &clp->cl_state); - up_read(&clp->cl_sem); - nfs4_schedule_state_renewal(clp); -restart_loop: - spin_lock(&clp->cl_lock); + /* Mark all delagations for reclaim */ + nfs_delegation_mark_reclaim(clp); + /* Note: list is protected by exclusive lock on cl->cl_sem */ list_for_each_entry(sp, &clp->cl_state_owners, so_list) { - if (sp->so_generation - generation >= 0) - continue; - atomic_inc(&sp->so_count); - spin_unlock(&clp->cl_lock); - down(&sp->so_sema); - if (sp->so_generation - generation < 0) { - smp_rmb(); - sp->so_generation = clp->cl_generation; - status = nfs4_reclaim_open_state(sp); - } - up(&sp->so_sema); - nfs4_put_state_owner(sp); + status = nfs4_reclaim_open_state(sp); if (status < 0) { if (status == -NFS4ERR_STALE_CLIENTID) - nfs4_schedule_state_recovery(clp); - goto out; + goto restart_loop; + goto out_error; } - goto restart_loop; } - spin_unlock(&clp->cl_lock); + nfs_delegation_reap_unclaimed(clp); out: - smp_mb__before_clear_bit(); - clear_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state); - smp_mb__after_clear_bit(); + set_bit(NFS4CLNT_OK, &clp->cl_state); + up_write(&clp->cl_sem); + unlock_kernel(); wake_up_all(&clp->cl_waitq); rpc_wake_up(&clp->cl_rpcwaitq); + nfs4_put_client(clp); return 0; out_error: - printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u\n", - NIPQUAD(clp->cl_addr.s_addr)); - 
up_read(&clp->cl_sem); + printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n", + NIPQUAD(clp->cl_addr.s_addr), -status); goto out; } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/nfs4xdr.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs4xdr.c --- linux-2.6.7/fs/nfs/nfs4xdr.c 2004-07-02 18:44:13.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfs4xdr.c 2004-07-02 22:19:47.000000000 -0400 @@ -84,6 +84,7 @@ static int nfs_stat_to_errno(int); ((3+NFS4_FHSIZE) >> 2)) #define encode_getattr_maxsz (op_encode_hdr_maxsz + 3) #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) +#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) #define nfs4_fattr_bitmap_maxsz (36 + 2 * nfs4_name_maxsz) #define decode_getattr_maxsz (op_decode_hdr_maxsz + 3 + \ nfs4_fattr_bitmap_maxsz) @@ -118,10 +119,17 @@ static int nfs_stat_to_errno(int); #define encode_link_maxsz (op_encode_hdr_maxsz + \ nfs4_name_maxsz) #define decode_link_maxsz (op_decode_hdr_maxsz + 5) +#define encode_symlink_maxsz (op_encode_hdr_maxsz + \ + 1 + nfs4_name_maxsz + \ + nfs4_path_maxsz + \ + nfs4_fattr_bitmap_maxsz) +#define decode_symlink_maxsz (op_decode_hdr_maxsz + 8) #define encode_create_maxsz (op_encode_hdr_maxsz + \ - 2 + 2 * nfs4_name_maxsz + \ + 2 + nfs4_name_maxsz + \ nfs4_fattr_bitmap_maxsz) #define decode_create_maxsz (op_decode_hdr_maxsz + 8) +#define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) +#define decode_delegreturn_maxsz (op_decode_hdr_maxsz) #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */ #define NFS4_dec_compound_sz (1024) /* XXX: large enough? */ #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \ @@ -172,16 +180,14 @@ static int nfs_stat_to_errno(int); #define NFS4_dec_open_confirm_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ op_decode_hdr_maxsz + 4) -#define NFS4_enc_open_reclaim_sz (compound_encode_hdr_maxsz + \ +#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ op_encode_hdr_maxsz + \ - 11 + \ - encode_getattr_maxsz) -#define NFS4_dec_open_reclaim_sz (compound_decode_hdr_maxsz + \ + 11) +#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ op_decode_hdr_maxsz + \ - 4 + 5 + 2 + 3 + \ - decode_getattr_maxsz) + 4 + 5 + 2 + 3) #define NFS4_enc_open_downgrade_sz \ (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ @@ -313,6 +319,16 @@ static int nfs_stat_to_errno(int); decode_savefh_maxsz + \ decode_putfh_maxsz + \ decode_link_maxsz) +#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \ + encode_putfh_maxsz + \ + encode_symlink_maxsz + \ + encode_getattr_maxsz + \ + encode_getfh_maxsz) +#define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \ + decode_putfh_maxsz + \ + decode_symlink_maxsz + \ + decode_getattr_maxsz + \ + decode_getfh_maxsz) #define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_create_maxsz + \ @@ -339,6 +355,11 @@ static int nfs_stat_to_errno(int); encode_getattr_maxsz) #define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \ decode_getattr_maxsz) +#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \ + encode_putfh_maxsz + \ + encode_delegreturn_maxsz) +#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \ + decode_delegreturn_maxsz) static struct { unsigned int mode; @@ -388,6 +409,15 @@ struct compound_hdr { BUG_ON(!p); \ } while (0) +static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) +{ + uint32_t *p; + + p = 
xdr_reserve_space(xdr, 4 + len); + BUG_ON(p == NULL); + xdr_encode_opaque(p, str, len); +} + static int encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) { uint32_t *p; @@ -402,6 +432,15 @@ static int encode_compound_hdr(struct xd return 0; } +static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf) +{ + uint32_t *p; + + p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE); + BUG_ON(p == NULL); + xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE); +} + static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server) { char owner_name[IDMAP_NAMESZ]; @@ -742,19 +781,12 @@ static int encode_lookup(struct xdr_stre return 0; } -static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg) +static void encode_share_access(struct xdr_stream *xdr, int open_flags) { - int status; uint32_t *p; - /* - * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, - * owner 4, opentype 4 = 36 - */ - RESERVE_SPACE(36); - WRITE32(OP_OPEN); - WRITE32(arg->seqid); - switch (arg->share_access) { + RESERVE_SPACE(8); + switch (open_flags & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: WRITE32(NFS4_SHARE_ACCESS_READ); break; @@ -767,84 +799,135 @@ static int encode_open(struct xdr_stream default: BUG(); } - WRITE32(0); /* for linux, share_deny = 0 always */ + WRITE32(0); /* for linux, share_deny = 0 always */ +} + +static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg) +{ + uint32_t *p; + /* + * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, + * owner 4 = 32 + */ + RESERVE_SPACE(8); + WRITE32(OP_OPEN); + WRITE32(arg->seqid); + encode_share_access(xdr, arg->open_flags); + RESERVE_SPACE(16); WRITE64(arg->clientid); WRITE32(4); WRITE32(arg->id); - WRITE32(arg->opentype); +} - if (arg->opentype == NFS4_OPEN_CREATE) { - if (arg->createmode == NFS4_CREATE_EXCLUSIVE) { - RESERVE_SPACE(12); - WRITE32(arg->createmode); - WRITEMEM(arg->u.verifier.data, sizeof(arg->u.verifier.data)); - } - else if (arg->u.attrs) { - RESERVE_SPACE(4); - WRITE32(arg->createmode); - if ((status = encode_attrs(xdr, arg->u.attrs, arg->server))) - return status; - } - else { - RESERVE_SPACE(12); - WRITE32(arg->createmode); - WRITE32(0); - WRITE32(0); - } +static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) +{ + uint32_t *p; + + RESERVE_SPACE(4); + switch(arg->open_flags & O_EXCL) { + case 0: + WRITE32(NFS4_CREATE_UNCHECKED); + encode_attrs(xdr, arg->u.attrs, arg->server); + break; + default: + WRITE32(NFS4_CREATE_EXCLUSIVE); + encode_nfs4_verifier(xdr, &arg->u.verifier); } +} - RESERVE_SPACE(8 + arg->name->len); - WRITE32(NFS4_OPEN_CLAIM_NULL); - WRITE32(arg->name->len); - WRITEMEM(arg->name->name, arg->name->len); +static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg) +{ + uint32_t *p; - return 0; + RESERVE_SPACE(4); + switch (arg->open_flags & O_CREAT) { + case 0: + WRITE32(NFS4_OPEN_NOCREATE); + break; + default: + BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL); + WRITE32(NFS4_OPEN_CREATE); + encode_createmode(xdr, arg); + } } -static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg) +static inline void encode_delegation_type(struct xdr_stream *xdr, int delegation_type) { uint32_t *p; - RESERVE_SPACE(8+sizeof(arg->stateid.data)); - WRITE32(OP_OPEN_CONFIRM); - WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); - WRITE32(arg->seqid); + RESERVE_SPACE(4); + 
switch (delegation_type) { + case 0: + WRITE32(NFS4_OPEN_DELEGATE_NONE); + break; + case FMODE_READ: + WRITE32(NFS4_OPEN_DELEGATE_READ); + break; + case FMODE_WRITE|FMODE_READ: + WRITE32(NFS4_OPEN_DELEGATE_WRITE); + break; + default: + BUG(); + } +} - return 0; +static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *name) +{ + uint32_t *p; + + RESERVE_SPACE(4); + WRITE32(NFS4_OPEN_CLAIM_NULL); + encode_string(xdr, name->len, name->name); } +static inline void encode_claim_previous(struct xdr_stream *xdr, int type) +{ + uint32_t *p; + + RESERVE_SPACE(4); + WRITE32(NFS4_OPEN_CLAIM_PREVIOUS); + encode_delegation_type(xdr, type); +} -static int encode_open_reclaim(struct xdr_stream *xdr, const struct nfs_open_reclaimargs *arg) +static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid) { uint32_t *p; - /* - * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, - * owner 4, opentype 4, claim 4, delegation_type 4 = 44 - */ - RESERVE_SPACE(44); - WRITE32(OP_OPEN); - WRITE32(arg->seqid); - switch (arg->share_access) { - case FMODE_READ: - WRITE32(NFS4_SHARE_ACCESS_READ); + RESERVE_SPACE(4+sizeof(stateid->data)); + WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR); + WRITEMEM(stateid->data, sizeof(stateid->data)); + encode_string(xdr, name->len, name->name); +} + +static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg) +{ + encode_openhdr(xdr, arg); + encode_opentype(xdr, arg); + switch (arg->claim) { + case NFS4_OPEN_CLAIM_NULL: + encode_claim_null(xdr, arg->name); break; - case FMODE_WRITE: - WRITE32(NFS4_SHARE_ACCESS_WRITE); + case NFS4_OPEN_CLAIM_PREVIOUS: + encode_claim_previous(xdr, arg->u.delegation_type); break; - case FMODE_READ|FMODE_WRITE: - WRITE32(NFS4_SHARE_ACCESS_BOTH); + case NFS4_OPEN_CLAIM_DELEGATE_CUR: + encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation); break; default: BUG(); } - WRITE32(0); /* for linux, share_deny = 0 always */ - WRITE64(arg->clientid); - WRITE32(4); - WRITE32(arg->id); - WRITE32(NFS4_OPEN_NOCREATE); - WRITE32(NFS4_OPEN_CLAIM_PREVIOUS); - WRITE32(NFS4_OPEN_DELEGATE_NONE); + return 0; +} + +static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg) +{ + uint32_t *p; + + RESERVE_SPACE(8+sizeof(arg->stateid.data)); + WRITE32(OP_OPEN_CONFIRM); + WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); + WRITE32(arg->seqid); + return 0; } @@ -852,14 +935,11 @@ static int encode_open_downgrade(struct { uint32_t *p; - RESERVE_SPACE(16+sizeof(arg->stateid.data)); + RESERVE_SPACE(8+sizeof(arg->stateid.data)); WRITE32(OP_OPEN_DOWNGRADE); WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); WRITE32(arg->seqid); - WRITE32(arg->share_access); - /* No deny modes */ - WRITE32(0); - + encode_share_access(xdr, arg->open_flags); return 0; } @@ -887,15 +967,15 @@ static int encode_putrootfh(struct xdr_s return 0; } -static void encode_stateid(struct xdr_stream *xdr, struct nfs4_state *state, fl_owner_t lockowner) +static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx) { extern nfs4_stateid zero_stateid; nfs4_stateid stateid; uint32_t *p; RESERVE_SPACE(16); - if (state != NULL) { - nfs4_copy_stateid(&stateid, state, lockowner); + if (ctx->state != NULL) { + nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner); WRITEMEM(stateid.data, sizeof(stateid.data)); } else WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data)); @@ -908,7 +988,7 @@ static int encode_read(struct xdr_stream 
RESERVE_SPACE(4); WRITE32(OP_READ); - encode_stateid(xdr, args->state, args->lockowner); + encode_stateid(xdr, args->context); RESERVE_SPACE(12); WRITE64(args->offset); @@ -927,7 +1007,7 @@ static int encode_readdir(struct xdr_str WRITE32(OP_READDIR); WRITE64(readdir->cookie); WRITEMEM(readdir->verifier.data, sizeof(readdir->verifier.data)); - WRITE32(readdir->count >> 5); /* meaningless "dircount" field */ + WRITE32(readdir->count >> 1); /* We're not doing readdirplus */ WRITE32(readdir->count); WRITE32(2); WRITE32(FATTR4_WORD0_FILEID); @@ -1031,26 +1111,18 @@ static int encode_setattr(struct xdr_str static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid) { - uint32_t total_len; - uint32_t len1, len2, len3; uint32_t *p; - len1 = strlen(setclientid->sc_name); - len2 = strlen(setclientid->sc_netid); - len3 = strlen(setclientid->sc_uaddr); - total_len = XDR_QUADLEN(len1) + XDR_QUADLEN(len2) + XDR_QUADLEN(len3); - total_len = (total_len << 2) + 24 + sizeof(setclientid->sc_verifier.data); - - RESERVE_SPACE(total_len); + RESERVE_SPACE(4 + sizeof(setclientid->sc_verifier->data)); WRITE32(OP_SETCLIENTID); - WRITEMEM(setclientid->sc_verifier.data, sizeof(setclientid->sc_verifier.data)); - WRITE32(len1); - WRITEMEM(setclientid->sc_name, len1); + WRITEMEM(setclientid->sc_verifier->data, sizeof(setclientid->sc_verifier->data)); + + encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); + RESERVE_SPACE(4); WRITE32(setclientid->sc_prog); - WRITE32(len2); - WRITEMEM(setclientid->sc_netid, len2); - WRITE32(len3); - WRITEMEM(setclientid->sc_uaddr, len3); + encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid); + encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr); + RESERVE_SPACE(4); WRITE32(setclientid->sc_cb_ident); return 0; @@ -1075,7 +1147,7 @@ static int encode_write(struct xdr_strea RESERVE_SPACE(4); WRITE32(OP_WRITE); - encode_stateid(xdr, args->state, args->lockowner); + encode_stateid(xdr, args->context); RESERVE_SPACE(16); WRITE64(args->offset); @@ -1086,6 +1158,18 @@ static int encode_write(struct xdr_strea return 0; } + +static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid) +{ + uint32_t *p; + + RESERVE_SPACE(20); + + WRITE32(OP_DELEGRETURN); + WRITEMEM(stateid->data, sizeof(stateid->data)); + return 0; + +} /* * END OF "GENERIC" ENCODE ROUTINES. */ @@ -1244,6 +1328,14 @@ out: } /* + * Encode SYMLINK request + */ +static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, uint32_t *p, const struct nfs4_create_arg *args) +{ + return nfs4_xdr_enc_create(req, p, args); +} + +/* * Encode GETATTR request */ static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, uint32_t *p, const struct nfs4_getattr_arg *args) @@ -1331,13 +1423,13 @@ out: } /* - * Encode an OPEN request + * Encode an OPEN request with no attributes. 
*/ -static int nfs4_xdr_enc_open_reclaim(struct rpc_rqst *req, uint32_t *p, struct nfs_open_reclaimargs *args) +static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nfs_openargs *args) { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 2, }; int status; @@ -1346,10 +1438,7 @@ static int nfs4_xdr_enc_open_reclaim(str status = encode_putfh(&xdr, args->fh); if (status) goto out; - status = encode_open_reclaim(&xdr, args); - if (status) - goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_open(&xdr, args); out: return status; } @@ -1716,6 +1805,24 @@ static int nfs4_xdr_enc_setclientid_conf } /* + * DELEGRETURN request + */ +static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, uint32_t *p, const struct nfs4_delegreturnargs *args) +{ + struct xdr_stream xdr; + struct compound_hdr hdr = { + .nops = 2, + }; + int status; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_compound_hdr(&xdr, &hdr); + if ((status = encode_putfh(&xdr, args->fhandle)) == 0) + status = encode_delegreturn(&xdr, args->stateid); + return status; +} + +/* * START OF "GENERIC" DECODE ROUTINES. * These may look a little ugly since they are imported from a "generic" * set of XDR encode/decode routines which are intended to be shared by @@ -1749,6 +1856,17 @@ static int nfs4_xdr_enc_setclientid_conf } \ } while (0) +static int decode_opaque_inline(struct xdr_stream *xdr, uint32_t *len, char **string) +{ + uint32_t *p; + + READ_BUF(4); + READ32(*len); + READ_BUF(*len); + *string = (char *)p; + return 0; +} + static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) { uint32_t *p; @@ -1785,6 +1903,17 @@ static int decode_op_hdr(struct xdr_stre return 0; } +/* Dummy routine */ +static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs4_client *clp) +{ + uint32_t *p; + uint32_t strlen; + char *str; + + READ_BUF(12); + return decode_opaque_inline(xdr, &strlen, &str); +} + static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap) { uint32_t bmlen, *p; @@ -2717,10 +2846,56 @@ static int decode_lookup(struct xdr_stre return decode_op_hdr(xdr, OP_LOOKUP); } +/* This is too sick! 
*/ +static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize) +{ + uint32_t *p; + uint32_t limit_type, nblocks, blocksize; + + READ_BUF(12); + READ32(limit_type); + switch (limit_type) { + case 1: + READ64(*maxsize); + break; + case 2: + READ32(nblocks); + READ32(blocksize); + *maxsize = (uint64_t)nblocks * (uint64_t)blocksize; + } + return 0; +} + +static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) +{ + uint32_t *p; + uint32_t delegation_type; + + READ_BUF(4); + READ32(delegation_type); + if (delegation_type == NFS4_OPEN_DELEGATE_NONE) { + res->delegation_type = 0; + return 0; + } + READ_BUF(20); + COPYMEM(res->delegation.data, sizeof(res->delegation.data)); + READ32(res->do_recall); + switch (delegation_type) { + case NFS4_OPEN_DELEGATE_READ: + res->delegation_type = FMODE_READ; + break; + case NFS4_OPEN_DELEGATE_WRITE: + res->delegation_type = FMODE_WRITE|FMODE_READ; + if (decode_space_limit(xdr, &res->maxsize) < 0) + return -EIO; + } + return decode_ace(xdr, NULL, res->server->nfs4_state); +} + static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) { uint32_t *p; - uint32_t bmlen, delegation_type; + uint32_t bmlen; int status; status = decode_op_hdr(xdr, OP_OPEN); @@ -2737,11 +2912,9 @@ static int decode_open(struct xdr_stream if (bmlen > 10) goto xdr_error; - READ_BUF((bmlen << 2) + 4); + READ_BUF(bmlen << 2); p += bmlen; - READ32(delegation_type); - if (delegation_type == NFS4_OPEN_DELEGATE_NONE) - return 0; + return decode_delegation(xdr, res); xdr_error: printk(KERN_NOTICE "%s: xdr error!\n", __FUNCTION__); return -EIO; @@ -3048,6 +3221,11 @@ static int decode_write(struct xdr_strea return 0; } +static int decode_delegreturn(struct xdr_stream *xdr) +{ + return decode_op_hdr(xdr, OP_DELEGRETURN); +} + /* * Decode OPEN_DOWNGRADE response */ @@ -3222,6 +3400,14 @@ out: } /* + * Decode SYMLINK response + */ +static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_create_res *res) +{ + return nfs4_xdr_dec_create(rqstp, p, res); +} + +/* * Decode GETATTR response */ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_getattr_res *res) @@ -3314,9 +3500,9 @@ out: } /* - * Decode OPEN_RECLAIM response + * Decode OPEN response */ -static int nfs4_xdr_dec_open_reclaim(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_openres *res) +static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_openres *res) { struct xdr_stream xdr; struct compound_hdr hdr; @@ -3330,9 +3516,6 @@ static int nfs4_xdr_dec_open_reclaim(str if (status) goto out; status = decode_open(&xdr, res); - if (status) - goto out; - status = decode_getfattr(&xdr, res->f_attr, res->server); out: return status; } @@ -3665,6 +3848,25 @@ static int nfs4_xdr_dec_setclientid_conf return status; } +/* + * DELEGRETURN request + */ +static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, uint32_t *p, void *dummy) +{ + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (status == 0) { + status = decode_putfh(&xdr); + if (status == 0) + status = decode_delegreturn(&xdr); + } + return status; +} + uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus) { uint32_t len; @@ -3756,7 +3958,7 @@ nfs_stat_to_errno(int stat) if (nfs_errtbl[i].stat == stat) return nfs_errtbl[i].errno; } - if (stat < 0) { + if (stat <= 10000 || stat > 10100) { /* The server is looney tunes. 
*/ return ESERVERFAULT; } @@ -3786,7 +3988,7 @@ struct rpc_procinfo nfs4_procedures[] = PROC(COMMIT, enc_commit, dec_commit), PROC(OPEN, enc_open, dec_open), PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm), - PROC(OPEN_RECLAIM, enc_open_reclaim, dec_open_reclaim), + PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr), PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade), PROC(CLOSE, enc_close, dec_close), PROC(SETATTR, enc_setattr, dec_setattr), @@ -3804,12 +4006,14 @@ struct rpc_procinfo nfs4_procedures[] = PROC(REMOVE, enc_remove, dec_remove), PROC(RENAME, enc_rename, dec_rename), PROC(LINK, enc_link, dec_link), + PROC(SYMLINK, enc_symlink, dec_symlink), PROC(CREATE, enc_create, dec_create), PROC(PATHCONF, enc_pathconf, dec_pathconf), PROC(STATFS, enc_statfs, dec_statfs), PROC(READLINK, enc_readlink, dec_readlink), PROC(READDIR, enc_readdir, dec_readdir), PROC(SERVER_CAPS, enc_server_caps, dec_server_caps), + PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn), }; struct rpc_version nfs_version4 = { diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/nfsroot.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfsroot.c --- linux-2.6.7/fs/nfs/nfsroot.c 2004-07-02 18:43:51.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/nfsroot.c 2004-07-02 22:18:37.000000000 -0400 @@ -495,10 +495,8 @@ static int __init root_nfs_get_handle(vo if (status < 0) printk(KERN_ERR "Root-NFS: Server returned error %d " "while mounting %s\n", status, nfs_path); - else { - nfs_data.root.size = fh.size; - memcpy(nfs_data.root.data, fh.data, fh.size); - } + else + nfs_copy_fh(nfs_data.root, fh); return status; } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/pagelist.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/pagelist.c --- linux-2.6.7/fs/nfs/pagelist.c 2004-07-02 18:44:05.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/pagelist.c 2004-07-02 22:19:13.000000000 -0400 @@ -21,11 +21,6 @@ #define NFS_PARANOIA 1 -/* - * Spinlock - */ -spinlock_t nfs_wreq_lock = SPIN_LOCK_UNLOCKED; - static kmem_cache_t *nfs_page_cachep; static inline struct nfs_page * @@ -36,7 +31,6 @@ nfs_page_alloc(void) if (p) { memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->wb_list); - init_waitqueue_head(&p->wb_wait); } return p; } @@ -62,7 +56,7 @@ nfs_page_free(struct nfs_page *p) * User should ensure it is safe to sleep in this function. */ struct nfs_page * -nfs_create_request(struct file *file, struct inode *inode, +nfs_create_request(struct nfs_open_context *ctx, struct inode *inode, struct page *page, unsigned int offset, unsigned int count) { @@ -94,33 +88,38 @@ nfs_create_request(struct file *file, st req->wb_offset = offset; req->wb_pgbase = offset; req->wb_bytes = count; - req->wb_inode = inode; req->wb_count = 1; - server->rpc_ops->request_init(req, file); + req->wb_context = get_nfs_open_context(ctx); return req; } /** + * nfs_unlock_request - Unlock request and wake up sleepers. + * @req: + */ +void nfs_unlock_request(struct nfs_page *req) +{ + if (!NFS_WBACK_BUSY(req)) { + printk(KERN_ERR "NFS: Invalid unlock attempted\n"); + BUG(); + } + smp_mb__before_clear_bit(); + clear_bit(PG_BUSY, &req->wb_flags); + smp_mb__after_clear_bit(); + wake_up_all(&req->wb_context->waitq); + nfs_release_request(req); +} + +/** * nfs_clear_request - Free up all resources allocated to the request * @req: * - * Release all resources associated with a write request after it + * Release page resources associated with a write request after it * has completed. 
*/ void nfs_clear_request(struct nfs_page *req) { - if (req->wb_state) - req->wb_state = NULL; - /* Release struct file or cached credential */ - if (req->wb_file) { - fput(req->wb_file); - req->wb_file = NULL; - } - if (req->wb_cred) { - put_rpccred(req->wb_cred); - req->wb_cred = NULL; - } if (req->wb_page) { page_cache_release(req->wb_page); req->wb_page = NULL; @@ -137,12 +136,14 @@ void nfs_clear_request(struct nfs_page * void nfs_release_request(struct nfs_page *req) { - spin_lock(&nfs_wreq_lock); + struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); + + spin_lock(&nfsi->req_lock); if (--req->wb_count) { - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); return; } - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); #ifdef NFS_PARANOIA BUG_ON (!list_empty(&req->wb_list)); @@ -151,6 +152,7 @@ nfs_release_request(struct nfs_page *req /* Release struct file or cached credential */ nfs_clear_request(req); + put_nfs_open_context(req->wb_context); nfs_page_free(req); } @@ -194,12 +196,12 @@ nfs_list_add_request(struct nfs_page *re int nfs_wait_on_request(struct nfs_page *req) { - struct inode *inode = req->wb_inode; + struct inode *inode = req->wb_context->dentry->d_inode; struct rpc_clnt *clnt = NFS_CLIENT(inode); if (!NFS_WBACK_BUSY(req)) return 0; - return nfs_wait_event(clnt, req->wb_wait, !NFS_WBACK_BUSY(req)); + return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req)); } /** @@ -224,7 +226,11 @@ nfs_coalesce_requests(struct list_head * req = nfs_list_entry(head->next); if (prev) { - if (req->wb_cred != prev->wb_cred) + if (req->wb_context->cred != prev->wb_context->cred) + break; + if (req->wb_context->lockowner != prev->wb_context->lockowner) + break; + if (req->wb_context->state != prev->wb_context->state) break; if (req->wb_index != (prev->wb_index + 1)) break; @@ -254,7 +260,8 @@ nfs_coalesce_requests(struct list_head * * If the number of requests is set to 0, the entire address_space * starting at index idx_start, is scanned. * The requests are *not* checked to ensure that they form a contiguous set. - * You must be holding the nfs_wreq_lock when calling this function + * + * Caller must hold the appropriate inode's req_lock. */ int nfs_scan_list(struct list_head *head, struct list_head *dst, diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/proc.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/proc.c --- linux-2.6.7/fs/nfs/proc.c 2004-07-02 18:44:05.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/proc.c 2004-07-02 22:19:13.000000000 -0400 @@ -49,18 +49,6 @@ extern struct rpc_procinfo nfs_procedures[]; -static struct rpc_cred * -nfs_cred(struct inode *inode, struct file *filp) -{ - struct rpc_cred *cred = NULL; - - if (filp) - cred = (struct rpc_cred *)filp->private_data; - if (!cred) - cred = NFS_I(inode)->mm_cred; - return cred; -} - /* * Bare-bones access to getattr: this is for nfs_read_super. 
*/ @@ -167,8 +155,7 @@ nfs_proc_readlink(struct inode *inode, s return status; } -static int -nfs_proc_read(struct nfs_read_data *rdata, struct file *filp) +static int nfs_proc_read(struct nfs_read_data *rdata) { int flags = rdata->flags; struct inode * inode = rdata->inode; @@ -177,15 +164,14 @@ nfs_proc_read(struct nfs_read_data *rdat .rpc_proc = &nfs_procedures[NFSPROC_READ], .rpc_argp = &rdata->args, .rpc_resp = &rdata->res, + .rpc_cred = rdata->cred, }; int status; dprintk("NFS call read %d @ %Ld\n", rdata->args.count, (long long) rdata->args.offset); fattr->valid = 0; - msg.rpc_cred = nfs_cred(inode, filp); status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); - if (status >= 0) { nfs_refresh_inode(inode, fattr); /* Emulate the eof flag, which isn't normally needed in NFSv2 @@ -198,8 +184,7 @@ nfs_proc_read(struct nfs_read_dat return status; } -static int -nfs_proc_write(struct nfs_write_data *wdata, struct file *filp) +static int nfs_proc_write(struct nfs_write_data *wdata) { int flags = wdata->flags; struct inode * inode = wdata->inode; @@ -208,13 +193,13 @@ nfs_proc_write(struct nfs_write_data *wd .rpc_proc = &nfs_procedures[NFSPROC_WRITE], .rpc_argp = &wdata->args, .rpc_resp = &wdata->res, + .rpc_cred = wdata->cred, }; int status; dprintk("NFS call write %d @ %Ld\n", wdata->args.count, (long long) wdata->args.offset); fattr->valid = 0; - msg.rpc_cred = nfs_cred(inode, filp); status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); if (status >= 0) { nfs_refresh_inode(inode, fattr); @@ -400,6 +385,8 @@ nfs_proc_symlink(struct inode *dir, stru }; int status; + if (path->len > NFS2_MAXPATHLEN) + return -ENAMETOOLONG; dprintk("NFS call symlink %s -> %s\n", name->name, path->name); fattr->valid = 0; status = rpc_call(NFS_CLIENT(dir), NFSPROC_SYMLINK, &arg, NULL, 0); @@ -619,27 +606,6 @@ nfs_proc_commit_setup(struct nfs_write_d BUG(); } -/* - * Set up the nfspage struct with the right credentials - */ -static void -nfs_request_init(struct nfs_page *req, struct file *filp) -{ - req->wb_cred = get_rpccred(nfs_cred(req->wb_inode, filp)); -} - -static int -nfs_request_compatible(struct nfs_page *req, struct file *filp, struct page *page) -{ - if (req->wb_file != filp) - return 0; - if (req->wb_page != page) - return 0; - if (req->wb_cred != nfs_file_cred(filp)) - return 0; - return 1; -} - static int nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl) { @@ -680,7 +646,5 @@ struct nfs_rpc_ops nfs_v2_clientops = { .commit_setup = nfs_proc_commit_setup, .file_open = nfs_open, .file_release = nfs_release, - .request_init = nfs_request_init, - .request_compatible = nfs_request_compatible, .lock = nfs_proc_lock, }; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/read.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/read.c --- linux-2.6.7/fs/nfs/read.c 2004-07-02 18:43:20.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/read.c 2004-07-02 22:19:18.000000000 -0400 @@ -91,8 +91,8 @@ int nfs_return_empty_page(struct page *p /* * Read a page synchronously. */ -static int -nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page) +static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode, + struct page *page) { unsigned int rsize = NFS_SERVER(inode)->rsize; unsigned int count = PAGE_CACHE_SIZE; @@ -105,10 +105,11 @@ nfs_readpage_sync(struct file *file, str memset(rdata, 0, sizeof(*rdata)); rdata->flags = (IS_SWAPFILE(inode)? 
NFS_RPC_SWAPFLAGS : 0); + rdata->cred = ctx->cred; rdata->inode = inode; INIT_LIST_HEAD(&rdata->pages); rdata->args.fh = NFS_FH(inode); - rdata->args.lockowner = current->files; + rdata->args.context = ctx; rdata->args.pages = &page; rdata->args.pgbase = 0UL; rdata->args.count = rsize; @@ -134,7 +135,7 @@ nfs_readpage_sync(struct file *file, str rdata->args.count); lock_kernel(); - result = NFS_PROTO(inode)->read(rdata, file); + result = NFS_PROTO(inode)->read(rdata); unlock_kernel(); /* @@ -169,8 +170,8 @@ io_error: return result; } -static int -nfs_readpage_async(struct file *file, struct inode *inode, struct page *page) +static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, + struct page *page) { LIST_HEAD(one_request); struct nfs_page *new; @@ -179,7 +180,7 @@ nfs_readpage_async(struct file *file, st len = nfs_page_length(inode, page); if (len == 0) return nfs_return_empty_page(page); - new = nfs_create_request(file, inode, page, 0, len); + new = nfs_create_request(ctx, inode, page, 0, len); if (IS_ERR(new)) { unlock_page(page); return PTR_ERR(new); @@ -202,8 +203,8 @@ static void nfs_readpage_release(struct nfs_unlock_request(req); dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", - req->wb_inode->i_sb->s_id, - (long long)NFS_FILEID(req->wb_inode), + req->wb_context->dentry->d_inode->i_sb->s_id, + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); } @@ -217,16 +218,15 @@ static void nfs_read_rpcsetup(struct nfs struct inode *inode; data->req = req; - data->inode = inode = req->wb_inode; - data->cred = req->wb_cred; + data->inode = inode = req->wb_context->dentry->d_inode; + data->cred = req->wb_context->cred; data->args.fh = NFS_FH(inode); data->args.offset = req_offset(req) + offset; data->args.pgbase = req->wb_pgbase + offset; data->args.pages = data->pagevec; data->args.count = count; - data->args.lockowner = req->wb_lockowner; - data->args.state = req->wb_state; + data->args.context = req->wb_context; data->res.fattr = &data->fattr; data->res.count = count; @@ -396,7 +396,7 @@ nfs_pagein_list(struct list_head *head, while (!list_empty(head)) { pages += nfs_coalesce_requests(head, &one_request, rpages); req = nfs_list_entry(one_request.next); - error = nfs_pagein_one(&one_request, req->wb_inode); + error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode); if (error < 0) break; } @@ -500,9 +500,9 @@ void nfs_readpage_result(struct rpc_task * - The error flag is set for this page. This happens only when a * previous async read operation failed. */ -int -nfs_readpage(struct file *file, struct page *page) +int nfs_readpage(struct file *file, struct page *page) { + struct nfs_open_context *ctx; struct inode *inode = page->mapping->host; int error; @@ -519,25 +519,33 @@ nfs_readpage(struct file *file, struct p if (error) goto out_error; + if (file == NULL) { + ctx = nfs_find_open_context(inode, FMODE_READ); + if (ctx == NULL) + return -EBADF; + } else + ctx = get_nfs_open_context((struct nfs_open_context *) + file->private_data); if (!IS_SYNC(inode)) { - error = nfs_readpage_async(file, inode, page); + error = nfs_readpage_async(ctx, inode, page); goto out; } - error = nfs_readpage_sync(file, inode, page); + error = nfs_readpage_sync(ctx, inode, page); if (error < 0 && IS_SWAPFILE(inode)) printk("Aiee.. 
nfs swap-in of page failed!\n"); out: + put_nfs_open_context(ctx); return error; out_error: unlock_page(page); - goto out; + return error; } struct nfs_readdesc { struct list_head *head; - struct file *filp; + struct nfs_open_context *ctx; }; static int @@ -552,7 +560,7 @@ readpage_async_filler(void *data, struct len = nfs_page_length(inode, page); if (len == 0) return nfs_return_empty_page(page); - new = nfs_create_request(desc->filp, inode, page, 0, len); + new = nfs_create_request(desc->ctx, inode, page, 0, len); if (IS_ERR(new)) { SetPageError(page); unlock_page(page); @@ -565,13 +573,11 @@ readpage_async_filler(void *data, struct return 0; } -int -nfs_readpages(struct file *filp, struct address_space *mapping, +int nfs_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { LIST_HEAD(head); struct nfs_readdesc desc = { - .filp = filp, .head = &head, }; struct inode *inode = mapping->host; @@ -583,12 +589,20 @@ nfs_readpages(struct file *filp, struct (long long)NFS_FILEID(inode), nr_pages); + if (filp == NULL) { + desc.ctx = nfs_find_open_context(inode, FMODE_READ); + if (desc.ctx == NULL) + return -EBADF; + } else + desc.ctx = get_nfs_open_context((struct nfs_open_context *) + filp->private_data); ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); if (!list_empty(&head)) { int err = nfs_pagein_list(&head, server->rpages); if (!ret) ret = err; } + put_nfs_open_context(desc.ctx); return ret; } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/unlink.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/unlink.c --- linux-2.6.7/fs/nfs/unlink.c 2004-07-02 18:44:06.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/unlink.c 2004-07-02 22:20:23.000000000 -0400 @@ -215,7 +215,6 @@ nfs_complete_unlink(struct dentry *dentr spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_NFSFS_RENAMED; spin_unlock(&dentry->d_lock); - if (data->task.tk_rpcwait == &nfs_delete_queue) - rpc_wake_up_task(&data->task); + rpc_wake_up_task(&data->task); nfs_put_unlinkdata(data); } diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfs/write.c linux-2.6.7-43-rpc_queue_lock/fs/nfs/write.c --- linux-2.6.7/fs/nfs/write.c 2004-07-02 18:43:51.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfs/write.c 2004-07-02 22:20:11.000000000 -0400 @@ -63,6 +63,8 @@ #include #include +#include "delegation.h" + #define NFSDBG_FACILITY NFSDBG_PAGECACHE #define MIN_POOL_WRITE (32) @@ -71,7 +73,8 @@ /* * Local function declarations */ -static struct nfs_page * nfs_update_request(struct file*, struct inode *, +static struct nfs_page * nfs_update_request(struct nfs_open_context*, + struct inode *, struct page *, unsigned int, unsigned int); static void nfs_writeback_done_partial(struct nfs_write_data *, int); @@ -173,7 +176,7 @@ static void nfs_mark_uptodate(struct pag * Write a page synchronously. * Offset is the data offset within the page. 
*/ -static int nfs_writepage_sync(struct file *file, struct inode *inode, +static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode, struct page *page, unsigned int offset, unsigned int count, int how) { @@ -187,9 +190,10 @@ static int nfs_writepage_sync(struct fil memset(wdata, 0, sizeof(*wdata)); wdata->flags = how; + wdata->cred = ctx->cred; wdata->inode = inode; wdata->args.fh = NFS_FH(inode); - wdata->args.lockowner = current->files; + wdata->args.context = ctx; wdata->args.pages = &page; wdata->args.stable = NFS_FILE_SYNC; wdata->args.pgbase = offset; @@ -208,7 +212,7 @@ static int nfs_writepage_sync(struct fil wdata->args.count = count; wdata->args.offset = page_offset(page) + wdata->args.pgbase; - result = NFS_PROTO(inode)->write(wdata, file); + result = NFS_PROTO(inode)->write(wdata); if (result < 0) { /* Must mark the page invalid after I/O error */ @@ -241,13 +245,14 @@ io_error: return written ? written : result; } -static int nfs_writepage_async(struct file *file, struct inode *inode, - struct page *page, unsigned int offset, unsigned int count) +static int nfs_writepage_async(struct nfs_open_context *ctx, + struct inode *inode, struct page *page, + unsigned int offset, unsigned int count) { struct nfs_page *req; int status; - req = nfs_update_request(file, inode, page, offset, count); + req = nfs_update_request(ctx, inode, page, offset, count); status = (IS_ERR(req)) ? PTR_ERR(req) : 0; if (status < 0) goto out; @@ -274,6 +279,7 @@ static int wb_priority(struct writeback_ */ int nfs_writepage(struct page *page, struct writeback_control *wbc) { + struct nfs_open_context *ctx; struct inode *inode = page->mapping->host; unsigned long end_index; unsigned offset = PAGE_CACHE_SIZE; @@ -308,16 +314,21 @@ int nfs_writepage(struct page *page, str if (page->index >= end_index+1 || !offset) goto out; do_it: + ctx = nfs_find_open_context(inode, FMODE_WRITE); + if (ctx == NULL) { + err = -EBADF; + goto out; + } lock_kernel(); if (!IS_SYNC(inode) && inode_referenced) { - err = nfs_writepage_async(NULL, inode, page, 0, offset); + err = nfs_writepage_async(ctx, inode, page, 0, offset); if (err >= 0) { err = 0; if (wbc->for_reclaim) nfs_flush_inode(inode, 0, 0, FLUSH_STABLE); } } else { - err = nfs_writepage_sync(NULL, inode, page, 0, + err = nfs_writepage_sync(ctx, inode, page, 0, offset, priority); if (err >= 0) { if (err != offset) @@ -326,6 +337,7 @@ do_it: } } unlock_kernel(); + put_nfs_open_context(ctx); out: unlock_page(page); if (inode_referenced) @@ -373,9 +385,10 @@ out: /* * Insert a write request into an inode + * + * The inode's req_lock is held by the caller. */ -static inline int -nfs_inode_add_request(struct inode *inode, struct nfs_page *req) +static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) { struct nfs_inode *nfsi = NFS_I(inode); int error; @@ -387,6 +400,8 @@ nfs_inode_add_request(struct inode *inod if (!nfsi->npages) { igrab(inode); nfs_begin_data_update(inode); + if (nfs_have_delegation(inode, FMODE_WRITE)) + nfsi->change_attr++; } nfsi->npages++; req->wb_count++; @@ -394,26 +409,27 @@ nfs_inode_add_request(struct inode *inod } /* - * Insert a write request into an inode + * Remove a write request from an inode + * + * A positive wb_count keeps req->wb_inode good while + * we're in here. 
*/ -static void -nfs_inode_remove_request(struct nfs_page *req) +static void nfs_inode_remove_request(struct nfs_page *req) { - struct nfs_inode *nfsi; - struct inode *inode; + struct inode *inode = req->wb_context->dentry->d_inode; + struct nfs_inode *nfsi = NFS_I(inode); BUG_ON (!NFS_WBACK_BUSY(req)); - spin_lock(&nfs_wreq_lock); - inode = req->wb_inode; - nfsi = NFS_I(inode); + + spin_lock(&nfsi->req_lock); radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); nfsi->npages--; if (!nfsi->npages) { - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); nfs_end_data_update_defer(inode); iput(inode); } else - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); nfs_clear_request(req); nfs_release_request(req); } @@ -438,9 +454,9 @@ nfs_find_request(struct inode *inode, un { struct nfs_page *req; - spin_lock(&nfs_wreq_lock); + spin_lock(&NFS_I(inode)->req_lock); req = _nfs_find_request(inode, index); - spin_unlock(&nfs_wreq_lock); + spin_unlock(&NFS_I(inode)->req_lock); return req; } @@ -450,13 +466,13 @@ nfs_find_request(struct inode *inode, un static void nfs_mark_request_dirty(struct nfs_page *req) { - struct inode *inode = req->wb_inode; + struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); - spin_lock(&nfs_wreq_lock); + spin_lock(&nfsi->req_lock); nfs_list_add_request(req, &nfsi->dirty); nfsi->ndirty++; - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); inc_page_state(nr_dirty); mark_inode_dirty(inode); } @@ -467,7 +483,7 @@ nfs_mark_request_dirty(struct nfs_page * static inline int nfs_dirty_request(struct nfs_page *req) { - struct nfs_inode *nfsi = NFS_I(req->wb_inode); + struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty; } @@ -478,13 +494,13 @@ nfs_dirty_request(struct nfs_page *req) static void nfs_mark_request_commit(struct nfs_page *req) { - struct inode *inode = req->wb_inode; + struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); - spin_lock(&nfs_wreq_lock); + spin_lock(&nfsi->req_lock); nfs_list_add_request(req, &nfsi->commit); nfsi->ncommit++; - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); inc_page_state(nr_unstable); mark_inode_dirty(inode); } @@ -509,7 +525,7 @@ nfs_wait_on_requests(struct inode *inode else idx_end = idx_start + npages - 1; - spin_lock(&nfs_wreq_lock); + spin_lock(&nfsi->req_lock); next = idx_start; while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) { if (req->wb_index > idx_end) @@ -520,15 +536,15 @@ nfs_wait_on_requests(struct inode *inode continue; req->wb_count++; - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); error = nfs_wait_on_request(req); nfs_release_request(req); if (error < 0) return error; - spin_lock(&nfs_wreq_lock); + spin_lock(&nfsi->req_lock); res++; } - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); return res; } @@ -619,11 +635,12 @@ static int nfs_wait_on_write_congestion( * * Note: Should always be called with the Page Lock held! 
*/ -static struct nfs_page * -nfs_update_request(struct file* file, struct inode *inode, struct page *page, - unsigned int offset, unsigned int bytes) +static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, + struct inode *inode, struct page *page, + unsigned int offset, unsigned int bytes) { - struct nfs_server *server = NFS_SERVER(inode); + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_server *server = NFS_SERVER(inode); struct nfs_page *req, *new = NULL; unsigned long rqend, end; @@ -635,19 +652,19 @@ nfs_update_request(struct file* file, st /* Loop over all inode entries and see if we find * A request for the page we wish to update */ - spin_lock(&nfs_wreq_lock); + spin_lock(&nfsi->req_lock); req = _nfs_find_request(inode, page->index); if (req) { if (!nfs_lock_request_dontget(req)) { int error; - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); error = nfs_wait_on_request(req); nfs_release_request(req); if (error < 0) return ERR_PTR(error); continue; } - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); if (new) nfs_release_request(new); break; @@ -658,23 +675,19 @@ nfs_update_request(struct file* file, st nfs_lock_request_dontget(new); error = nfs_inode_add_request(inode, new); if (error) { - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); nfs_unlock_request(new); return ERR_PTR(error); } - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); nfs_mark_request_dirty(new); return new; } - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); - new = nfs_create_request(file, inode, page, offset, bytes); + new = nfs_create_request(ctx, inode, page, offset, bytes); if (IS_ERR(new)) return new; - if (file) { - new->wb_file = file; - get_file(file); - } } /* We have a request for our page. @@ -684,7 +697,7 @@ nfs_update_request(struct file* file, st * request. */ rqend = req->wb_offset + req->wb_bytes; - if (req->wb_file != file + if (req->wb_context != ctx || req->wb_page != page || !nfs_dirty_request(req) || offset > rqend || end < req->wb_offset) { @@ -705,9 +718,9 @@ nfs_update_request(struct file* file, st return req; } -int -nfs_flush_incompatible(struct file *file, struct page *page) +int nfs_flush_incompatible(struct file *file, struct page *page) { + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; struct inode *inode = page->mapping->host; struct nfs_page *req; int status = 0; @@ -721,7 +734,7 @@ nfs_flush_incompatible(struct file *file */ req = nfs_find_request(inode, page->index); if (req) { - if (!NFS_PROTO(inode)->request_compatible(req, file, page)) + if (req->wb_page != page || ctx != req->wb_context) status = nfs_wb_page(inode, page); nfs_release_request(req); } @@ -737,6 +750,7 @@ nfs_flush_incompatible(struct file *file int nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsigned int count) { + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; struct dentry *dentry = file->f_dentry; struct inode *inode = page->mapping->host; struct nfs_page *req; @@ -747,7 +761,7 @@ int nfs_updatepage(struct file *file, st count, (long long)(page_offset(page) +offset)); if (IS_SYNC(inode)) { - status = nfs_writepage_sync(file, inode, page, offset, count, 0); + status = nfs_writepage_sync(ctx, inode, page, offset, count, 0); if (status > 0) { if (offset == 0 && status == PAGE_CACHE_SIZE) SetPageUptodate(page); @@ -784,7 +798,7 @@ int nfs_updatepage(struct file *file, st * it out now. 
*/ do { - req = nfs_update_request(file, inode, page, offset, count); + req = nfs_update_request(ctx, inode, page, offset, count); status = (IS_ERR(req)) ? PTR_ERR(req) : 0; if (status != -EBUSY) break; @@ -860,16 +874,15 @@ static void nfs_write_rpcsetup(struct nf * NB: take care not to mess about with data->commit et al. */ data->req = req; - data->inode = inode = req->wb_inode; - data->cred = req->wb_cred; + data->inode = inode = req->wb_context->dentry->d_inode; + data->cred = req->wb_context->cred; data->args.fh = NFS_FH(inode); data->args.offset = req_offset(req) + offset; data->args.pgbase = req->wb_pgbase + offset; data->args.pages = data->pagevec; data->args.count = count; - data->args.lockowner = req->wb_lockowner; - data->args.state = req->wb_state; + data->args.context = req->wb_context; data->res.fattr = &data->fattr; data->res.count = count; @@ -1029,7 +1042,7 @@ nfs_flush_list(struct list_head *head, i while (!list_empty(head)) { pages += nfs_coalesce_requests(head, &one_request, wpages); req = nfs_list_entry(one_request.next); - error = nfs_flush_one(&one_request, req->wb_inode, how); + error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how); if (error < 0) break; } @@ -1054,16 +1067,15 @@ static void nfs_writeback_done_partial(s struct page *page = req->wb_page; dprintk("NFS: write (%s/%Ld %d@%Ld)", - req->wb_inode->i_sb->s_id, - (long long)NFS_FILEID(req->wb_inode), + req->wb_context->dentry->d_inode->i_sb->s_id, + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); if (status < 0) { ClearPageUptodate(page); SetPageError(page); - if (req->wb_file) - req->wb_file->f_error = status; + req->wb_context->error = status; dprintk(", error = %d\n", status); } else { #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) @@ -1104,16 +1116,15 @@ static void nfs_writeback_done_full(stru page = req->wb_page; dprintk("NFS: write (%s/%Ld %d@%Ld)", - req->wb_inode->i_sb->s_id, - (long long)NFS_FILEID(req->wb_inode), + req->wb_context->dentry->d_inode->i_sb->s_id, + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); if (status < 0) { ClearPageUptodate(page); SetPageError(page); - if (req->wb_file) - req->wb_file->f_error = status; + req->wb_context->error = status; end_page_writeback(page); nfs_inode_remove_request(req); dprintk(", error = %d\n", status); @@ -1232,7 +1243,7 @@ static void nfs_commit_rpcsetup(struct l list_splice_init(head, &data->pages); first = nfs_list_entry(data->pages.next); last = nfs_list_entry(data->pages.prev); - inode = first->wb_inode; + inode = first->wb_context->dentry->d_inode; /* * Determine the offset range of requests in the COMMIT call. 
@@ -1246,7 +1257,7 @@ static void nfs_commit_rpcsetup(struct l len = 0; data->inode = inode; - data->cred = first->wb_cred; + data->cred = first->wb_context->cred; data->args.fh = NFS_FH(data->inode); data->args.offset = start; @@ -1313,13 +1324,12 @@ nfs_commit_done(struct rpc_task *task) nfs_list_remove_request(req); dprintk("NFS: commit (%s/%Ld %d@%Ld)", - req->wb_inode->i_sb->s_id, - (long long)NFS_FILEID(req->wb_inode), + req->wb_context->dentry->d_inode->i_sb->s_id, + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); if (task->tk_status < 0) { - if (req->wb_file) - req->wb_file->f_error = task->tk_status; + req->wb_context->error = task->tk_status; nfs_inode_remove_request(req); dprintk(", error = %d\n", task->tk_status); goto next; @@ -1351,9 +1361,9 @@ int nfs_flush_inode(struct inode *inode, int res, error = 0; - spin_lock(&nfs_wreq_lock); + spin_lock(&NFS_I(inode)->req_lock); res = nfs_scan_dirty(inode, &head, idx_start, npages); - spin_unlock(&nfs_wreq_lock); + spin_unlock(&NFS_I(inode)->req_lock); if (res) error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how); if (error < 0) @@ -1365,18 +1375,19 @@ int nfs_flush_inode(struct inode *inode, int nfs_commit_inode(struct inode *inode, unsigned long idx_start, unsigned int npages, int how) { + struct nfs_inode *nfsi = NFS_I(inode); LIST_HEAD(head); int res, error = 0; - spin_lock(&nfs_wreq_lock); + spin_lock(&nfsi->req_lock); res = nfs_scan_commit(inode, &head, idx_start, npages); if (res) { res += nfs_scan_commit(inode, &head, 0, 0); - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); error = nfs_commit_list(&head, how); } else - spin_unlock(&nfs_wreq_lock); + spin_unlock(&nfsi->req_lock); if (error < 0) return error; return res; diff -u --recursive --new-file --show-c-function linux-2.6.7/fs/nfsd/nfs4state.c linux-2.6.7-43-rpc_queue_lock/fs/nfsd/nfs4state.c --- linux-2.6.7/fs/nfsd/nfs4state.c 2004-07-02 18:43:45.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/fs/nfsd/nfs4state.c 2004-07-02 22:17:01.000000000 -0400 @@ -2180,6 +2180,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struc goto out; } + locks_init_lock(&file_lock); switch (lock->lk_type) { case NFS4_READ_LT: case NFS4_READW_LT: @@ -2197,9 +2198,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struc file_lock.fl_pid = lockownerid_hashval(lock->lk_stateowner->so_id); file_lock.fl_file = filp; file_lock.fl_flags = FL_POSIX; - file_lock.fl_notify = NULL; - file_lock.fl_insert = NULL; - file_lock.fl_remove = NULL; file_lock.fl_start = lock->lk_offset; if ((lock->lk_length == ~(u64)0) || @@ -2215,6 +2213,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc */ status = posix_lock_file(filp, &file_lock); + if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) + file_lock.fl_ops->fl_release_private(&file_lock); dprintk("NFSD: nfsd4_lock: posix_test_lock passed. posix_lock_file status %d\n",status); switch (-status) { case 0: /* success! 
*/ @@ -2296,6 +2296,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, stru } inode = current_fh->fh_dentry->d_inode; + locks_init_lock(&file_lock); switch (lockt->lt_type) { case NFS4_READ_LT: case NFS4_READW_LT: @@ -2381,14 +2382,12 @@ nfsd4_locku(struct svc_rqst *rqstp, stru filp = &stp->st_vfs_file; BUG_ON(!filp); + locks_init_lock(&file_lock); file_lock.fl_type = F_UNLCK; file_lock.fl_owner = (fl_owner_t) locku->lu_stateowner; file_lock.fl_pid = lockownerid_hashval(locku->lu_stateowner->so_id); file_lock.fl_file = filp; file_lock.fl_flags = FL_POSIX; - file_lock.fl_notify = NULL; - file_lock.fl_insert = NULL; - file_lock.fl_remove = NULL; file_lock.fl_start = locku->lu_offset; if ((locku->lu_length == ~(u64)0) || LOFF_OVERFLOW(locku->lu_offset, locku->lu_length)) @@ -2401,6 +2400,8 @@ nfsd4_locku(struct svc_rqst *rqstp, stru * Try to unlock the file in the VFS. */ status = posix_lock_file(filp, &file_lock); + if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) + file_lock.fl_ops->fl_release_private(&file_lock); if (status) { printk("NFSD: nfs4_locku: posix_lock_file failed!\n"); goto out_nfserr; diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/fs.h linux-2.6.7-43-rpc_queue_lock/include/linux/fs.h --- linux-2.6.7/include/linux/fs.h 2004-07-02 18:43:24.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/fs.h 2004-07-02 22:17:16.000000000 -0400 @@ -622,6 +622,19 @@ extern void close_private_file(struct fi */ typedef struct files_struct *fl_owner_t; +struct file_lock_operations { + void (*fl_insert)(struct file_lock *); /* lock insertion callback */ + void (*fl_remove)(struct file_lock *); /* lock removal callback */ + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); + void (*fl_steal_locks)(struct file_lock *, fl_owner_t); +}; + +struct lock_manager_operations { + int (*fl_compare_owner)(struct file_lock *, struct file_lock *); + void (*fl_notify)(struct file_lock *); /* unblock callback */ +}; + /* that will die - we need it for nfs_lock_info */ #include @@ -638,13 +651,11 @@ struct file_lock { loff_t fl_start; loff_t fl_end; - void (*fl_notify)(struct file_lock *); /* unblock callback */ - void (*fl_insert)(struct file_lock *); /* lock insertion callback */ - void (*fl_remove)(struct file_lock *); /* lock removal callback */ - struct fasync_struct * fl_fasync; /* for lease break notifications */ unsigned long fl_break_time; /* for nonblocking lease breaks */ + struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ + struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { struct nfs_lock_info nfs_fl; } fl_u; @@ -683,6 +694,7 @@ extern void locks_remove_posix(struct fi extern void locks_remove_flock(struct file *); extern struct file_lock *posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *); +extern int posix_lock_file_wait(struct file *, struct file_lock *); extern void posix_block_lock(struct file_lock *, struct file_lock *); extern void posix_unblock_lock(struct file *, struct file_lock *); extern int posix_locks_deadlock(struct file_lock *, struct file_lock *); diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/lockd/lockd.h linux-2.6.7-43-rpc_queue_lock/include/linux/lockd/lockd.h --- linux-2.6.7/include/linux/lockd/lockd.h 2004-07-02 18:43:23.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/lockd/lockd.h 2004-07-02 22:17:08.000000000 -0400 @@ 
-52,10 +52,25 @@ struct nlm_host { wait_queue_head_t h_gracewait; /* wait while reclaiming */ u32 h_state; /* pseudo-state counter */ u32 h_nsmstate; /* true remote NSM state */ - unsigned int h_count; /* reference count */ + u32 h_pidcount; /* Pseudopids */ + atomic_t h_count; /* reference count */ struct semaphore h_sema; /* mutex for pmap binding */ unsigned long h_nextrebind; /* next portmap call */ unsigned long h_expires; /* eligible for GC */ + struct list_head h_lockowners; /* Lockowners for the client */ + spinlock_t h_lock; +}; + +/* + * Map an fl_owner_t into a unique 32-bit "pid" + */ +struct nlm_lockowner { + struct list_head list; + atomic_t count; + + struct nlm_host *host; + fl_owner_t owner; + uint32_t pid; }; /* @@ -205,6 +220,8 @@ nlm_compare_locks(struct file_lock *fl1, &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK); } +extern struct lock_manager_operations nlmsvc_lock_operations; + #endif /* __KERNEL__ */ #endif /* LINUX_LOCKD_LOCKD_H */ diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/nfs4.h linux-2.6.7-43-rpc_queue_lock/include/linux/nfs4.h --- linux-2.6.7/include/linux/nfs4.h 2004-07-02 18:43:40.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/nfs4.h 2004-07-02 22:19:47.000000000 -0400 @@ -15,6 +15,7 @@ #define NFS4_VERIFIER_SIZE 8 #define NFS4_FHSIZE 128 +#define NFS4_MAXPATHLEN PATH_MAX #define NFS4_MAXNAMLEN NAME_MAX #define NFS4_ACCESS_READ 0x0001 @@ -297,7 +298,7 @@ enum { NFSPROC4_CLNT_COMMIT, NFSPROC4_CLNT_OPEN, NFSPROC4_CLNT_OPEN_CONFIRM, - NFSPROC4_CLNT_OPEN_RECLAIM, + NFSPROC4_CLNT_OPEN_NOATTR, NFSPROC4_CLNT_OPEN_DOWNGRADE, NFSPROC4_CLNT_CLOSE, NFSPROC4_CLNT_SETATTR, @@ -315,12 +316,14 @@ enum { NFSPROC4_CLNT_REMOVE, NFSPROC4_CLNT_RENAME, NFSPROC4_CLNT_LINK, + NFSPROC4_CLNT_SYMLINK, NFSPROC4_CLNT_CREATE, NFSPROC4_CLNT_PATHCONF, NFSPROC4_CLNT_STATFS, NFSPROC4_CLNT_READLINK, NFSPROC4_CLNT_READDIR, NFSPROC4_CLNT_SERVER_CAPS, + NFSPROC4_CLNT_DELEGRETURN, }; #endif diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/nfs_fs.h linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_fs.h --- linux-2.6.7/include/linux/nfs_fs.h 2004-07-02 18:43:25.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_fs.h 2004-07-02 22:20:11.000000000 -0400 @@ -28,8 +28,11 @@ #include #include #include +#include #include +#include + /* * Enable debugging support for nfs client. * Requires RPC_DEBUG. @@ -75,15 +78,33 @@ #ifdef __KERNEL__ /* - * NFSv3 Access mode cache + * NFSv3/v4 Access mode cache entry */ -struct nfs_access_cache { +struct nfs_access_entry { unsigned long jiffies; struct rpc_cred * cred; int mask; - int err; }; +struct nfs4_state; +struct nfs_open_context { + atomic_t count; + struct dentry *dentry; + struct rpc_cred *cred; + struct nfs4_state *state; + fl_owner_t lockowner; + int mode; + int error; + + struct list_head list; + wait_queue_head_t waitq; +}; + +/* + * NFSv4 delegation + */ +struct nfs_delegation; + /* * nfs fs inode data in memory */ @@ -137,7 +158,7 @@ struct nfs_inode { */ atomic_t data_updates; - struct nfs_access_cache cache_access; + struct nfs_access_entry cache_access; /* * This is the cookie verifier used for NFSv3 readdir @@ -148,6 +169,7 @@ struct nfs_inode { /* * This is the list of dirty unwritten pages. 
*/ + spinlock_t req_lock; struct list_head dirty; struct list_head commit; struct radix_tree_root nfs_page_tree; @@ -156,14 +178,17 @@ struct nfs_inode { ncommit, npages; - /* Credentials for shared mmap */ - struct rpc_cred *mm_cred; + /* Open contexts for shared mmap writes */ + struct list_head open_files; wait_queue_head_t nfs_i_wait; #ifdef CONFIG_NFS_V4 /* NFSv4 state */ struct list_head open_states; + struct nfs_delegation *delegation; + int delegation_state; + struct rw_semaphore rwsem; #endif /* CONFIG_NFS_V4*/ struct inode vfs_inode; @@ -268,9 +293,12 @@ extern struct inode *nfs_fhget(struct su extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int nfs_permission(struct inode *, int, struct nameidata *); -extern void nfs_set_mmcred(struct inode *, struct rpc_cred *); +extern int nfs_access_get_cached(struct inode *, struct rpc_cred *, struct nfs_access_entry *); +extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern int nfs_open(struct inode *, struct file *); extern int nfs_release(struct inode *, struct file *); +extern int nfs_attribute_timeout(struct inode *inode); +extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_begin_attr_update(struct inode *); @@ -278,6 +306,12 @@ extern void nfs_end_attr_update(struct i extern void nfs_begin_data_update(struct inode *); extern void nfs_end_data_update(struct inode *); extern void nfs_end_data_update_defer(struct inode *); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred); +extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); +extern void put_nfs_open_context(struct nfs_open_context *ctx); +extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, int mode); +extern void nfs_file_clear_open_context(struct file *filp); /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ extern u32 root_nfs_parse_addr(char *name); /*__init*/ @@ -289,16 +323,15 @@ extern struct inode_operations nfs_file_ extern struct file_operations nfs_file_operations; extern struct address_space_operations nfs_file_aops; -static __inline__ struct rpc_cred * -nfs_file_cred(struct file *file) +static inline struct rpc_cred *nfs_file_cred(struct file *file) { - struct rpc_cred *cred = NULL; - if (file) - cred = (struct rpc_cred *)file->private_data; -#ifdef RPC_DEBUG - BUG_ON(cred && cred->cr_magic != RPCAUTH_CRED_MAGIC); -#endif - return cred; + if (file != NULL) { + struct nfs_open_context *ctx; + + ctx = (struct nfs_open_context*)file->private_data; + return ctx->cred; + } + return NULL; } /* @@ -418,28 +451,6 @@ extern int nfsroot_mount(struct sockadd * inline functions */ -static inline int nfs_attribute_timeout(struct inode *inode) -{ - struct nfs_inode *nfsi = NFS_I(inode); - - return time_after(jiffies, nfsi->read_cache_jiffies+nfsi->attrtimeo); -} - -/** - * nfs_revalidate_inode - Revalidate the inode attributes - * @server - pointer to nfs_server struct - * @inode - pointer to inode struct - * - * Updates inode attribute information by retrieving the data from the server. 
- */ -static inline int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) -{ - if (!(NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA)) - && !nfs_attribute_timeout(inode)) - return NFS_STALE(inode) ? -ESTALE : 0; - return __nfs_revalidate_inode(server, inode); -} - static inline loff_t nfs_size_to_loff_t(__u64 size) { @@ -507,8 +518,6 @@ struct idmap; enum nfs4_client_state { NFS4CLNT_OK = 0, - NFS4CLNT_NEW, - NFS4CLNT_SETUP_STATE, }; /* @@ -520,7 +529,6 @@ struct nfs4_client { u64 cl_clientid; /* constant */ nfs4_verifier cl_confirm; unsigned long cl_state; - long cl_generation; u32 cl_lockowner_id; @@ -530,6 +538,7 @@ struct nfs4_client { */ struct rw_semaphore cl_sem; + struct list_head cl_delegations; struct list_head cl_state_owners; struct list_head cl_unused; int cl_nunused; @@ -573,12 +582,11 @@ struct nfs4_state_owner { u32 so_id; /* 32-bit identifier, unique */ struct semaphore so_sema; u32 so_seqid; /* protected by so_sema */ - unsigned int so_flags; /* protected by so_sema */ atomic_t so_count; - long so_generation; struct rpc_cred *so_cred; /* Associated cred */ struct list_head so_states; + struct list_head so_delegations; }; /* @@ -596,7 +604,8 @@ struct nfs4_state_owner { struct nfs4_lock_state { struct list_head ls_locks; /* Other lock stateids */ fl_owner_t ls_owner; /* POSIX lock owner */ - struct nfs4_state * ls_parent; /* Parent nfs4_state */ +#define NFS_LOCK_INITIALIZED 1 + int ls_flags; u32 ls_seqid; u32 ls_id; nfs4_stateid ls_stateid; @@ -606,6 +615,7 @@ struct nfs4_lock_state { /* bits for nfs4_state->flags */ enum { LK_STATE_IN_USE, + NFS_DELEGATED_STATE, }; struct nfs4_state { @@ -629,6 +639,11 @@ struct nfs4_state { }; +struct nfs4_exception { + long timeout; + int retry; +}; + extern struct dentry_operations nfs4_dentry_operations; extern struct inode_operations nfs4_dir_inode_operations; @@ -639,10 +654,12 @@ extern int nfs4_open_reclaim(struct nfs4 extern int nfs4_proc_async_renew(struct nfs4_client *); extern int nfs4_proc_renew(struct nfs4_client *); extern int nfs4_do_close(struct inode *, struct nfs4_state *); -int nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode); +extern int nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode); extern int nfs4_wait_clnt_recover(struct rpc_clnt *, struct nfs4_client *); extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); extern int nfs4_open_revalidate(struct inode *, struct dentry *, int); +extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *); +extern int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request); /* nfs4renewd.c */ extern void nfs4_schedule_state_renewal(struct nfs4_client *); @@ -654,6 +671,8 @@ extern void init_nfsv4_state(struct nfs_ extern void destroy_nfsv4_state(struct nfs_server *); extern struct nfs4_client *nfs4_get_client(struct in_addr *); extern void nfs4_put_client(struct nfs4_client *clp); +extern int nfs4_init_client(struct nfs4_client *clp); +extern struct nfs4_client *nfs4_find_client(struct in_addr *); extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *); extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); @@ -663,14 +682,13 @@ extern void nfs4_put_open_state(struct n extern void nfs4_close_state(struct nfs4_state *, mode_t); extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode); extern void nfs4_increment_seqid(int status, struct 
nfs4_state_owner *sp); -extern int nfs4_handle_error(struct nfs_server *, int); extern void nfs4_schedule_state_recovery(struct nfs4_client *); extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t); -extern struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t); +extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t); extern void nfs4_put_lock_state(struct nfs4_lock_state *state); extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls); -extern void nfs4_notify_setlk(struct inode *, struct file_lock *, struct nfs4_lock_state *); -extern void nfs4_notify_unlck(struct inode *, struct file_lock *, struct nfs4_lock_state *); +extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *); +extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *); extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); @@ -681,6 +699,7 @@ struct nfs4_mount_data; #define destroy_nfsv4_state(server) do { } while (0) #define nfs4_put_state_owner(inode, owner) do { } while (0) #define nfs4_put_open_state(state) do { } while (0) +#define nfs4_close_state(a, b) do { } while (0) #define nfs4_renewd_prepare_shutdown(server) do { } while (0) #endif @@ -697,6 +716,7 @@ struct nfs4_mount_data; #define NFSDBG_XDR 0x0020 #define NFSDBG_FILE 0x0040 #define NFSDBG_ROOT 0x0080 +#define NFSDBG_CALLBACK 0x0100 #define NFSDBG_ALL 0xFFFF #ifdef __KERNEL__ diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/nfs_fs_i.h linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_fs_i.h --- linux-2.6.7/include/linux/nfs_fs_i.h 2004-07-02 18:43:25.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_fs_i.h 2004-07-02 22:17:08.000000000 -0400 @@ -5,13 +5,15 @@ #include #include +struct nlm_lockowner; + /* * NFS lock info */ struct nfs_lock_info { u32 state; u32 flags; - struct nlm_host *host; + struct nlm_lockowner *owner; }; /* diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/nfs_fs_sb.h linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_fs_sb.h --- linux-2.6.7/include/linux/nfs_fs_sb.h 2004-07-02 18:44:18.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_fs_sb.h 2004-07-02 22:18:26.000000000 -0400 @@ -18,6 +18,7 @@ struct nfs_server { unsigned int rpages; /* read size (in pages) */ unsigned int wsize; /* write size */ unsigned int wpages; /* write size (in pages) */ + unsigned int wtmult; /* server disk block size */ unsigned int dtsize; /* readdir size */ unsigned int bsize; /* server block size */ unsigned int acregmin; /* attr cache timeouts */ diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/nfs.h linux-2.6.7-43-rpc_queue_lock/include/linux/nfs.h --- linux-2.6.7/include/linux/nfs.h 2004-07-02 18:43:51.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/nfs.h 2004-07-02 22:18:37.000000000 -0400 @@ -8,6 +8,7 @@ #define _LINUX_NFS_H #include +#include #define NFS_PROGRAM 100003 #define NFS_PORT 2049 @@ -139,6 +140,22 @@ struct nfs_fh { }; /* + * Returns a zero iff the size and data fields match. + * Checks only "size" bytes in the data field. 
+ */ +static inline int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b) +{ + return a->size != b->size || memcmp(a->data, b->data, a->size) != 0; +} + +static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source) +{ + target->size = source->size; + memcpy(target->data, source->data, source->size); +} + + +/* * This is really a general kernel constant, but since nothing like * this is defined in the kernel headers, I have to do it here. */ diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/nfs_page.h linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_page.h --- linux-2.6.7/include/linux/nfs_page.h 2004-07-02 18:43:22.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_page.h 2004-07-02 22:19:13.000000000 -0400 @@ -29,14 +29,9 @@ struct nfs_page { struct list_head wb_list, /* Defines state of page: */ *wb_list_head; /* read/write/commit */ - struct file *wb_file; - fl_owner_t wb_lockowner; - struct inode *wb_inode; - struct rpc_cred *wb_cred; - struct nfs4_state *wb_state; struct page *wb_page; /* page to read in/write out */ + struct nfs_open_context *wb_context; /* File state context info */ atomic_t wb_complete; /* i/os we're waiting for */ - wait_queue_head_t wb_wait; /* wait queue */ unsigned long wb_index; /* Offset >> PAGE_CACHE_SHIFT */ unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ wb_pgbase, /* Start of page data */ @@ -50,9 +45,11 @@ struct nfs_page { #define NFS_NEED_COMMIT(req) (test_bit(PG_NEED_COMMIT,&(req)->wb_flags)) #define NFS_NEED_RESCHED(req) (test_bit(PG_NEED_RESCHED,&(req)->wb_flags)) -extern struct nfs_page *nfs_create_request(struct file *, struct inode *, - struct page *, - unsigned int, unsigned int); +extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, + struct inode *inode, + struct page *page, + unsigned int offset, + unsigned int count); extern void nfs_clear_request(struct nfs_page *req); extern void nfs_release_request(struct nfs_page *req); @@ -64,8 +61,7 @@ extern int nfs_scan_list(struct list_hea extern int nfs_coalesce_requests(struct list_head *, struct list_head *, unsigned int); extern int nfs_wait_on_request(struct nfs_page *); - -extern spinlock_t nfs_wreq_lock; +extern void nfs_unlock_request(struct nfs_page *req); /* * Lock the page of an asynchronous request without incrementing the wb_count @@ -90,19 +86,6 @@ nfs_lock_request(struct nfs_page *req) return 1; } -static inline void -nfs_unlock_request(struct nfs_page *req) -{ - if (!NFS_WBACK_BUSY(req)) { - printk(KERN_ERR "NFS: Invalid unlock attempted\n"); - BUG(); - } - smp_mb__before_clear_bit(); - clear_bit(PG_BUSY, &req->wb_flags); - smp_mb__after_clear_bit(); - wake_up_all(&req->wb_wait); - nfs_release_request(req); -} /** * nfs_list_remove_request - Remove a request from its wb_list diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/nfs_xdr.h linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_xdr.h --- linux-2.6.7/include/linux/nfs_xdr.h 2004-07-02 18:43:56.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/nfs_xdr.h 2004-07-02 22:19:34.000000000 -0400 @@ -99,20 +99,21 @@ struct nfs4_change_info { * Arguments to the open call. 
*/ struct nfs_openargs { - struct nfs_fh * fh; + const struct nfs_fh * fh; __u32 seqid; - __u32 share_access; + int open_flags; __u64 clientid; __u32 id; - __u32 opentype; - __u32 createmode; union { struct iattr * attrs; /* UNCHECKED, GUARDED */ nfs4_verifier verifier; /* EXCLUSIVE */ + nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ + int delegation_type; /* CLAIM_PREVIOUS */ } u; const struct qstr * name; const struct nfs_server *server; /* Needed for ID mapping */ const u32 * bitmask; + __u32 claim; }; struct nfs_openres { @@ -122,13 +123,17 @@ struct nfs_openres { __u32 rflags; struct nfs_fattr * f_attr; const struct nfs_server *server; + int delegation_type; + nfs4_stateid delegation; + __u32 do_recall; + __u64 maxsize; }; /* * Arguments to the open_confirm call. */ struct nfs_open_confirmargs { - struct nfs_fh * fh; + const struct nfs_fh * fh; nfs4_stateid stateid; __u32 seqid; }; @@ -138,26 +143,13 @@ struct nfs_open_confirmres { }; /* - * Arguments to the open_reclaim call. - */ -struct nfs_open_reclaimargs { - struct nfs_fh * fh; - __u64 clientid; - __u32 seqid; - __u32 id; - __u32 share_access; - __u32 claim; - const __u32 * bitmask; -}; - -/* * Arguments to the close call. */ struct nfs_closeargs { struct nfs_fh * fh; nfs4_stateid stateid; __u32 seqid; - __u32 share_access; + int open_flags; }; struct nfs_closeres { @@ -224,6 +216,11 @@ struct nfs_lockres { const struct nfs_server * server; }; +struct nfs4_delegreturnargs { + const struct nfs_fh *fhandle; + const nfs4_stateid *stateid; +}; + /* * Arguments to the read call. */ @@ -235,8 +232,7 @@ struct nfs_lockres { struct nfs_readargs { struct nfs_fh * fh; - fl_owner_t lockowner; - struct nfs4_state * state; + struct nfs_open_context *context; __u64 offset; __u32 count; unsigned int pgbase; @@ -259,8 +255,7 @@ struct nfs_readres { struct nfs_writeargs { struct nfs_fh * fh; - fl_owner_t lockowner; - struct nfs4_state * state; + struct nfs_open_context *context; __u64 offset; __u32 count; enum nfs3_stable_how stable; @@ -597,13 +592,15 @@ struct nfs4_rename_res { }; struct nfs4_setclientid { - nfs4_verifier sc_verifier; /* request */ - char * sc_name; /* request */ + const nfs4_verifier * sc_verifier; /* request */ + unsigned int sc_name_len; + char sc_name[32]; /* request */ u32 sc_prog; /* request */ + unsigned int sc_netid_len; char sc_netid[4]; /* request */ + unsigned int sc_uaddr_len; char sc_uaddr[24]; /* request */ u32 sc_cb_ident; /* request */ - struct nfs4_client * sc_state; /* response */ }; struct nfs4_statfs_arg { @@ -657,6 +654,8 @@ struct nfs_write_data { void (*complete) (struct nfs_write_data *, int); }; +struct nfs_access_entry; + /* * RPC procedure vector for NFSv2/NFSv3 demuxing */ @@ -672,11 +671,11 @@ struct nfs_rpc_ops { struct iattr *); int (*lookup) (struct inode *, struct qstr *, struct nfs_fh *, struct nfs_fattr *); - int (*access) (struct inode *, struct rpc_cred *, int); + int (*access) (struct inode *, struct nfs_access_entry *); int (*readlink)(struct inode *, struct page *); - int (*read) (struct nfs_read_data *, struct file *); - int (*write) (struct nfs_write_data *, struct file *); - int (*commit) (struct nfs_write_data *, struct file *); + int (*read) (struct nfs_read_data *); + int (*write) (struct nfs_write_data *); + int (*commit) (struct nfs_write_data *); struct inode * (*create) (struct inode *, struct qstr *, struct iattr *, int); int (*remove) (struct inode *, struct qstr *); @@ -708,8 +707,6 @@ struct nfs_rpc_ops { void (*commit_setup) (struct nfs_write_data *, int how); int 
(*file_open) (struct inode *, struct file *); int (*file_release) (struct inode *, struct file *); - void (*request_init)(struct nfs_page *, struct file *); - int (*request_compatible)(struct nfs_page *, struct file *, struct page *); int (*lock)(struct file *, int, struct file_lock *); }; diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/sunrpc/gss_asn1.h linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/gss_asn1.h --- linux-2.6.7/include/linux/sunrpc/gss_asn1.h 2004-07-02 18:44:06.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/gss_asn1.h 2004-07-02 22:17:56.000000000 -0400 @@ -69,7 +69,6 @@ u32 g_verify_token_header( struct xdr_netobj *mech, int *body_size, unsigned char **buf_in, - int tok_type, int toksize); u32 g_get_mech_oid(struct xdr_netobj *mech, struct xdr_netobj * in_buf); diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/sunrpc/gss_spkm3.h linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/gss_spkm3.h --- linux-2.6.7/include/linux/sunrpc/gss_spkm3.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/gss_spkm3.h 2004-07-02 22:18:11.000000000 -0400 @@ -0,0 +1,61 @@ +/* + * linux/include/linux/sunrpc/gss_spkm3.h + * + * Copyright (c) 2000 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson + */ + +#include +#include +#include + +struct spkm3_ctx { + struct xdr_netobj ctx_id; /* per message context id */ + int qop; /* negotiated qop */ + struct xdr_netobj mech_used; + unsigned int ret_flags ; + unsigned int req_flags ; + struct xdr_netobj share_key; + int conf_alg; + struct crypto_tfm* derived_conf_key; + int intg_alg; + struct crypto_tfm* derived_integ_key; + int keyestb_alg; /* alg used to get share_key */ + int owf_alg; /* one way function */ +}; + +/* from openssl/objects.h */ +/* XXX need SEAL_ALG_NONE */ +#define NID_md5 4 +#define NID_dhKeyAgreement 28 +#define NID_des_cbc 31 +#define NID_sha1 64 +#define NID_cast5_cbc 108 + +/* SPKM InnerContext Token types */ + +#define SPKM_ERROR_TOK 3 +#define SPKM_MIC_TOK 4 +#define SPKM_WRAP_TOK 5 +#define SPKM_DEL_TOK 6 + +u32 spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype); + +u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int *qop_state, int toktype); + +#define CKSUMTYPE_RSA_MD5 0x0007 + +s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, + struct xdr_netobj *cksum); +void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits); +int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, + int explen); +void spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, + unsigned char *ctxhdr, int elen, int zbit); +void spkm3_make_mic_token(unsigned char **tokp, int toklen, + struct xdr_netobj *mic_hdr, + struct xdr_netobj *md5cksum, int md5elen, int md5zbit); +u32 spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, + unsigned char **cksum); diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/sunrpc/sched.h linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/sched.h --- linux-2.6.7/include/linux/sunrpc/sched.h 2004-07-02 18:43:45.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/sched.h 2004-07-02 22:20:29.000000000 -0400 @@ -11,7 +11,9 @@ #include #include +#include #include +#include #include /* @@ -25,11 +27,18 @@ struct rpc_message { struct 
rpc_cred * rpc_cred; /* Credentials */ }; +struct rpc_wait_queue; +struct rpc_wait { + struct list_head list; /* wait queue links */ + struct list_head links; /* Links to related tasks */ + wait_queue_head_t waitq; /* sync: sleep on this q */ + struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */ +}; + /* * This is the RPC task struct */ struct rpc_task { - struct list_head tk_list; /* wait queue links */ #ifdef RPC_DEBUG unsigned long tk_magic; /* 0xf00baa */ #endif @@ -37,7 +46,6 @@ struct rpc_task { struct rpc_clnt * tk_client; /* RPC client */ struct rpc_rqst * tk_rqstp; /* RPC request */ int tk_status; /* result of last operation */ - struct rpc_wait_queue * tk_rpcwait; /* RPC wait queue we're on */ /* * RPC call state @@ -70,13 +78,18 @@ struct rpc_task { * you have a pathological interest in kernel oopses. */ struct timer_list tk_timer; /* kernel timer */ - wait_queue_head_t tk_wait; /* sync: sleep on this q */ unsigned long tk_timeout; /* timeout for rpc_sleep() */ unsigned short tk_flags; /* misc flags */ unsigned char tk_active : 1;/* Task has been activated */ unsigned char tk_priority : 2;/* Task priority */ unsigned long tk_runstate; /* Task run status */ - struct list_head tk_links; /* links to related tasks */ + struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could + * be any workqueue + */ + union { + struct work_struct tk_work; /* Async task work queue */ + struct rpc_wait tk_wait; /* RPC wait */ + } u; #ifdef RPC_DEBUG unsigned short tk_pid; /* debugging aid */ #endif @@ -87,11 +100,11 @@ struct rpc_task { /* support walking a list of tasks on a wait queue */ #define task_for_each(task, pos, head) \ list_for_each(pos, head) \ - if ((task=list_entry(pos, struct rpc_task, tk_list)),1) + if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1) #define task_for_first(task, head) \ if (!list_empty(head) && \ - ((task=list_entry((head)->next, struct rpc_task, tk_list)),1)) + ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1)) /* .. and walking list of all tasks */ #define alltask_for_each(task, pos, head) \ @@ -124,22 +137,24 @@ typedef void (*rpc_action)(struct rpc_ #define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL) #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) -#define RPC_TASK_SLEEPING 0 -#define RPC_TASK_RUNNING 1 -#define RPC_IS_SLEEPING(t) (test_bit(RPC_TASK_SLEEPING, &(t)->tk_runstate)) -#define RPC_IS_RUNNING(t) (test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)) +#define RPC_TASK_RUNNING 0 +#define RPC_TASK_QUEUED 1 +#define RPC_IS_RUNNING(t) (test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)) #define rpc_set_running(t) (set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)) -#define rpc_clear_running(t) (clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)) - -#define rpc_set_sleeping(t) (set_bit(RPC_TASK_SLEEPING, &(t)->tk_runstate)) - -#define rpc_clear_sleeping(t) \ +#define rpc_test_and_set_running(t) \ + (test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)) +#define rpc_clear_running(t) \ do { \ smp_mb__before_clear_bit(); \ - clear_bit(RPC_TASK_SLEEPING, &(t)->tk_runstate); \ + clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ smp_mb__after_clear_bit(); \ - } while(0) + } while (0) + +#define RPC_IS_QUEUED(t) (test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)) +#define rpc_set_queued(t) (set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)) +#define rpc_test_and_clear_queued(t) \ + (test_and_clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)) /* * Task priorities. 
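The RPC_TASK_RUNNING/RPC_TASK_QUEUED pair above replaces the old SLEEPING/RUNNING bits. As an illustration of the bit protocol these helpers allow (not the actual sched.c logic, which follows later in this patch), here is a small user-space sketch; the *_sketch names are invented for the example, and C11 atomics stand in for the kernel's test_and_set_bit()/test_and_clear_bit().

#include <stdatomic.h>
#include <stdio.h>

#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED  1

struct rpc_task_sketch {
	atomic_ulong runstate;			/* stand-in for tk_runstate */
};

/* Stand-ins for the kernel's atomic bit operations. */
static int test_and_set_bit_sketch(int nr, atomic_ulong *word)
{
	unsigned long mask = 1UL << nr;

	return (atomic_fetch_or(word, mask) & mask) != 0;
}

static int test_and_clear_bit_sketch(int nr, atomic_ulong *word)
{
	unsigned long mask = 1UL << nr;

	return (atomic_fetch_and(word, ~mask) & mask) != 0;
}

/* Going to sleep: mark the task as sitting on a wait queue. */
static void sleep_on_sketch(struct rpc_task_sketch *task)
{
	test_and_set_bit_sketch(RPC_TASK_QUEUED, &task->runstate);
}

/*
 * Waking up: only the caller that actually clears QUEUED may try to make
 * the task runnable, and only if it also wins the RUNNING bit does it get
 * to (re)schedule the task, so a wakeup is acted on at most once.
 */
static void wake_up_sketch(struct rpc_task_sketch *task)
{
	if (!test_and_clear_bit_sketch(RPC_TASK_QUEUED, &task->runstate))
		return;	/* someone else already woke this task */
	if (test_and_set_bit_sketch(RPC_TASK_RUNNING, &task->runstate))
		return;	/* already executing; it will pick up the wakeup */
	printf("task handed to the workqueue\n");
}

int main(void)
{
	struct rpc_task_sketch task = { .runstate = 0 };

	sleep_on_sketch(&task);
	wake_up_sketch(&task);	/* first waker schedules the task */
	wake_up_sketch(&task);	/* QUEUED already clear: no double schedule */
	return 0;
}

The point of the protocol is that exactly one waker can observe QUEUED set, and only a waker that also finds RUNNING clear may queue the task, which is what lets the per-queue spinlock introduced in the next hunk replace the old global rpc_queue_lock.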
@@ -155,6 +170,7 @@ typedef void (*rpc_action)(struct rpc_ * RPC synchronization objects */ struct rpc_wait_queue { + spinlock_t lock; struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ unsigned long cookie; /* cookie of last task serviced */ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ @@ -175,6 +191,7 @@ struct rpc_wait_queue { #ifndef RPC_DEBUG # define RPC_WAITQ_INIT(var,qname) { \ + .lock = SPIN_LOCK_UNLOCKED, \ .tasks = { \ [0] = LIST_HEAD_INIT(var.tasks[0]), \ [1] = LIST_HEAD_INIT(var.tasks[1]), \ @@ -183,6 +200,7 @@ struct rpc_wait_queue { } #else # define RPC_WAITQ_INIT(var,qname) { \ + .lock = SPIN_LOCK_UNLOCKED, \ .tasks = { \ [0] = LIST_HEAD_INIT(var.tasks[0]), \ [1] = LIST_HEAD_INIT(var.tasks[1]), \ @@ -207,13 +225,10 @@ void rpc_killall_tasks(struct rpc_clnt int rpc_execute(struct rpc_task *); void rpc_run_child(struct rpc_task *parent, struct rpc_task *child, rpc_action action); -int rpc_add_wait_queue(struct rpc_wait_queue *, struct rpc_task *); -void rpc_remove_wait_queue(struct rpc_task *); void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, rpc_action action, rpc_action timer); -void rpc_add_timer(struct rpc_task *, rpc_action); void rpc_wake_up_task(struct rpc_task *); void rpc_wake_up(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); diff -u --recursive --new-file --show-c-function linux-2.6.7/include/linux/sunrpc/svc.h linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/svc.h --- linux-2.6.7/include/linux/sunrpc/svc.h 2004-07-02 18:43:43.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/include/linux/sunrpc/svc.h 2004-07-02 22:19:28.000000000 -0400 @@ -87,6 +87,14 @@ static inline u32 svc_getu32(struct iove iov->iov_len -= sizeof(u32); return val; } + +static inline void svc_ungetu32(struct iovec *iov) +{ + u32 *vp = (u32 *)iov->iov_base; + iov->iov_base = (void *)(vp - 1); + iov->iov_len += sizeof(*vp); +} + static inline void svc_putu32(struct iovec *iov, u32 val) { u32 *vp = iov->iov_base + iov->iov_len; @@ -243,6 +251,8 @@ struct svc_program { char * pg_name; /* service name */ char * pg_class; /* class name: services sharing authentication */ struct svc_stat * pg_stats; /* rpc statistics */ + /* Override authentication. 
NULL means use default */ + int (*pg_authenticate)(struct svc_rqst *, u32 *); }; /* diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/auth_gss.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/auth_gss.c --- linux-2.6.7/net/sunrpc/auth_gss/auth_gss.c 2004-07-02 18:43:40.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/auth_gss.c 2004-07-02 22:18:08.000000000 -0400 @@ -397,7 +397,7 @@ retry: spin_unlock(&gss_auth->lock); } gss_release_msg(gss_msg); - dprintk("RPC: %4u gss_upcall for uid %u result %d", task->tk_pid, + dprintk("RPC: %4u gss_upcall for uid %u result %d\n", task->tk_pid, uid, res); return res; out_sleep: diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/gss_generic_token.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_generic_token.c --- linux-2.6.7/net/sunrpc/auth_gss/gss_generic_token.c 2004-07-02 18:43:24.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_generic_token.c 2004-07-02 22:17:56.000000000 -0400 @@ -179,7 +179,7 @@ EXPORT_SYMBOL(g_make_token_header); */ u32 g_verify_token_header(struct xdr_netobj *mech, int *body_size, - unsigned char **buf_in, int tok_type, int toksize) + unsigned char **buf_in, int toksize) { unsigned char *buf = *buf_in; int seqsize; diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_unseal.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_krb5_unseal.c --- linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_unseal.c 2004-07-02 18:43:54.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_krb5_unseal.c 2004-07-02 22:17:56.000000000 -0400 @@ -96,7 +96,7 @@ krb5_read_token(struct krb5_ctx *ctx, dprintk("RPC: krb5_read_token\n"); - if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, toktype, + if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, read_token->len)) goto out; diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_mech.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_mech.c --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_mech.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_mech.c 2004-07-02 22:18:11.000000000 -0400 @@ -0,0 +1,296 @@ +/* + * linux/net/sunrpc/gss_spkm3_mech.c + * + * Copyright (c) 2003 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson + * J. Bruce Fields + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +struct xdr_netobj gss_mech_spkm3_oid = + {7, "\053\006\001\005\005\001\003"}; + +static inline int +get_bytes(char **ptr, const char *end, void *res, int len) +{ + char *p, *q; + p = *ptr; + q = p + len; + if (q > end || q < p) + return -1; + memcpy(res, p, len); + *ptr = q; + return 0; +} + +static inline int +get_netobj(char **ptr, const char *end, struct xdr_netobj *res) +{ + char *p, *q; + p = *ptr; + if (get_bytes(&p, end, &res->len, sizeof(res->len))) + return -1; + q = p + res->len; + if(res->len == 0) + goto out_nocopy; + if (q > end || q < p) + return -1; + if (!(res->data = kmalloc(res->len, GFP_KERNEL))) + return -1; + memcpy(res->data, p, res->len); +out_nocopy: + *ptr = q; + return 0; +} + +static inline int +get_key(char **p, char *end, struct crypto_tfm **res, int *resalg) +{ + struct xdr_netobj key = { + .len = 0, + .data = NULL, + }; + int alg_mode,setkey = 0; + char *alg_name; + + if (get_bytes(p, end, resalg, sizeof(int))) + goto out_err; + if ((get_netobj(p, end, &key))) + goto out_err; + + switch (*resalg) { + case NID_des_cbc: + alg_name = "des"; + alg_mode = CRYPTO_TFM_MODE_CBC; + setkey = 1; + break; + case NID_md5: + if (key.len == 0) { + dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n"); + } + alg_name = "md5"; + alg_mode = 0; + setkey = 0; + break; + case NID_cast5_cbc: + dprintk("RPC: SPKM3 get_key: case cast5_cbc, UNSUPPORTED \n"); + goto out_err; + break; + default: + dprintk("RPC: SPKM3 get_key: unsupported algorithm %d", *resalg); + goto out_err_free_key; + } + if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) + goto out_err_free_key; + if (setkey) { + if (crypto_cipher_setkey(*res, key.data, key.len)) + goto out_err_free_tfm; + } + + if(key.len > 0) + kfree(key.data); + return 0; + +out_err_free_tfm: + crypto_free_tfm(*res); +out_err_free_key: + if(key.len > 0) + kfree(key.data); +out_err: + return -1; +} + +static u32 +gss_import_sec_context_spkm3(struct xdr_netobj *inbuf, + struct gss_ctx *ctx_id) +{ + char *p = inbuf->data; + char *end = inbuf->data + inbuf->len; + struct spkm3_ctx *ctx; + + if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) + goto out_err; + memset(ctx, 0, sizeof(*ctx)); + + if (get_netobj(&p, end, &ctx->ctx_id)) + goto out_err_free_ctx; + + if (get_bytes(&p, end, &ctx->qop, sizeof(ctx->qop))) + goto out_err_free_ctx_id; + + if (get_netobj(&p, end, &ctx->mech_used)) + goto out_err_free_mech; + + if (get_bytes(&p, end, &ctx->ret_flags, sizeof(ctx->ret_flags))) + goto out_err_free_mech; + + if (get_bytes(&p, end, &ctx->req_flags, sizeof(ctx->req_flags))) + goto out_err_free_mech; + + if (get_netobj(&p, end, &ctx->share_key)) + goto out_err_free_s_key; + + if (get_key(&p, end, &ctx->derived_conf_key, &ctx->conf_alg)) { + dprintk("RPC: SPKM3 confidentiality key will be NULL\n"); + } + + if (get_key(&p, end, &ctx->derived_integ_key, 
&ctx->intg_alg)) { + dprintk("RPC: SPKM3 integrity key will be NULL\n"); + } + + if (get_bytes(&p, end, &ctx->owf_alg, sizeof(ctx->owf_alg))) + goto out_err_free_s_key; + + if (get_bytes(&p, end, &ctx->owf_alg, sizeof(ctx->owf_alg))) + goto out_err_free_s_key; + + if (p != end) + goto out_err_free_s_key; + + ctx_id->internal_ctx_id = ctx; + + dprintk("Succesfully imported new spkm context.\n"); + return 0; + +out_err_free_s_key: + kfree(ctx->share_key.data); +out_err_free_mech: + kfree(ctx->mech_used.data); +out_err_free_ctx_id: + kfree(ctx->ctx_id.data); +out_err_free_ctx: + kfree(ctx); +out_err: + return GSS_S_FAILURE; +} + +void +gss_delete_sec_context_spkm3(void *internal_ctx) { + struct spkm3_ctx *sctx = internal_ctx; + + if(sctx->derived_integ_key) + crypto_free_tfm(sctx->derived_integ_key); + if(sctx->derived_conf_key) + crypto_free_tfm(sctx->derived_conf_key); + if(sctx->share_key.data) + kfree(sctx->share_key.data); + if(sctx->mech_used.data) + kfree(sctx->mech_used.data); + kfree(sctx); +} + +u32 +gss_verify_mic_spkm3(struct gss_ctx *ctx, + struct xdr_buf *signbuf, + struct xdr_netobj *checksum, + u32 *qstate) { + u32 maj_stat = 0; + int qop_state = 0; + struct spkm3_ctx *sctx = ctx->internal_ctx_id; + + dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n"); + maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state, + SPKM_MIC_TOK); + + if (!maj_stat && qop_state) + *qstate = qop_state; + + dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); + return maj_stat; +} + +u32 +gss_get_mic_spkm3(struct gss_ctx *ctx, + u32 qop, + struct xdr_buf *message_buffer, + struct xdr_netobj *message_token) { + u32 err = 0; + struct spkm3_ctx *sctx = ctx->internal_ctx_id; + + dprintk("RPC: gss_get_mic_spkm3\n"); + + err = spkm3_make_token(sctx, qop, message_buffer, + message_token, SPKM_MIC_TOK); + return err; +} + +static struct gss_api_ops gss_spkm3_ops = { + .gss_import_sec_context = gss_import_sec_context_spkm3, + .gss_get_mic = gss_get_mic_spkm3, + .gss_verify_mic = gss_verify_mic_spkm3, + .gss_delete_sec_context = gss_delete_sec_context_spkm3, +}; + +static struct pf_desc gss_spkm3_pfs[] = { + {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"}, + {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, +}; + +static struct gss_api_mech gss_spkm3_mech = { + .gm_name = "spkm3", + .gm_owner = THIS_MODULE, + .gm_ops = &gss_spkm3_ops, + .gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs), + .gm_pfs = gss_spkm3_pfs, +}; + +static int __init init_spkm3_module(void) +{ + int status; + + status = gss_mech_register(&gss_spkm3_mech); + if (status) + printk("Failed to register spkm3 gss mechanism!\n"); + return 0; +} + +static void __exit cleanup_spkm3_module(void) +{ + gss_mech_unregister(&gss_spkm3_mech); +} + +MODULE_LICENSE("GPL"); +module_init(init_spkm3_module); +module_exit(cleanup_spkm3_module); diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_seal.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_seal.c --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_seal.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_seal.c 2004-07-02 22:18:11.000000000 -0400 @@ -0,0 +1,132 @@ +/* + * linux/net/sunrpc/gss_spkm3_seal.c + * + * Copyright (c) 2003 The Regents of the University of Michigan. + * All rights reserved. 
+ * + * Andy Adamson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +/* + * spkm3_make_token() + * + * Only SPKM_MIC_TOK with md5 intg-alg is supported + */ + +u32 +spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, + struct xdr_buf * text, struct xdr_netobj * token, + int toktype) +{ + s32 checksum_type; + char tokhdrbuf[25]; + struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf}; + int tmsglen, tokenlen = 0; + unsigned char *ptr; + s32 now; + int ctxelen = 0, ctxzbit = 0; + int md5elen = 0, md5zbit = 0; + + dprintk("RPC: spkm3_make_token\n"); + + now = jiffies; + if (qop_req != 0) + goto out_err; + + if (ctx->ctx_id.len != 16) { + dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", + ctx->ctx_id.len); + goto out_err; + } + + switch (ctx->intg_alg) { + case NID_md5: + checksum_type = CKSUMTYPE_RSA_MD5; + break; + default: + dprintk("RPC: gss_spkm3_seal: ctx->signalg %d not" + " supported\n", ctx->intg_alg); + goto out_err; + } + /* XXX since we don't support WRAP, perhaps we don't care... 
*/ + if (ctx->conf_alg != NID_cast5_cbc) { + dprintk("RPC: gss_spkm3_seal: ctx->sealalg %d not supported\n", + ctx->conf_alg); + goto out_err; + } + + if (toktype == SPKM_MIC_TOK) { + tmsglen = 0; + /* Calculate checksum over the mic-header */ + asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit); + spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data, + ctxelen, ctxzbit); + + if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len, + text, &md5cksum)) + goto out_err; + + asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit); + tokenlen = 10 + ctxelen + 1 + 2 + md5elen + 1; + + /* Create token header using generic routines */ + token->len = g_token_size(&ctx->mech_used, tokenlen + tmsglen); + + ptr = token->data; + g_make_token_header(&ctx->mech_used, tokenlen + tmsglen, &ptr); + + spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit); + } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */ + dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK not supported\n"); + goto out_err; + } + kfree(md5cksum.data); + + /* XXX need to implement sequence numbers, and ctx->expired */ + + return GSS_S_COMPLETE; +out_err: + if (md5cksum.data) + kfree(md5cksum.data); + token->data = 0; + token->len = 0; + return GSS_S_FAILURE; +} diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_token.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_token.c --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_token.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_token.c 2004-07-02 22:18:11.000000000 -0400 @@ -0,0 +1,266 @@ +/* + * linux/net/sunrpc/gss_spkm3_token.c + * + * Copyright (c) 2003 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +/* + * asn1_bitstring_len() + * + * calculate the asn1 bitstring length of the xdr_netobject + */ +void +asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits) +{ + int i, zbit = 0,elen = in->len; + char *ptr; + + ptr = &in->data[in->len -1]; + + /* count trailing 0's */ + for(i = in->len; i > 0; i--) { + if (*ptr == 0) { + ptr--; + elen--; + } else + break; + } + + /* count number of 0 bits in final octet */ + ptr = &in->data[elen - 1]; + for(i = 0; i < 8; i++) { + short mask = 0x01; + + if (!((mask << i) & *ptr)) + zbit++; + else + break; + } + *enclen = elen; + *zerobits = zbit; +} + +/* + * decode_asn1_bitstring() + * + * decode a bitstring into a buffer of the expected length. + * enclen = bit string length + * explen = expected length (define in rfc) + */ +int +decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen) +{ + if (!(out->data = kmalloc(explen,GFP_KERNEL))) + return 0; + out->len = explen; + memset(out->data, 0, explen); + memcpy(out->data, in, enclen); + return 1; +} + +/* + * SPKMInnerContextToken choice SPKM_MIC asn1 token layout + * + * contextid is always 16 bytes plain data. max asn1 bitstring len = 17. + * + * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum) + * + * pos value + * ---------- + * [0] a4 SPKM-MIC tag + * [1] ?? innertoken length (max 44) + * + * + * tok_hdr piece of checksum data starts here + * + * the maximum mic-header len = 9 + 17 = 26 + * mic-header + * ---------- + * [2] 30 SEQUENCE tag + * [3] ?? mic-header length: (max 23) = TokenID + ContextID + * + * TokenID - all fields constant and can be hardcoded + * ------- + * [4] 02 Type 2 + * [5] 02 Length 2 + * [6][7] 01 01 TokenID (SPKM_MIC_TOK) + * + * ContextID - encoded length not constant, calculated + * --------- + * [8] 03 Type 3 + * [9] ?? encoded length + * [10] ?? ctxzbit + * [11] contextid + * + * mic_header piece of checksum data ends here. + * + * int-cksum - encoded length not constant, calculated + * --------- + * [??] 03 Type 3 + * [??] ?? encoded length + * [??] ?? md5zbit + * [??] 
int-cksum (NID_md5 = 16) + * + * maximum SPKM-MIC innercontext token length = + * 10 + encoded contextid_size(17 max) + 2 + encoded + * cksum_size (17 maxfor NID_md5) = 46 + */ + +/* + * spkm3_mic_header() + * + * Prepare the SPKM_MIC_TOK mic-header for check-sum calculation + * elen: 16 byte context id asn1 bitstring encoded length + */ +void +spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ctxdata, int elen, int zbit) +{ + char *hptr = *hdrbuf; + char *top = *hdrbuf; + + *(u8 *)hptr++ = 0x30; + *(u8 *)hptr++ = elen + 7; /* on the wire header length */ + + /* tokenid */ + *(u8 *)hptr++ = 0x02; + *(u8 *)hptr++ = 0x02; + *(u8 *)hptr++ = 0x01; + *(u8 *)hptr++ = 0x01; + + /* coniextid */ + *(u8 *)hptr++ = 0x03; + *(u8 *)hptr++ = elen + 1; /* add 1 to include zbit */ + *(u8 *)hptr++ = zbit; + memcpy(hptr, ctxdata, elen); + hptr += elen; + *hdrlen = hptr - top; +} + +/* + * spkm3_mic_innercontext_token() + * + * *tokp points to the beginning of the SPKM_MIC token described + * in rfc 2025, section 3.2.1: + * + */ +void +spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit) +{ + unsigned char *ict = *tokp; + + *(u8 *)ict++ = 0xa4; + *(u8 *)ict++ = toklen - 2; + memcpy(ict, mic_hdr->data, mic_hdr->len); + ict += mic_hdr->len; + + *(u8 *)ict++ = 0x03; + *(u8 *)ict++ = md5elen + 1; /* add 1 to include zbit */ + *(u8 *)ict++ = md5zbit; + memcpy(ict, md5cksum->data, md5elen); +} + +u32 +spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **cksum) +{ + struct xdr_netobj spkm3_ctx_id = {.len =0, .data = NULL}; + unsigned char *ptr = *tokp; + int ctxelen; + u32 ret = GSS_S_DEFECTIVE_TOKEN; + + /* spkm3 innercontext token preamble */ + if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) { + dprintk("RPC: BAD SPKM ictoken preamble\n"); + goto out; + } + + *mic_hdrlen = ptr[3]; + + /* token type */ + if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) { + dprintk("RPC: BAD asn1 SPKM3 token type\n"); + goto out; + } + + /* only support SPKM_MIC_TOK */ + if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { + dprintk("RPC: ERROR unsupported SPKM3 token \n"); + goto out; + } + + /* contextid */ + if (ptr[8] != 0x03) { + dprintk("RPC: BAD SPKM3 asn1 context-id type\n"); + goto out; + } + + ctxelen = ptr[9]; + if (ctxelen > 17) { /* length includes asn1 zbit octet */ + dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen); + goto out; + } + + /* ignore ptr[10] */ + + if(!decode_asn1_bitstring(&spkm3_ctx_id, &ptr[11], ctxelen - 1, 16)) + goto out; + + /* + * in the current implementation: the optional int-alg is not present + * so the default int-alg (md5) is used the optional snd-seq field is + * also not present + */ + + if (*mic_hdrlen != 6 + ctxelen) { + dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only support default int-alg (should be absent) and do not support snd-seq\n", *mic_hdrlen); + goto out; + } + /* checksum */ + *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */ + + ret = GSS_S_COMPLETE; +out: + if (spkm3_ctx_id.data) + kfree(spkm3_ctx_id.data); + return ret; +} + diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_unseal.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_unseal.c --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_unseal.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/gss_spkm3_unseal.c 2004-07-02 22:18:11.000000000 -0400 @@ -0,0 +1,128 @@ +/* + * 
linux/net/sunrpc/gss_spkm3_unseal.c + * + * Copyright (c) 2003 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +/* + * spkm3_read_token() + * + * only SPKM_MIC_TOK with md5 intg-alg is supported + */ +u32 +spkm3_read_token(struct spkm3_ctx *ctx, + struct xdr_netobj *read_token, /* checksum */ + struct xdr_buf *message_buffer, /* signbuf */ + int *qop_state, int toktype) +{ + s32 code; + struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; + struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + unsigned char *ptr = (unsigned char *)read_token->data; + unsigned char *cksum; + int bodysize, md5elen; + int mic_hdrlen; + u32 ret = GSS_S_DEFECTIVE_TOKEN; + + dprintk("RPC: spkm3_read_token read_token->len %d\n", read_token->len); + + if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used, + &bodysize, &ptr, read_token->len)) + goto out; + + /* decode the token */ + + if (toktype == SPKM_MIC_TOK) { + + if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum))) + goto out; + + if (*cksum++ != 0x03) { + dprintk("RPC: spkm3_read_token BAD checksum type\n"); + goto out; + } + md5elen = *cksum++; + cksum++; /* move past the zbit */ + + if(!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16)) + goto out; + + /* HARD CODED FOR MD5 */ + + /* compute the checksum of the message. 
+ * ptr + 2 = start of header piece of checksum + * mic_hdrlen + 2 = length of header piece of checksum + */ + ret = GSS_S_DEFECTIVE_TOKEN; + code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2, + mic_hdrlen + 2, + message_buffer, &md5cksum); + + if (code) + goto out; + + dprintk("RPC: spkm3_read_token: digest wire_cksum.len %d:\n", + wire_cksum.len); + dprintk(" md5cksum.data\n"); + print_hexl((u32 *) md5cksum.data, 16, 0); + dprintk(" cksum.data:\n"); + print_hexl((u32 *) wire_cksum.data, wire_cksum.len, 0); + + ret = GSS_S_BAD_SIG; + code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len); + if (code) + goto out; + + } else { + dprintk("RPC: BAD or UNSUPPORTED SPKM3 token type: %d\n",toktype); + goto out; + } + + /* XXX: need to add expiration and sequencing */ + ret = GSS_S_COMPLETE; +out: + if (md5cksum.data) + kfree(md5cksum.data); + if (wire_cksum.data) + kfree(wire_cksum.data); + return ret; +} diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/auth_gss/Makefile linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/Makefile --- linux-2.6.7/net/sunrpc/auth_gss/Makefile 2004-07-02 18:43:40.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/auth_gss/Makefile 2004-07-02 22:18:11.000000000 -0400 @@ -12,3 +12,7 @@ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_ rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ gss_krb5_seqnum.o +obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o + +rpcsec_gss_spkm3-objs := gss_spkm3_mech.o gss_spkm3_seal.o gss_spkm3_unseal.o \ + gss_spkm3_token.o diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/clnt.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/clnt.c --- linux-2.6.7/net/sunrpc/clnt.c 2004-07-02 18:43:25.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/clnt.c 2004-07-02 22:20:23.000000000 -0400 @@ -196,7 +196,15 @@ rpc_clone_client(struct rpc_clnt *clnt) memcpy(new, clnt, sizeof(*new)); atomic_set(&new->cl_count, 1); atomic_set(&new->cl_users, 0); - atomic_inc(&new->cl_parent->cl_count); + new->cl_parent = clnt; + atomic_inc(&clnt->cl_count); + /* Duplicate portmapper */ + rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait"); + /* Turn off autobind on clones */ + new->cl_autobind = 0; + new->cl_oneshot = 0; + new->cl_dead = 0; + rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); return new; @@ -351,7 +359,9 @@ int rpc_call_sync(struct rpc_clnt *clnt, rpc_clnt_sigmask(clnt, &oldset); /* Create/initialize a new RPC task */ - rpc_init_task(task, clnt, NULL, flags); + task = rpc_new_task(clnt, NULL, flags); + if (task == NULL) + return -ENOMEM; rpc_call_setup(task, msg, 0); /* Set up the call info struct and execute the task */ @@ -958,8 +968,12 @@ call_header(struct rpc_task *task) static u32 * call_verify(struct rpc_task *task) { - u32 *p = task->tk_rqstp->rq_rcv_buf.head[0].iov_base, n; + struct iovec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; + int len = task->tk_rqstp->rq_rcv_buf.len >> 2; + u32 *p = iov->iov_base, n; + if ((len -= 3) < 0) + goto garbage; p += 1; /* skip XID */ if ((n = ntohl(*p++)) != RPC_REPLY) { @@ -969,9 +983,11 @@ call_verify(struct rpc_task *task) if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { int error = -EACCES; + if (--len < 0) + goto garbage; if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) { printk(KERN_WARNING "call_verify: RPC call rejected: %x\n", n); - } else + } else if (--len < 0) switch ((n = ntohl(*p++))) { case RPC_AUTH_REJECTEDCRED: case 
RPC_AUTH_REJECTEDVERF: @@ -1002,7 +1018,8 @@ call_verify(struct rpc_task *task) default: printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); error = -EIO; - } + } else + goto garbage; dprintk("RPC: %4d call_verify: call rejected %d\n", task->tk_pid, n); rpc_exit(task, error); @@ -1012,6 +1029,9 @@ call_verify(struct rpc_task *task) printk(KERN_WARNING "call_verify: auth check failed\n"); goto garbage; /* bad verifier, retry */ } + len = p - (u32 *)iov->iov_base - 1; + if (len < 0) + goto garbage; switch ((n = ntohl(*p++))) { case RPC_SUCCESS: return p; diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/sched.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/sched.c --- linux-2.6.7/net/sunrpc/sched.c 2004-07-02 18:43:43.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/sched.c 2004-07-02 22:20:29.000000000 -0400 @@ -41,13 +41,7 @@ static mempool_t *rpc_buffer_mempool; static void __rpc_default_timer(struct rpc_task *task); static void rpciod_killall(void); - -/* - * When an asynchronous RPC task is activated within a bottom half - * handler, or while executing another RPC task, it is put on - * schedq, and rpciod is woken up. - */ -static RPC_WAITQ(schedq, "schedq"); +static void rpc_async_schedule(void *); /* * RPC tasks that create another task (e.g. for contacting the portmapper) @@ -68,26 +62,18 @@ static LIST_HEAD(all_tasks); /* * rpciod-related stuff */ -static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle); -static DECLARE_COMPLETION(rpciod_killer); static DECLARE_MUTEX(rpciod_sema); static unsigned int rpciod_users; -static pid_t rpciod_pid; -static int rpc_inhibit; +static struct workqueue_struct *rpciod_workqueue; /* - * Spinlock for wait queues. Access to the latter also has to be - * interrupt-safe in order to allow timers to wake up sleeping tasks. - */ -static spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED; -/* * Spinlock for other critical sections of code. */ static spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED; /* * Disable the timer for a given RPC task. Should be called with - * rpc_queue_lock and bh_disabled in order to avoid races within + * queue->lock and bh_disabled in order to avoid races within * rpc_run_timer(). */ static inline void @@ -105,16 +91,13 @@ __rpc_disable_timer(struct rpc_task *tas * without calling del_timer_sync(). The latter could cause a * deadlock if called while we're holding spinlocks... */ -static void -rpc_run_timer(struct rpc_task *task) +static void rpc_run_timer(struct rpc_task *task) { void (*callback)(struct rpc_task *); - spin_lock_bh(&rpc_queue_lock); callback = task->tk_timeout_fn; task->tk_timeout_fn = NULL; - spin_unlock_bh(&rpc_queue_lock); - if (callback) { + if (callback && RPC_IS_QUEUED(task)) { dprintk("RPC: %4d running timer\n", task->tk_pid); callback(task); } @@ -140,19 +123,8 @@ __rpc_add_timer(struct rpc_task *task, r } /* - * Set up a timer for an already sleeping task. - */ -void rpc_add_timer(struct rpc_task *task, rpc_action timer) -{ - spin_lock_bh(&rpc_queue_lock); - if (!RPC_IS_RUNNING(task)) - __rpc_add_timer(task, timer); - spin_unlock_bh(&rpc_queue_lock); -} - -/* * Delete any timer for the current task. Because we use del_timer_sync(), - * this function should never be called while holding rpc_queue_lock. + * this function should never be called while holding queue->lock. 
*/ static inline void rpc_delete_timer(struct rpc_task *task) @@ -169,16 +141,17 @@ static void __rpc_add_wait_queue_priorit struct list_head *q; struct rpc_task *t; + INIT_LIST_HEAD(&task->u.tk_wait.links); q = &queue->tasks[task->tk_priority]; if (unlikely(task->tk_priority > queue->maxpriority)) q = &queue->tasks[queue->maxpriority]; - list_for_each_entry(t, q, tk_list) { + list_for_each_entry(t, q, u.tk_wait.list) { if (t->tk_cookie == task->tk_cookie) { - list_add_tail(&task->tk_list, &t->tk_links); + list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); return; } } - list_add_tail(&task->tk_list, q); + list_add_tail(&task->u.tk_wait.list, q); } /* @@ -189,37 +162,21 @@ static void __rpc_add_wait_queue_priorit * improve overall performance. * Everyone else gets appended to the queue to ensure proper FIFO behavior. */ -static int __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) +static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) { - if (task->tk_rpcwait == queue) - return 0; + BUG_ON (RPC_IS_QUEUED(task)); - if (task->tk_rpcwait) { - printk(KERN_WARNING "RPC: doubly enqueued task!\n"); - return -EWOULDBLOCK; - } if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue_priority(queue, task); else if (RPC_IS_SWAPPER(task)) - list_add(&task->tk_list, &queue->tasks[0]); + list_add(&task->u.tk_wait.list, &queue->tasks[0]); else - list_add_tail(&task->tk_list, &queue->tasks[0]); - task->tk_rpcwait = queue; + list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); + task->u.tk_wait.rpc_waitq = queue; + rpc_set_queued(task); dprintk("RPC: %4d added to queue %p \"%s\"\n", task->tk_pid, queue, rpc_qname(queue)); - - return 0; -} - -int rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task) -{ - int result; - - spin_lock_bh(&rpc_queue_lock); - result = __rpc_add_wait_queue(q, task); - spin_unlock_bh(&rpc_queue_lock); - return result; } /* @@ -229,12 +186,12 @@ static void __rpc_remove_wait_queue_prio { struct rpc_task *t; - if (!list_empty(&task->tk_links)) { - t = list_entry(task->tk_links.next, struct rpc_task, tk_list); - list_move(&t->tk_list, &task->tk_list); - list_splice_init(&task->tk_links, &t->tk_links); + if (!list_empty(&task->u.tk_wait.links)) { + t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); + list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); + list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); } - list_del(&task->tk_list); + list_del(&task->u.tk_wait.list); } /* @@ -243,31 +200,17 @@ static void __rpc_remove_wait_queue_prio */ static void __rpc_remove_wait_queue(struct rpc_task *task) { - struct rpc_wait_queue *queue = task->tk_rpcwait; - - if (!queue) - return; + struct rpc_wait_queue *queue; + queue = task->u.tk_wait.rpc_waitq; if (RPC_IS_PRIORITY(queue)) __rpc_remove_wait_queue_priority(task); else - list_del(&task->tk_list); - task->tk_rpcwait = NULL; - + list_del(&task->u.tk_wait.list); dprintk("RPC: %4d removed from queue %p \"%s\"\n", task->tk_pid, queue, rpc_qname(queue)); } -void -rpc_remove_wait_queue(struct rpc_task *task) -{ - if (!task->tk_rpcwait) - return; - spin_lock_bh(&rpc_queue_lock); - __rpc_remove_wait_queue(task); - spin_unlock_bh(&rpc_queue_lock); -} - static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) { queue->priority = priority; @@ -290,6 +233,7 @@ static void __rpc_init_priority_wait_que { int i; + spin_lock_init(&queue->lock); for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) 
INIT_LIST_HEAD(&queue->tasks[i]); queue->maxpriority = maxprio; @@ -316,34 +260,27 @@ EXPORT_SYMBOL(rpc_init_wait_queue); * Note: If the task is ASYNC, this must be called with * the spinlock held to protect the wait queue operation. */ -static inline void -rpc_make_runnable(struct rpc_task *task) +static void rpc_make_runnable(struct rpc_task *task) { - if (task->tk_timeout_fn) { - printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n"); + if (rpc_test_and_set_running(task)) return; - } - rpc_set_running(task); + BUG_ON(task->tk_timeout_fn); if (RPC_IS_ASYNC(task)) { - if (RPC_IS_SLEEPING(task)) { - int status; - status = __rpc_add_wait_queue(&schedq, task); - if (status < 0) { - printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); - task->tk_status = status; - return; - } - rpc_clear_sleeping(task); - wake_up(&rpciod_idle); + int status; + + INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); + status = queue_work(task->tk_workqueue, &task->u.tk_work); + if (status < 0) { + printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); + task->tk_status = status; + return; } - } else { - rpc_clear_sleeping(task); - wake_up(&task->tk_wait); - } + } else + wake_up(&task->u.tk_wait.waitq); } /* - * Place a newly initialized task on the schedq. + * Place a newly initialized task on the workqueue. */ static inline void rpc_schedule_run(struct rpc_task *task) @@ -352,33 +289,18 @@ rpc_schedule_run(struct rpc_task *task) if (RPC_IS_ACTIVATED(task)) return; task->tk_active = 1; - rpc_set_sleeping(task); rpc_make_runnable(task); } /* - * For other people who may need to wake the I/O daemon - * but should (for now) know nothing about its innards - */ -void rpciod_wake_up(void) -{ - if(rpciod_pid==0) - printk(KERN_ERR "rpciod: wot no daemon?\n"); - wake_up(&rpciod_idle); -} - -/* * Prepare for sleeping on a wait queue. * By always appending tasks to the list we ensure FIFO behavior. * NB: An RPC task will only receive interrupt-driven events as long * as it's on a wait queue. */ -static void -__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, +static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, rpc_action action, rpc_action timer) { - int status; - dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid, rpc_qname(q), jiffies); @@ -388,49 +310,36 @@ __rpc_sleep_on(struct rpc_wait_queue *q, } /* Mark the task as being activated if so needed */ - if (!RPC_IS_ACTIVATED(task)) { + if (!RPC_IS_ACTIVATED(task)) task->tk_active = 1; - rpc_set_sleeping(task); - } - status = __rpc_add_wait_queue(q, task); - if (status) { - printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); - task->tk_status = status; - } else { - rpc_clear_running(task); - if (task->tk_callback) { - dprintk(KERN_ERR "RPC: %4d overwrites an active callback\n", task->tk_pid); - BUG(); - } - task->tk_callback = action; - __rpc_add_timer(task, timer); - } + __rpc_add_wait_queue(q, task); + + BUG_ON(task->tk_callback != NULL); + task->tk_callback = action; + __rpc_add_timer(task, timer); } -void -rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, +void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, rpc_action action, rpc_action timer) { /* * Protect the queue operations. 
*/ - spin_lock_bh(&rpc_queue_lock); + spin_lock_bh(&q->lock); __rpc_sleep_on(q, task, action, timer); - spin_unlock_bh(&rpc_queue_lock); + spin_unlock_bh(&q->lock); } /** - * __rpc_wake_up_task - wake up a single rpc_task + * __rpc_do_wake_up_task - wake up a single rpc_task * @task: task to be woken up * - * Caller must hold rpc_queue_lock + * Caller must hold queue->lock, and have cleared the task queued flag. */ -static void -__rpc_wake_up_task(struct rpc_task *task) +static void __rpc_do_wake_up_task(struct rpc_task *task) { - dprintk("RPC: %4d __rpc_wake_up_task (now %ld inh %d)\n", - task->tk_pid, jiffies, rpc_inhibit); + dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies); #ifdef RPC_DEBUG if (task->tk_magic != 0xf00baa) { @@ -445,12 +354,9 @@ __rpc_wake_up_task(struct rpc_task *task printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); return; } - if (RPC_IS_RUNNING(task)) - return; __rpc_disable_timer(task); - if (task->tk_rpcwait != &schedq) - __rpc_remove_wait_queue(task); + __rpc_remove_wait_queue(task); rpc_make_runnable(task); @@ -458,6 +364,15 @@ __rpc_wake_up_task(struct rpc_task *task } /* + * Wake up the specified task + */ +static void __rpc_wake_up_task(struct rpc_task *task) +{ + if (rpc_test_and_clear_queued(task)) + __rpc_do_wake_up_task(task); +} + +/* * Default timeout handler if none specified by user */ static void @@ -471,14 +386,15 @@ __rpc_default_timer(struct rpc_task *tas /* * Wake up the specified task */ -void -rpc_wake_up_task(struct rpc_task *task) +void rpc_wake_up_task(struct rpc_task *task) { - if (RPC_IS_RUNNING(task)) - return; - spin_lock_bh(&rpc_queue_lock); - __rpc_wake_up_task(task); - spin_unlock_bh(&rpc_queue_lock); + if (rpc_test_and_clear_queued(task)) { + struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq; + + spin_lock_bh(&queue->lock); + __rpc_do_wake_up_task(task); + spin_unlock_bh(&queue->lock); + } } /* @@ -494,11 +410,11 @@ static struct rpc_task * __rpc_wake_up_n */ q = &queue->tasks[queue->priority]; if (!list_empty(q)) { - task = list_entry(q->next, struct rpc_task, tk_list); + task = list_entry(q->next, struct rpc_task, u.tk_wait.list); if (queue->cookie == task->tk_cookie) { if (--queue->nr) goto out; - list_move_tail(&task->tk_list, q); + list_move_tail(&task->u.tk_wait.list, q); } /* * Check if we need to switch queues. 
@@ -516,7 +432,7 @@ static struct rpc_task * __rpc_wake_up_n else q = q - 1; if (!list_empty(q)) { - task = list_entry(q->next, struct rpc_task, tk_list); + task = list_entry(q->next, struct rpc_task, u.tk_wait.list); goto new_queue; } } while (q != &queue->tasks[queue->priority]); @@ -541,14 +457,14 @@ struct rpc_task * rpc_wake_up_next(struc struct rpc_task *task = NULL; dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); - spin_lock_bh(&rpc_queue_lock); + spin_lock_bh(&queue->lock); if (RPC_IS_PRIORITY(queue)) task = __rpc_wake_up_next_priority(queue); else { task_for_first(task, &queue->tasks[0]) __rpc_wake_up_task(task); } - spin_unlock_bh(&rpc_queue_lock); + spin_unlock_bh(&queue->lock); return task; } @@ -557,25 +473,25 @@ struct rpc_task * rpc_wake_up_next(struc * rpc_wake_up - wake up all rpc_tasks * @queue: rpc_wait_queue on which the tasks are sleeping * - * Grabs rpc_queue_lock + * Grabs queue->lock */ void rpc_wake_up(struct rpc_wait_queue *queue) { struct rpc_task *task; struct list_head *head; - spin_lock_bh(&rpc_queue_lock); + spin_lock_bh(&queue->lock); head = &queue->tasks[queue->maxpriority]; for (;;) { while (!list_empty(head)) { - task = list_entry(head->next, struct rpc_task, tk_list); + task = list_entry(head->next, struct rpc_task, u.tk_wait.list); __rpc_wake_up_task(task); } if (head == &queue->tasks[0]) break; head--; } - spin_unlock_bh(&rpc_queue_lock); + spin_unlock_bh(&queue->lock); } /** @@ -583,18 +499,18 @@ void rpc_wake_up(struct rpc_wait_queue * * @queue: rpc_wait_queue on which the tasks are sleeping * @status: status value to set * - * Grabs rpc_queue_lock + * Grabs queue->lock */ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) { struct list_head *head; struct rpc_task *task; - spin_lock_bh(&rpc_queue_lock); + spin_lock_bh(&queue->lock); head = &queue->tasks[queue->maxpriority]; for (;;) { while (!list_empty(head)) { - task = list_entry(head->next, struct rpc_task, tk_list); + task = list_entry(head->next, struct rpc_task, u.tk_wait.list); task->tk_status = status; __rpc_wake_up_task(task); } @@ -602,7 +518,7 @@ void rpc_wake_up_status(struct rpc_wait_ break; head--; } - spin_unlock_bh(&rpc_queue_lock); + spin_unlock_bh(&queue->lock); } /* @@ -626,22 +542,23 @@ __rpc_atrun(struct rpc_task *task) /* * This is the RPC `scheduler' (or rather, the finite state machine). */ -static int -__rpc_execute(struct rpc_task *task) +static int __rpc_execute(struct rpc_task *task) { int status = 0; dprintk("RPC: %4d rpc_execute flgs %x\n", task->tk_pid, task->tk_flags); - if (!RPC_IS_RUNNING(task)) { - printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n"); - return 0; - } + BUG_ON(RPC_IS_QUEUED(task)); restarted: while (1) { /* + * Garbage collection of pending timers... + */ + rpc_delete_timer(task); + + /* * Execute any pending callback. */ if (RPC_DO_CALLBACK(task)) { @@ -657,7 +574,9 @@ __rpc_execute(struct rpc_task *task) */ save_callback=task->tk_callback; task->tk_callback=NULL; + lock_kernel(); save_callback(task); + unlock_kernel(); } /* @@ -665,43 +584,37 @@ __rpc_execute(struct rpc_task *task) * tk_action may be NULL when the task has been killed * by someone else. */ - if (RPC_IS_RUNNING(task)) { - /* - * Garbage collection of pending timers... 
- */ - rpc_delete_timer(task); + if (!RPC_IS_QUEUED(task)) { if (!task->tk_action) break; + lock_kernel(); task->tk_action(task); - /* micro-optimization to avoid spinlock */ - if (RPC_IS_RUNNING(task)) - continue; + unlock_kernel(); } /* - * Check whether task is sleeping. + * Lockless check for whether task is sleeping or not. */ - spin_lock_bh(&rpc_queue_lock); - if (!RPC_IS_RUNNING(task)) { - rpc_set_sleeping(task); - if (RPC_IS_ASYNC(task)) { - spin_unlock_bh(&rpc_queue_lock); + if (!RPC_IS_QUEUED(task)) + continue; + if (RPC_IS_ASYNC(task)) { + rpc_clear_running(task); + /* Careful! we may have raced... */ + if (RPC_IS_QUEUED(task)) return 0; - } + if (rpc_test_and_set_running(task)) + return 0; + continue; } - spin_unlock_bh(&rpc_queue_lock); - if (!RPC_IS_SLEEPING(task)) - continue; + init_waitqueue_head(&task->u.tk_wait.waitq); + rpc_clear_running(task); /* sync task: sleep here */ dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); - if (current->pid == rpciod_pid) - printk(KERN_ERR "RPC: rpciod waiting on sync task!\n"); - if (!task->tk_client->cl_intr) { - __wait_event(task->tk_wait, !RPC_IS_SLEEPING(task)); + __wait_event(task->u.tk_wait.waitq, RPC_IS_RUNNING(task)); } else { - __wait_event_interruptible(task->tk_wait, !RPC_IS_SLEEPING(task), status); + __wait_event_interruptible(task->u.tk_wait.waitq, RPC_IS_RUNNING(task), status); /* * When a sync task receives a signal, it exits with * -ERESTARTSYS. In order to catch any callbacks that @@ -719,7 +632,9 @@ __rpc_execute(struct rpc_task *task) } if (task->tk_exit) { + lock_kernel(); task->tk_exit(task); + unlock_kernel(); /* If tk_action is non-null, the user wants us to restart */ if (task->tk_action) { if (!RPC_ASSASSINATED(task)) { @@ -738,7 +653,6 @@ __rpc_execute(struct rpc_task *task) /* Release all resources associated with the task */ rpc_release_task(task); - return status; } @@ -754,57 +668,16 @@ __rpc_execute(struct rpc_task *task) int rpc_execute(struct rpc_task *task) { - int status = -EIO; - if (rpc_inhibit) { - printk(KERN_INFO "RPC: execution inhibited!\n"); - goto out_release; - } - - status = -EWOULDBLOCK; - if (task->tk_active) { - printk(KERN_ERR "RPC: active task was run twice!\n"); - goto out_err; - } + BUG_ON(task->tk_active); task->tk_active = 1; rpc_set_running(task); return __rpc_execute(task); - out_release: - rpc_release_task(task); - out_err: - return status; } -/* - * This is our own little scheduler for async RPC tasks. 
- */ -static void -__rpc_schedule(void) +static void rpc_async_schedule(void *arg) { - struct rpc_task *task; - int count = 0; - - dprintk("RPC: rpc_schedule enter\n"); - while (1) { - - task_for_first(task, &schedq.tasks[0]) { - __rpc_remove_wait_queue(task); - spin_unlock_bh(&rpc_queue_lock); - - __rpc_execute(task); - spin_lock_bh(&rpc_queue_lock); - } else { - break; - } - - if (++count >= 200 || need_resched()) { - count = 0; - spin_unlock_bh(&rpc_queue_lock); - schedule(); - spin_lock_bh(&rpc_queue_lock); - } - } - dprintk("RPC: rpc_schedule leave\n"); + __rpc_execute((struct rpc_task *)arg); } /* @@ -862,7 +735,6 @@ void rpc_init_task(struct rpc_task *task task->tk_client = clnt; task->tk_flags = flags; task->tk_exit = callback; - init_waitqueue_head(&task->tk_wait); if (current->uid != current->fsuid || current->gid != current->fsgid) task->tk_flags |= RPC_TASK_SETUID; @@ -873,7 +745,9 @@ void rpc_init_task(struct rpc_task *task task->tk_priority = RPC_PRIORITY_NORMAL; task->tk_cookie = (unsigned long)current; - INIT_LIST_HEAD(&task->tk_links); + + /* Initialize workqueue for async tasks */ + task->tk_workqueue = rpciod_workqueue; /* Add to global list of all tasks */ spin_lock(&rpc_sched_lock); @@ -942,8 +816,7 @@ cleanup: goto out; } -void -rpc_release_task(struct rpc_task *task) +void rpc_release_task(struct rpc_task *task) { dprintk("RPC: %4d release task\n", task->tk_pid); @@ -961,19 +834,9 @@ rpc_release_task(struct rpc_task *task) list_del(&task->tk_task); spin_unlock(&rpc_sched_lock); - /* Protect the execution below. */ - spin_lock_bh(&rpc_queue_lock); - - /* Disable timer to prevent zombie wakeup */ - __rpc_disable_timer(task); - - /* Remove from any wait queue we're still on */ - __rpc_remove_wait_queue(task); - + BUG_ON (rpc_test_and_clear_queued(task)); task->tk_active = 0; - spin_unlock_bh(&rpc_queue_lock); - /* Synchronously delete any running timer */ rpc_delete_timer(task); @@ -1003,10 +866,9 @@ rpc_release_task(struct rpc_task *task) * queue 'childq'. If so returns a pointer to the parent. * Upon failure returns NULL. * - * Caller must hold rpc_queue_lock + * Caller must hold childq.lock */ -static inline struct rpc_task * -rpc_find_parent(struct rpc_task *child) +static inline struct rpc_task *rpc_find_parent(struct rpc_task *child) { struct rpc_task *task, *parent; struct list_head *le; @@ -1019,17 +881,16 @@ rpc_find_parent(struct rpc_task *child) return NULL; } -static void -rpc_child_exit(struct rpc_task *child) +static void rpc_child_exit(struct rpc_task *child) { struct rpc_task *parent; - spin_lock_bh(&rpc_queue_lock); + spin_lock_bh(&childq.lock); if ((parent = rpc_find_parent(child)) != NULL) { parent->tk_status = child->tk_status; __rpc_wake_up_task(parent); } - spin_unlock_bh(&rpc_queue_lock); + spin_unlock_bh(&childq.lock); } /* @@ -1052,22 +913,20 @@ fail: return NULL; } -void -rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func) +void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func) { - spin_lock_bh(&rpc_queue_lock); + spin_lock_bh(&childq.lock); /* N.B. Is it possible for the child to have already finished? */ __rpc_sleep_on(&childq, task, func, NULL); rpc_schedule_run(child); - spin_unlock_bh(&rpc_queue_lock); + spin_unlock_bh(&childq.lock); } /* * Kill all tasks for the given client. * XXX: kill their descendants as well? 
*/ -void -rpc_killall_tasks(struct rpc_clnt *clnt) +void rpc_killall_tasks(struct rpc_clnt *clnt) { struct rpc_task *rovr; struct list_head *le; @@ -1089,93 +948,14 @@ rpc_killall_tasks(struct rpc_clnt *clnt) static DECLARE_MUTEX_LOCKED(rpciod_running); -static inline int -rpciod_task_pending(void) -{ - return !list_empty(&schedq.tasks[0]); -} - - -/* - * This is the rpciod kernel thread - */ -static int -rpciod(void *ptr) -{ - int rounds = 0; - - lock_kernel(); - /* - * Let our maker know we're running ... - */ - rpciod_pid = current->pid; - up(&rpciod_running); - - daemonize("rpciod"); - allow_signal(SIGKILL); - - dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid); - spin_lock_bh(&rpc_queue_lock); - while (rpciod_users) { - DEFINE_WAIT(wait); - if (signalled()) { - spin_unlock_bh(&rpc_queue_lock); - rpciod_killall(); - flush_signals(current); - spin_lock_bh(&rpc_queue_lock); - } - __rpc_schedule(); - if (current->flags & PF_FREEZE) { - spin_unlock_bh(&rpc_queue_lock); - refrigerator(PF_FREEZE); - spin_lock_bh(&rpc_queue_lock); - } - - if (++rounds >= 64) { /* safeguard */ - spin_unlock_bh(&rpc_queue_lock); - schedule(); - rounds = 0; - spin_lock_bh(&rpc_queue_lock); - } - - dprintk("RPC: rpciod back to sleep\n"); - prepare_to_wait(&rpciod_idle, &wait, TASK_INTERRUPTIBLE); - if (!rpciod_task_pending() && !signalled()) { - spin_unlock_bh(&rpc_queue_lock); - schedule(); - rounds = 0; - spin_lock_bh(&rpc_queue_lock); - } - finish_wait(&rpciod_idle, &wait); - dprintk("RPC: switch to rpciod\n"); - } - spin_unlock_bh(&rpc_queue_lock); - - dprintk("RPC: rpciod shutdown commences\n"); - if (!list_empty(&all_tasks)) { - printk(KERN_ERR "rpciod: active tasks at shutdown?!\n"); - rpciod_killall(); - } - - dprintk("RPC: rpciod exiting\n"); - unlock_kernel(); - - rpciod_pid = 0; - complete_and_exit(&rpciod_killer, 0); - return 0; -} - -static void -rpciod_killall(void) +static void rpciod_killall(void) { unsigned long flags; while (!list_empty(&all_tasks)) { clear_thread_flag(TIF_SIGPENDING); rpc_killall_tasks(NULL); - spin_lock_bh(&rpc_queue_lock); - __rpc_schedule(); - spin_unlock_bh(&rpc_queue_lock); + flush_workqueue(rpciod_workqueue); if (!list_empty(&all_tasks)) { dprintk("rpciod_killall: waiting for tasks to exit\n"); yield(); @@ -1193,28 +973,30 @@ rpciod_killall(void) int rpciod_up(void) { + struct workqueue_struct *wq; int error = 0; down(&rpciod_sema); - dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users); + dprintk("rpciod_up: users %d\n", rpciod_users); rpciod_users++; - if (rpciod_pid) + if (rpciod_workqueue) goto out; /* * If there's no pid, we should be the first user. */ if (rpciod_users > 1) - printk(KERN_WARNING "rpciod_up: no pid, %d users??\n", rpciod_users); + printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users); /* * Create the rpciod thread and wait for it to start. 
*/ - error = kernel_thread(rpciod, NULL, 0); - if (error < 0) { - printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n", error); + error = -ENOMEM; + wq = create_workqueue("rpciod"); + if (wq == NULL) { + printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); rpciod_users--; goto out; } - down(&rpciod_running); + rpciod_workqueue = wq; error = 0; out: up(&rpciod_sema); @@ -1225,20 +1007,21 @@ void rpciod_down(void) { down(&rpciod_sema); - dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users); + dprintk("rpciod_down sema %d\n", rpciod_users); if (rpciod_users) { if (--rpciod_users) goto out; } else - printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid); + printk(KERN_WARNING "rpciod_down: no users??\n"); - if (!rpciod_pid) { + if (!rpciod_workqueue) { dprintk("rpciod_down: Nothing to do!\n"); goto out; } + rpciod_killall(); - kill_proc(rpciod_pid, SIGKILL, 1); - wait_for_completion(&rpciod_killer); + destroy_workqueue(rpciod_workqueue); + rpciod_workqueue = NULL; out: up(&rpciod_sema); } @@ -1256,7 +1039,12 @@ void rpc_show_tasks(void) } printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " "-rpcwait -action- --exit--\n"); - alltask_for_each(t, le, &all_tasks) + alltask_for_each(t, le, &all_tasks) { + const char *rpc_waitq = "none"; + + if (RPC_IS_QUEUED(t)) + rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); + printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n", t->tk_pid, (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), @@ -1264,8 +1052,9 @@ void rpc_show_tasks(void) t->tk_client, (t->tk_client ? t->tk_client->cl_prog : 0), t->tk_rqstp, t->tk_timeout, - rpc_qname(t->tk_rpcwait), + rpc_waitq, t->tk_action, t->tk_exit); + } spin_unlock(&rpc_sched_lock); } #endif diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/sunrpc_syms.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/sunrpc_syms.c --- linux-2.6.7/net/sunrpc/sunrpc_syms.c 2004-07-02 18:43:59.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/sunrpc_syms.c 2004-07-02 22:19:28.000000000 -0400 @@ -89,6 +89,7 @@ EXPORT_SYMBOL(svc_makesock); EXPORT_SYMBOL(svc_reserve); EXPORT_SYMBOL(svc_auth_register); EXPORT_SYMBOL(auth_domain_lookup); +EXPORT_SYMBOL(svc_authenticate); /* RPC statistics */ #ifdef CONFIG_PROC_FS diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/svc.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/svc.c --- linux-2.6.7/net/sunrpc/svc.c 2004-07-02 18:44:05.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/svc.c 2004-07-02 22:19:28.000000000 -0400 @@ -263,6 +263,7 @@ svc_process(struct svc_serv *serv, struc u32 *statp; u32 dir, prog, vers, proc, auth_stat, rpc_stat; + int auth_res; rpc_stat = rpc_success; @@ -304,12 +305,17 @@ svc_process(struct svc_serv *serv, struc rqstp->rq_vers = vers = ntohl(svc_getu32(argv)); /* version number */ rqstp->rq_proc = proc = ntohl(svc_getu32(argv)); /* procedure number */ + progp = serv->sv_program; /* * Decode auth data, and add verifier to reply buffer. * We do this before anything else in order to get a decent * auth verifier. 
*/ - switch (svc_authenticate(rqstp, &auth_stat)) { + if (progp->pg_authenticate != NULL) + auth_res = progp->pg_authenticate(rqstp, &auth_stat); + else + auth_res = svc_authenticate(rqstp, &auth_stat); + switch (auth_res) { case SVC_OK: break; case SVC_GARBAGE: @@ -326,7 +332,6 @@ svc_process(struct svc_serv *serv, struc goto sendit; } - progp = serv->sv_program; if (prog != progp->pg_prog) goto err_bad_prog; diff -u --recursive --new-file --show-c-function linux-2.6.7/net/sunrpc/xprt.c linux-2.6.7-43-rpc_queue_lock/net/sunrpc/xprt.c --- linux-2.6.7/net/sunrpc/xprt.c 2004-07-02 18:43:45.000000000 -0400 +++ linux-2.6.7-43-rpc_queue_lock/net/sunrpc/xprt.c 2004-07-02 22:20:23.000000000 -0400 @@ -1099,7 +1099,7 @@ xprt_write_space(struct sock *sk) goto out; spin_lock_bh(&xprt->sock_lock); - if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending) + if (xprt->snd_task) rpc_wake_up_task(xprt->snd_task); spin_unlock_bh(&xprt->sock_lock); out:
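A few reviewer's sketches follow; they are not part of the patch. First, the inner-context token framing that the new gss_spkm3_token.c helpers build (spkm3_mic_header(), spkm3_make_mic_token()) and check (spkm3_verify_mic_token()). This is a minimal user-space model of the byte layout only: the 16-byte context id and MD5 value are dummies, and the buffer handling is simplified.

/* User-space sketch of the SPKM_MIC inner-context token layout.
 * Only the ASN.1 framing is of interest; the payload bytes are dummies.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char ctxid[16] = { 0 };	/* dummy context id */
	unsigned char md5[16] = { 0 };		/* dummy MD5 checksum */
	unsigned char tok[64];
	unsigned char *p = tok;
	int elen = sizeof(ctxid), md5elen = sizeof(md5);

	*p++ = 0xa4;			/* SPKM_MIC_TOK tag */
	*p++ = 0;			/* toklen - 2, patched below */

	*p++ = 0x30;			/* mic-header */
	*p++ = elen + 7;		/* on-the-wire header length */
	*p++ = 0x02; *p++ = 0x02;	/* tok-id: 2-octet integer ... */
	*p++ = 0x01; *p++ = 0x01;	/* ... 0x0101 = SPKM_MIC_TOK */
	*p++ = 0x03; *p++ = elen + 1;	/* context-id bitstring, +1 for zbit */
	*p++ = 0x00;			/* zbit: no unused bits */
	memcpy(p, ctxid, elen); p += elen;

	*p++ = 0x03; *p++ = md5elen + 1;/* int-cksum bitstring, +1 for zbit */
	*p++ = 0x00;
	memcpy(p, md5, md5elen); p += md5elen;

	tok[1] = (p - tok) - 2;		/* as spkm3_make_mic_token() writes it */

	/* the sanity checks spkm3_verify_mic_token() applies */
	if (tok[0] != 0xa4 || tok[2] != 0x30 ||
	    tok[4] != 0x02 || tok[5] != 0x02 ||
	    tok[6] != 0x01 || tok[7] != 0x01 ||
	    tok[8] != 0x03 || tok[3] != 6 + tok[9])
		return 1;

	printf("mic_hdrlen %d, token %d bytes\n", tok[3], (int)(p - tok));
	return 0;
}

With a 16-byte context id and a 16-byte MD5 checksum this comes out at the 46-byte maximum quoted in the gss_spkm3_token.c header comment, and tok[3] satisfies the 6 + ctxelen check in spkm3_verify_mic_token().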
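Second, the bounds checking added to call_verify() in net/sunrpc/clnt.c: the hunk keeps a count of 32-bit words remaining in the receive buffer and decrements it before each read, so a truncated reply is rejected instead of being parsed past its end. A simplified, self-contained model of that discipline (the names and constants below are illustrative stand-ins, not the kernel's):

/* Model of the reply-length guarding in call_verify(): decrement the
 * remaining-word count before every 32-bit read and bail out when it
 * goes negative.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl(), htonl() */

#define RPC_REPLY	 1
#define RPC_MSG_ACCEPTED 0

static int verify_reply(const uint32_t *buf, int buflen_bytes)
{
	int len = buflen_bytes >> 2;	/* 32-bit words remaining */
	const uint32_t *p = buf;

	if ((len -= 3) < 0)		/* need xid, direction, accept stat */
		return -1;
	p++;				/* skip XID */
	if (ntohl(*p++) != RPC_REPLY)
		return -1;
	if (ntohl(*p++) != RPC_MSG_ACCEPTED) {
		if (--len < 0)		/* is the reject reason even there? */
			return -1;
		(void)ntohl(*p++);	/* would decode the reject reason */
		return -1;
	}
	return 0;			/* verifier etc. would follow */
}

int main(void)
{
	uint32_t ok[3]  = { htonl(7), htonl(RPC_REPLY), htonl(RPC_MSG_ACCEPTED) };
	uint32_t bad[2] = { htonl(7), htonl(RPC_REPLY) };

	printf("full reply: %d, truncated reply: %d\n",
	       verify_reply(ok, sizeof(ok)), verify_reply(bad, sizeof(bad)));
	return 0;
}

Only the counting pattern matters here; the real call_verify() goes on to decode the verifier and the accept status.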
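Third, the wake-up discipline that lets the sched.c rewrite drop the global rpc_queue_lock in favour of one lock per rpc_wait_queue. A task now carries a QUEUED bit and a RUNNING bit: only the caller that wins the atomic test-and-clear of QUEUED may take queue->lock and dequeue the task, and test-and-set of RUNNING keeps it from being scheduled twice. A rough user-space model with C11 atomics (structure and names are stand-ins, not the kernel's API):

/* Model of the QUEUED/RUNNING bit protocol behind rpc_wake_up_task()
 * and rpc_make_runnable() in the reworked scheduler.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { TASK_RUNNING = 1 << 0, TASK_QUEUED = 1 << 1 };

struct task {
	atomic_uint flags;
};

static bool test_and_set_flag(struct task *t, unsigned int bit)
{
	return atomic_fetch_or(&t->flags, bit) & bit;
}

static bool test_and_clear_flag(struct task *t, unsigned int bit)
{
	return atomic_fetch_and(&t->flags, ~bit) & bit;
}

static void make_runnable(struct task *t)
{
	if (test_and_set_flag(t, TASK_RUNNING))
		return;			/* already running or being scheduled */
	printf("task scheduled\n");	/* queue_work()/wake_up() would go here */
}

static void wake_up_task(struct task *t)
{
	if (!test_and_clear_flag(t, TASK_QUEUED))
		return;			/* somebody else already dequeued it */
	/* having won the QUEUED bit: take queue->lock, remove the task from
	 * the list, disable its timer, then: */
	make_runnable(t);
}

int main(void)
{
	struct task t = { ATOMIC_VAR_INIT(TASK_QUEUED) };

	wake_up_task(&t);	/* first waker dequeues and schedules */
	wake_up_task(&t);	/* second waker sees QUEUED already clear */
	return 0;
}

Because only the winner of the QUEUED test-and-clear touches the wait queue, rpc_wake_up_task() can take the per-queue lock after the atomic test instead of serializing every waker on one global lock; in the patch the "task scheduled" step is queue_work() on the rpciod workqueue for async tasks or a wake_up() on the per-task wait queue for synchronous ones.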
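Last, the svc_process() change: a service program may now supply its own pg_authenticate() callback, with the generic svc_authenticate() (now exported) as the fallback; presumably the new NFSv4 callback server is the intended consumer. A reduced sketch of that dispatch, with stand-in types and a hypothetical program-private hook:

/* Sketch of the svc_process() dispatch: use the program's own
 * pg_authenticate() when present, otherwise the generic path.
 * svc_rqst/svc_program are reduced stand-ins and
 * example_pg_authenticate() is hypothetical.
 */
#include <stdio.h>

struct svc_rqst { const char *rq_name; };

struct svc_program {
	int (*pg_authenticate)(struct svc_rqst *rqstp, unsigned int *auth_stat);
};

static int svc_authenticate(struct svc_rqst *rqstp, unsigned int *auth_stat)
{
	*auth_stat = 0;
	printf("generic auth for %s\n", rqstp->rq_name);
	return 0;				/* SVC_OK */
}

static int example_pg_authenticate(struct svc_rqst *rqstp, unsigned int *auth_stat)
{
	*auth_stat = 0;
	printf("program-private auth for %s\n", rqstp->rq_name);
	return 0;				/* SVC_OK */
}

static int authenticate_request(struct svc_program *progp, struct svc_rqst *rqstp)
{
	unsigned int auth_stat;

	if (progp->pg_authenticate != NULL)
		return progp->pg_authenticate(rqstp, &auth_stat);
	return svc_authenticate(rqstp, &auth_stat);
}

int main(void)
{
	struct svc_rqst rq = { "request" };
	struct svc_program plain = { NULL };
	struct svc_program private = { example_pg_authenticate };

	authenticate_request(&plain, &rq);
	authenticate_request(&private, &rq);
	return 0;
}

The only visible change for existing services is that progp is now looked up before authentication runs, which the hunk accounts for by dropping the later assignment.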