Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 0b4acc1..a5c019e 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -361,7 +361,6 @@
 {
 	struct nlm_host	*host = req->a_host;
 	struct rpc_clnt	*clnt;
-	int status = -ENOLCK;
 
 	dprintk("lockd: call procedure %d on %s (async)\n",
 			(int)proc, host->h_name);
@@ -373,12 +372,10 @@
 	msg->rpc_proc = &clnt->cl_procinfo[proc];
 
         /* bootstrap and kick off the async RPC call */
-        status = rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req);
-	if (status == 0)
-		return 0;
+        return rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req);
 out_err:
-	nlm_release_call(req);
-	return status;
+	tk_ops->rpc_release(req);
+	return -ENOLCK;
 }
 
 int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index c7db0a5..cf51f84 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -593,9 +593,7 @@
 
 	/* Call the client */
 	kref_get(&block->b_count);
-	if (nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
-						&nlmsvc_grant_ops) < 0)
-		nlmsvc_release_block(block);
+	nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops);
 }
 
 /*
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ae9f36e..2190e6c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -394,7 +394,8 @@
 static int nfs_create_rpc_client(struct nfs_client *clp, int proto,
 						unsigned int timeo,
 						unsigned int retrans,
-						rpc_authflavor_t flavor)
+						rpc_authflavor_t flavor,
+						int flags)
 {
 	struct rpc_timeout	timeparms;
 	struct rpc_clnt		*clnt = NULL;
@@ -407,6 +408,7 @@
 		.program	= &nfs_program,
 		.version	= clp->rpc_ops->version,
 		.authflavor	= flavor,
+		.flags		= flags,
 	};
 
 	if (!IS_ERR(clp->cl_rpcclient))
@@ -548,7 +550,7 @@
 	 * - RFC 2623, sec 2.3.2
 	 */
 	error = nfs_create_rpc_client(clp, proto, data->timeo, data->retrans,
-			RPC_AUTH_UNIX);
+					RPC_AUTH_UNIX, 0);
 	if (error < 0)
 		goto error;
 	nfs_mark_client_ready(clp, NFS_CS_READY);
@@ -868,7 +870,8 @@
 	/* Check NFS protocol revision and initialize RPC op vector */
 	clp->rpc_ops = &nfs_v4_clientops;
 
-	error = nfs_create_rpc_client(clp, proto, timeo, retrans, authflavour);
+	error = nfs_create_rpc_client(clp, proto, timeo, retrans, authflavour,
+					RPC_CLNT_CREATE_DISCRTRY);
 	if (error < 0)
 		goto error;
 	memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
@@ -1030,7 +1033,7 @@
  * Create an NFS4 referral server record
  */
 struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
-					       struct nfs_fh *fh)
+					       struct nfs_fh *mntfh)
 {
 	struct nfs_client *parent_client;
 	struct nfs_server *server, *parent_server;
@@ -1069,8 +1072,13 @@
 	BUG_ON(!server->nfs_client->rpc_ops);
 	BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
 
+	/* Probe the root fh to retrieve its FSID and filehandle */
+	error = nfs4_path_walk(server, mntfh, data->mnt_path);
+	if (error < 0)
+		goto error;
+
 	/* probe the filesystem info for this server filesystem */
-	error = nfs_probe_fsinfo(server, fh, &fattr);
+	error = nfs_probe_fsinfo(server, mntfh, &fattr);
 	if (error < 0)
 		goto error;
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index f03a770..92d8ec8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -637,7 +637,7 @@
  * In the case it has, we assume that the dentries are untrustworthy
  * and may need to be looked up again.
  */
-static inline int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
+static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
 {
 	if (IS_ROOT(dentry))
 		return 1;
@@ -652,6 +652,12 @@
 	dentry->d_fsdata = (void *)verf;
 }
 
+static void nfs_refresh_verifier(struct dentry * dentry, unsigned long verf)
+{
+	if (time_after(verf, (unsigned long)dentry->d_fsdata))
+		nfs_set_verifier(dentry, verf);
+}
+
 /*
  * Whenever an NFS operation succeeds, we know that the dentry
  * is valid, so we update the revalidation timestamp.
@@ -785,7 +791,7 @@
 		goto out_bad;
 
 	nfs_renew_times(dentry);
-	nfs_set_verifier(dentry, verifier);
+	nfs_refresh_verifier(dentry, verifier);
  out_valid:
 	unlock_kernel();
 	dput(parent);
@@ -1085,7 +1091,7 @@
 	verifier = nfs_save_change_attribute(dir);
 	ret = nfs4_open_revalidate(dir, dentry, openflags, nd);
 	if (!ret)
-		nfs_set_verifier(dentry, verifier);
+		nfs_refresh_verifier(dentry, verifier);
 	unlock_kernel();
 out:
 	dput(parent);
@@ -1123,8 +1129,21 @@
 	}
 	name.hash = full_name_hash(name.name, name.len);
 	dentry = d_lookup(parent, &name);
-	if (dentry != NULL)
-		return dentry;
+	if (dentry != NULL) {
+		/* Is this a positive dentry that matches the readdir info? */
+		if (dentry->d_inode != NULL &&
+				(NFS_FILEID(dentry->d_inode) == entry->ino ||
+				d_mountpoint(dentry))) {
+			if (!desc->plus || entry->fh->size == 0)
+				return dentry;
+			if (nfs_compare_fh(NFS_FH(dentry->d_inode),
+						entry->fh) == 0)
+				goto out_renew;
+		}
+		/* No, so d_drop to allow one to be created */
+		d_drop(dentry);
+		dput(dentry);
+	}
 	if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
 		return NULL;
 	/* Note: caller is already holding the dir->i_mutex! */
@@ -1149,6 +1168,10 @@
 	nfs_renew_times(dentry);
 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
 	return dentry;
+out_renew:
+	nfs_renew_times(dentry);
+	nfs_refresh_verifier(dentry, nfs_save_change_attribute(dir));
+	return dentry;
 }
 
 /*
@@ -1443,6 +1466,8 @@
 	if (atomic_read(&dentry->d_count) > 1) {
 		spin_unlock(&dentry->d_lock);
 		spin_unlock(&dcache_lock);
+		/* Start asynchronous writeout of the inode */
+		write_inode_now(dentry->d_inode, 0);
 		error = nfs_sillyrename(dir, dentry);
 		unlock_kernel();
 		return error;
@@ -1684,7 +1709,7 @@
 	if (!error) {
 		d_move(old_dentry, new_dentry);
 		nfs_renew_times(new_dentry);
-		nfs_set_verifier(new_dentry, nfs_save_change_attribute(new_dir));
+		nfs_refresh_verifier(new_dentry, nfs_save_change_attribute(new_dir));
 	}
 
 	/* new dentry created? */
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index bd21d7f..b1c98ea 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -309,7 +309,8 @@
 
 		rpc_execute(&data->task);
 
-		dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
+		dprintk("NFS: %5u initiated direct read call "
+			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
 				data->task.tk_pid,
 				inode->i_sb->s_id,
 				(long long)NFS_FILEID(inode),
@@ -639,7 +640,8 @@
 
 		rpc_execute(&data->task);
 
-		dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
+		dprintk("NFS: %5u initiated direct write call "
+			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
 				data->task.tk_pid,
 				inode->i_sb->s_id,
 				(long long)NFS_FILEID(inode),
@@ -797,7 +799,7 @@
 	const char __user *buf = iov[0].iov_base;
 	size_t count = iov[0].iov_len;
 
-	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
+	dprintk("nfs: direct write(%s/%s, %lu@%Ld)\n",
 		file->f_path.dentry->d_parent->d_name.name,
 		file->f_path.dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 8391bd7..6ef268f 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -135,17 +135,15 @@
 	struct nfs_fh lastfh;
 	struct qstr name;
 	int ret;
-	//int referral_count = 0;
 
 	dprintk("--> nfs4_path_walk(,,%s)\n", path);
 
 	fsinfo.fattr = &fattr;
 	nfs_fattr_init(&fattr);
 
-	if (*path++ != '/') {
-		dprintk("nfs4_get_root: Path does not begin with a slash\n");
-		return -EINVAL;
-	}
+	/* Eat leading slashes */
+	while (*path == '/')
+		path++;
 
 	/* Start by getting the root filehandle from the server */
 	ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
@@ -160,6 +158,7 @@
 		return -ENOTDIR;
 	}
 
+	/* FIXME: It is quite valid for the server to return a referral here */
 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
 		printk(KERN_ERR "nfs4_get_root:"
 		       " getroot obtained referral\n");
@@ -187,6 +186,7 @@
 		goto eat_dot_dir;
 	}
 
+	/* FIXME: Why shouldn't the user be able to use ".." in the path? */
 	if (path[0] == '.' && path[1] == '.' && (path[2] == '/' || !path[2])
 	    ) {
 		printk(KERN_ERR "nfs4_get_root:"
@@ -212,6 +212,7 @@
 		return -ENOTDIR;
 	}
 
+	/* FIXME: Referrals are quite valid here too */
 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
 		printk(KERN_ERR "nfs4_get_root:"
 		       " lookupfh obtained referral\n");
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d834982..af53c02 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -65,13 +65,18 @@
 
 int nfs_write_inode(struct inode *inode, int sync)
 {
-	int flags = sync ? FLUSH_SYNC : 0;
 	int ret;
 
-	ret = nfs_commit_inode(inode, flags);
-	if (ret < 0)
-		return ret;
-	return 0;
+	if (sync) {
+		ret = filemap_fdatawait(inode->i_mapping);
+		if (ret == 0)
+			ret = nfs_commit_inode(inode, FLUSH_SYNC);
+	} else
+		ret = nfs_commit_inode(inode, 0);
+	if (ret >= 0)
+		return 0;
+	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+	return ret;
 }
 
 void nfs_clear_inode(struct inode *inode)
@@ -235,6 +240,7 @@
 
 	if (inode->i_state & I_NEW) {
 		struct nfs_inode *nfsi = NFS_I(inode);
+		unsigned long now = jiffies;
 
 		/* We set i_ino for the few things that still rely on it,
 		 * such as stat(2) */
@@ -271,7 +277,8 @@
 			init_special_inode(inode, inode->i_mode, fattr->rdev);
 
 		nfsi->read_cache_jiffies = fattr->time_start;
-		nfsi->last_updated = jiffies;
+		nfsi->last_updated = now;
+		nfsi->cache_change_attribute = now;
 		inode->i_atime = fattr->atime;
 		inode->i_mtime = fattr->mtime;
 		inode->i_ctime = fattr->ctime;
@@ -290,7 +297,7 @@
 			inode->i_blocks = fattr->du.nfs2.blocks;
 		}
 		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
-		nfsi->attrtimeo_timestamp = jiffies;
+		nfsi->attrtimeo_timestamp = now;
 		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
 		nfsi->access_cache = RB_ROOT;
 
@@ -783,20 +790,21 @@
 static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	unsigned long now = jiffies;
 
 	/* If we have atomic WCC data, we may update some attributes */
 	if ((fattr->valid & NFS_ATTR_WCC) != 0) {
 		if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
 			memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
-			nfsi->cache_change_attribute = jiffies;
+			nfsi->cache_change_attribute = now;
 		}
 		if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
 			memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
-			nfsi->cache_change_attribute = jiffies;
+			nfsi->cache_change_attribute = now;
 		}
 		if (inode->i_size == fattr->pre_size && nfsi->npages == 0) {
 			inode->i_size = fattr->size;
-			nfsi->cache_change_attribute = jiffies;
+			nfsi->cache_change_attribute = now;
 		}
 	}
 }
@@ -934,6 +942,7 @@
 	struct nfs_inode *nfsi = NFS_I(inode);
 	loff_t cur_isize, new_isize;
 	unsigned int	invalid = 0;
+	unsigned long now = jiffies;
 	int data_stable;
 
 	dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
@@ -959,7 +968,11 @@
 	 * Update the read time so we don't revalidate too often.
 	 */
 	nfsi->read_cache_jiffies = fattr->time_start;
-	nfsi->last_updated = jiffies;
+	nfsi->last_updated = now;
+
+	/* Fix a wraparound issue with nfsi->cache_change_attribute */
+	if (time_before(now, nfsi->cache_change_attribute))
+		nfsi->cache_change_attribute = now - 600*HZ;
 
 	/* Are we racing with known updates of the metadata on the server? */
 	data_stable = nfs_verify_change_attribute(inode, fattr->time_start);
@@ -985,7 +998,7 @@
 			inode->i_size = new_isize;
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
 		}
-		nfsi->cache_change_attribute = jiffies;
+		nfsi->cache_change_attribute = now;
 		dprintk("NFS: isize change on server for file %s/%ld\n",
 				inode->i_sb->s_id, inode->i_ino);
 	}
@@ -996,14 +1009,14 @@
 		dprintk("NFS: mtime change on server for file %s/%ld\n",
 				inode->i_sb->s_id, inode->i_ino);
 		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-		nfsi->cache_change_attribute = jiffies;
+		nfsi->cache_change_attribute = now;
 	}
 
 	/* If ctime has changed we should definitely clear access+acl caches */
 	if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
 		invalid |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
 		memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
-		nfsi->cache_change_attribute = jiffies;
+		nfsi->cache_change_attribute = now;
 	}
 	memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
 
@@ -1032,18 +1045,18 @@
 				inode->i_sb->s_id, inode->i_ino);
 		nfsi->change_attr = fattr->change_attr;
 		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
-		nfsi->cache_change_attribute = jiffies;
+		nfsi->cache_change_attribute = now;
 	}
 
 	/* Update attrtimeo value if we're out of the unstable period */
 	if (invalid & NFS_INO_INVALID_ATTR) {
 		nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
 		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
-		nfsi->attrtimeo_timestamp = jiffies;
-	} else if (time_after(jiffies, nfsi->attrtimeo_timestamp+nfsi->attrtimeo)) {
+		nfsi->attrtimeo_timestamp = now;
+	} else if (time_after(now, nfsi->attrtimeo_timestamp+nfsi->attrtimeo)) {
 		if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
 			nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
-		nfsi->attrtimeo_timestamp = jiffies;
+		nfsi->attrtimeo_timestamp = now;
 	}
 	/* Don't invalidate the data if we were to blame */
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
@@ -1122,7 +1135,6 @@
 		return NULL;
 	nfsi->flags = 0UL;
 	nfsi->cache_validity = 0UL;
-	nfsi->cache_change_attribute = jiffies;
 #ifdef CONFIG_NFS_V3_ACL
 	nfsi->acl_access = ERR_PTR(-EAGAIN);
 	nfsi->acl_default = ERR_PTR(-EAGAIN);
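
The cache_change_attribute fix above ("Fix a wraparound issue") relies on the kernel's wraparound-safe jiffies comparisons. A minimal sketch of that idiom follows, simplified from <linux/jiffies.h> (the real macros also typecheck their arguments as unsigned long); sanitize_change_attribute is a hypothetical helper used only to illustrate the hunk, not a function in this patch:

/* Wraparound-safe: the unsigned difference is cast to signed long, */
/* so a timestamp slightly "in the future" still compares sanely    */
/* after jiffies wraps.                                             */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)

static inline unsigned long
sanitize_change_attribute(unsigned long now, unsigned long cca)
{
	/* If the cached timestamp claims to lie in the future (e.g.
	 * jiffies wrapped since it was stored), pull it safely into
	 * the past -- the inode.c hunk above uses now - 600*HZ. */
	if (time_before(now, cca))
		return now - 600 * HZ;
	return cca;
}
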
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a28f6ce..6610f2b 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -107,10 +107,6 @@
 /* nfs4proc.c */
 #ifdef CONFIG_NFS_V4
 extern struct rpc_procinfo nfs4_procedures[];
-
-extern int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry,
-				  struct nfs4_fs_locations *fs_locations,
-				  struct page *page);
 #endif
 
 /* dir.c */
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index acd8fe9..7d0371e 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -253,29 +253,6 @@
 	return status;
 }
 
-static int nfs3_proc_read(struct nfs_read_data *rdata)
-{
-	int			flags = rdata->flags;
-	struct inode *		inode = rdata->inode;
-	struct nfs_fattr *	fattr = rdata->res.fattr;
-	struct rpc_message	msg = {
-		.rpc_proc	= &nfs3_procedures[NFS3PROC_READ],
-		.rpc_argp	= &rdata->args,
-		.rpc_resp	= &rdata->res,
-		.rpc_cred	= rdata->cred,
-	};
-	int			status;
-
-	dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
-			(long long) rdata->args.offset);
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
-	if (status >= 0)
-		nfs_refresh_inode(inode, fattr);
-	dprintk("NFS reply read: %d\n", status);
-	return status;
-}
-
 /*
  * Create a regular file.
  * For now, we don't implement O_EXCL.
@@ -855,7 +832,6 @@
 	.lookup		= nfs3_proc_lookup,
 	.access		= nfs3_proc_access,
 	.readlink	= nfs3_proc_readlink,
-	.read		= nfs3_proc_read,
 	.create		= nfs3_proc_create,
 	.remove		= nfs3_proc_remove,
 	.unlink_setup	= nfs3_proc_unlink_setup,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index e234176..cf3a17e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -169,7 +169,7 @@
 extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
 extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
-extern int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry,
+extern int nfs4_proc_fs_locations(struct inode *dir, struct qstr *name,
 		struct nfs4_fs_locations *fs_locations, struct page *page);
 
 extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index b872779..dd5fef2 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -16,6 +16,7 @@
 #include <linux/vfs.h>
 #include <linux/inet.h>
 #include "internal.h"
+#include "nfs4_fs.h"
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
@@ -130,7 +131,6 @@
 		.authflavor = NFS_SB(mnt_parent->mnt_sb)->client->cl_auth->au_flavor,
 	};
 	char *page = NULL, *page2 = NULL;
-	char *devname;
 	int loc, s, error;
 
 	if (locations == NULL || locations->nlocations <= 0)
@@ -154,12 +154,6 @@
 		goto out;
 	}
 
-	devname = nfs_devname(mnt_parent, dentry, page, PAGE_SIZE);
-	if (IS_ERR(devname)) {
-		mnt = (struct vfsmount *)devname;
-		goto out;
-	}
-
 	loc = 0;
 	while (loc < locations->nlocations && IS_ERR(mnt)) {
 		const struct nfs4_fs_location *location = &locations->locations[loc];
@@ -194,7 +188,11 @@
 			addr.sin_port = htons(NFS_PORT);
 			mountdata.addr = &addr;
 
-			mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, devname, &mountdata);
+			snprintf(page, PAGE_SIZE, "%s:%s",
+					mountdata.hostname,
+					mountdata.mnt_path);
+
+			mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, page, &mountdata);
 			if (!IS_ERR(mnt)) {
 				break;
 			}
@@ -242,7 +240,7 @@
 	dprintk("%s: getting locations for %s/%s\n",
 		__FUNCTION__, parent->d_name.name, dentry->d_name.name);
 
-	err = nfs4_proc_fs_locations(parent->d_inode, dentry, fs_locations, page);
+	err = nfs4_proc_fs_locations(parent->d_inode, &dentry->d_name, fs_locations, page);
 	dput(parent);
 	if (err != 0 ||
 	    fs_locations->nlocations <= 0 ||
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1daee65..f52cf5c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1140,7 +1140,6 @@
 			break;
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_EXPIRED:
-			nfs4_schedule_state_recovery(server->nfs_client);
 			break;
 		default:
 			if (nfs4_async_handle_error(task, server) == -EAGAIN) {
@@ -1424,7 +1423,6 @@
 	int status = -ENOMEM;
 	struct page *page = NULL;
 	struct nfs4_fs_locations *locations = NULL;
-	struct dentry dentry = {};
 
 	page = alloc_page(GFP_KERNEL);
 	if (page == NULL)
@@ -1433,9 +1431,7 @@
 	if (locations == NULL)
 		goto out;
 
-	dentry.d_name.name = name->name;
-	dentry.d_name.len = name->len;
-	status = nfs4_proc_fs_locations(dir, &dentry, locations, page);
+	status = nfs4_proc_fs_locations(dir, name, locations, page);
 	if (status != 0)
 		goto out;
 	/* Make sure server returned a different fsid for the referral */
@@ -1737,44 +1733,6 @@
 	return err;
 }
 
-static int _nfs4_proc_read(struct nfs_read_data *rdata)
-{
-	int flags = rdata->flags;
-	struct inode *inode = rdata->inode;
-	struct nfs_fattr *fattr = rdata->res.fattr;
-	struct nfs_server *server = NFS_SERVER(inode);
-	struct rpc_message msg = {
-		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_READ],
-		.rpc_argp	= &rdata->args,
-		.rpc_resp	= &rdata->res,
-		.rpc_cred	= rdata->cred,
-	};
-	unsigned long timestamp = jiffies;
-	int status;
-
-	dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
-			(long long) rdata->args.offset);
-
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(server->client, &msg, flags);
-	if (!status)
-		renew_lease(server, timestamp);
-	dprintk("NFS reply read: %d\n", status);
-	return status;
-}
-
-static int nfs4_proc_read(struct nfs_read_data *rdata)
-{
-	struct nfs4_exception exception = { };
-	int err;
-	do {
-		err = nfs4_handle_exception(NFS_SERVER(rdata->inode),
-				_nfs4_proc_read(rdata),
-				&exception);
-	} while (exception.retry);
-	return err;
-}
-
 /*
  * Got race?
  * We will need to arrange for the VFS layer to provide an atomic open.
@@ -2753,11 +2711,15 @@
 
 	might_sleep();
 
+	rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
+
 	rpc_clnt_sigmask(clnt, &oldset);
 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
 			nfs4_wait_bit_interruptible,
 			TASK_INTERRUPTIBLE);
 	rpc_clnt_sigunmask(clnt, &oldset);
+
+	rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
 	return res;
 }
 
@@ -2996,7 +2958,6 @@
 		switch (err) {
 			case -NFS4ERR_STALE_STATEID:
 			case -NFS4ERR_EXPIRED:
-				nfs4_schedule_state_recovery(server->nfs_client);
 			case 0:
 				return 0;
 		}
@@ -3150,12 +3111,10 @@
 			break;
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_EXPIRED:
-			nfs4_schedule_state_recovery(calldata->server->nfs_client);
 			break;
 		default:
-			if (nfs4_async_handle_error(task, calldata->server) == -EAGAIN) {
+			if (nfs4_async_handle_error(task, calldata->server) == -EAGAIN)
 				rpc_restart_call(task);
-			}
 	}
 }
 
@@ -3585,7 +3544,7 @@
 	return len;
 }
 
-int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry,
+int nfs4_proc_fs_locations(struct inode *dir, struct qstr *name,
 		struct nfs4_fs_locations *fs_locations, struct page *page)
 {
 	struct nfs_server *server = NFS_SERVER(dir);
@@ -3595,7 +3554,7 @@
 	};
 	struct nfs4_fs_locations_arg args = {
 		.dir_fh = NFS_FH(dir),
-		.name = &dentry->d_name,
+		.name = name,
 		.page = page,
 		.bitmask = bitmask,
 	};
@@ -3607,7 +3566,7 @@
 	int status;
 
 	dprintk("%s: start\n", __FUNCTION__);
-	fs_locations->fattr.valid = 0;
+	nfs_fattr_init(&fs_locations->fattr);
 	fs_locations->server = server;
 	fs_locations->nlocations = 0;
 	status = rpc_call_sync(server->client, &msg, 0);
@@ -3646,7 +3605,6 @@
 	.lookup		= nfs4_proc_lookup,
 	.access		= nfs4_proc_access,
 	.readlink	= nfs4_proc_readlink,
-	.read		= nfs4_proc_read,
 	.create		= nfs4_proc_create,
 	.remove		= nfs4_proc_remove,
 	.unlink_setup	= nfs4_proc_unlink_setup,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 0cf3fa3..f02d522 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -387,8 +387,10 @@
 				decode_putfh_maxsz + \
 				op_decode_hdr_maxsz + 12)
 #define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
 				encode_getattr_maxsz)
 #define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
 				decode_getattr_maxsz)
 #define NFS4_enc_delegreturn_sz	(compound_encode_hdr_maxsz + \
 				encode_putfh_maxsz + \
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 560536a..1dcf56d 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -186,35 +186,6 @@
 	return status;
 }
 
-static int nfs_proc_read(struct nfs_read_data *rdata)
-{
-	int			flags = rdata->flags;
-	struct inode *		inode = rdata->inode;
-	struct nfs_fattr *	fattr = rdata->res.fattr;
-	struct rpc_message	msg = {
-		.rpc_proc	= &nfs_procedures[NFSPROC_READ],
-		.rpc_argp	= &rdata->args,
-		.rpc_resp	= &rdata->res,
-		.rpc_cred	= rdata->cred,
-	};
-	int			status;
-
-	dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
-			(long long) rdata->args.offset);
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
-	if (status >= 0) {
-		nfs_refresh_inode(inode, fattr);
-		/* Emulate the eof flag, which isn't normally needed in NFSv2
-		 * as it is guaranteed to always return the file attributes
-		 */
-		if (rdata->args.offset + rdata->args.count >= fattr->size)
-			rdata->res.eof = 1;
-	}
-	dprintk("NFS reply read: %d\n", status);
-	return status;
-}
-
 static int
 nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
 		int flags, struct nameidata *nd)
@@ -666,7 +637,6 @@
 	.lookup		= nfs_proc_lookup,
 	.access		= NULL,		       /* access */
 	.readlink	= nfs_proc_readlink,
-	.read		= nfs_proc_read,
 	.create		= nfs_proc_create,
 	.remove		= nfs_proc_remove,
 	.unlink_setup	= nfs_proc_unlink_setup,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a9c2652..6ab4d5a 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -5,14 +5,6 @@
  *
  * Partial copy of Linus' read cache modifications to fs/nfs/file.c
  * modified for async RPC by okir@monad.swb.de
- *
- * We do an ugly hack here in order to return proper error codes to the
- * user program when a read request failed: since generic_file_read
- * only checks the return value of inode->i_op->readpage() which is always 0
- * for async RPC, we set the error bit of the page to 1 when an error occurs,
- * and make nfs_readpage transmit requests synchronously when encountering this.
- * This is only a small problem, though, since we now retry all operations
- * within the RPC code when root squashing is suspected.
  */
 
 #include <linux/time.h>
@@ -122,93 +114,6 @@
 	}
 }
 
-/*
- * Read a page synchronously.
- */
-static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
-		struct page *page)
-{
-	unsigned int	rsize = NFS_SERVER(inode)->rsize;
-	unsigned int	count = PAGE_CACHE_SIZE;
-	int result = -ENOMEM;
-	struct nfs_read_data *rdata;
-
-	rdata = nfs_readdata_alloc(count);
-	if (!rdata)
-		goto out_unlock;
-
-	memset(rdata, 0, sizeof(*rdata));
-	rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
-	rdata->cred = ctx->cred;
-	rdata->inode = inode;
-	INIT_LIST_HEAD(&rdata->pages);
-	rdata->args.fh = NFS_FH(inode);
-	rdata->args.context = ctx;
-	rdata->args.pages = &page;
-	rdata->args.pgbase = 0UL;
-	rdata->args.count = rsize;
-	rdata->res.fattr = &rdata->fattr;
-
-	dprintk("NFS: nfs_readpage_sync(%p)\n", page);
-
-	/*
-	 * This works now because the socket layer never tries to DMA
-	 * into this buffer directly.
-	 */
-	do {
-		if (count < rsize)
-			rdata->args.count = count;
-		rdata->res.count = rdata->args.count;
-		rdata->args.offset = page_offset(page) + rdata->args.pgbase;
-
-		dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
-			NFS_SERVER(inode)->nfs_client->cl_hostname,
-			inode->i_sb->s_id,
-			(long long)NFS_FILEID(inode),
-			(unsigned long long)rdata->args.pgbase,
-			rdata->args.count);
-
-		lock_kernel();
-		result = NFS_PROTO(inode)->read(rdata);
-		unlock_kernel();
-
-		/*
-		 * Even if we had a partial success we can't mark the page
-		 * cache valid.
-		 */
-		if (result < 0) {
-			if (result == -EISDIR)
-				result = -EINVAL;
-			goto io_error;
-		}
-		count -= result;
-		rdata->args.pgbase += result;
-		nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, result);
-
-		/* Note: result == 0 should only happen if we're caching
-		 * a write that extends the file and punches a hole.
-		 */
-		if (rdata->res.eof != 0 || result == 0)
-			break;
-	} while (count);
-	spin_lock(&inode->i_lock);
-	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
-	spin_unlock(&inode->i_lock);
-
-	if (rdata->res.eof || rdata->res.count == rdata->args.count) {
-		SetPageUptodate(page);
-		if (rdata->res.eof && count != 0)
-			memclear_highpage_flush(page, rdata->args.pgbase, count);
-	}
-	result = 0;
-
-io_error:
-	nfs_readdata_free(rdata);
-out_unlock:
-	unlock_page(page);
-	return result;
-}
-
 static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		struct page *page)
 {
@@ -278,7 +183,7 @@
 
 	data->task.tk_cookie = (unsigned long)inode;
 
-	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
 			data->task.tk_pid,
 			inode->i_sb->s_id,
 			(long long)NFS_FILEID(inode),
@@ -452,7 +357,7 @@
 {
 	int status;
 
-	dprintk("%s: %4d, (status %d)\n", __FUNCTION__, task->tk_pid,
+	dprintk("NFS: %s: %5u, (status %d)\n", __FUNCTION__, task->tk_pid,
 			task->tk_status);
 
 	status = NFS_PROTO(data->inode)->read_done(task, data);
@@ -621,15 +526,9 @@
 	} else
 		ctx = get_nfs_open_context((struct nfs_open_context *)
 				file->private_data);
-	if (!IS_SYNC(inode)) {
-		error = nfs_readpage_async(ctx, inode, page);
-		goto out;
-	}
 
-	error = nfs_readpage_sync(ctx, inode, page);
-	if (error < 0 && IS_SWAPFILE(inode))
-		printk("Aiee.. nfs swap-in of page failed!\n");
-out:
+	error = nfs_readpage_async(ctx, inode, page);
+
 	put_nfs_open_context(ctx);
 	return error;
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index baa2886..bb516a2 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1045,7 +1045,7 @@
 		nfs4_fill_super(s);
 	}
 
-	mntroot = nfs4_get_root(s, data->fh);
+	mntroot = nfs4_get_root(s, &mntfh);
 	if (IS_ERR(mntroot)) {
 		error = PTR_ERR(mntroot);
 		goto error_splat_super;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 345492e..febdade 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1,47 +1,7 @@
 /*
  * linux/fs/nfs/write.c
  *
- * Writing file data over NFS.
- *
- * We do it like this: When a (user) process wishes to write data to an
- * NFS file, a write request is allocated that contains the RPC task data
- * plus some info on the page to be written, and added to the inode's
- * write chain. If the process writes past the end of the page, an async
- * RPC call to write the page is scheduled immediately; otherwise, the call
- * is delayed for a few seconds.
- *
- * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
- *
- * Write requests are kept on the inode's writeback list. Each entry in
- * that list references the page (portion) to be written. When the
- * cache timeout has expired, the RPC task is woken up, and tries to
- * lock the page. As soon as it manages to do so, the request is moved
- * from the writeback list to the writelock list.
- *
- * Note: we must make sure never to confuse the inode passed in the
- * write_page request with the one in page->inode. As far as I understand
- * it, these are different when doing a swap-out.
- *
- * To understand everything that goes on here and in the NFS read code,
- * one should be aware that a page is locked in exactly one of the following
- * cases:
- *
- *  -	A write request is in progress.
- *  -	A user process is in generic_file_write/nfs_update_page
- *  -	A user process is in generic_file_read
- *
- * Also note that because of the way pages are invalidated in
- * nfs_revalidate_inode, the following assertions hold:
- *
- *  -	If a page is dirty, there will be no read requests (a page will
- *	not be re-read unless invalidated by nfs_revalidate_inode).
- *  -	If the page is not uptodate, there will be no pending write
- *	requests, and no process will be in nfs_update_page.
- *
- * FIXME: Interaction with the vmscan routines is not optimal yet.
- * Either vmscan must be made nfs-savvy, or we need a different page
- * reclaim concept that supports something like FS-independent
- * buffer_heads with a b_ops-> field.
+ * Write file data over NFS.
  *
  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
  */
@@ -79,7 +39,6 @@
 					    unsigned int, unsigned int);
 static void nfs_mark_request_dirty(struct nfs_page *req);
 static int nfs_wait_on_write_congestion(struct address_space *, int);
-static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
 static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
@@ -194,6 +153,13 @@
 	i_size_write(inode, end);
 }
 
+/* A writeback failed: mark the page as bad, and invalidate the page cache */
+static void nfs_set_pageerror(struct page *page)
+{
+	SetPageError(page);
+	nfs_zap_mapping(page->mapping->host, page->mapping);
+}
+
 /* We can set the PG_uptodate flag if we see that a write request
  * covers the full page.
  */
@@ -323,7 +289,7 @@
 		err = 0;
 out:
 	if (!wbc->for_writepages)
-		nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
+		nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc));
 	return err;
 }
 
@@ -360,14 +326,7 @@
 	if (err < 0)
 		goto out;
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
-	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
-		err = nfs_wait_on_requests(inode, 0, 0);
-		if (err < 0)
-			goto out;
-	}
-	err = nfs_commit_inode(inode, wb_priority(wbc));
-	if (err > 0)
-		err = 0;
+	err = 0;
 out:
 	clear_bit(BDI_write_congested, &bdi->state);
 	wake_up_all(&nfs_write_congestion);
@@ -516,17 +475,6 @@
 	return res;
 }
 
-static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	int ret;
-
-	spin_lock(&nfsi->req_lock);
-	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
-	spin_unlock(&nfsi->req_lock);
-	return ret;
-}
-
 static void nfs_cancel_dirty_list(struct list_head *head)
 {
 	struct nfs_page *req;
@@ -773,7 +721,7 @@
         dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
 			status, (long long)i_size_read(inode));
 	if (status < 0)
-		ClearPageUptodate(page);
+		nfs_set_pageerror(page);
 	return status;
 }
 
@@ -852,7 +800,8 @@
 	data->task.tk_priority = flush_task_priority(how);
 	data->task.tk_cookie = (unsigned long)inode;
 
-	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+	dprintk("NFS: %5u initiated write call "
+		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
 		data->task.tk_pid,
 		inode->i_sb->s_id,
 		(long long)NFS_FILEID(inode),
@@ -1034,8 +983,7 @@
 		return;
 
 	if (task->tk_status < 0) {
-		ClearPageUptodate(page);
-		SetPageError(page);
+		nfs_set_pageerror(page);
 		req->wb_context->error = task->tk_status;
 		dprintk(", error = %d\n", task->tk_status);
 	} else {
@@ -1092,8 +1040,7 @@
 			(long long)req_offset(req));
 
 		if (task->tk_status < 0) {
-			ClearPageUptodate(page);
-			SetPageError(page);
+			nfs_set_pageerror(page);
 			req->wb_context->error = task->tk_status;
 			end_page_writeback(page);
 			nfs_inode_remove_request(req);
@@ -1134,7 +1081,7 @@
 	struct nfs_writeres	*resp = &data->res;
 	int status;
 
-	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
+	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
 		task->tk_pid, task->tk_status);
 
 	/*
@@ -1250,7 +1197,7 @@
 	data->task.tk_priority = flush_task_priority(how);
 	data->task.tk_cookie = (unsigned long)inode;
 	
-	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
+	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 }
 
 /*
@@ -1291,7 +1238,7 @@
 	struct nfs_write_data	*data = calldata;
 	struct nfs_page		*req;
 
-        dprintk("NFS: %4d nfs_commit_done (status %d)\n",
+        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                 task->tk_pid, task->tk_status);
 
 	/* Call the NFS version-specific code */
@@ -1516,6 +1463,8 @@
 		if (ret < 0)
 			goto out;
 	}
+	if (!PagePrivate(page))
+		return 0;
 	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
 	if (ret >= 0)
 		return 0;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index ed0f2ea..47aaa2c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -11,14 +11,6 @@
 
 #include <linux/magic.h>
 
-/*
- * Enable debugging support for nfs client.
- * Requires RPC_DEBUG.
- */
-#ifdef RPC_DEBUG
-# define NFS_DEBUG
-#endif
-
 /* Default timeout values */
 #define NFS_MAX_UDP_TIMEOUT	(60*HZ)
 #define NFS_MAX_TCP_TIMEOUT	(600*HZ)
@@ -567,6 +559,15 @@
 #define NFSDBG_ALL		0xFFFF
 
 #ifdef __KERNEL__
+
+/*
+ * Enable debugging support for nfs client.
+ * Requires RPC_DEBUG.
+ */
+#ifdef RPC_DEBUG
+# define NFS_DEBUG
+#endif
+
 # undef ifdebug
 # ifdef NFS_DEBUG
 #  define ifdebug(fac)		if (unlikely(nfs_debug & NFSDBG_##fac))
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 30d7116..10c26ed 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -784,7 +784,6 @@
 	int	(*access)  (struct inode *, struct nfs_access_entry *);
 	int	(*readlink)(struct inode *, struct page *, unsigned int,
 			    unsigned int);
-	int	(*read)    (struct nfs_read_data *);
 	int	(*create)  (struct inode *, struct dentry *,
 			    struct iattr *, int, struct nameidata *);
 	int	(*remove)  (struct inode *, struct qstr *);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index a1be89d..c7a78eef2 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -40,6 +40,7 @@
 
 	unsigned int		cl_softrtry : 1,/* soft timeouts */
 				cl_intr     : 1,/* interruptible */
+				cl_discrtry : 1,/* disconnect before retry */
 				cl_autobind : 1,/* use getport() */
 				cl_oneshot  : 1,/* dispose after use */
 				cl_dead     : 1;/* abandoned */
@@ -111,6 +112,7 @@
 #define RPC_CLNT_CREATE_ONESHOT		(1UL << 3)
 #define RPC_CLNT_CREATE_NONPRIVPORT	(1UL << 4)
 #define RPC_CLNT_CREATE_NOPING		(1UL << 5)
+#define RPC_CLNT_CREATE_DISCRTRY	(1UL << 6)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
 struct rpc_clnt	*rpc_bind_new_program(struct rpc_clnt *,
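
The RPC_CLNT_CREATE_DISCRTRY flag added above pairs with the new cl_discrtry bit in struct rpc_clnt: NFSv4 mounts request it (see the nfs_create_rpc_client() call in fs/nfs/client.c earlier in this patch), while NFSv2/v3 pass 0. The corresponding net/sunrpc/clnt.c wiring is not visible in this excerpt; as an assumption only, the translation from creation flag to per-client bit would look roughly like the sketch below, where rpc_apply_create_flags is a hypothetical helper name:

/* Assumed sketch, not the literal clnt.c change: copy the creation */
/* flag into the per-client bitfield so the transmit path can force */
/* a transport disconnect before retransmitting a request.          */
static void rpc_apply_create_flags(struct rpc_clnt *clnt, unsigned long flags)
{
	if (flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
}
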
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 8b6ce60..de9fc576 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -253,7 +253,7 @@
 void		rpc_exit_task(struct rpc_task *);
 void		rpc_release_calldata(const struct rpc_call_ops *, void *);
 void		rpc_killall_tasks(struct rpc_clnt *);
-int		rpc_execute(struct rpc_task *);
+void		rpc_execute(struct rpc_task *);
 void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 76f7eac..9527f2b 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -181,7 +181,7 @@
 	struct rpc_cred	*cred;
 	int		i;
 
-	dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth);
+	dprintk("RPC:       gc'ing RPC credentials for auth %p\n", auth);
 	for (i = 0; i < RPC_CREDCACHE_NR; i++) {
 		hlist_for_each_safe(pos, next, &cache->hashtable[i]) {
 			cred = hlist_entry(pos, struct rpc_cred, cr_hash);
@@ -267,7 +267,7 @@
 	};
 	struct rpc_cred *ret;
 
-	dprintk("RPC:     looking up %s cred\n",
+	dprintk("RPC:       looking up %s cred\n",
 		auth->au_ops->au_name);
 	get_group_info(acred.group_info);
 	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
@@ -287,7 +287,7 @@
 	struct rpc_cred *ret;
 	int flags = 0;
 
-	dprintk("RPC: %4d looking up %s cred\n",
+	dprintk("RPC: %5u looking up %s cred\n",
 		task->tk_pid, task->tk_auth->au_ops->au_name);
 	get_group_info(acred.group_info);
 	if (task->tk_flags & RPC_TASK_ROOTCREDS)
@@ -304,8 +304,9 @@
 void
 rpcauth_holdcred(struct rpc_task *task)
 {
-	dprintk("RPC: %4d holding %s cred %p\n",
-		task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred);
+	dprintk("RPC: %5u holding %s cred %p\n",
+		task->tk_pid, task->tk_auth->au_ops->au_name,
+		task->tk_msg.rpc_cred);
 	if (task->tk_msg.rpc_cred)
 		get_rpccred(task->tk_msg.rpc_cred);
 }
@@ -324,7 +325,7 @@
 {
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 
-	dprintk("RPC: %4d releasing %s cred %p\n",
+	dprintk("RPC: %5u releasing %s cred %p\n",
 		task->tk_pid, task->tk_auth->au_ops->au_name, cred);
 
 	put_rpccred(cred);
@@ -336,7 +337,7 @@
 {
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 
-	dprintk("RPC: %4d marshaling %s cred %p\n",
+	dprintk("RPC: %5u marshaling %s cred %p\n",
 		task->tk_pid, task->tk_auth->au_ops->au_name, cred);
 
 	return cred->cr_ops->crmarshal(task, p);
@@ -347,7 +348,7 @@
 {
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 
-	dprintk("RPC: %4d validating %s cred %p\n",
+	dprintk("RPC: %5u validating %s cred %p\n",
 		task->tk_pid, task->tk_auth->au_ops->au_name, cred);
 
 	return cred->cr_ops->crvalidate(task, p);
@@ -359,7 +360,7 @@
 {
 	struct rpc_cred *cred = task->tk_msg.rpc_cred;
 
-	dprintk("RPC: %4d using %s cred %p to wrap rpc data\n",
+	dprintk("RPC: %5u using %s cred %p to wrap rpc data\n",
 			task->tk_pid, cred->cr_ops->cr_name, cred);
 	if (cred->cr_ops->crwrap_req)
 		return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
@@ -373,7 +374,7 @@
 {
 	struct rpc_cred *cred = task->tk_msg.rpc_cred;
 
-	dprintk("RPC: %4d using %s cred %p to unwrap rpc data\n",
+	dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n",
 			task->tk_pid, cred->cr_ops->cr_name, cred);
 	if (cred->cr_ops->crunwrap_resp)
 		return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
@@ -388,7 +389,7 @@
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 	int err;
 
-	dprintk("RPC: %4d refreshing %s cred %p\n",
+	dprintk("RPC: %5u refreshing %s cred %p\n",
 		task->tk_pid, task->tk_auth->au_ops->au_name, cred);
 
 	err = cred->cr_ops->crrefresh(task);
@@ -400,7 +401,7 @@
 void
 rpcauth_invalcred(struct rpc_task *task)
 {
-	dprintk("RPC: %4d invalidating %s cred %p\n",
+	dprintk("RPC: %5u invalidating %s cred %p\n",
 		task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred);
 	spin_lock(&rpc_credcache_lock);
 	if (task->tk_msg.rpc_cred)
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 718fb94..4e4ccc5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -241,7 +241,7 @@
 	}
 	return q;
 err:
-	dprintk("RPC:      gss_fill_context returning %ld\n", -PTR_ERR(p));
+	dprintk("RPC:       gss_fill_context returning %ld\n", -PTR_ERR(p));
 	return p;
 }
 
@@ -276,10 +276,10 @@
 		if (pos->uid != uid)
 			continue;
 		atomic_inc(&pos->count);
-		dprintk("RPC:      gss_find_upcall found msg %p\n", pos);
+		dprintk("RPC:       gss_find_upcall found msg %p\n", pos);
 		return pos;
 	}
-	dprintk("RPC:      gss_find_upcall found nothing\n");
+	dprintk("RPC:       gss_find_upcall found nothing\n");
 	return NULL;
 }
 
@@ -393,7 +393,8 @@
 	struct gss_upcall_msg *gss_msg;
 	int err = 0;
 
-	dprintk("RPC: %4u gss_refresh_upcall for uid %u\n", task->tk_pid, cred->cr_uid);
+	dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
+								cred->cr_uid);
 	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
 	if (IS_ERR(gss_msg)) {
 		err = PTR_ERR(gss_msg);
@@ -413,8 +414,8 @@
 	spin_unlock(&gss_auth->lock);
 	gss_release_msg(gss_msg);
 out:
-	dprintk("RPC: %4u gss_refresh_upcall for uid %u result %d\n", task->tk_pid,
-			cred->cr_uid, err);
+	dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
+			task->tk_pid, cred->cr_uid, err);
 	return err;
 }
 
@@ -426,7 +427,7 @@
 	DEFINE_WAIT(wait);
 	int err = 0;
 
-	dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
+	dprintk("RPC:       gss_upcall for uid %u\n", cred->cr_uid);
 	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
 	if (IS_ERR(gss_msg)) {
 		err = PTR_ERR(gss_msg);
@@ -454,7 +455,8 @@
 	finish_wait(&gss_msg->waitqueue, &wait);
 	gss_release_msg(gss_msg);
 out:
-	dprintk("RPC: gss_create_upcall for uid %u result %d\n", cred->cr_uid, err);
+	dprintk("RPC:       gss_create_upcall for uid %u result %d\n",
+			cred->cr_uid, err);
 	return err;
 }
 
@@ -546,14 +548,14 @@
 	}
 	gss_put_ctx(ctx);
 	kfree(buf);
-	dprintk("RPC:      gss_pipe_downcall returning length %Zu\n", mlen);
+	dprintk("RPC:       gss_pipe_downcall returning length %Zu\n", mlen);
 	return mlen;
 err_put_ctx:
 	gss_put_ctx(ctx);
 err:
 	kfree(buf);
 out:
-	dprintk("RPC:      gss_pipe_downcall returning %d\n", err);
+	dprintk("RPC:       gss_pipe_downcall returning %d\n", err);
 	return err;
 }
 
@@ -591,7 +593,7 @@
 	static unsigned long ratelimit;
 
 	if (msg->errno < 0) {
-		dprintk("RPC:      gss_pipe_destroy_msg releasing msg %p\n",
+		dprintk("RPC:       gss_pipe_destroy_msg releasing msg %p\n",
 				gss_msg);
 		atomic_inc(&gss_msg->count);
 		gss_unhash_msg(gss_msg);
@@ -618,7 +620,7 @@
 	struct rpc_auth * auth;
 	int err = -ENOMEM; /* XXX? */
 
-	dprintk("RPC:      creating GSS authenticator for client %p\n",clnt);
+	dprintk("RPC:       creating GSS authenticator for client %p\n", clnt);
 
 	if (!try_module_get(THIS_MODULE))
 		return ERR_PTR(err);
@@ -670,8 +672,8 @@
 {
 	struct gss_auth *gss_auth;
 
-	dprintk("RPC:      destroying GSS authenticator %p flavor %d\n",
-		auth, auth->au_flavor);
+	dprintk("RPC:       destroying GSS authenticator %p flavor %d\n",
+			auth, auth->au_flavor);
 
 	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
 	rpc_unlink(gss_auth->dentry);
@@ -689,7 +691,7 @@
 static void
 gss_destroy_ctx(struct gss_cl_ctx *ctx)
 {
-	dprintk("RPC:      gss_destroy_ctx\n");
+	dprintk("RPC:       gss_destroy_ctx\n");
 
 	if (ctx->gc_gss_ctx)
 		gss_delete_sec_context(&ctx->gc_gss_ctx);
@@ -703,7 +705,7 @@
 {
 	struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base);
 
-	dprintk("RPC:      gss_destroy_cred \n");
+	dprintk("RPC:       gss_destroy_cred \n");
 
 	if (cred->gc_ctx)
 		gss_put_ctx(cred->gc_ctx);
@@ -726,7 +728,7 @@
 	struct gss_cred	*cred = NULL;
 	int err = -ENOMEM;
 
-	dprintk("RPC:      gss_create_cred for uid %d, flavor %d\n",
+	dprintk("RPC:       gss_create_cred for uid %d, flavor %d\n",
 		acred->uid, auth->au_flavor);
 
 	if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
@@ -745,7 +747,7 @@
 	return &cred->gc_base;
 
 out_err:
-	dprintk("RPC:      gss_create_cred failed with error %d\n", err);
+	dprintk("RPC:       gss_create_cred failed with error %d\n", err);
 	return ERR_PTR(err);
 }
 
@@ -799,7 +801,7 @@
 	struct kvec	iov;
 	struct xdr_buf	verf_buf;
 
-	dprintk("RPC: %4u gss_marshal\n", task->tk_pid);
+	dprintk("RPC: %5u gss_marshal\n", task->tk_pid);
 
 	*p++ = htonl(RPC_AUTH_GSS);
 	cred_len = p++;
@@ -865,7 +867,7 @@
 	u32		flav,len;
 	u32		maj_stat;
 
-	dprintk("RPC: %4u gss_validate\n", task->tk_pid);
+	dprintk("RPC: %5u gss_validate\n", task->tk_pid);
 
 	flav = ntohl(*p++);
 	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
@@ -888,12 +890,12 @@
 	 * calculate the length of the verifier: */
 	task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2;
 	gss_put_ctx(ctx);
-	dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n",
+	dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n",
 			task->tk_pid);
 	return p + XDR_QUADLEN(len);
 out_bad:
 	gss_put_ctx(ctx);
-	dprintk("RPC: %4u gss_validate failed.\n", task->tk_pid);
+	dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
 	return NULL;
 }
 
@@ -1063,7 +1065,7 @@
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
 	int             status = -EIO;
 
-	dprintk("RPC: %4u gss_wrap_req\n", task->tk_pid);
+	dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
 	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
 		/* The spec seems a little ambiguous here, but I think that not
 		 * wrapping context destruction requests makes the most sense.
@@ -1086,7 +1088,7 @@
 	}
 out:
 	gss_put_ctx(ctx);
-	dprintk("RPC: %4u gss_wrap_req returning %d\n", task->tk_pid, status);
+	dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
 	return status;
 }
 
@@ -1192,7 +1194,7 @@
 	status = decode(rqstp, p, obj);
 out:
 	gss_put_ctx(ctx);
-	dprintk("RPC: %4u gss_unwrap_resp returning %d\n", task->tk_pid,
+	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
 			status);
 	return status;
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0a9948d..f441aa0 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -66,8 +66,8 @@
 		goto out;
 
 	if (crypto_blkcipher_ivsize(tfm) > 16) {
-		dprintk("RPC:      gss_k5encrypt: tfm iv size to large %d\n",
-			 crypto_blkcipher_ivsize(tfm));
+		dprintk("RPC:       gss_k5encrypt: tfm iv size to large %d\n",
+		         crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
 
@@ -79,7 +79,7 @@
 
 	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
 out:
-	dprintk("RPC:      krb5_encrypt returns %d\n",ret);
+	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
 	return ret;
 }
 
@@ -102,7 +102,7 @@
 		goto out;
 
 	if (crypto_blkcipher_ivsize(tfm) > 16) {
-		dprintk("RPC:      gss_k5decrypt: tfm iv size to large %d\n",
+		dprintk("RPC:       gss_k5decrypt: tfm iv size to large %d\n",
 			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
@@ -114,7 +114,7 @@
 
 	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
 out:
-	dprintk("RPC:      gss_k5decrypt returns %d\n",ret);
+	dprintk("RPC:       gss_k5decrypt returns %d\n",ret);
 	return ret;
 }
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 05d4bee..7b19432 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -175,7 +175,8 @@
 	}
 
 	ctx_id->internal_ctx_id = ctx;
-	dprintk("RPC:      Successfully imported new context.\n");
+
+	dprintk("RPC:       Successfully imported new context.\n");
 	return 0;
 
 out_err_free_key2:
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index d0bb506..a0d9faa 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -83,7 +83,7 @@
 	s32			now;
 	u32			seq_send;
 
-	dprintk("RPC:     gss_krb5_seal\n");
+	dprintk("RPC:       gss_krb5_seal\n");
 
 	now = get_seconds();
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 3e315a6..43f3421 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -70,7 +70,7 @@
 	s32 code;
 	unsigned char plain[8];
 
-	dprintk("RPC:      krb5_get_seq_num:\n");
+	dprintk("RPC:       krb5_get_seq_num:\n");
 
 	if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
 		return code;
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 87f8977..e30a993 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -86,7 +86,7 @@
 	unsigned char		*ptr = (unsigned char *)read_token->data;
 	int			bodysize;
 
-	dprintk("RPC:      krb5_read_token\n");
+	dprintk("RPC:       krb5_read_token\n");
 
 	if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,
 					read_token->len))
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index fe25b3d..42b3220 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -129,7 +129,7 @@
 	struct page		**tmp_pages;
 	u32			seq_send;
 
-	dprintk("RPC:     gss_wrap_kerberos\n");
+	dprintk("RPC:       gss_wrap_kerberos\n");
 
 	now = get_seconds();
 
@@ -215,7 +215,7 @@
 	int			data_len;
 	int			blocksize;
 
-	dprintk("RPC:      gss_unwrap_kerberos\n");
+	dprintk("RPC:       gss_unwrap_kerberos\n");
 
 	ptr = (u8 *)buf->head[0].iov_base + offset;
 	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 3423890..2687251 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -113,7 +113,7 @@
 	spin_lock(&registered_mechs_lock);
 	list_add(&gm->gm_list, &registered_mechs);
 	spin_unlock(&registered_mechs_lock);
-	dprintk("RPC:      registered gss mechanism %s\n", gm->gm_name);
+	dprintk("RPC:       registered gss mechanism %s\n", gm->gm_name);
 	return 0;
 }
 
@@ -125,7 +125,7 @@
 	spin_lock(&registered_mechs_lock);
 	list_del(&gm->gm_list);
 	spin_unlock(&registered_mechs_lock);
-	dprintk("RPC:      unregistered gss mechanism %s\n", gm->gm_name);
+	dprintk("RPC:       unregistered gss mechanism %s\n", gm->gm_name);
 	gss_mech_free(gm);
 }
 
@@ -298,7 +298,7 @@
 u32
 gss_delete_sec_context(struct gss_ctx	**context_handle)
 {
-	dprintk("RPC:      gss_delete_sec_context deleting %p\n",
+	dprintk("RPC:       gss_delete_sec_context deleting %p\n",
 			*context_handle);
 
 	if (!*context_handle)
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 8ef3f1c..7e15aa6 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -97,7 +97,8 @@
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
 	if (version != 1) {
-		dprintk("RPC: unknown spkm3 token format: obsolete nfs-utils?\n");
+		dprintk("RPC:       unknown spkm3 token format: "
+				"obsolete nfs-utils?\n");
 		goto out_err_free_ctx;
 	}
 
@@ -138,7 +139,7 @@
 
 	ctx_id->internal_ctx_id = ctx;
 
-	dprintk("Successfully imported new spkm context.\n");
+	dprintk("RPC:       Successfully imported new spkm context.\n");
 	return 0;
 
 out_err_free_intg_key:
@@ -183,7 +184,7 @@
 
 	maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
 
-	dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
+	dprintk("RPC:       gss_verify_mic_spkm3 returning %d\n", maj_stat);
 	return maj_stat;
 }
 
@@ -197,7 +198,7 @@
 
 	err = spkm3_make_token(sctx, message_buffer,
 				message_token, SPKM_MIC_TOK);
-	dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err);
+	dprintk("RPC:       gss_get_mic_spkm3 returning %d\n", err);
 	return err;
 }
 
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index b179d58..104cbf4 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -75,20 +75,21 @@
 	now = jiffies;
 
 	if (ctx->ctx_id.len != 16) {
-		dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
+		dprintk("RPC:       spkm3_make_token BAD ctx_id.len %d\n",
 				ctx->ctx_id.len);
 		goto out_err;
 	}
 
 	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
-		dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm."
-				"only support hmac-md5 I-ALG.\n");
+		dprintk("RPC:       gss_spkm3_seal: unsupported I-ALG "
+				"algorithm.  only support hmac-md5 I-ALG.\n");
 		goto out_err;
 	} else
 		checksum_type = CKSUMTYPE_HMAC_MD5;
 
 	if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
-		dprintk("RPC: gss_spkm3_seal: unsupported C-ALG algorithm\n");
+		dprintk("RPC:       gss_spkm3_seal: unsupported C-ALG "
+				"algorithm\n");
 		goto out_err;
 	}
 
@@ -113,7 +114,8 @@
 
 		spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
 	} else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
-		dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK not supported\n");
+		dprintk("RPC:       gss_spkm3_seal: SPKM_WRAP_TOK "
+				"not supported\n");
 		goto out_err;
 	}
 
@@ -153,7 +155,7 @@
 			cksumname = "md5";
 			break;
 		default:
-			dprintk("RPC:      spkm3_make_checksum:"
+			dprintk("RPC:       spkm3_make_checksum:"
 					" unsupported checksum %d", cksumtype);
 			return GSS_S_FAILURE;
 	}
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 8400b62..6cdd241a 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -209,7 +209,7 @@
 
 	/* spkm3 innercontext token preamble */
 	if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
-		dprintk("RPC: BAD SPKM ictoken preamble\n");
+		dprintk("RPC:       BAD SPKM ictoken preamble\n");
 		goto out;
 	}
 
@@ -217,25 +217,25 @@
 
 	/* token type */
 	if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) {
-		dprintk("RPC: BAD asn1 SPKM3 token type\n");
+		dprintk("RPC:       BAD asn1 SPKM3 token type\n");
 		goto out;
 	}
 
 	/* only support SPKM_MIC_TOK */
 	if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
-		dprintk("RPC: ERROR unsupported SPKM3 token \n");
+		dprintk("RPC:       ERROR unsupported SPKM3 token \n");
 		goto out;
 	}
 
 	/* contextid */
 	if (ptr[8] != 0x03) {
-		dprintk("RPC: BAD SPKM3 asn1 context-id type\n");
+		dprintk("RPC:       BAD SPKM3 asn1 context-id type\n");
 		goto out;
 	}
 
 	ctxelen = ptr[9];
 	if (ctxelen > 17) {  /* length includes asn1 zbit octet */
-		dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen);
+		dprintk("RPC:       BAD SPKM3 contextid len %d\n", ctxelen);
 		goto out;
 	}
 
@@ -251,7 +251,9 @@
 	*/
 
 	if (*mic_hdrlen != 6 + ctxelen) {
-		dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only support default int-alg (should be absent) and do not support snd-seq\n", *mic_hdrlen);
+		dprintk("RPC:       BAD SPKM_ MIC_TOK header len %d: we only "
+				"support default int-alg (should be absent) "
+				"and do not support snd-seq\n", *mic_hdrlen);
 		goto out;
 	}
 	/* checksum */
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 35a1b34..cc21ee8 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -72,7 +72,7 @@
 	/* decode the token */
 
 	if (toktype != SPKM_MIC_TOK) {
-		dprintk("RPC: BAD SPKM3 token type: %d\n", toktype);
+		dprintk("RPC:       BAD SPKM3 token type: %d\n", toktype);
 		goto out;
 	}
 
@@ -80,7 +80,7 @@
 		goto out;
 
 	if (*cksum++ != 0x03) {
-		dprintk("RPC: spkm3_read_token BAD checksum type\n");
+		dprintk("RPC:       spkm3_read_token BAD checksum type\n");
 		goto out;
 	}
 	md5elen = *cksum++;
@@ -97,7 +97,8 @@
 	 */
 	ret = GSS_S_DEFECTIVE_TOKEN;
 	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
-		dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm\n");
+		dprintk("RPC:       gss_spkm3_seal: unsupported I-ALG "
+				"algorithm\n");
 		goto out;
 	}
 
@@ -113,7 +114,7 @@
 	ret = GSS_S_BAD_SIG;
 	code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
 	if (code) {
-		dprintk("RPC: bad MIC checksum\n");
+		dprintk("RPC:       bad MIC checksum\n");
 		goto out;
 	}
 
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 8fde38ec..db298b5 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -669,14 +669,14 @@
 	}
 
 	if (gc->gc_seq > MAXSEQ) {
-		dprintk("RPC:      svcauth_gss: discarding request with large sequence number %d\n",
-				gc->gc_seq);
+		dprintk("RPC:       svcauth_gss: discarding request with "
+				"large sequence number %d\n", gc->gc_seq);
 		*authp = rpcsec_gsserr_ctxproblem;
 		return SVC_DENIED;
 	}
 	if (!gss_check_seq_num(rsci, gc->gc_seq)) {
-		dprintk("RPC:      svcauth_gss: discarding request with old sequence number %d\n",
-				gc->gc_seq);
+		dprintk("RPC:       svcauth_gss: discarding request with "
+				"old sequence number %d\n", gc->gc_seq);
 		return SVC_DROP;
 	}
 	return SVC_OK;
@@ -958,7 +958,8 @@
 	__be32		*reject_stat = resv->iov_base + resv->iov_len;
 	int		ret;
 
-	dprintk("RPC:      svcauth_gss: argv->iov_len = %zd\n",argv->iov_len);
+	dprintk("RPC:       svcauth_gss: argv->iov_len = %zd\n",
+			argv->iov_len);
 
 	*authp = rpc_autherr_badcred;
 	if (!svcdata)
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index f7f990c..4e7733a 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -39,7 +39,8 @@
 static struct rpc_auth *
 unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
 {
-	dprintk("RPC: creating UNIX authenticator for client %p\n", clnt);
+	dprintk("RPC:       creating UNIX authenticator for client %p\n",
+			clnt);
 	if (atomic_inc_return(&unix_auth.au_count) == 0)
 		unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1);
 	return &unix_auth;
@@ -48,7 +49,7 @@
 static void
 unx_destroy(struct rpc_auth *auth)
 {
-	dprintk("RPC: destroying UNIX authenticator %p\n", auth);
+	dprintk("RPC:       destroying UNIX authenticator %p\n", auth);
 	rpcauth_free_credcache(auth);
 }
 
@@ -67,8 +68,8 @@
 	struct unx_cred	*cred;
 	int		i;
 
-	dprintk("RPC:      allocating UNIX cred for uid %d gid %d\n",
-				acred->uid, acred->gid);
+	dprintk("RPC:       allocating UNIX cred for uid %d gid %d\n",
+			acred->uid, acred->gid);
 
 	if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL)))
 		return ERR_PTR(-ENOMEM);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 8612044..f02f24a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -215,7 +215,8 @@
 		if (rv == -EAGAIN)
 			rv = -ENOENT;
 	} else if (rv == -EAGAIN || age > refresh_age/2) {
-		dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age);
+		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
+				refresh_age, age);
 		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
 			switch (cache_make_upcall(detail, h)) {
 			case -EINVAL:
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c95a617..6d7221f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -42,6 +42,10 @@
 # define RPCDBG_FACILITY	RPCDBG_CALL
 #endif
 
+#define dprint_status(t)					\
+	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
+			__FUNCTION__, t->tk_status)
+
 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
 
 
@@ -106,8 +110,8 @@
 	int err;
 	int len;
 
-	dprintk("RPC: creating %s client for %s (xprt %p)\n",
-		program->name, servname, xprt);
+	dprintk("RPC:       creating %s client for %s (xprt %p)\n",
+			program->name, servname, xprt);
 
 	err = -EINVAL;
 	if (!xprt)
@@ -220,7 +224,7 @@
 		xprt->resvport = 0;
 
 	dprintk("RPC:       creating %s client for %s (xprt %p)\n",
-		args->program->name, args->servername, xprt);
+			args->program->name, args->servername, xprt);
 
 	clnt = rpc_new_client(xprt, args->servername, args->program,
 				args->version, args->authflavor);
@@ -245,6 +249,8 @@
 		clnt->cl_autobind = 1;
 	if (args->flags & RPC_CLNT_CREATE_ONESHOT)
 		clnt->cl_oneshot = 1;
+	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
+		clnt->cl_discrtry = 1;
 
 	return clnt;
 }
@@ -288,7 +294,7 @@
 out_no_stats:
 	kfree(new);
 out_no_clnt:
-	dprintk("RPC: %s returned error %d\n", __FUNCTION__, err);
+	dprintk("RPC:       %s: returned error %d\n", __FUNCTION__, err);
 	return ERR_PTR(err);
 }
 
@@ -301,7 +307,7 @@
 int
 rpc_shutdown_client(struct rpc_clnt *clnt)
 {
-	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
+	dprintk("RPC:       shutting down %s client for %s, tasks=%d\n",
 			clnt->cl_protname, clnt->cl_server,
 			atomic_read(&clnt->cl_users));
 
@@ -336,7 +342,7 @@
 		return 1;
 	BUG_ON(atomic_read(&clnt->cl_users) != 0);
 
-	dprintk("RPC: destroying %s client for %s\n",
+	dprintk("RPC:       destroying %s client for %s\n",
 			clnt->cl_protname, clnt->cl_server);
 	if (clnt->cl_auth) {
 		rpcauth_destroy(clnt->cl_auth);
@@ -366,8 +372,8 @@
 void
 rpc_release_client(struct rpc_clnt *clnt)
 {
-	dprintk("RPC:      rpc_release_client(%p, %d)\n",
-				clnt, atomic_read(&clnt->cl_users));
+	dprintk("RPC:       rpc_release_client(%p, %d)\n",
+			clnt, atomic_read(&clnt->cl_users));
 
 	if (!atomic_dec_and_test(&clnt->cl_users))
 		return;
@@ -486,17 +492,13 @@
 	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
 	rpc_task_sigmask(task, &oldset);
 
-	rpc_call_setup(task, msg, 0);
-
 	/* Set up the call info struct and execute the task */
+	rpc_call_setup(task, msg, 0);
+	if (task->tk_status == 0) {
+		atomic_inc(&task->tk_count);
+		rpc_execute(task);
+	}
 	status = task->tk_status;
-	if (status != 0)
-		goto out;
-	atomic_inc(&task->tk_count);
-	status = rpc_execute(task);
-	if (status == 0)
-		status = task->tk_status;
-out:
 	rpc_put_task(task);
 	rpc_restore_sigmask(&oldset);
 	return status;
@@ -658,9 +660,10 @@
 {
 	struct rpc_clnt	*clnt = task->tk_client;
 
-	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
-		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
-		(RPC_IS_ASYNC(task) ? "async" : "sync"));
+	dprintk("RPC: %5u call_start %s%d proc %d (%s)\n", task->tk_pid,
+			clnt->cl_protname, clnt->cl_vers,
+			task->tk_msg.rpc_proc->p_proc,
+			(RPC_IS_ASYNC(task) ? "async" : "sync"));
 
 	/* Increment call count */
 	task->tk_msg.rpc_proc->p_count++;
@@ -674,7 +677,7 @@
 static void
 call_reserve(struct rpc_task *task)
 {
-	dprintk("RPC: %4d call_reserve\n", task->tk_pid);
+	dprint_status(task);
 
 	if (!rpcauth_uptodatecred(task)) {
 		task->tk_action = call_refresh;
@@ -694,8 +697,7 @@
 {
 	int status = task->tk_status;
 
-	dprintk("RPC: %4d call_reserveresult (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprint_status(task);
 
 	/*
 	 * After a call to xprt_reserve(), we must have either
@@ -749,8 +751,8 @@
 	struct rpc_xprt *xprt = task->tk_xprt;
 	unsigned int	bufsiz;
 
-	dprintk("RPC: %4d call_allocate (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprint_status(task);
+
 	task->tk_action = call_bind;
 	if (req->rq_buffer)
 		return;
@@ -761,7 +763,8 @@
 
 	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
 		return;
-	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
+
+	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 
 	if (RPC_IS_ASYNC(task) || !signalled()) {
 		xprt_release(task);
@@ -798,8 +801,7 @@
 	kxdrproc_t	encode;
 	__be32		*p;
 
-	dprintk("RPC: %4d call_encode (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprint_status(task);
 
 	/* Default buffer setup */
 	bufsiz = req->rq_bufsize >> 1;
@@ -845,8 +847,7 @@
 {
 	struct rpc_xprt *xprt = task->tk_xprt;
 
-	dprintk("RPC: %4d call_bind (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprint_status(task);
 
 	task->tk_action = call_connect;
 	if (!xprt_bound(xprt)) {
@@ -865,8 +866,7 @@
 	int status = -EACCES;
 
 	if (task->tk_status >= 0) {
-		dprintk("RPC: %4d call_bind_status (status %d)\n",
-					task->tk_pid, task->tk_status);
+		dprint_status(task);
 		task->tk_status = 0;
 		task->tk_action = call_connect;
 		return;
@@ -874,24 +874,24 @@
 
 	switch (task->tk_status) {
 	case -EACCES:
-		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
-				task->tk_pid);
+		dprintk("RPC: %5u remote rpcbind: RPC program/version "
+				"unavailable\n", task->tk_pid);
 		rpc_delay(task, 3*HZ);
 		goto retry_timeout;
 	case -ETIMEDOUT:
-		dprintk("RPC: %4d rpcbind request timed out\n",
+		dprintk("RPC: %5u rpcbind request timed out\n",
 				task->tk_pid);
 		goto retry_timeout;
 	case -EPFNOSUPPORT:
-		dprintk("RPC: %4d remote rpcbind service unavailable\n",
+		dprintk("RPC: %5u remote rpcbind service unavailable\n",
 				task->tk_pid);
 		break;
 	case -EPROTONOSUPPORT:
-		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
+		dprintk("RPC: %5u remote rpcbind version 2 unavailable\n",
 				task->tk_pid);
 		break;
 	default:
-		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
+		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
 				task->tk_pid, -task->tk_status);
 		status = -EIO;
 	}
@@ -911,7 +911,7 @@
 {
 	struct rpc_xprt *xprt = task->tk_xprt;
 
-	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
+	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
 			task->tk_pid, xprt,
 			(xprt_connected(xprt) ? "is" : "is not"));
 
@@ -933,8 +933,7 @@
 	struct rpc_clnt *clnt = task->tk_client;
 	int status = task->tk_status;
 
-	dprintk("RPC: %5u call_connect_status (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprint_status(task);
 
 	task->tk_status = 0;
 	if (status >= 0) {
@@ -966,8 +965,7 @@
 static void
 call_transmit(struct rpc_task *task)
 {
-	dprintk("RPC: %4d call_transmit (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprint_status(task);
 
 	task->tk_action = call_status;
 	if (task->tk_status < 0)
@@ -1028,8 +1026,7 @@
 	if (req->rq_received > 0 && !req->rq_bytes_sent)
 		task->tk_status = req->rq_received;
 
-	dprintk("RPC: %4d call_status (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprint_status(task);
 
 	status = task->tk_status;
 	if (status >= 0) {
@@ -1080,11 +1077,11 @@
 	struct rpc_clnt	*clnt = task->tk_client;
 
 	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
-		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
+		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
 		goto retry;
 	}
 
-	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
+	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
 	task->tk_timeouts++;
 
 	if (RPC_IS_SOFT(task)) {
@@ -1118,8 +1115,8 @@
 	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
 	__be32		*p;
 
-	dprintk("RPC: %4d call_decode (status %d)\n",
-				task->tk_pid, task->tk_status);
+	dprintk("RPC: %5u call_decode (status %d)\n",
+			task->tk_pid, task->tk_status);
 
 	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
 		printk(KERN_NOTICE "%s: server %s OK\n",
@@ -1133,8 +1130,8 @@
 			clnt->cl_stats->rpcretrans++;
 			goto out_retry;
 		}
-		dprintk("%s: too small RPC reply size (%d bytes)\n",
-			clnt->cl_protname, task->tk_status);
+		dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
+				clnt->cl_protname, task->tk_status);
 		task->tk_action = call_timeout;
 		goto out_retry;
 	}
@@ -1166,8 +1163,8 @@
 						      task->tk_msg.rpc_resp);
 		unlock_kernel();
 	}
-	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
-					task->tk_status);
+	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
+			task->tk_status);
 	return;
 out_retry:
 	req->rq_received = req->rq_private_buf.len = 0;
@@ -1180,7 +1177,7 @@
 static void
 call_refresh(struct rpc_task *task)
 {
-	dprintk("RPC: %4d call_refresh\n", task->tk_pid);
+	dprint_status(task);
 
 	xprt_release(task);	/* Must do to obtain new XID */
 	task->tk_action = call_refreshresult;
@@ -1196,8 +1193,8 @@
 call_refreshresult(struct rpc_task *task)
 {
 	int status = task->tk_status;
-	dprintk("RPC: %4d call_refreshresult (status %d)\n",
-				task->tk_pid, task->tk_status);
+
+	dprint_status(task);
 
 	task->tk_status = 0;
 	task->tk_action = call_reserve;
@@ -1275,11 +1272,15 @@
 			case RPC_AUTH_ERROR:
 				break;
 			case RPC_MISMATCH:
-				dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
+				dprintk("RPC: %5u %s: RPC call version "
+						"mismatch!\n",
+						task->tk_pid, __FUNCTION__);
 				error = -EPROTONOSUPPORT;
 				goto out_err;
 			default:
-				dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
+				dprintk("RPC: %5u %s: RPC call rejected, "
+						"unknown error: %x\n",
+						task->tk_pid, __FUNCTION__, n);
 				goto out_eio;
 		}
 		if (--len < 0)
@@ -1292,8 +1293,8 @@
 			if (!task->tk_cred_retry)
 				break;
 			task->tk_cred_retry--;
-			dprintk("RPC: %4d call_verify: retry stale creds\n",
-							task->tk_pid);
+			dprintk("RPC: %5u %s: retry stale creds\n",
+					task->tk_pid, __FUNCTION__);
 			rpcauth_invalcred(task);
 			task->tk_action = call_refresh;
 			goto out_retry;
@@ -1303,8 +1304,8 @@
 			if (!task->tk_garb_retry)
 				break;
 			task->tk_garb_retry--;
-			dprintk("RPC: %4d call_verify: retry garbled creds\n",
-							task->tk_pid);
+			dprintk("RPC: %5u %s: retry garbled creds\n",
+					task->tk_pid, __FUNCTION__);
 			task->tk_action = call_bind;
 			goto out_retry;
 		case RPC_AUTH_TOOWEAK:
@@ -1315,8 +1316,8 @@
 			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
 			error = -EIO;
 		}
-		dprintk("RPC: %4d call_verify: call rejected %d\n",
-						task->tk_pid, n);
+		dprintk("RPC: %5u %s: call rejected %d\n",
+				task->tk_pid, __FUNCTION__, n);
 		goto out_err;
 	}
 	if (!(p = rpcauth_checkverf(task, p))) {
@@ -1330,20 +1331,24 @@
 	case RPC_SUCCESS:
 		return p;
 	case RPC_PROG_UNAVAIL:
-		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
+		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
+				task->tk_pid, __FUNCTION__,
 				(unsigned int)task->tk_client->cl_prog,
 				task->tk_client->cl_server);
 		error = -EPFNOSUPPORT;
 		goto out_err;
 	case RPC_PROG_MISMATCH:
-		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
+		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
+				"server %s\n", task->tk_pid, __FUNCTION__,
 				(unsigned int)task->tk_client->cl_prog,
 				(unsigned int)task->tk_client->cl_vers,
 				task->tk_client->cl_server);
 		error = -EPROTONOSUPPORT;
 		goto out_err;
 	case RPC_PROC_UNAVAIL:
-		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
+		dprintk("RPC: %5u %s: proc %p unsupported by program %u, "
+				"version %u on server %s\n",
+				task->tk_pid, __FUNCTION__,
 				task->tk_msg.rpc_proc,
 				task->tk_client->cl_prog,
 				task->tk_client->cl_vers,
@@ -1351,7 +1356,8 @@
 		error = -EOPNOTSUPP;
 		goto out_err;
 	case RPC_GARBAGE_ARGS:
-		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
+		dprintk("RPC: %5u %s: server saw garbage\n",
+				task->tk_pid, __FUNCTION__);
 		break;			/* retry */
 	default:
 		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
@@ -1362,7 +1368,8 @@
 	task->tk_client->cl_stats->rpcgarbage++;
 	if (task->tk_garb_retry) {
 		task->tk_garb_retry--;
-		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
+		dprintk("RPC: %5u %s: retrying\n",
+				task->tk_pid, __FUNCTION__);
 		task->tk_action = call_bind;
 out_retry:
 		return ERR_PTR(-EAGAIN);
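
The clnt.c hunks above introduce RPC_CLNT_CREATE_DISCRTRY and latch it into cl_discrtry; the xprt.c hunks later in this diff consume it. As a hedged sketch only (not part of this patch), a caller could request the new disconnect-on-retry behaviour roughly like this, assuming the 2.6.20-era struct rpc_create_args layout; the function and server name below are placeholders:

	/* Hypothetical caller; rpc_create() and the field names are assumed
	 * to match the contemporary SUNRPC client API, not taken from this
	 * patch. */
	static struct rpc_clnt *example_create_discrtry_client(struct sockaddr *addr,
								size_t addrlen,
								struct rpc_program *prog)
	{
		struct rpc_create_args args = {
			.protocol	= IPPROTO_TCP,
			.address	= addr,
			.addrsize	= addrlen,
			.servername	= "example-server",	/* placeholder */
			.program	= prog,
			.version	= 0,	/* index into prog->version[]; illustrative */
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= RPC_CLNT_CREATE_DISCRTRY,	/* new in this series */
		};

		return rpc_create(&args);
	}
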
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index f4e1357..d9f7653 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -62,7 +62,10 @@
 
 static void pmap_map_release(void *data)
 {
-	pmap_map_free(data);
+	struct portmap_args *map = data;
+
+	xprt_put(map->pm_xprt);
+	pmap_map_free(map);
 }
 
 static const struct rpc_call_ops pmap_getport_ops = {
@@ -94,7 +97,7 @@
 	struct rpc_task *child;
 	int status;
 
-	dprintk("RPC: %4d rpc_getport(%s, %u, %u, %d)\n",
+	dprintk("RPC: %5u rpc_getport(%s, %u, %u, %d)\n",
 			task->tk_pid, clnt->cl_server,
 			clnt->cl_prog, clnt->cl_vers, xprt->prot);
 
@@ -133,7 +136,7 @@
 	status = -EIO;
 	child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map);
 	if (IS_ERR(child))
-		goto bailout;
+		goto bailout_nofree;
 	rpc_put_task(child);
 
 	task->tk_xprt->stat.bind_count++;
@@ -175,7 +178,7 @@
 	char		hostname[32];
 	int		status;
 
-	dprintk("RPC:      rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n",
+	dprintk("RPC:       rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n",
 			NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
 
 	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
@@ -218,11 +221,10 @@
 		status = 0;
 	}
 
-	dprintk("RPC: %4d pmap_getport_done(status %d, port %u)\n",
+	dprintk("RPC: %5u pmap_getport_done(status %d, port %u)\n",
 			child->tk_pid, status, map->pm_port);
 
 	pmap_wake_portmap_waiters(xprt, status);
-	xprt_put(xprt);
 }
 
 /**
@@ -255,13 +257,14 @@
 	struct rpc_clnt		*pmap_clnt;
 	int error = 0;
 
-	dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n",
+	dprintk("RPC:       registering (%u, %u, %d, %u) with portmapper.\n",
 			prog, vers, prot, port);
 
 	pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
 	if (IS_ERR(pmap_clnt)) {
 		error = PTR_ERR(pmap_clnt);
-		dprintk("RPC: couldn't create pmap client. Error = %d\n", error);
+		dprintk("RPC:       couldn't create pmap client. Error = %d\n",
+				error);
 		return error;
 	}
 
@@ -272,7 +275,7 @@
 			"RPC: failed to contact portmap (errno %d).\n",
 			error);
 	}
-	dprintk("RPC: registration status %d/%d\n", error, *okay);
+	dprintk("RPC:       registration status %d/%d\n", error, *okay);
 
 	/* Client deleted automatically because cl_oneshot == 1 */
 	return error;
@@ -303,8 +306,9 @@
  */
 static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map)
 {
-	dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n",
-		map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port);
+	dprintk("RPC:       xdr_encode_mapping(%u, %u, %u, %u)\n",
+			map->pm_prog, map->pm_vers,
+			map->pm_prot, map->pm_port);
 	*p++ = htonl(map->pm_prog);
 	*p++ = htonl(map->pm_vers);
 	*p++ = htonl(map->pm_prot);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index e1fad77..9b9ea50 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -589,7 +589,7 @@
 {
 	struct inode *inode;
 
-	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR);
+	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
 	if (!inode)
 		goto out_err;
 	inode->i_ino = iunique(dir->i_sb, 100);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 54a6b92..6d87320 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -74,7 +74,7 @@
 static inline void
 __rpc_disable_timer(struct rpc_task *task)
 {
-	dprintk("RPC: %4d disabling timer\n", task->tk_pid);
+	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
 	task->tk_timeout_fn = NULL;
 	task->tk_timeout = 0;
 }
@@ -93,7 +93,7 @@
 	callback = task->tk_timeout_fn;
 	task->tk_timeout_fn = NULL;
 	if (callback && RPC_IS_QUEUED(task)) {
-		dprintk("RPC: %4d running timer\n", task->tk_pid);
+		dprintk("RPC: %5u running timer\n", task->tk_pid);
 		callback(task);
 	}
 	smp_mb__before_clear_bit();
@@ -110,7 +110,7 @@
 	if (!task->tk_timeout)
 		return;
 
-	dprintk("RPC: %4d setting alarm for %lu ms\n",
+	dprintk("RPC: %5u setting alarm for %lu ms\n",
 			task->tk_pid, task->tk_timeout * 1000 / HZ);
 
 	if (timer)
@@ -132,7 +132,7 @@
 		return;
 	if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
 		del_singleshot_timer_sync(&task->tk_timer);
-		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
+		dprintk("RPC: %5u deleting timer\n", task->tk_pid);
 	}
 }
 
@@ -179,8 +179,8 @@
 	queue->qlen++;
 	rpc_set_queued(task);
 
-	dprintk("RPC: %4d added to queue %p \"%s\"\n",
-				task->tk_pid, queue, rpc_qname(queue));
+	dprintk("RPC: %5u added to queue %p \"%s\"\n",
+			task->tk_pid, queue, rpc_qname(queue));
 }
 
 /*
@@ -212,8 +212,8 @@
 	else
 		list_del(&task->u.tk_wait.list);
 	queue->qlen--;
-	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
-				task->tk_pid, queue, rpc_qname(queue));
+	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
+			task->tk_pid, queue, rpc_qname(queue));
 }
 
 static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
@@ -344,8 +344,8 @@
 static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 			rpc_action action, rpc_action timer)
 {
-	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
-				rpc_qname(q), jiffies);
+	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
+			task->tk_pid, rpc_qname(q), jiffies);
 
 	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
 		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
@@ -381,7 +381,8 @@
  */
 static void __rpc_do_wake_up_task(struct rpc_task *task)
 {
-	dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);
+	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
+			task->tk_pid, jiffies);
 
 #ifdef RPC_DEBUG
 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
@@ -397,7 +398,7 @@
 
 	rpc_make_runnable(task);
 
-	dprintk("RPC:      __rpc_wake_up_task done\n");
+	dprintk("RPC:       __rpc_wake_up_task done\n");
 }
 
 /*
@@ -418,7 +419,7 @@
 static void
 __rpc_default_timer(struct rpc_task *task)
 {
-	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
+	dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
 	task->tk_status = -ETIMEDOUT;
 	rpc_wake_up_task(task);
 }
@@ -502,7 +503,8 @@
 {
 	struct rpc_task	*task = NULL;
 
-	dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
+	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
+			queue, rpc_qname(queue));
 	rcu_read_lock_bh();
 	spin_lock(&queue->lock);
 	if (RPC_IS_PRIORITY(queue))
@@ -625,12 +627,12 @@
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
-static int __rpc_execute(struct rpc_task *task)
+static void __rpc_execute(struct rpc_task *task)
 {
 	int		status = 0;
 
-	dprintk("RPC: %4d rpc_execute flgs %x\n",
-				task->tk_pid, task->tk_flags);
+	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
+			task->tk_pid, task->tk_flags);
 
 	BUG_ON(RPC_IS_QUEUED(task));
 
@@ -679,14 +681,14 @@
 		if (RPC_IS_ASYNC(task)) {
 			/* Careful! we may have raced... */
 			if (RPC_IS_QUEUED(task))
-				return 0;
+				return;
 			if (rpc_test_and_set_running(task))
-				return 0;
+				return;
 			continue;
 		}
 
 		/* sync task: sleep here */
-		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
+		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
 		/* Note: Caller should be using rpc_clnt_sigmask() */
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
 				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
@@ -698,19 +700,19 @@
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
-			dprintk("RPC: %4d got signal\n", task->tk_pid);
+			dprintk("RPC: %5u got signal\n", task->tk_pid);
 			task->tk_flags |= RPC_TASK_KILLED;
 			rpc_exit(task, -ERESTARTSYS);
 			rpc_wake_up_task(task);
 		}
 		rpc_set_running(task);
-		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
+		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
 	}
 
-	dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
+	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
+			task->tk_status);
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
-	return status;
 }
 
 /*
@@ -722,12 +724,11 @@
  *	 released. In particular note that tk_release() will have
  *	 been called, so your task memory may have been freed.
  */
-int
-rpc_execute(struct rpc_task *task)
+void rpc_execute(struct rpc_task *task)
 {
 	rpc_set_active(task);
 	rpc_set_running(task);
-	return __rpc_execute(task);
+	__rpc_execute(task);
 }
 
 static void rpc_async_schedule(struct work_struct *work)
@@ -826,7 +827,7 @@
 	/* starting timestamp */
 	task->tk_start = jiffies;
 
-	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
+	dprintk("RPC:       new task initialized, procpid %u\n",
 				current->pid);
 }
 
@@ -839,7 +840,7 @@
 static void rpc_free_task(struct rcu_head *rcu)
 {
 	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
-	dprintk("RPC: %4d freeing task\n", task->tk_pid);
+	dprintk("RPC: %5u freeing task\n", task->tk_pid);
 	mempool_free(task, rpc_task_mempool);
 }
 
@@ -858,7 +859,7 @@
 
 	rpc_init_task(task, clnt, flags, tk_ops, calldata);
 
-	dprintk("RPC: %4d allocated task\n", task->tk_pid);
+	dprintk("RPC:       allocated task %p\n", task);
 	task->tk_flags |= RPC_TASK_DYNAMIC;
 out:
 	return task;
@@ -902,7 +903,7 @@
 #ifdef RPC_DEBUG
 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
 #endif
-	dprintk("RPC: %4d release task\n", task->tk_pid);
+	dprintk("RPC: %5u release task\n", task->tk_pid);
 
 	/* Remove from global task list */
 	spin_lock(&rpc_sched_lock);
@@ -955,7 +956,7 @@
 	struct rpc_task	*rovr;
 	struct list_head *le;
 
-	dprintk("RPC:      killing all tasks for client %p\n", clnt);
+	dprintk("RPC:       killing all tasks for client %p\n", clnt);
 
 	/*
 	 * Spin lock all_tasks to prevent changes...
@@ -984,7 +985,8 @@
 		rpc_killall_tasks(NULL);
 		flush_workqueue(rpciod_workqueue);
 		if (!list_empty(&all_tasks)) {
-			dprintk("rpciod_killall: waiting for tasks to exit\n");
+			dprintk("RPC:       rpciod_killall: waiting for tasks "
+					"to exit\n");
 			yield();
 		}
 	}
@@ -1004,7 +1006,7 @@
 	int error = 0;
 
 	mutex_lock(&rpciod_mutex);
-	dprintk("rpciod_up: users %d\n", rpciod_users);
+	dprintk("RPC:       rpciod_up: users %u\n", rpciod_users);
 	rpciod_users++;
 	if (rpciod_workqueue)
 		goto out;
@@ -1012,7 +1014,7 @@
 	 * If there's no pid, we should be the first user.
 	 */
 	if (rpciod_users > 1)
-		printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
+		printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users);
 	/*
 	 * Create the rpciod thread and wait for it to start.
 	 */
@@ -1034,7 +1036,7 @@
 rpciod_down(void)
 {
 	mutex_lock(&rpciod_mutex);
-	dprintk("rpciod_down sema %d\n", rpciod_users);
+	dprintk("RPC:       rpciod_down sema %u\n", rpciod_users);
 	if (rpciod_users) {
 		if (--rpciod_users)
 			goto out;
@@ -1042,7 +1044,7 @@
 		printk(KERN_WARNING "rpciod_down: no users??\n");
 
 	if (!rpciod_workqueue) {
-		dprintk("rpciod_down: Nothing to do!\n");
+		dprintk("RPC:       rpciod_down: Nothing to do!\n");
 		goto out;
 	}
 	rpciod_killall();
@@ -1072,7 +1074,7 @@
 		if (RPC_IS_QUEUED(t))
 			rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
 
-		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
+		printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
 			t->tk_pid,
 			(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
 			t->tk_flags, t->tk_status,
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 044d948..2878e20 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -226,7 +226,7 @@
 	struct proc_dir_entry *ent;
 
 	rpc_proc_init();
-	dprintk("RPC: registering /proc/net/rpc/%s\n", name);
+	dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
 
 	ent = create_proc_entry(name, 0, proc_net_rpc);
 	if (ent) {
@@ -263,7 +263,7 @@
 void
 rpc_proc_init(void)
 {
-	dprintk("RPC: registering /proc/net/rpc\n");
+	dprintk("RPC:       registering /proc/net/rpc\n");
 	if (!proc_net_rpc) {
 		struct proc_dir_entry *ent;
 		ent = proc_mkdir("rpc", proc_net);
@@ -277,7 +277,7 @@
 void
 rpc_proc_exit(void)
 {
-	dprintk("RPC: unregistering /proc/net/rpc\n");
+	dprintk("RPC:       unregistering /proc/net/rpc\n");
 	if (proc_net_rpc) {
 		proc_net_rpc = NULL;
 		remove_proc_entry("net/rpc", NULL);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b00511d..4ab1374 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -317,7 +317,7 @@
 	for (i = 0; i < serv->sv_nrpools; i++) {
 		struct svc_pool *pool = &serv->sv_pools[i];
 
-		dprintk("initialising pool %u for %s\n",
+		dprintk("svc: initialising pool %u for %s\n",
 				i, serv->sv_name);
 
 		pool->sp_id = i;
@@ -368,7 +368,7 @@
 {
 	struct svc_sock	*svsk;
 
-	dprintk("RPC: svc_destroy(%s, %d)\n",
+	dprintk("svc: svc_destroy(%s, %d)\n",
 				serv->sv_program->pg_name,
 				serv->sv_nrthreads);
 
@@ -654,7 +654,7 @@
 			if (progp->pg_vers[i] == NULL)
 				continue;
 
-			dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n",
+			dprintk("svc: svc_register(%s, %s, %d, %d)%s\n",
 					progp->pg_name,
 					proto == IPPROTO_UDP?  "udp" : "tcp",
 					port,
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e7c71a1..ee6ffa0 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -108,7 +108,7 @@
 	return 1;
 
 out_sleep:
-	dprintk("RPC: %4d failed to lock transport %p\n",
+	dprintk("RPC: %5u failed to lock transport %p\n",
 			task->tk_pid, xprt);
 	task->tk_timeout = 0;
 	task->tk_status = -EAGAIN;
@@ -158,7 +158,7 @@
 	}
 	xprt_clear_locked(xprt);
 out_sleep:
-	dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
+	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
 	task->tk_timeout = 0;
 	task->tk_status = -EAGAIN;
 	if (req && req->rq_ntrans)
@@ -281,7 +281,7 @@
 
 	if (req->rq_cong)
 		return 1;
-	dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
+	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
 			task->tk_pid, xprt->cong, xprt->cwnd);
 	if (RPCXPRT_CONGESTED(xprt))
 		return 0;
@@ -340,7 +340,7 @@
 		if (cwnd < RPC_CWNDSCALE)
 			cwnd = RPC_CWNDSCALE;
 	}
-	dprintk("RPC:      cong %ld, cwnd was %ld, now %ld\n",
+	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
 			xprt->cong, xprt->cwnd, cwnd);
 	xprt->cwnd = cwnd;
 	__xprt_put_cong(xprt, req);
@@ -387,8 +387,8 @@
 
 	spin_lock_bh(&xprt->transport_lock);
 	if (xprt->snd_task) {
-		dprintk("RPC:      write space: waking waiting task on xprt %p\n",
-				xprt);
+		dprintk("RPC:       write space: waking waiting task on "
+				"xprt %p\n", xprt);
 		rpc_wake_up_task(xprt->snd_task);
 	}
 	spin_unlock_bh(&xprt->transport_lock);
@@ -494,7 +494,7 @@
  */
 void xprt_disconnect(struct rpc_xprt *xprt)
 {
-	dprintk("RPC:      disconnected transport %p\n", xprt);
+	dprintk("RPC:       disconnected transport %p\n", xprt);
 	spin_lock_bh(&xprt->transport_lock);
 	xprt_clear_connected(xprt);
 	xprt_wake_pending_tasks(xprt, -ENOTCONN);
@@ -530,7 +530,7 @@
 {
 	struct rpc_xprt	*xprt = task->tk_xprt;
 
-	dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
+	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
 			xprt, (xprt_connected(xprt) ? "is" : "is not"));
 
 	if (!xprt_bound(xprt)) {
@@ -560,7 +560,7 @@
 	if (task->tk_status >= 0) {
 		xprt->stat.connect_count++;
 		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
-		dprintk("RPC: %4d xprt_connect_status: connection established\n",
+		dprintk("RPC: %5u xprt_connect_status: connection established\n",
 				task->tk_pid);
 		return;
 	}
@@ -568,20 +568,22 @@
 	switch (task->tk_status) {
 	case -ECONNREFUSED:
 	case -ECONNRESET:
-		dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
-				task->tk_pid, task->tk_client->cl_server);
+		dprintk("RPC: %5u xprt_connect_status: server %s refused "
+				"connection\n", task->tk_pid,
+				task->tk_client->cl_server);
 		break;
 	case -ENOTCONN:
-		dprintk("RPC: %4d xprt_connect_status: connection broken\n",
+		dprintk("RPC: %5u xprt_connect_status: connection broken\n",
 				task->tk_pid);
 		break;
 	case -ETIMEDOUT:
-		dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
-				task->tk_pid);
+		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
+				"out\n", task->tk_pid);
 		break;
 	default:
-		dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
-				task->tk_pid, -task->tk_status, task->tk_client->cl_server);
+		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
+				"server %s\n", task->tk_pid, -task->tk_status,
+				task->tk_client->cl_server);
 		xprt_release_write(xprt, task);
 		task->tk_status = -EIO;
 	}
@@ -602,6 +604,9 @@
 		if (entry->rq_xid == xid)
 			return entry;
 	}
+
+	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
+			ntohl(xid));
 	xprt->stat.bad_xids++;
 	return NULL;
 }
@@ -654,7 +659,7 @@
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
 
-	dprintk("RPC: %4d xprt_timer\n", task->tk_pid);
+	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
 
 	spin_lock(&xprt->transport_lock);
 	if (!req->rq_received) {
@@ -678,7 +683,7 @@
 	struct rpc_xprt	*xprt = req->rq_xprt;
 	int err = 0;
 
-	dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);
+	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
 
 	spin_lock_bh(&xprt->transport_lock);
 	if (req->rq_received && !req->rq_bytes_sent) {
@@ -716,7 +721,7 @@
 	struct rpc_xprt	*xprt = req->rq_xprt;
 	int status;
 
-	dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
+	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
 
 	if (!req->rq_received) {
 		if (list_empty(&req->rq_list)) {
@@ -730,13 +735,23 @@
 			xprt_reset_majortimeo(req);
 			/* Turn off autodisconnect */
 			del_singleshot_timer_sync(&xprt->timer);
+		} else {
+			/* If all request bytes have been sent,
+			 * then we must be retransmitting this one */
+			if (!req->rq_bytes_sent) {
+				if (task->tk_client->cl_discrtry) {
+					xprt_disconnect(xprt);
+					task->tk_status = -ENOTCONN;
+					return;
+				}
+			}
 		}
 	} else if (!req->rq_bytes_sent)
 		return;
 
 	status = xprt->ops->send_request(task);
 	if (status == 0) {
-		dprintk("RPC: %4d xmit complete\n", task->tk_pid);
+		dprintk("RPC: %5u xmit complete\n", task->tk_pid);
 		spin_lock_bh(&xprt->transport_lock);
 
 		xprt->ops->set_retrans_timeout(task);
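
The hunk above is where cl_discrtry takes effect: when a request already sits on the receive list and is about to be retransmitted in full (rq_bytes_sent is back to zero), the transport is disconnected first so the resend goes out over a fresh connection. An illustrative restatement of that test, not code from the patch:

	/* Illustrative only: mirrors the new branch in xprt_transmit() above. */
	static int example_discrtry_wants_disconnect(struct rpc_task *task,
						     struct rpc_rqst *req)
	{
		/* a full retransmission is pending and the client asked for
		 * disconnect-on-retry behaviour */
		return req->rq_bytes_sent == 0 && task->tk_client->cl_discrtry;
	}
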
@@ -777,7 +792,7 @@
 		xprt_request_init(task, xprt);
 		return;
 	}
-	dprintk("RPC:      waiting for request slot\n");
+	dprintk("RPC:       waiting for request slot\n");
 	task->tk_status = -EAGAIN;
 	task->tk_timeout = 0;
 	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
@@ -822,7 +837,7 @@
 	req->rq_xid     = xprt_alloc_xid(xprt);
 	req->rq_release_snd_buf = NULL;
 	xprt_reset_majortimeo(req);
-	dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
+	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
 			req, ntohl(req->rq_xid));
 }
 
@@ -856,7 +871,7 @@
 		req->rq_release_snd_buf(req);
 	memset(req, 0, sizeof(*req));	/* mark unused */
 
-	dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
+	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
 
 	spin_lock(&xprt->reserve_lock);
 	list_add(&req->rq_list, &xprt->free);
@@ -906,7 +921,7 @@
 		return ERR_PTR(-EIO);
 	}
 	if (IS_ERR(xprt)) {
-		dprintk("RPC:      xprt_create_transport: failed, %ld\n",
+		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
 				-PTR_ERR(xprt));
 		return xprt;
 	}
@@ -936,7 +951,7 @@
 
 	xprt_init_xid(xprt);
 
-	dprintk("RPC:      created transport %p with %u slots\n", xprt,
+	dprintk("RPC:       created transport %p with %u slots\n", xprt,
 			xprt->max_reqs);
 
 	return xprt;
@@ -951,7 +966,7 @@
 {
 	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);
 
-	dprintk("RPC:      destroying transport %p\n", xprt);
+	dprintk("RPC:       destroying transport %p\n", xprt);
 	xprt->shutdown = 1;
 	del_timer_sync(&xprt->timer);
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 49cabff..64736b3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -192,7 +192,7 @@
 	u8 *buf = (u8 *) packet;
 	int j;
 
-	dprintk("RPC:      %s\n", msg);
+	dprintk("RPC:       %s\n", msg);
 	for (j = 0; j < count && j < 128; j += 4) {
 		if (!(j & 31)) {
 			if (j)
@@ -418,7 +418,7 @@
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
-	dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
+	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
 			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
 			req->rq_slen);
 
@@ -467,7 +467,7 @@
 			      xprt->addrlen, xdr,
 			      req->rq_bytes_sent);
 
-	dprintk("RPC:      xs_udp_send_request(%u) = %d\n",
+	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
 			xdr->len - req->rq_bytes_sent, status);
 
 	if (likely(status >= (int) req->rq_slen))
@@ -488,7 +488,7 @@
 		xs_nospace(task);
 		break;
 	default:
-		dprintk("RPC:      sendmsg returned unrecognized error %d\n",
+		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 			-status);
 		break;
 	}
@@ -539,7 +539,7 @@
 		status = xs_sendpages(transport->sock,
 					NULL, 0, xdr, req->rq_bytes_sent);
 
-		dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
+		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
 				xdr->len - req->rq_bytes_sent, status);
 
 		if (unlikely(status < 0))
@@ -570,7 +570,7 @@
 		status = -ENOTCONN;
 		break;
 	default:
-		dprintk("RPC:      sendmsg returned unrecognized error %d\n",
+		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 			-status);
 		xprt_disconnect(xprt);
 		break;
@@ -622,7 +622,7 @@
 	if (!sk)
 		goto clear_close_wait;
 
-	dprintk("RPC:      xs_close xprt %p\n", xprt);
+	dprintk("RPC:       xs_close xprt %p\n", xprt);
 
 	write_lock_bh(&sk->sk_callback_lock);
 	transport->inet = NULL;
@@ -652,7 +652,7 @@
 {
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
-	dprintk("RPC:      xs_destroy xprt %p\n", xprt);
+	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
 
 	cancel_delayed_work(&transport->connect_worker);
 	flush_scheduled_work();
@@ -686,7 +686,7 @@
 	__be32 *xp;
 
 	read_lock(&sk->sk_callback_lock);
-	dprintk("RPC:      xs_udp_data_ready...\n");
+	dprintk("RPC:       xs_udp_data_ready...\n");
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 
@@ -698,7 +698,7 @@
 
 	repsize = skb->len - sizeof(struct udphdr);
 	if (repsize < 4) {
-		dprintk("RPC:      impossible RPC reply size %d!\n", repsize);
+		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
 		goto dropit;
 	}
 
@@ -762,11 +762,11 @@
 
 	/* Sanity check of the record length */
 	if (unlikely(transport->tcp_reclen < 4)) {
-		dprintk("RPC:      invalid TCP record fragment length\n");
+		dprintk("RPC:       invalid TCP record fragment length\n");
 		xprt_disconnect(xprt);
 		return;
 	}
-	dprintk("RPC:      reading TCP record fragment of length %d\n",
+	dprintk("RPC:       reading TCP record fragment of length %d\n",
 			transport->tcp_reclen);
 }
 
@@ -789,7 +789,7 @@
 	char *p;
 
 	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
-	dprintk("RPC:      reading XID (%Zu bytes)\n", len);
+	dprintk("RPC:       reading XID (%Zu bytes)\n", len);
 	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
 	used = xdr_skb_read_bits(desc, p, len);
 	transport->tcp_offset += used;
@@ -798,7 +798,7 @@
 	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
 	transport->tcp_flags |= TCP_RCV_COPY_DATA;
 	transport->tcp_copied = 4;
-	dprintk("RPC:      reading reply for XID %08x\n",
+	dprintk("RPC:       reading reply for XID %08x\n",
 			ntohl(transport->tcp_xid));
 	xs_tcp_check_fraghdr(transport);
 }
@@ -816,7 +816,7 @@
 	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
 	if (!req) {
 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
-		dprintk("RPC:      XID %08x request not found!\n",
+		dprintk("RPC:       XID %08x request not found!\n",
 				ntohl(transport->tcp_xid));
 		spin_unlock(&xprt->transport_lock);
 		return;
@@ -853,19 +853,20 @@
 		 * be discarded.
 		 */
 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
-		dprintk("RPC:      XID %08x truncated request\n",
+		dprintk("RPC:       XID %08x truncated request\n",
 				ntohl(transport->tcp_xid));
-		dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-				xprt, transport->tcp_copied, transport->tcp_offset,
-					transport->tcp_reclen);
+		dprintk("RPC:       xprt = %p, tcp_copied = %lu, "
+				"tcp_offset = %u, tcp_reclen = %u\n",
+				xprt, transport->tcp_copied,
+				transport->tcp_offset, transport->tcp_reclen);
 		goto out;
 	}
 
-	dprintk("RPC:      XID %08x read %Zd bytes\n",
+	dprintk("RPC:       XID %08x read %Zd bytes\n",
 			ntohl(transport->tcp_xid), r);
-	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-			xprt, transport->tcp_copied, transport->tcp_offset,
-				transport->tcp_reclen);
+	dprintk("RPC:       xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
+			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
+			transport->tcp_offset, transport->tcp_reclen);
 
 	if (transport->tcp_copied == req->rq_private_buf.buflen)
 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
@@ -891,7 +892,7 @@
 	desc->count -= len;
 	desc->offset += len;
 	transport->tcp_offset += len;
-	dprintk("RPC:      discarded %Zu bytes\n", len);
+	dprintk("RPC:       discarded %Zu bytes\n", len);
 	xs_tcp_check_fraghdr(transport);
 }
 
@@ -905,7 +906,7 @@
 		.count	= len,
 	};
 
-	dprintk("RPC:      xs_tcp_data_recv started\n");
+	dprintk("RPC:       xs_tcp_data_recv started\n");
 	do {
 		/* Read in a new fragment marker if necessary */
 		/* Can we ever really expect to get completely empty fragments? */
@@ -926,7 +927,7 @@
 		/* Skip over any trailing bytes on short reads */
 		xs_tcp_read_discard(transport, &desc);
 	} while (desc.count);
-	dprintk("RPC:      xs_tcp_data_recv done\n");
+	dprintk("RPC:       xs_tcp_data_recv done\n");
 	return len - desc.count;
 }
 
@@ -941,8 +942,9 @@
 	struct rpc_xprt *xprt;
 	read_descriptor_t rd_desc;
 
+	dprintk("RPC:       xs_tcp_data_ready...\n");
+
 	read_lock(&sk->sk_callback_lock);
-	dprintk("RPC:      xs_tcp_data_ready...\n");
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	if (xprt->shutdown)
@@ -968,11 +970,11 @@
 	read_lock(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
-	dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
-	dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
-				sk->sk_state, xprt_connected(xprt),
-				sock_flag(sk, SOCK_DEAD),
-				sock_flag(sk, SOCK_ZAPPED));
+	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
+	dprintk("RPC:       state %x conn %d dead %d zapped %d\n",
+			sk->sk_state, xprt_connected(xprt),
+			sock_flag(sk, SOCK_DEAD),
+			sock_flag(sk, SOCK_ZAPPED));
 
 	switch (sk->sk_state) {
 	case TCP_ESTABLISHED:
@@ -1140,7 +1142,7 @@
 {
 	struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr;
 
-	dprintk("RPC:      setting port for xprt %p to %u\n", xprt, port);
+	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);
 
 	sap->sin_port = htons(port);
 }
@@ -1159,7 +1161,7 @@
 						sizeof(myaddr));
 		if (err == 0) {
 			transport->port = port;
-			dprintk("RPC:      xs_bindresvport bound to port %u\n",
+			dprintk("RPC:       xs_bindresvport bound to port %u\n",
 					port);
 			return 0;
 		}
@@ -1169,7 +1171,7 @@
 			port--;
 	} while (err == -EADDRINUSE && port != transport->port);
 
-	dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
+	dprintk("RPC:       can't bind to reserved port (%d).\n", -err);
 	return err;
 }
 
@@ -1223,7 +1225,7 @@
 	xs_close(xprt);
 
 	if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
-		dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
+		dprintk("RPC:       can't create UDP transport socket (%d).\n", -err);
 		goto out;
 	}
 	xs_reclassify_socket(sock);
@@ -1233,7 +1235,7 @@
 		goto out;
 	}
 
-	dprintk("RPC:      worker connecting xprt %p to address: %s\n",
+	dprintk("RPC:       worker connecting xprt %p to address: %s\n",
 			xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
 
 	if (!transport->inet) {
@@ -1275,7 +1277,7 @@
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	struct sockaddr any;
 
-	dprintk("RPC:      disconnecting xprt %p to reuse port\n", xprt);
+	dprintk("RPC:       disconnecting xprt %p to reuse port\n", xprt);
 
 	/*
 	 * Disconnect the transport socket by doing a connect operation
@@ -1285,7 +1287,7 @@
 	any.sa_family = AF_UNSPEC;
 	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
 	if (result)
-		dprintk("RPC:      AF_UNSPEC connect return code %d\n",
+		dprintk("RPC:       AF_UNSPEC connect return code %d\n",
 				result);
 }
 
@@ -1309,7 +1311,8 @@
 	if (!sock) {
 		/* start from scratch */
 		if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
-			dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
+			dprintk("RPC:       can't create TCP transport "
+					"socket (%d).\n", -err);
 			goto out;
 		}
 		xs_reclassify_socket(sock);
@@ -1322,7 +1325,7 @@
 		/* "close" the socket, preserving the local port */
 		xs_tcp_reuse_connection(xprt);
 
-	dprintk("RPC:      worker connecting xprt %p to address: %s\n",
+	dprintk("RPC:       worker connecting xprt %p to address: %s\n",
 			xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
 
 	if (!transport->inet) {
@@ -1359,8 +1362,9 @@
 	xprt->stat.connect_start = jiffies;
 	status = kernel_connect(sock, (struct sockaddr *) &xprt->addr,
 			xprt->addrlen, O_NONBLOCK);
-	dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
-			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
+	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
+			xprt, -status, xprt_connected(xprt),
+			sock->sk->sk_state);
 	if (status < 0) {
 		switch (status) {
 			case -EINPROGRESS:
@@ -1404,7 +1408,8 @@
 		return;
 
 	if (transport->sock != NULL) {
-		dprintk("RPC:      xs_connect delayed xprt %p for %lu seconds\n",
+		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
+				"seconds\n",
 				xprt, xprt->reestablish_timeout / HZ);
 		schedule_delayed_work(&transport->connect_worker,
 					xprt->reestablish_timeout);
@@ -1412,7 +1417,7 @@
 		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
 			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
-		dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
+		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
 		schedule_delayed_work(&transport->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
@@ -1507,13 +1512,14 @@
 	struct sock_xprt *new;
 
 	if (addrlen > sizeof(xprt->addr)) {
-		dprintk("RPC:      xs_setup_xprt: address too large\n");
+		dprintk("RPC:       xs_setup_xprt: address too large\n");
 		return ERR_PTR(-EBADF);
 	}
 
 	new = kzalloc(sizeof(*new), GFP_KERNEL);
 	if (new == NULL) {
-		dprintk("RPC:      xs_setup_xprt: couldn't allocate rpc_xprt\n");
+		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
+				"rpc_xprt\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	xprt = &new->xprt;
@@ -1522,7 +1528,8 @@
 	xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
 	if (xprt->slot == NULL) {
 		kfree(xprt);
-		dprintk("RPC:      xs_setup_xprt: couldn't allocate slot table\n");
+		dprintk("RPC:       xs_setup_xprt: couldn't allocate slot "
+				"table\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1572,7 +1579,7 @@
 		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);
 
 	xs_format_peer_addresses(xprt);
-	dprintk("RPC:      set up transport to address %s\n",
+	dprintk("RPC:       set up transport to address %s\n",
 			xprt->address_strings[RPC_DISPLAY_ALL]);
 
 	return xprt;
@@ -1616,7 +1623,7 @@
 		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);
 
 	xs_format_peer_addresses(xprt);
-	dprintk("RPC:      set up transport to address %s\n",
+	dprintk("RPC:       set up transport to address %s\n",
 			xprt->address_strings[RPC_DISPLAY_ALL]);
 
 	return xprt;
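
Taken together, the dprintk() changes in this diff converge on a single format: messages tied to an RPC task print "RPC: %5u " (a five-digit tk_pid followed by a space), while task-less messages pad "RPC:" with seven spaces, so the message text always begins in the same column of the log. A two-line illustration of the resulting alignment; "frobnicate" is a placeholder, not a real function:

	/* Illustrative only; not part of the patch. */
	dprintk("RPC: %5u frobnicate about to retry (status %d)\n",
			task->tk_pid, task->tk_status);
	dprintk("RPC:       frobnicate: no task context available\n");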