Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!

diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
new file mode 100644
index 0000000..7725a0a
--- /dev/null
+++ b/fs/lockd/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the linux lock manager stuff
+#
+
+obj-$(CONFIG_LOCKD) += lockd.o
+
+lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
+	        svcproc.o svcsubs.o mon.o xdr.o
+lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
+lockd-objs		      := $(lockd-objs-y)
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
new file mode 100644
index 0000000..ef7103b
--- /dev/null
+++ b/fs/lockd/clntlock.c
@@ -0,0 +1,245 @@
+/*
+ * linux/fs/lockd/clntlock.c
+ *
+ * Lock handling for the client side NLM implementation
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/smp_lock.h>
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+
+/*
+ * Local function prototypes
+ */
+static int			reclaimer(void *ptr);
+
+/*
+ * The following functions handle blocking and granting from the
+ * client perspective.
+ */
+
+/*
+ * This is the representation of a blocked client lock.
+ */
+struct nlm_wait {
+	struct nlm_wait *	b_next;		/* linked list */
+	wait_queue_head_t	b_wait;		/* where to wait on */
+	struct nlm_host *	b_host;
+	struct file_lock *	b_lock;		/* local file lock */
+	unsigned short		b_reclaim;	/* got to reclaim lock */
+	u32			b_status;	/* grant callback status */
+};
+
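+/*
+ * Head of the list of pending blocking waits. Each entry lives on the
+ * stack of the process sleeping in nlmclnt_block() and is unlinked
+ * again before that function returns.
+ */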
+static struct nlm_wait *	nlm_blocked;
+
+/*
+ * Block on a lock
+ */
+int
+nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
+{
+	struct nlm_wait	block, **head;
+	int		err;
+	u32		pstate;
+
+	block.b_host   = host;
+	block.b_lock   = fl;
+	init_waitqueue_head(&block.b_wait);
+	block.b_status = NLM_LCK_BLOCKED;
+	block.b_next   = nlm_blocked;
+	nlm_blocked    = &block;
+
+	/* Remember pseudo nsm state */
+	pstate = host->h_state;
+
+	/* Go to sleep waiting for GRANT callback. Some servers seem
+	 * to lose callbacks, however, so we're going to poll from
+	 * time to time just to make sure.
+	 *
+	 * For now, the retry frequency is pretty high; normally 
+	 * a 1 minute timeout would do. See the comment before
+	 * nlmclnt_lock for an explanation.
+	 */
+	sleep_on_timeout(&block.b_wait, 30*HZ);
+
+	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
+		if (*head == &block) {
+			*head = block.b_next;
+			break;
+		}
+	}
+
+	if (!signalled()) {
+		*statp = block.b_status;
+		return 0;
+	}
+
+	/* Okay, we were interrupted. Cancel the pending request
+	 * unless the server has rebooted.
+	 */
+	if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
+		printk(KERN_NOTICE
+			"lockd: CANCEL call failed (errno %d)\n", -err);
+
+	return -ERESTARTSYS;
+}
+
+/*
+ * The server lockd has called us back to tell us the lock was granted
+ */
+u32
+nlmclnt_grant(struct nlm_lock *lock)
+{
+	struct nlm_wait	*block;
+
+	/*
+	 * Look up blocked request based on arguments. 
+	 * Warning: must not use cookie to match it!
+	 */
+	for (block = nlm_blocked; block; block = block->b_next) {
+		if (nlm_compare_locks(block->b_lock, &lock->fl))
+			break;
+	}
+
+	/* Oops, no blocked request found. */
+	if (block == NULL)
+		return nlm_lck_denied;
+
+	/* Alright, we found the lock. Set the return status and
+	 * wake up the caller.
+	 */
+	block->b_status = NLM_LCK_GRANTED;
+	wake_up(&block->b_wait);
+
+	return nlm_granted;
+}
+
+/*
+ * The following procedures deal with the recovery of locks after a
+ * server crash.
+ */
+
+/*
+ * Mark the locks for reclaiming.
+ * FIXME: In 2.5 we don't want to iterate through any global file_lock_list.
+ *        Maintain NLM lock reclaiming lists in the nlm_host instead.
+ */
+static
+void nlmclnt_mark_reclaim(struct nlm_host *host)
+{
+	struct file_lock *fl;
+	struct inode *inode;
+	struct list_head *tmp;
+
+	list_for_each(tmp, &file_lock_list) {
+		fl = list_entry(tmp, struct file_lock, fl_link);
+
+		inode = fl->fl_file->f_dentry->d_inode;
+		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
+			continue;
+		if (fl->fl_u.nfs_fl.owner->host != host)
+			continue;
+		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED))
+			continue;
+		fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM;
+	}
+}
+
+/*
+ * Someone has sent us an SM_NOTIFY. Ensure that we rebind to the new port
+ * number, mark our locks for reclaiming, and bump the pseudo NSM state.
+ */
+static inline
+void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
+{
+	host->h_monitored = 0;
+	host->h_nsmstate = newstate;
+	host->h_state++;
+	host->h_nextrebind = 0;
+	nlm_rebind_host(host);
+	nlmclnt_mark_reclaim(host);
+	dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
+}
+
+/*
+ * Reclaim all locks on server host. We do this by spawning a separate
+ * reclaimer thread.
+ */
+void
+nlmclnt_recovery(struct nlm_host *host, u32 newstate)
+{
+	if (host->h_reclaiming++) {
+		if (host->h_nsmstate == newstate)
+			return;
+		nlmclnt_prepare_reclaim(host, newstate);
+	} else {
+		nlmclnt_prepare_reclaim(host, newstate);
+		nlm_get_host(host);
+		__module_get(THIS_MODULE);
+		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
+			module_put(THIS_MODULE);
+	}
+}
+
+static int
+reclaimer(void *ptr)
+{
+	struct nlm_host	  *host = (struct nlm_host *) ptr;
+	struct nlm_wait	  *block;
+	struct list_head *tmp;
+	struct file_lock *fl;
+	struct inode *inode;
+
+	daemonize("%s-reclaim", host->h_name);
+	allow_signal(SIGKILL);
+
+	/* This one ensures that our parent doesn't terminate while the
+	 * reclaim is in progress */
+	lock_kernel();
+	lockd_up();
+
+	/* First, reclaim all locks that have been marked. */
+restart:
+	list_for_each(tmp, &file_lock_list) {
+		fl = list_entry(tmp, struct file_lock, fl_link);
+
+		inode = fl->fl_file->f_dentry->d_inode;
+		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
+			continue;
+		if (fl->fl_u.nfs_fl.owner->host != host)
+			continue;
+		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
+			continue;
+
+		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
+		nlmclnt_reclaim(host, fl);
+		if (signalled())
+			break;
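+		/* nlmclnt_reclaim() may have slept in an RPC call and the
+		 * global file_lock_list may have changed under us, so
+		 * restart the scan from the beginning. */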
+		goto restart;
+	}
+
+	host->h_reclaiming = 0;
+
+	/* Now, wake up all processes that sleep on a blocked lock */
+	for (block = nlm_blocked; block; block = block->b_next) {
+		if (block->b_host == host) {
+			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
+			wake_up(&block->b_wait);
+		}
+	}
+
+	/* Release host handle after use */
+	nlm_release_host(host);
+	lockd_down();
+	unlock_kernel();
+	module_put_and_exit(0);
+}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
new file mode 100644
index 0000000..a440761
--- /dev/null
+++ b/fs/lockd/clntproc.c
@@ -0,0 +1,820 @@
+/*
+ * linux/fs/lockd/clntproc.c
+ *
+ * RPC procedures for the client side NLM implementation
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/nfs_fs.h>
+#include <linux/utsname.h>
+#include <linux/smp_lock.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+#define NLMCLNT_GRACE_WAIT	(5*HZ)
+
+static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
+static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
+static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
+static void	nlmclnt_unlock_callback(struct rpc_task *);
+static void	nlmclnt_cancel_callback(struct rpc_task *);
+static int	nlm_stat_to_errno(u32 stat);
+static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
+
+/*
+ * Cookie counter for NLM requests
+ */
+static u32	nlm_cookie = 0x1234;
+
+static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
+{
+	memcpy(c->data, &nlm_cookie, 4);
+	memset(c->data+4, 0, 4);
+	c->len=4;
+	nlm_cookie++;
+}
+
+static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
+{
+	atomic_inc(&lockowner->count);
+	return lockowner;
+}
+
+static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
+{
+	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
+		return;
+	list_del(&lockowner->list);
+	spin_unlock(&lockowner->host->h_lock);
+	nlm_release_host(lockowner->host);
+	kfree(lockowner);
+}
+
+static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
+{
+	struct nlm_lockowner *lockowner;
+	list_for_each_entry(lockowner, &host->h_lockowners, list) {
+		if (lockowner->pid == pid)
+			return -EBUSY;
+	}
+	return 0;
+}
+
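+/*
+ * Allocate a pid value that is not currently in use by any lockowner
+ * on this host.
+ */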
+static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
+{
+	uint32_t res;
+	do {
+		res = host->h_pidcount++;
+	} while (nlm_pidbusy(host, res) < 0);
+	return res;
+}
+
+static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
+{
+	struct nlm_lockowner *lockowner;
+	list_for_each_entry(lockowner, &host->h_lockowners, list) {
+		if (lockowner->owner != owner)
+			continue;
+		return nlm_get_lockowner(lockowner);
+	}
+	return NULL;
+}
+
+static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
+{
+	struct nlm_lockowner *res, *new = NULL;
+
+	spin_lock(&host->h_lock);
+	res = __nlm_find_lockowner(host, owner);
+	if (res == NULL) {
+		spin_unlock(&host->h_lock);
+		new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL);
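+		/* The lock was dropped around the sleeping allocation above,
+		 * so repeat the lookup in case somebody else created this
+		 * lockowner in the meantime. */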
+		spin_lock(&host->h_lock);
+		res = __nlm_find_lockowner(host, owner);
+		if (res == NULL && new != NULL) {
+			res = new;
+			atomic_set(&new->count, 1);
+			new->owner = owner;
+			new->pid = __nlm_alloc_pid(host);
+			new->host = nlm_get_host(host);
+			list_add(&new->list, &host->h_lockowners);
+			new = NULL;
+		}
+	}
+	spin_unlock(&host->h_lock);
+	if (new != NULL)
+		kfree(new);
+	return res;
+}
+
+/*
+ * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
+ */
+static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
+{
+	struct nlm_args	*argp = &req->a_args;
+	struct nlm_lock	*lock = &argp->lock;
+
+	nlmclnt_next_cookie(&argp->cookie);
+	argp->state   = nsm_local_state;
+	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
+	lock->caller  = system_utsname.nodename;
+	lock->oh.data = req->a_owner;
+	lock->oh.len  = sprintf(req->a_owner, "%d@%s",
+				current->pid, system_utsname.nodename);
+	locks_copy_lock(&lock->fl, fl);
+}
+
+static void nlmclnt_release_lockargs(struct nlm_rqst *req)
+{
+	struct file_lock *fl = &req->a_args.lock.fl;
+
+	if (fl->fl_ops && fl->fl_ops->fl_release_private)
+		fl->fl_ops->fl_release_private(fl);
+}
+
+/*
+ * Initialize arguments for GRANTED call. The nlm_rqst structure
+ * has been cleared already.
+ */
+int
+nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
+{
+	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
+	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
+	call->a_args.lock.caller = system_utsname.nodename;
+	call->a_args.lock.oh.len = lock->oh.len;
+
+	/* set default data area */
+	call->a_args.lock.oh.data = call->a_owner;
+
+	if (lock->oh.len > NLMCLNT_OHSIZE) {
+		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
+		if (!data) {
+			nlmclnt_freegrantargs(call);
+			return 0;
+		}
+		call->a_args.lock.oh.data = (u8 *) data;
+	}
+
+	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
+	return 1;
+}
+
+void
+nlmclnt_freegrantargs(struct nlm_rqst *call)
+{
+	struct file_lock *fl = &call->a_args.lock.fl;
+	/*
+	 * Check whether we allocated memory for the owner.
+	 */
+	if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
+		kfree(call->a_args.lock.oh.data);
+	}
+	if (fl->fl_ops && fl->fl_ops->fl_release_private)
+		fl->fl_ops->fl_release_private(fl);
+}
+
+/*
+ * This is the main entry point for the NLM client.
+ */
+int
+nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
+{
+	struct nfs_server	*nfssrv = NFS_SERVER(inode);
+	struct nlm_host		*host;
+	struct nlm_rqst		reqst, *call = &reqst;
+	sigset_t		oldset;
+	unsigned long		flags;
+	int			status, proto, vers;
+
+	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
+	if (NFS_PROTO(inode)->version > 3) {
+		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
+		return -ENOLCK;
+	}
+
+	/* Retrieve transport protocol from NFS client */
+	proto = NFS_CLIENT(inode)->cl_xprt->prot;
+
+	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
+		return -ENOLCK;
+
+	/* Create RPC client handle if not there, and copy soft
+	 * and intr flags from NFS client. */
+	if (host->h_rpcclnt == NULL) {
+		struct rpc_clnt	*clnt;
+
+		/* Bind an rpc client to this host handle (does not
+		 * perform a portmapper lookup) */
+		if (!(clnt = nlm_bind_host(host))) {
+			status = -ENOLCK;
+			goto done;
+		}
+		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
+		clnt->cl_intr     = nfssrv->client->cl_intr;
+		clnt->cl_chatty   = nfssrv->client->cl_chatty;
+	}
+
+	/* Keep the old signal mask */
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	oldset = current->blocked;
+
+	/* If we're cleaning up locks because the process is exiting,
+	 * perform the RPC call asynchronously. */
+	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
+	    && fl->fl_type == F_UNLCK
+	    && (current->flags & PF_EXITING)) {
+		sigfillset(&current->blocked);	/* Mask all signals */
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+		call = nlmclnt_alloc_call();
+		if (!call) {
+			status = -ENOMEM;
+			goto out_restore;
+		}
+		call->a_flags = RPC_TASK_ASYNC;
+	} else {
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+		memset(call, 0, sizeof(*call));
+		locks_init_lock(&call->a_args.lock.fl);
+		locks_init_lock(&call->a_res.lock.fl);
+	}
+	call->a_host = host;
+
+	nlmclnt_locks_init_private(fl, host);
+
+	/* Set up the argument struct */
+	nlmclnt_setlockargs(call, fl);
+
+	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
+		if (fl->fl_type != F_UNLCK) {
+			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
+			status = nlmclnt_lock(call, fl);
+		} else
+			status = nlmclnt_unlock(call, fl);
+	} else if (IS_GETLK(cmd))
+		status = nlmclnt_test(call, fl);
+	else
+		status = -EINVAL;
+
+ out_restore:
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	current->blocked = oldset;
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+done:
+	dprintk("lockd: clnt proc returns %d\n", status);
+	nlm_release_host(host);
+	return status;
+}
+EXPORT_SYMBOL(nlmclnt_proc);
+
+/*
+ * Allocate an NLM RPC call struct
+ */
+struct nlm_rqst *
+nlmclnt_alloc_call(void)
+{
+	struct nlm_rqst	*call;
+
+	while (!signalled()) {
+		call = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
+		if (call) {
+			memset(call, 0, sizeof(*call));
+			locks_init_lock(&call->a_args.lock.fl);
+			locks_init_lock(&call->a_res.lock.fl);
+			return call;
+		}
+		printk("nlmclnt_alloc_call: failed, waiting for memory\n");
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(5*HZ);
+	}
+	return NULL;
+}
+
+static int nlm_wait_on_grace(wait_queue_head_t *queue)
+{
+	DEFINE_WAIT(wait);
+	int status = -EINTR;
+
+	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
+	if (!signalled ()) {
+		schedule_timeout(NLMCLNT_GRACE_WAIT);
+		try_to_freeze(PF_FREEZE);
+		if (!signalled ())
+			status = 0;
+	}
+	finish_wait(queue, &wait);
+	return status;
+}
+
+/*
+ * Generic NLM call
+ */
+static int
+nlmclnt_call(struct nlm_rqst *req, u32 proc)
+{
+	struct nlm_host	*host = req->a_host;
+	struct rpc_clnt	*clnt;
+	struct nlm_args	*argp = &req->a_args;
+	struct nlm_res	*resp = &req->a_res;
+	struct rpc_message msg = {
+		.rpc_argp	= argp,
+		.rpc_resp	= resp,
+	};
+	int		status;
+
+	dprintk("lockd: call procedure %d on %s\n",
+			(int)proc, host->h_name);
+
+	do {
+		if (host->h_reclaiming && !argp->reclaim)
+			goto in_grace_period;
+
+		/* If we have no RPC client yet, create one. */
+		if ((clnt = nlm_bind_host(host)) == NULL)
+			return -ENOLCK;
+		msg.rpc_proc = &clnt->cl_procinfo[proc];
+
+		/* Perform the RPC call. If an error occurs, try again */
+		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
+			dprintk("lockd: rpc_call returned error %d\n", -status);
+			switch (status) {
+			case -EPROTONOSUPPORT:
+				status = -EINVAL;
+				break;
+			case -ECONNREFUSED:
+			case -ETIMEDOUT:
+			case -ENOTCONN:
+				nlm_rebind_host(host);
+				status = -EAGAIN;
+				break;
+			case -ERESTARTSYS:
+				return signalled () ? -EINTR : status;
+			default:
+				break;
+			}
+			break;
+		} else
+		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
+			dprintk("lockd: server in grace period\n");
+			if (argp->reclaim) {
+				printk(KERN_WARNING
+				     "lockd: spurious grace period reject?!\n");
+				return -ENOLCK;
+			}
+		} else {
+			if (!argp->reclaim) {
+				/* We appear to be out of the grace period */
+				wake_up_all(&host->h_gracewait);
+			}
+			dprintk("lockd: server returns status %d\n", resp->status);
+			return 0;	/* Okay, call complete */
+		}
+
+in_grace_period:
+		/*
+		 * The server has rebooted and appears to be in the grace
+		 * period during which locks are only allowed to be
+		 * reclaimed.
+		 * We can only back off and try again later.
+		 */
+		status = nlm_wait_on_grace(&host->h_gracewait);
+	} while (status == 0);
+
+	return status;
+}
+
+/*
+ * Generic NLM call, async version.
+ */
+int
+nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+{
+	struct nlm_host	*host = req->a_host;
+	struct rpc_clnt	*clnt;
+	struct rpc_message msg = {
+		.rpc_argp	= &req->a_args,
+		.rpc_resp	= &req->a_res,
+	};
+	int		status;
+
+	dprintk("lockd: call procedure %d on %s (async)\n",
+			(int)proc, host->h_name);
+
+	/* If we have no RPC client yet, create one. */
+	if ((clnt = nlm_bind_host(host)) == NULL)
+		return -ENOLCK;
+	msg.rpc_proc = &clnt->cl_procinfo[proc];
+
+	/* bootstrap and kick off the async RPC call */
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+
+	return status;
+}
+
+static int
+nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+{
+	struct nlm_host	*host = req->a_host;
+	struct rpc_clnt	*clnt;
+	struct nlm_args	*argp = &req->a_args;
+	struct nlm_res	*resp = &req->a_res;
+	struct rpc_message msg = {
+		.rpc_argp	= argp,
+		.rpc_resp	= resp,
+	};
+	int		status;
+
+	dprintk("lockd: call procedure %d on %s (async)\n",
+			(int)proc, host->h_name);
+
+	/* If we have no RPC client yet, create one. */
+	if ((clnt = nlm_bind_host(host)) == NULL)
+		return -ENOLCK;
+	msg.rpc_proc = &clnt->cl_procinfo[proc];
+
+	/* Increment host refcount */
+	nlm_get_host(host);
+	/* bootstrap and kick off the async RPC call */
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+	if (status < 0)
+		nlm_release_host(host);
+	return status;
+}
+
+/*
+ * TEST for the presence of a conflicting lock
+ */
+static int
+nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
+{
+	int	status;
+
+	status = nlmclnt_call(req, NLMPROC_TEST);
+	nlmclnt_release_lockargs(req);
+	if (status < 0)
+		return status;
+
+	status = req->a_res.status;
+	if (status == NLM_LCK_GRANTED) {
+		fl->fl_type = F_UNLCK;
+	} else if (status == NLM_LCK_DENIED) {
+		/*
+		 * Report the conflicting lock back to the application.
+		 */
+		locks_copy_lock(fl, &req->a_res.lock.fl);
+		fl->fl_pid = 0;
+	} else {
+		return nlm_stat_to_errno(req->a_res.status);
+	}
+
+	return 0;
+}
+
+static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+	memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
+	nlm_get_lockowner(new->fl_u.nfs_fl.owner);
+}
+
+static void nlmclnt_locks_release_private(struct file_lock *fl)
+{
+	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
+	fl->fl_ops = NULL;
+}
+
+static struct file_lock_operations nlmclnt_lock_ops = {
+	.fl_copy_lock = nlmclnt_locks_copy_lock,
+	.fl_release_private = nlmclnt_locks_release_private,
+};
+
+static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
+{
+	BUG_ON(fl->fl_ops != NULL);
+	fl->fl_u.nfs_fl.state = 0;
+	fl->fl_u.nfs_fl.flags = 0;
+	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
+	fl->fl_ops = &nlmclnt_lock_ops;
+}
+
+static void do_vfs_lock(struct file_lock *fl)
+{
+	int res = 0;
+	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+		case FL_POSIX:
+			res = posix_lock_file_wait(fl->fl_file, fl);
+			break;
+		case FL_FLOCK:
+			res = flock_lock_file_wait(fl->fl_file, fl);
+			break;
+		default:
+			BUG();
+	}
+	if (res < 0)
+		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
+				__FUNCTION__);
+}
+
+/*
+ * LOCK: Try to create a lock
+ *
+ *			Programmer Harassment Alert
+ *
+ * When given a blocking lock request in a sync RPC call, the HPUX lockd
+ * will faithfully return LCK_BLOCKED but never cares to notify us when
+ * the lock could be granted. This way, our local process could hang
+ * around forever waiting for the callback.
+ *
+ *  Solution A:	Implement busy-waiting
+ *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
+ *
+ * For now I am implementing solution A, because I hate the idea of
+ * re-implementing lockd for a third time in two months. The async
+ * calls shouldn't be too hard to do, however.
+ *
+ * This is one of the lovely things about standards in the NFS area:
+ * they're so soft and squishy you can't really blame HP for doing this.
+ */
+static int
+nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
+{
+	struct nlm_host	*host = req->a_host;
+	struct nlm_res	*resp = &req->a_res;
+	int		status;
+
+	if (!host->h_monitored && nsm_monitor(host) < 0) {
+		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
+					host->h_name);
+		status = -ENOLCK;
+		goto out;
+	}
+
+	do {
+		if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) {
+			if (resp->status != NLM_LCK_BLOCKED)
+				break;
+			status = nlmclnt_block(host, fl, &resp->status);
+		}
+		if (status < 0)
+			goto out;
+	} while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);
+
+	if (resp->status == NLM_LCK_GRANTED) {
+		fl->fl_u.nfs_fl.state = host->h_state;
+		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
+		fl->fl_flags |= FL_SLEEP;
+		do_vfs_lock(fl);
+	}
+	status = nlm_stat_to_errno(resp->status);
+out:
+	nlmclnt_release_lockargs(req);
+	return status;
+}
+
+/*
+ * RECLAIM: Try to reclaim a lock
+ */
+int
+nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
+{
+	struct nlm_rqst reqst, *req;
+	int		status;
+
+	req = &reqst;
+	memset(req, 0, sizeof(*req));
+	locks_init_lock(&req->a_args.lock.fl);
+	locks_init_lock(&req->a_res.lock.fl);
+	req->a_host  = host;
+	req->a_flags = 0;
+
+	/* Set up the argument struct */
+	nlmclnt_setlockargs(req, fl);
+	req->a_args.reclaim = 1;
+
+	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
+	 && req->a_res.status == NLM_LCK_GRANTED)
+		return 0;
+
+	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
+				"(errno %d, status %d)\n", fl->fl_pid,
+				status, req->a_res.status);
+
+	/*
+	 * FIXME: This is a serious failure. We can
+	 *
+	 *  a.	Ignore the problem
+	 *  b.	Send the owning process some signal (Linux doesn't have
+	 *	SIGLOST, though...)
+	 *  c.	Retry the operation
+	 *
+	 * Until someone comes up with a simple implementation
+	 * for b or c, I'll choose option a.
+	 */
+
+	return -ENOLCK;
+}
+
+/*
+ * UNLOCK: remove an existing lock
+ */
+static int
+nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
+{
+	struct nlm_res	*resp = &req->a_res;
+	int		status;
+
+	/* Clean the GRANTED flag now so the lock doesn't get
+	 * reclaimed while we're stuck in the unlock call. */
+	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;
+
+	if (req->a_flags & RPC_TASK_ASYNC) {
+		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
+					nlmclnt_unlock_callback);
+		/* Hrmf... Do the unlock early since locks_remove_posix()
+		 * really expects us to free the lock synchronously */
+		do_vfs_lock(fl);
+		if (status < 0) {
+			nlmclnt_release_lockargs(req);
+			kfree(req);
+		}
+		return status;
+	}
+
+	status = nlmclnt_call(req, NLMPROC_UNLOCK);
+	nlmclnt_release_lockargs(req);
+	if (status < 0)
+		return status;
+
+	do_vfs_lock(fl);
+	if (resp->status == NLM_LCK_GRANTED)
+		return 0;
+
+	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
+		printk("lockd: unexpected unlock status: %d\n", resp->status);
+
+	/* What to do now? I'm out of my depth... */
+
+	return -ENOLCK;
+}
+
+static void
+nlmclnt_unlock_callback(struct rpc_task *task)
+{
+	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
+	int		status = req->a_res.status;
+
+	if (RPC_ASSASSINATED(task))
+		goto die;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
+		goto retry_rebind;
+	}
+	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
+		rpc_delay(task, NLMCLNT_GRACE_WAIT);
+		goto retry_unlock;
+	}
+	if (status != NLM_LCK_GRANTED)
+		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
+die:
+	nlm_release_host(req->a_host);
+	nlmclnt_release_lockargs(req);
+	kfree(req);
+	return;
+ retry_rebind:
+	nlm_rebind_host(req->a_host);
+ retry_unlock:
+	rpc_restart_call(task);
+}
+
+/*
+ * Cancel a blocked lock request.
+ * We always use an async RPC call for this in order not to hang a
+ * process that has been Ctrl-C'ed.
+ */
+int
+nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
+{
+	struct nlm_rqst	*req;
+	unsigned long	flags;
+	sigset_t	oldset;
+	int		status;
+
+	/* Block all signals while setting up call */
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	oldset = current->blocked;
+	sigfillset(&current->blocked);
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+	req = nlmclnt_alloc_call();
+	if (!req)
+		return -ENOMEM;
+	req->a_host  = host;
+	req->a_flags = RPC_TASK_ASYNC;
+
+	nlmclnt_setlockargs(req, fl);
+
+	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
+					nlmclnt_cancel_callback);
+	if (status < 0) {
+		nlmclnt_release_lockargs(req);
+		kfree(req);
+	}
+
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	current->blocked = oldset;
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+	return status;
+}
+
+static void
+nlmclnt_cancel_callback(struct rpc_task *task)
+{
+	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
+
+	if (RPC_ASSASSINATED(task))
+		goto die;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: CANCEL call error %d, retrying.\n",
+					task->tk_status);
+		goto retry_cancel;
+	}
+
+	dprintk("lockd: cancel status %d (task %d)\n",
+			req->a_res.status, task->tk_pid);
+
+	switch (req->a_res.status) {
+	case NLM_LCK_GRANTED:
+	case NLM_LCK_DENIED_GRACE_PERIOD:
+		/* Everything's good */
+		break;
+	case NLM_LCK_DENIED_NOLOCKS:
+		dprintk("lockd: CANCEL failed (server has no locks)\n");
+		goto retry_cancel;
+	default:
+		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
+			req->a_res.status);
+	}
+
+die:
+	nlm_release_host(req->a_host);
+	nlmclnt_release_lockargs(req);
+	kfree(req);
+	return;
+
+retry_cancel:
+	nlm_rebind_host(req->a_host);
+	rpc_restart_call(task);
+	rpc_delay(task, 30 * HZ);
+}
+
+/*
+ * Convert an NLM status code to a generic kernel errno
+ */
+static int
+nlm_stat_to_errno(u32 status)
+{
+	switch(status) {
+	case NLM_LCK_GRANTED:
+		return 0;
+	case NLM_LCK_DENIED:
+		return -EAGAIN;
+	case NLM_LCK_DENIED_NOLOCKS:
+	case NLM_LCK_DENIED_GRACE_PERIOD:
+		return -ENOLCK;
+	case NLM_LCK_BLOCKED:
+		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
+		return -ENOLCK;
+#ifdef CONFIG_LOCKD_V4
+	case NLM_DEADLCK:
+		return -EDEADLK;
+	case NLM_ROFS:
+		return -EROFS;
+	case NLM_STALE_FH:
+		return -ESTALE;
+	case NLM_FBIG:
+		return -EOVERFLOW;
+	case NLM_FAILED:
+		return -ENOLCK;
+#endif
+	}
+	printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
+	return -ENOLCK;
+}
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
new file mode 100644
index 0000000..52707c5
--- /dev/null
+++ b/fs/lockd/host.c
@@ -0,0 +1,346 @@
+/*
+ * linux/fs/lockd/host.c
+ *
+ * Management for NLM peer hosts. The nlm_host struct is shared
+ * between the client and server implementations. The only reason to
+ * do so is to reduce code bloat.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
+#define NLM_HOST_MAX		64
+#define NLM_HOST_NRHASH		32
+#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
+#define NLM_HOST_REBIND		(60 * HZ)
+#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
+#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)
+#define NLM_HOST_ADDR(sv)	(&(sv)->s_nlmclnt->cl_xprt->addr)
+
+static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
+static unsigned long		next_gc;
+static int			nrhosts;
+static DECLARE_MUTEX(nlm_host_sema);
+
+
+static void			nlm_gc_hosts(void);
+
+/*
+ * Find an NLM server handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
+{
+	return nlm_lookup_host(0, sin, proto, version);
+}
+
+/*
+ * Find an NLM client handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmsvc_lookup_host(struct svc_rqst *rqstp)
+{
+	return nlm_lookup_host(1, &rqstp->rq_addr,
+			       rqstp->rq_prot, rqstp->rq_vers);
+}
+
+/*
+ * Common host lookup routine for server & client
+ */
+struct nlm_host *
+nlm_lookup_host(int server, struct sockaddr_in *sin,
+					int proto, int version)
+{
+	struct nlm_host	*host, **hp;
+	u32		addr;
+	int		hash;
+
+	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
+			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);
+
+	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
+
+	/* Lock hash table */
+	down(&nlm_host_sema);
+
+	if (time_after_eq(jiffies, next_gc))
+		nlm_gc_hosts();
+
+	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
+		if (host->h_proto != proto)
+			continue;
+		if (host->h_version != version)
+			continue;
+		if (host->h_server != server)
+			continue;
+
+		if (nlm_cmp_addr(&host->h_addr, sin)) {
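+			/* Move the entry to the front of its hash chain
+			 * (simple move-to-front caching). */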
+			if (hp != nlm_hosts + hash) {
+				*hp = host->h_next;
+				host->h_next = nlm_hosts[hash];
+				nlm_hosts[hash] = host;
+			}
+			nlm_get_host(host);
+			up(&nlm_host_sema);
+			return host;
+		}
+	}
+
+	/* Oops, no host found; create it. */
+	dprintk("lockd: creating host entry\n");
+
+	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
+		goto nohost;
+	memset(host, 0, sizeof(*host));
+
+	addr = sin->sin_addr.s_addr;
+	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));
+
+	host->h_addr       = *sin;
+	host->h_addr.sin_port = 0;	/* ouch! */
+	host->h_version    = version;
+	host->h_proto      = proto;
+	host->h_rpcclnt    = NULL;
+	init_MUTEX(&host->h_sema);
+	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
+	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
+	atomic_set(&host->h_count, 1);
+	init_waitqueue_head(&host->h_gracewait);
+	host->h_state      = 0;			/* pseudo NSM state */
+	host->h_nsmstate   = 0;			/* real NSM state */
+	host->h_server	   = server;
+	host->h_next       = nlm_hosts[hash];
+	nlm_hosts[hash]    = host;
+	INIT_LIST_HEAD(&host->h_lockowners);
+	spin_lock_init(&host->h_lock);
+
+	if (++nrhosts > NLM_HOST_MAX)
+		next_gc = 0;
+
+nohost:
+	up(&nlm_host_sema);
+	return host;
+}
+
+struct nlm_host *
+nlm_find_client(void)
+{
+	/* find a nlm_host for a client for which h_killed == 0.
+	 * and return it
+	 */
+	int hash;
+	down(&nlm_host_sema);
+	for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
+		struct nlm_host *host, **hp;
+		for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
+			if (host->h_server &&
+			    host->h_killed == 0) {
+				nlm_get_host(host);
+				up(&nlm_host_sema);
+				return host;
+			}
+		}
+	}
+	up(&nlm_host_sema);
+	return NULL;
+}
+
+
+/*
+ * Create the NLM RPC client for an NLM peer
+ */
+struct rpc_clnt *
+nlm_bind_host(struct nlm_host *host)
+{
+	struct rpc_clnt	*clnt;
+	struct rpc_xprt	*xprt;
+
+	dprintk("lockd: nlm_bind_host(%08x)\n",
+			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));
+
+	/* Lock host handle */
+	down(&host->h_sema);
+
+	/* If we've already created an RPC client, check whether
+	 * RPC rebind is required
+	 * Note: why keep rebinding if we're on a tcp connection?
+	 */
+	if ((clnt = host->h_rpcclnt) != NULL) {
+		xprt = clnt->cl_xprt;
+		if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
+			clnt->cl_port = 0;
+			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
+			dprintk("lockd: next rebind in %ld jiffies\n",
+					host->h_nextrebind - jiffies);
+		}
+	} else {
+		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
+		if (IS_ERR(xprt))
+			goto forgetit;
+
+		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
+
+		/* Existing NLM servers accept AUTH_UNIX only */
+		clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
+					host->h_version, RPC_AUTH_UNIX);
+		if (IS_ERR(clnt)) {
+			xprt_destroy(xprt);
+			goto forgetit;
+		}
+		clnt->cl_autobind = 1;	/* turn on pmap queries */
+		xprt->nocong = 1;	/* No congestion control for NLM */
+		xprt->resvport = 1;	/* NLM requires a reserved port */
+
+		host->h_rpcclnt = clnt;
+	}
+
+	up(&host->h_sema);
+	return clnt;
+
+forgetit:
+	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
+	up(&host->h_sema);
+	return NULL;
+}
+
+/*
+ * Force a portmap lookup of the remote lockd port
+ */
+void
+nlm_rebind_host(struct nlm_host *host)
+{
+	dprintk("lockd: rebind host %s\n", host->h_name);
+	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
+		host->h_rpcclnt->cl_port = 0;
+		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
+	}
+}
+
+/*
+ * Increment NLM host count
+ */
+struct nlm_host * nlm_get_host(struct nlm_host *host)
+{
+	if (host) {
+		dprintk("lockd: get host %s\n", host->h_name);
+		atomic_inc(&host->h_count);
+		host->h_expires = jiffies + NLM_HOST_EXPIRE;
+	}
+	return host;
+}
+
+/*
+ * Release NLM host after use
+ */
+void nlm_release_host(struct nlm_host *host)
+{
+	if (host != NULL) {
+		dprintk("lockd: release host %s\n", host->h_name);
+		atomic_dec(&host->h_count);
+		BUG_ON(atomic_read(&host->h_count) < 0);
+	}
+}
+
+/*
+ * Shut down the hosts module.
+ * Note that this routine is called only at server shutdown time.
+ */
+void
+nlm_shutdown_hosts(void)
+{
+	struct nlm_host	*host;
+	int		i;
+
+	dprintk("lockd: shutting down host module\n");
+	down(&nlm_host_sema);
+
+	/* First, make all hosts eligible for gc */
+	dprintk("lockd: nuking all hosts...\n");
+	for (i = 0; i < NLM_HOST_NRHASH; i++) {
+		for (host = nlm_hosts[i]; host; host = host->h_next)
+			host->h_expires = jiffies - 1;
+	}
+
+	/* Then, perform a garbage collection pass */
+	nlm_gc_hosts();
+	up(&nlm_host_sema);
+
+	/* complain if any hosts are left */
+	if (nrhosts) {
+		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
+		dprintk("lockd: %d hosts left:\n", nrhosts);
+		for (i = 0; i < NLM_HOST_NRHASH; i++) {
+			for (host = nlm_hosts[i]; host; host = host->h_next) {
+				dprintk("       %s (cnt %d use %d exp %ld)\n",
+					host->h_name, atomic_read(&host->h_count),
+					host->h_inuse, host->h_expires);
+			}
+		}
+	}
+}
+
+/*
+ * Garbage collect any unused NLM hosts.
+ * This GC combines reference counting for async operations with
+ * mark & sweep for resources held by remote clients.
+ */
+static void
+nlm_gc_hosts(void)
+{
+	struct nlm_host	**q, *host;
+	struct rpc_clnt	*clnt;
+	int		i;
+
+	dprintk("lockd: host garbage collection\n");
+	for (i = 0; i < NLM_HOST_NRHASH; i++) {
+		for (host = nlm_hosts[i]; host; host = host->h_next)
+			host->h_inuse = 0;
+	}
+
+	/* Mark all hosts that hold locks, blocks or shares */
+	nlmsvc_mark_resources();
+
+	for (i = 0; i < NLM_HOST_NRHASH; i++) {
+		q = &nlm_hosts[i];
+		while ((host = *q) != NULL) {
+			if (atomic_read(&host->h_count) || host->h_inuse
+			 || time_before(jiffies, host->h_expires)) {
+				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
+					host->h_name, atomic_read(&host->h_count),
+					host->h_inuse, host->h_expires);
+				q = &host->h_next;
+				continue;
+			}
+			dprintk("lockd: delete host %s\n", host->h_name);
+			*q = host->h_next;
+			/* Don't unmonitor hosts that have been invalidated */
+			if (host->h_monitored && !host->h_killed)
+				nsm_unmonitor(host);
+			if ((clnt = host->h_rpcclnt) != NULL) {
+				if (atomic_read(&clnt->cl_users)) {
+					printk(KERN_WARNING
+						"lockd: active RPC handle\n");
+					clnt->cl_dead = 1;
+				} else {
+					rpc_destroy_client(host->h_rpcclnt);
+				}
+			}
+			BUG_ON(!list_empty(&host->h_lockowners));
+			kfree(host);
+			nrhosts--;
+		}
+	}
+
+	next_gc = jiffies + NLM_HOST_COLLECT;
+}
+
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
new file mode 100644
index 0000000..6fc1beb
--- /dev/null
+++ b/fs/lockd/mon.c
@@ -0,0 +1,246 @@
+/*
+ * linux/fs/lockd/mon.c
+ *
+ * The kernel statd client.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/kernel.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_MONITOR
+
+static struct rpc_clnt *	nsm_create(void);
+
+static struct rpc_program	nsm_program;
+
+/*
+ * Local NSM state
+ */
+u32				nsm_local_state;
+
+/*
+ * Common procedure for SM_MON/SM_UNMON calls
+ */
+static int
+nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
+{
+	struct rpc_clnt	*clnt;
+	int		status;
+	struct nsm_args	args;
+
+	clnt = nsm_create();
+	if (IS_ERR(clnt)) {
+		status = PTR_ERR(clnt);
+		goto out;
+	}
+
+	args.addr = host->h_addr.sin_addr.s_addr;
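+	/* Pack the transport protocol and the server/client role into a
+	 * single field: protocol in the upper bits, role in bit 0. */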
+	args.proto = (host->h_proto << 1) | host->h_server;
+	args.prog = NLM_PROGRAM;
+	args.vers = host->h_version;
+	args.proc = NLMPROC_NSM_NOTIFY;
+	memset(res, 0, sizeof(*res));
+
+	status = rpc_call(clnt, proc, &args, res, 0);
+	if (status < 0)
+		printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n",
+			status);
+	else
+		status = 0;
+ out:
+	return status;
+}
+
+/*
+ * Set up monitoring of a remote host
+ */
+int
+nsm_monitor(struct nlm_host *host)
+{
+	struct nsm_res	res;
+	int		status;
+
+	dprintk("lockd: nsm_monitor(%s)\n", host->h_name);
+
+	status = nsm_mon_unmon(host, SM_MON, &res);
+
+	if (status < 0 || res.status != 0)
+		printk(KERN_NOTICE "lockd: cannot monitor %s\n", host->h_name);
+	else
+		host->h_monitored = 1;
+	return status;
+}
+
+/*
+ * Cease to monitor remote host
+ */
+int
+nsm_unmonitor(struct nlm_host *host)
+{
+	struct nsm_res	res;
+	int		status;
+
+	dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);
+
+	status = nsm_mon_unmon(host, SM_UNMON, &res);
+	if (status < 0)
+		printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", host->h_name);
+	else
+		host->h_monitored = 0;
+	return status;
+}
+
+/*
+ * Create NSM client for the local host
+ */
+static struct rpc_clnt *
+nsm_create(void)
+{
+	struct rpc_xprt		*xprt;
+	struct rpc_clnt		*clnt;
+	struct sockaddr_in	sin;
+
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	sin.sin_port = 0;
+
+	xprt = xprt_create_proto(IPPROTO_UDP, &sin, NULL);
+	if (IS_ERR(xprt))
+		return (struct rpc_clnt *)xprt;
+
+	clnt = rpc_create_client(xprt, "localhost",
+				&nsm_program, SM_VERSION,
+				RPC_AUTH_NULL);
+	if (IS_ERR(clnt))
+		goto out_destroy;
+	clnt->cl_softrtry = 1;
+	clnt->cl_chatty   = 1;
+	clnt->cl_oneshot  = 1;
+	xprt->resvport = 1;	/* NSM requires a reserved port */
+	return clnt;
+
+out_destroy:
+	xprt_destroy(xprt);
+	return clnt;
+}
+
+/*
+ * XDR functions for NSM.
+ */
+
+static u32 *
+xdr_encode_common(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
+{
+	char	buffer[20];
+
+	/*
+	 * Use the dotted-quad IP address of the remote host as
+	 * identifier. Linux statd always looks up the canonical
+	 * hostname first for whatever remote hostname it receives,
+	 * so this works alright.
+	 */
+	sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(argp->addr));
+	if (!(p = xdr_encode_string(p, buffer))
+	 || !(p = xdr_encode_string(p, system_utsname.nodename)))
+		return ERR_PTR(-EIO);
+	*p++ = htonl(argp->prog);
+	*p++ = htonl(argp->vers);
+	*p++ = htonl(argp->proc);
+
+	return p;
+}
+
+static int
+xdr_encode_mon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
+{
+	p = xdr_encode_common(rqstp, p, argp);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+	*p++ = argp->addr;
+	*p++ = argp->vers;
+	*p++ = argp->proto;
+	*p++ = 0;
+	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
+	return 0;
+}
+
+static int
+xdr_encode_unmon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
+{
+	p = xdr_encode_common(rqstp, p, argp);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
+	return 0;
+}
+
+static int
+xdr_decode_stat_res(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp)
+{
+	resp->status = ntohl(*p++);
+	resp->state = ntohl(*p++);
+	dprintk("nsm: xdr_decode_stat_res status %d state %d\n",
+			resp->status, resp->state);
+	return 0;
+}
+
+static int
+xdr_decode_stat(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp)
+{
+	resp->state = ntohl(*p++);
+	return 0;
+}
+
+#define SM_my_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
+#define SM_my_id_sz	(3+1+SM_my_name_sz)
+#define SM_mon_id_sz	(1+XDR_QUADLEN(20)+SM_my_id_sz)
+#define SM_mon_sz	(SM_mon_id_sz+4)
+#define SM_monres_sz	2
+#define SM_unmonres_sz	1
+
+#ifndef MAX
+# define MAX(a, b)	(((a) > (b))? (a) : (b))
+#endif
+
+static struct rpc_procinfo	nsm_procedures[] = {
+[SM_MON] = {
+		.p_proc		= SM_MON,
+		.p_encode	= (kxdrproc_t) xdr_encode_mon,
+		.p_decode	= (kxdrproc_t) xdr_decode_stat_res,
+		.p_bufsiz	= MAX(SM_mon_sz, SM_monres_sz) << 2,
+	},
+[SM_UNMON] = {
+		.p_proc		= SM_UNMON,
+		.p_encode	= (kxdrproc_t) xdr_encode_unmon,
+		.p_decode	= (kxdrproc_t) xdr_decode_stat,
+		.p_bufsiz	= MAX(SM_mon_id_sz, SM_unmonres_sz) << 2,
+	},
+};
+
+static struct rpc_version	nsm_version1 = {
+		.number		= 1, 
+		.nrprocs	= sizeof(nsm_procedures)/sizeof(nsm_procedures[0]),
+		.procs		= nsm_procedures
+};
+
+static struct rpc_version *	nsm_version[] = {
+	[1] = &nsm_version1,
+};
+
+static struct rpc_stat		nsm_stats;
+
+static struct rpc_program	nsm_program = {
+		.name		= "statd",
+		.number		= SM_PROGRAM,
+		.nrvers		= sizeof(nsm_version)/sizeof(nsm_version[0]),
+		.version	= nsm_version,
+		.stats		= &nsm_stats
+};
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
new file mode 100644
index 0000000..b82e470
--- /dev/null
+++ b/fs/lockd/svc.c
@@ -0,0 +1,519 @@
+/*
+ * linux/fs/lockd/svc.c
+ *
+ * This is the central lockd service.
+ *
+ * FIXME: Separate the lockd NFS server functionality from the lockd NFS
+ * 	  client functionality. Oh why didn't Sun create two separate
+ *	  services in the first place?
+ *
+ * Authors:	Olaf Kirch (okir@monad.swb.de)
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <linux/sunrpc/types.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcsock.h>
+#include <linux/lockd/lockd.h>
+#include <linux/nfs.h>
+
+#define NLMDBG_FACILITY		NLMDBG_SVC
+#define LOCKD_BUFSIZE		(1024 + NLMSVC_XDRSIZE)
+#define ALLOWED_SIGS		(sigmask(SIGKILL))
+
+static struct svc_program	nlmsvc_program;
+
+struct nlmsvc_binding *		nlmsvc_ops;
+EXPORT_SYMBOL(nlmsvc_ops);
+
+static DECLARE_MUTEX(nlmsvc_sema);
+static unsigned int		nlmsvc_users;
+static pid_t			nlmsvc_pid;
+int				nlmsvc_grace_period;
+unsigned long			nlmsvc_timeout;
+
+static DECLARE_MUTEX_LOCKED(lockd_start);
+static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
+
+/*
+ * These can be set at insmod time (useful for NFS as root filesystem),
+ * and also changed through the sysctl interface.  -- Jamie Lokier, Aug 2003
+ */
+static unsigned long		nlm_grace_period;
+static unsigned long		nlm_timeout = LOCKD_DFLT_TIMEO;
+static int			nlm_udpport, nlm_tcpport;
+
+/*
+ * Constants needed for the sysctl interface.
+ */
+static const unsigned long	nlm_grace_period_min = 0;
+static const unsigned long	nlm_grace_period_max = 240;
+static const unsigned long	nlm_timeout_min = 3;
+static const unsigned long	nlm_timeout_max = 20;
+static const int		nlm_port_min = 0, nlm_port_max = 65535;
+
+static struct ctl_table_header * nlm_sysctl_table;
+
+static unsigned long set_grace_period(void)
+{
+	unsigned long grace_period;
+
+	/* Note: nlm_timeout should always be nonzero */
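+	/* Round the configured grace period up to a whole multiple of
+	 * nlm_timeout (both in seconds) before converting to jiffies. */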
+	if (nlm_grace_period)
+		grace_period = ((nlm_grace_period + nlm_timeout - 1)
+				/ nlm_timeout) * nlm_timeout * HZ;
+	else
+		grace_period = nlm_timeout * 5 * HZ;
+	nlmsvc_grace_period = 1;
+	return grace_period + jiffies;
+}
+
+static inline void clear_grace_period(void)
+{
+	nlmsvc_grace_period = 0;
+}
+
+/*
+ * This is the lockd kernel thread
+ */
+static void
+lockd(struct svc_rqst *rqstp)
+{
+	struct svc_serv	*serv = rqstp->rq_server;
+	int		err = 0;
+	unsigned long grace_period_expire;
+
+	/* Lock module and set up kernel thread */
+	/* lockd_up is waiting for us to startup, so will
+	 * be holding a reference to this module, so it
+	 * is safe to just claim another reference
+	 */
+	__module_get(THIS_MODULE);
+	lock_kernel();
+
+	/*
+	 * Let our maker know we're running.
+	 */
+	nlmsvc_pid = current->pid;
+	up(&lockd_start);
+
+	daemonize("lockd");
+
+	/* Process request with signals blocked, but allow SIGKILL.  */
+	allow_signal(SIGKILL);
+
+	/* kick rpciod */
+	rpciod_up();
+
+	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
+
+	if (!nlm_timeout)
+		nlm_timeout = LOCKD_DFLT_TIMEO;
+	nlmsvc_timeout = nlm_timeout * HZ;
+
+	grace_period_expire = set_grace_period();
+
+	/*
+	 * The main request loop. We don't terminate until the last
+	 * NFS mount or NFS daemon has gone away, and we've been sent a
+	 * signal, or else another process has taken over our job.
+	 */
+	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
+		long timeout = MAX_SCHEDULE_TIMEOUT;
+
+		if (signalled()) {
+			flush_signals(current);
+			if (nlmsvc_ops) {
+				nlmsvc_invalidate_all();
+				grace_period_expire = set_grace_period();
+			}
+		}
+
+		/*
+		 * Retry any blocked locks that have been notified by
+		 * the VFS. Don't do this during grace period.
+		 * (Theoretically, there shouldn't even be blocked locks
+		 * during grace period).
+		 */
+		if (!nlmsvc_grace_period) {
+			timeout = nlmsvc_retry_blocked();
+		} else if (time_before(grace_period_expire, jiffies))
+			clear_grace_period();
+
+		/*
+		 * Find a socket with data available and call its
+		 * recvfrom routine.
+		 */
+		err = svc_recv(serv, rqstp, timeout);
+		if (err == -EAGAIN || err == -EINTR)
+			continue;
+		if (err < 0) {
+			printk(KERN_WARNING
+			       "lockd: terminating on error %d\n",
+			       -err);
+			break;
+		}
+
+		dprintk("lockd: request from %08x\n",
+			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));
+
+		svc_process(serv, rqstp);
+
+	}
+
+	/*
+	 * Check whether there's a new lockd process before
+	 * shutting down the hosts and clearing the slot.
+	 */
+	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
+		if (nlmsvc_ops)
+			nlmsvc_invalidate_all();
+		nlm_shutdown_hosts();
+		nlmsvc_pid = 0;
+	} else
+		printk(KERN_DEBUG
+			"lockd: new process, skipping host shutdown\n");
+	wake_up(&lockd_exit);
+		
+	/* Exit the RPC thread */
+	svc_exit_thread(rqstp);
+
+	/* release rpciod */
+	rpciod_down();
+
+	/* Release module */
+	unlock_kernel();
+	module_put_and_exit(0);
+}
+
+/*
+ * Bring up the lockd process if it's not already up.
+ */
+int
+lockd_up(void)
+{
+	static int		warned;
+	struct svc_serv *	serv;
+	int			error = 0;
+
+	down(&nlmsvc_sema);
+	/*
+	 * Unconditionally increment the user count ... this is
+	 * the number of clients who _want_ a lockd process.
+	 */
+	nlmsvc_users++; 
+	/*
+	 * Check whether we're already up and running.
+	 */
+	if (nlmsvc_pid)
+		goto out;
+
+	/*
+	 * Sanity check: if there's no pid,
+	 * we should be the first user ...
+	 */
+	if (nlmsvc_users > 1)
+		printk(KERN_WARNING
+			"lockd_up: no pid, %d users??\n", nlmsvc_users);
+
+	error = -ENOMEM;
+	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE);
+	if (!serv) {
+		printk(KERN_WARNING "lockd_up: create service failed\n");
+		goto out;
+	}
+
+	if ((error = svc_makesock(serv, IPPROTO_UDP, nlm_udpport)) < 0 
+#ifdef CONFIG_NFSD_TCP
+	 || (error = svc_makesock(serv, IPPROTO_TCP, nlm_tcpport)) < 0
+#endif
+		) {
+		if (warned++ == 0) 
+			printk(KERN_WARNING
+				"lockd_up: makesock failed, error=%d\n", error);
+		goto destroy_and_out;
+	} 
+	warned = 0;
+
+	/*
+	 * Create the kernel thread and wait for it to start.
+	 */
+	error = svc_create_thread(lockd, serv);
+	if (error) {
+		printk(KERN_WARNING
+			"lockd_up: create thread failed, error=%d\n", error);
+		goto destroy_and_out;
+	}
+	down(&lockd_start);
+
+	/*
+	 * Note: svc_serv structures have an initial use count of 1,
+	 * so we exit through here on both success and failure.
+	 */
+destroy_and_out:
+	svc_destroy(serv);
+out:
+	up(&nlmsvc_sema);
+	return error;
+}
+EXPORT_SYMBOL(lockd_up);
+
+/*
+ * Decrement the user count and bring down lockd if we're the last.
+ */
+void
+lockd_down(void)
+{
+	static int warned;
+
+	down(&nlmsvc_sema);
+	if (nlmsvc_users) {
+		if (--nlmsvc_users)
+			goto out;
+	} else
+		printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
+
+	if (!nlmsvc_pid) {
+		if (warned++ == 0)
+			printk(KERN_WARNING "lockd_down: no lockd running.\n"); 
+		goto out;
+	}
+	warned = 0;
+
+	kill_proc(nlmsvc_pid, SIGKILL, 1);
+	/*
+	 * Wait for the lockd process to exit, but since we're holding
+	 * the lockd semaphore, we can't wait around forever ...
+	 */
+	clear_thread_flag(TIF_SIGPENDING);
+	interruptible_sleep_on_timeout(&lockd_exit, HZ);
+	if (nlmsvc_pid) {
+		printk(KERN_WARNING 
+			"lockd_down: lockd failed to exit, clearing pid\n");
+		nlmsvc_pid = 0;
+	}
+	spin_lock_irq(&current->sighand->siglock);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+out:
+	up(&nlmsvc_sema);
+}
+EXPORT_SYMBOL(lockd_down);
+
+/*
+ * Sysctl parameters (same as module parameters, different interface).
+ */
+
+/* Something that isn't CTL_ANY, CTL_NONE or a value that may clash. */
+#define CTL_UNNUMBERED		-2
+
+static ctl_table nlm_sysctls[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_grace_period",
+		.data		= &nlm_grace_period,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_doulongvec_minmax,
+		.extra1		= (unsigned long *) &nlm_grace_period_min,
+		.extra2		= (unsigned long *) &nlm_grace_period_max,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_timeout",
+		.data		= &nlm_timeout,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_doulongvec_minmax,
+		.extra1		= (unsigned long *) &nlm_timeout_min,
+		.extra2		= (unsigned long *) &nlm_timeout_max,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_udpport",
+		.data		= &nlm_udpport,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= (int *) &nlm_port_min,
+		.extra2		= (int *) &nlm_port_max,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_tcpport",
+		.data		= &nlm_tcpport,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= (int *) &nlm_port_min,
+		.extra2		= (int *) &nlm_port_max,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nlm_sysctl_dir[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nfs",
+		.mode		= 0555,
+		.child		= nlm_sysctls,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nlm_sysctl_root[] = {
+	{
+		.ctl_name	= CTL_FS,
+		.procname	= "fs",
+		.mode		= 0555,
+		.child		= nlm_sysctl_dir,
+	},
+	{ .ctl_name = 0 }
+};
+
+/*
+ * Module (and driverfs) parameters.
+ */
+
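+/*
+ * Generate a param_set_<name>() helper that parses an integer with
+ * which_strtol() and rejects values outside [min, max].
+ */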
+#define param_set_min_max(name, type, which_strtol, min, max)		\
+static int param_set_##name(const char *val, struct kernel_param *kp)	\
+{									\
+	char *endp;							\
+	__typeof__(type) num = which_strtol(val, &endp, 0);		\
+	if (endp == val || *endp || num < (min) || num > (max))		\
+		return -EINVAL;						\
+	*((int *) kp->arg) = num;					\
+	return 0;							\
+}
+
+static inline int is_callback(u32 proc)
+{
+	return proc == NLMPROC_GRANTED
+		|| proc == NLMPROC_GRANTED_MSG
+		|| proc == NLMPROC_TEST_RES
+		|| proc == NLMPROC_LOCK_RES
+		|| proc == NLMPROC_CANCEL_RES
+		|| proc == NLMPROC_UNLOCK_RES
+		|| proc == NLMPROC_NSM_NOTIFY;
+}
+
+
+static int lockd_authenticate(struct svc_rqst *rqstp)
+{
+	rqstp->rq_client = NULL;
+	switch (rqstp->rq_authop->flavour) {
+		case RPC_AUTH_NULL:
+		case RPC_AUTH_UNIX:
+			if (rqstp->rq_proc == 0)
+				return SVC_OK;
+			if (is_callback(rqstp->rq_proc)) {
+				/* Leave it to individual procedures to
+				 * call nlmsvc_lookup_host(rqstp)
+				 */
+				return SVC_OK;
+			}
+			return svc_set_client(rqstp);
+	}
+	return SVC_DENIED;
+}
+
+
+param_set_min_max(port, int, simple_strtol, 0, 65535)
+param_set_min_max(grace_period, unsigned long, simple_strtoul,
+		  nlm_grace_period_min, nlm_grace_period_max)
+param_set_min_max(timeout, unsigned long, simple_strtoul,
+		  nlm_timeout_min, nlm_timeout_max)
+
+MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION ".");
+MODULE_LICENSE("GPL");
+
+module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong,
+		  &nlm_grace_period, 0644);
+module_param_call(nlm_timeout, param_set_timeout, param_get_ulong,
+		  &nlm_timeout, 0644);
+module_param_call(nlm_udpport, param_set_port, param_get_int,
+		  &nlm_udpport, 0644);
+module_param_call(nlm_tcpport, param_set_port, param_get_int,
+		  &nlm_tcpport, 0644);
+
+/*
+ * Initialising and terminating the module.
+ */
+
+static int __init init_nlm(void)
+{
+	nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root, 0);
+	return nlm_sysctl_table ? 0 : -ENOMEM;
+}
+
+static void __exit exit_nlm(void)
+{
+	/* FIXME: delete all NLM clients */
+	nlm_shutdown_hosts();
+	unregister_sysctl_table(nlm_sysctl_table);
+}
+
+module_init(init_nlm);
+module_exit(exit_nlm);
+
+/*
+ * Define NLM program and procedures
+ */
+static struct svc_version	nlmsvc_version1 = {
+		.vs_vers	= 1,
+		.vs_nproc	= 17,
+		.vs_proc	= nlmsvc_procedures,
+		.vs_xdrsize	= NLMSVC_XDRSIZE,
+};
+static struct svc_version	nlmsvc_version3 = {
+		.vs_vers	= 3,
+		.vs_nproc	= 24,
+		.vs_proc	= nlmsvc_procedures,
+		.vs_xdrsize	= NLMSVC_XDRSIZE,
+};
+#ifdef CONFIG_LOCKD_V4
+static struct svc_version	nlmsvc_version4 = {
+		.vs_vers	= 4,
+		.vs_nproc	= 24,
+		.vs_proc	= nlmsvc_procedures4,
+		.vs_xdrsize	= NLMSVC_XDRSIZE,
+};
+#endif
+static struct svc_version *	nlmsvc_version[] = {
+	[1] = &nlmsvc_version1,
+	[3] = &nlmsvc_version3,
+#ifdef CONFIG_LOCKD_V4
+	[4] = &nlmsvc_version4,
+#endif
+};
+
+static struct svc_stat		nlmsvc_stats;
+
+#define NLM_NRVERS	(sizeof(nlmsvc_version)/sizeof(nlmsvc_version[0]))
+static struct svc_program	nlmsvc_program = {
+	.pg_prog		= NLM_PROGRAM,		/* program number */
+	.pg_nvers		= NLM_NRVERS,		/* number of entries in nlmsvc_version */
+	.pg_vers		= nlmsvc_version,	/* version table */
+	.pg_name		= "lockd",		/* service name */
+	.pg_class		= "nfsd",		/* share authentication with nfsd */
+	.pg_stats		= &nlmsvc_stats,	/* stats table */
+	.pg_authenticate = &lockd_authenticate	/* export authentication */
+};
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
new file mode 100644
index 0000000..489670e
--- /dev/null
+++ b/fs/lockd/svc4proc.c
@@ -0,0 +1,580 @@
+/*
+ * linux/fs/lockd/svc4proc.c
+ *
+ * Lockd server procedures. We don't implement the NLM_*_RES 
+ * procedures because we don't use the async procedures.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+
+static u32	nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
+static void	nlm4svc_callback_exit(struct rpc_task *);
+
+/*
+ * Obtain client and file from arguments
+ */
+static u32
+nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+			struct nlm_host **hostp, struct nlm_file **filp)
+{
+	struct nlm_host		*host = NULL;
+	struct nlm_file		*file = NULL;
+	struct nlm_lock		*lock = &argp->lock;
+	u32			error = 0;
+
+	/* nfsd callbacks must have been installed for this procedure */
+	if (!nlmsvc_ops)
+		return nlm_lck_denied_nolocks;
+
+	/* Obtain host handle */
+	if (!(host = nlmsvc_lookup_host(rqstp))
+	 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
+		goto no_locks;
+	*hostp = host;
+
+	/* Obtain file pointer. Not used by FREE_ALL call. */
+	if (filp != NULL) {
+		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+			goto no_locks;
+		*filp = file;
+
+		/* Set up the missing parts of the file_lock structure */
+		lock->fl.fl_file  = file->f_file;
+		lock->fl.fl_owner = (fl_owner_t) host;
+		lock->fl.fl_lmops = &nlmsvc_lock_operations;
+	}
+
+	return 0;
+
+no_locks:
+	if (host)
+		nlm_release_host(host);
+	if (error)
+		return error;
+	return nlm_lck_denied_nolocks;
+}
+
+/*
+ * NULL: Test for presence of service
+ */
+static int
+nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	dprintk("lockd: NULL          called\n");
+	return rpc_success;
+}
+
+/*
+ * TEST: Check for conflicting lock
+ */
+static int
+nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: TEST4        called\n");
+	resp->cookie = argp->cookie;
+
+	/* Don't accept test requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now check for conflicting locks */
+	resp->status = nlmsvc_testlock(file, &argp->lock, &resp->lock);
+
+	dprintk("lockd: TEST4          status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: LOCK          called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+#if 0
+	/* If supplied state doesn't match current state, we assume it's
+	 * an old request that time-warped somehow. Any error return would
+	 * do in this case because it's irrelevant anyway.
+	 *
+	 * NB: We don't retrieve the remote host's state yet.
+	 */
+	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
+		resp->status = nlm_lck_denied_nolocks;
+	} else
+#endif
+
+	/* Now try to lock the file */
+	resp->status = nlmsvc_lock(rqstp, file, &argp->lock,
+					argp->block, &argp->cookie);
+
+	dprintk("lockd: LOCK          status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: CANCEL        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Try to cancel request. */
+	resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
+
+	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNLOCK: release a lock
+ */
+static int
+nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNLOCK        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to remove the lock */
+	resp->status = nlmsvc_unlock(file, &argp->lock);
+
+	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * GRANTED: A server calls us back to tell us that a process' lock
+ * request was granted
+ */
+static int
+nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	resp->cookie = argp->cookie;
+
+	dprintk("lockd: GRANTED       called\n");
+	resp->status = nlmclnt_grant(&argp->lock);
+	dprintk("lockd: GRANTED       status %d\n", ntohl(resp->status));
+	return rpc_success;
+}
+
+/*
+ * `Async' versions of the above service routines. They aren't really,
+ * because we send the callback before the reply proper. I hope this
+ * doesn't break any clients.
+ */
+static int
+nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: TEST_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_test(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_TEST_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: LOCK_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_lock(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					       void	       *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: CANCEL_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_cancel(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                               void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: UNLOCK_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_unlock(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                                void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: GRANTED_MSG   called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_granted(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
+	return stat;
+}
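
As the `Async' comment above says, each *_MSG procedure is just the synchronous handler followed by a callback RPC that carries the result. Stripped of the sunrpc machinery, with stand-in types and a hypothetical TEST_RES number, the wrapping pattern is roughly:

#include <stdio.h>

struct res { int status; };

/* Stand-ins for the real handler and the async result callback. */
typedef int (*proc_fn)(int arg, struct res *resp);
typedef int (*callback_fn)(int res_proc, const struct res *resp);

static int do_test(int arg, struct res *resp)
{
	resp->status = (arg > 0) ? 0 : 1;	/* pretend 0 == granted */
	return 0;
}

static int send_result(int res_proc, const struct res *resp)
{
	printf("callback proc %d, status %d\n", res_proc, resp->status);
	return 0;
}

/* The _MSG pattern: run the normal handler, then push the result
 * through a callback instead of returning it in the reply body. */
static int proc_msg(proc_fn proc, callback_fn cb, int res_proc, int arg)
{
	struct res res = { 0 };
	int stat = proc(arg, &res);

	if (stat == 0)
		stat = cb(res_proc, &res);
	return stat;
}

int main(void)
{
	return proc_msg(do_test, send_result, 11 /* hypothetical TEST_RES */, 1);
}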
+
+/*
+ * SHARE: create a DOS share or alter an existing share.
+ */
+static int
+nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+				          struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: SHARE         called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to create the share */
+	resp->status = nlmsvc_share_file(host, file, argp);
+
+	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNSHARE: Release a DOS share.
+ */
+static int
+nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNSHARE       called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to unshare the file */
+	resp->status = nlmsvc_unshare_file(host, file, argp);
+
+	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * NM_LOCK: Create an unmonitored lock
+ */
+static int
+nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	dprintk("lockd: NM_LOCK       called\n");
+
+	argp->monitor = 0;		/* just clear the monitor flag */
+	return nlm4svc_proc_lock(rqstp, argp, resp);
+}
+
+/*
+ * FREE_ALL: Release all locks and shares held by client
+ */
+static int
+nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void            *resp)
+{
+	struct nlm_host	*host;
+
+	/* Obtain client */
+	if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL))
+		return rpc_success;
+
+	nlmsvc_free_host_resources(host);
+	nlm_release_host(host);
+	return rpc_success;
+}
+
+/*
+ * SM_NOTIFY: private callback from statd (not part of official NLM proto)
+ */
+static int
+nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+					      void	        *resp)
+{
+	struct sockaddr_in	saddr = rqstp->rq_addr;
+	int			vers = argp->vers;
+	int			prot = argp->proto >> 1;
+
+	struct nlm_host		*host;
+
+	dprintk("lockd: SM_NOTIFY     called\n");
+	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
+	 || ntohs(saddr.sin_port) >= 1024) {
+		printk(KERN_WARNING
+			"lockd: rejected NSM callback from %08x:%d\n",
+			ntohl(rqstp->rq_addr.sin_addr.s_addr),
+			ntohs(rqstp->rq_addr.sin_port));
+		return rpc_system_err;
+	}
+
+	/* Obtain the host pointer for this NFS server and try to
+	 * reclaim all locks we hold on this server.
+	 */
+	saddr.sin_addr.s_addr = argp->addr;
+
+	if ((argp->proto & 1)==0) {
+		if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
+			nlmclnt_recovery(host, argp->state);
+			nlm_release_host(host);
+		}
+	} else {
+		/* If we run on an NFS server, delete all locks held by the client */
+
+		if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
+			nlmsvc_free_host_resources(host);
+			nlm_release_host(host);
+		}
+	}
+	return rpc_success;
+}
+
+/*
+ * client sent a GRANTED_RES, let's remove the associated block
+ */
+static int
+nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+                                                void            *resp)
+{
+	if (!nlmsvc_ops)
+		return rpc_success;
+
+	dprintk("lockd: GRANTED_RES   called\n");
+
+	nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status);
+	return rpc_success;
+}
+
+
+
+/*
+ * This is the generic lockd callback for async RPC calls
+ */
+static u32
+nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_rqst	*call;
+
+	if (!(call = nlmclnt_alloc_call()))
+		return rpc_system_err;
+
+	host = nlmclnt_lookup_host(&rqstp->rq_addr,
+				rqstp->rq_prot, rqstp->rq_vers);
+	if (!host) {
+		kfree(call);
+		return rpc_system_err;
+	}
+
+	call->a_flags = RPC_TASK_ASYNC;
+	call->a_host  = host;
+	memcpy(&call->a_args, resp, sizeof(*resp));
+
+	if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
+		goto error;
+
+	return rpc_success;
+ error:
+	kfree(call);
+	nlm_release_host(host);
+	return rpc_system_err;
+}
+
+static void
+nlm4svc_callback_exit(struct rpc_task *task)
+{
+	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: %4d callback failed (errno = %d)\n",
+					task->tk_pid, -task->tk_status);
+	}
+	nlm_release_host(call->a_host);
+	kfree(call);
+}
+
+/*
+ * NLM Server procedures.
+ */
+
+#define nlm4svc_encode_norep	nlm4svc_encode_void
+#define nlm4svc_decode_norep	nlm4svc_decode_void
+#define nlm4svc_decode_testres	nlm4svc_decode_void
+#define nlm4svc_decode_lockres	nlm4svc_decode_void
+#define nlm4svc_decode_unlockres	nlm4svc_decode_void
+#define nlm4svc_decode_cancelres	nlm4svc_decode_void
+#define nlm4svc_decode_grantedres	nlm4svc_decode_void
+
+#define nlm4svc_proc_none	nlm4svc_proc_null
+#define nlm4svc_proc_test_res	nlm4svc_proc_null
+#define nlm4svc_proc_lock_res	nlm4svc_proc_null
+#define nlm4svc_proc_cancel_res	nlm4svc_proc_null
+#define nlm4svc_proc_unlock_res	nlm4svc_proc_null
+
+struct nlm_void			{ int dummy; };
+
+#define PROC(name, xargt, xrest, argt, rest, respsize)	\
+ { .pc_func	= (svc_procfunc) nlm4svc_proc_##name,	\
+   .pc_decode	= (kxdrproc_t) nlm4svc_decode_##xargt,	\
+   .pc_encode	= (kxdrproc_t) nlm4svc_encode_##xrest,	\
+   .pc_release	= NULL,					\
+   .pc_argsize	= sizeof(struct nlm_##argt),		\
+   .pc_ressize	= sizeof(struct nlm_##rest),		\
+   .pc_xdrressize = respsize,				\
+ }
+#define	Ck	(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))	/* cookie */
+#define	No	(1+1024/4)				/* netobj */
+#define	St	1					/* status */
+#define	Rg	4					/* range (offset + length) */
+struct svc_procedure		nlmsvc_procedures4[] = {
+  PROC(null,		void,		void,		void,	void, 1),
+  PROC(test,		testargs,	testres,	args,	res, Ck+St+2+No+Rg),
+  PROC(lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(cancel,		cancargs,	res,		args,	res, Ck+St),
+  PROC(unlock,		unlockargs,	res,		args,	res, Ck+St),
+  PROC(granted,		testargs,	res,		args,	res, Ck+St),
+  PROC(test_msg,	testargs,	norep,		args,	void, 1),
+  PROC(lock_msg,	lockargs,	norep,		args,	void, 1),
+  PROC(cancel_msg,	cancargs,	norep,		args,	void, 1),
+  PROC(unlock_msg,	unlockargs,	norep,		args,	void, 1),
+  PROC(granted_msg,	testargs,	norep,		args,	void, 1),
+  PROC(test_res,	testres,	norep,		res,	void, 1),
+  PROC(lock_res,	lockres,	norep,		res,	void, 1),
+  PROC(cancel_res,	cancelres,	norep,		res,	void, 1),
+  PROC(unlock_res,	unlockres,	norep,		res,	void, 1),
+  PROC(granted_res,	res,		norep,		res,	void, 1),
+  /* statd callback */
+  PROC(sm_notify,	reboot,		void,		reboot,	void, 1),
+  PROC(none,		void,		void,		void,	void, 0),
+  PROC(none,		void,		void,		void,	void, 0),
+  PROC(none,		void,		void,		void,	void, 0),
+  PROC(share,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(unshare,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(nm_lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(free_all,	notify,		void,		args,	void, 1),
+
+};
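
A quick worked example: pc_xdrressize counts 32-bit XDR words, so the TEST4 budget Ck+St+2+No+Rg can be checked by hand. Assuming NLM_MAXCOOKIELEN is 32 bytes (the value the lockd XDR headers appear to use), a small program reproduces the arithmetic:

#include <stdio.h>

/* XDR_QUADLEN rounds a byte count up to 32-bit words. */
#define XDR_QUADLEN(n)		(((n) + 3) >> 2)
#define NLM_MAXCOOKIELEN	32	/* assumed, matches the lockd XDR header */

int main(void)
{
	int Ck = 1 + XDR_QUADLEN(NLM_MAXCOOKIELEN);	/* length word + cookie data */
	int St = 1;					/* status word */
	int No = 1 + 1024 / 4;				/* netobj: length + up to 1KB data */
	int Rg = 4;					/* NLMv4 range: 64-bit offset + length */

	/* Maximum TEST4 reply size in 32-bit words, as used in the table above. */
	printf("TEST4 respsize = %d words (%d bytes)\n",
	       Ck + St + 2 + No + Rg, (Ck + St + 2 + No + Rg) * 4);
	return 0;
}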
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
new file mode 100644
index 0000000..49f9597
--- /dev/null
+++ b/fs/lockd/svclock.c
@@ -0,0 +1,686 @@
+/*
+ * linux/fs/lockd/svclock.c
+ *
+ * Handling of server-side locks, mostly of the blocked variety.
+ * This is the ugliest part of lockd because we tread on very thin ice.
+ * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
+ * IMNSHO introducing the grant callback into the NLM protocol was one
+ * of the worst ideas Sun ever had. Except maybe for the idea of doing
+ * NFS file locking at all.
+ *
+ * I'm trying hard to avoid race conditions by protecting most accesses
+ * to a file's list of blocked locks through a semaphore. The global
+ * list of blocked locks is not protected in this fashion however.
+ * Therefore, some functions (such as the RPC callback for the async grant
+ * call) move blocked locks towards the head of the list *while some other
+ * process might be traversing it*. This should not be a problem in
+ * practice, because this will only cause functions traversing the list
+ * to visit some blocks twice.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/nlm.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_SVCLOCK
+
+#ifdef CONFIG_LOCKD_V4
+#define nlm_deadlock	nlm4_deadlock
+#else
+#define nlm_deadlock	nlm_lck_denied
+#endif
+
+static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
+static int	nlmsvc_remove_block(struct nlm_block *block);
+static void	nlmsvc_grant_callback(struct rpc_task *task);
+
+/*
+ * The list of blocked locks to retry
+ */
+static struct nlm_block *	nlm_blocked;
+
+/*
+ * Insert a blocked lock into the global list
+ */
+static void
+nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
+{
+	struct nlm_block **bp, *b;
+
+	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
+	if (block->b_queued)
+		nlmsvc_remove_block(block);
+	bp = &nlm_blocked;
+	if (when != NLM_NEVER) {
+		if ((when += jiffies) == NLM_NEVER)
+			when ++;
+		while ((b = *bp) && time_before_eq(b->b_when,when) && b->b_when != NLM_NEVER)
+			bp = &b->b_next;
+	} else
+		while ((b = *bp) != 0)
+			bp = &b->b_next;
+
+	block->b_queued = 1;
+	block->b_when = when;
+	block->b_next = b;
+	*bp = block;
+}
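
A self-contained userspace sketch of the same insertion discipline: the list stays ordered by expiry time, and entries without a deadline (NLM_NEVER, modelled here as ULONG_MAX) sink towards the tail.

#include <limits.h>
#include <stdio.h>

#define NEVER ULONG_MAX		/* stand-in for NLM_NEVER */

struct blk {
	struct blk *next;
	unsigned long when;	/* absolute expiry, NEVER == no deadline */
};

/* Insert keeping the list sorted by 'when'; NEVER entries go last. */
static void insert_sorted(struct blk **head, struct blk *b, unsigned long when)
{
	struct blk **bp = head;

	while (*bp && (*bp)->when <= when && (*bp)->when != NEVER)
		bp = &(*bp)->next;
	b->when = when;
	b->next = *bp;
	*bp = b;
}

int main(void)
{
	struct blk a, b, c, *head = NULL, *p;

	insert_sorted(&head, &a, NEVER);
	insert_sorted(&head, &b, 100);
	insert_sorted(&head, &c, 50);
	for (p = head; p; p = p->next)
		printf("%lu ", p->when);	/* 50, 100, then NEVER */
	printf("\n");
	return 0;
}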
+
+/*
+ * Remove a block from the global list
+ */
+static int
+nlmsvc_remove_block(struct nlm_block *block)
+{
+	struct nlm_block **bp, *b;
+
+	if (!block->b_queued)
+		return 1;
+	for (bp = &nlm_blocked; (b = *bp) != 0; bp = &b->b_next) {
+		if (b == block) {
+			*bp = block->b_next;
+			block->b_queued = 0;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find a block for a given lock and optionally remove it from
+ * the list.
+ */
+static struct nlm_block *
+nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove)
+{
+	struct nlm_block	**head, *block;
+	struct file_lock	*fl;
+
+	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
+				file, lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end, lock->fl.fl_type);
+	for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) {
+		fl = &block->b_call.a_args.lock.fl;
+		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
+				block->b_file, fl->fl_pid,
+				(long long)fl->fl_start,
+				(long long)fl->fl_end, fl->fl_type,
+				nlmdbg_cookie2a(&block->b_call.a_args.cookie));
+		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
+			if (remove) {
+				*head = block->b_next;
+				block->b_queued = 0;
+			}
+			return block;
+		}
+	}
+
+	return NULL;
+}
+
+static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
+{
+	if(a->len != b->len)
+		return 0;
+	if(memcmp(a->data,b->data,a->len))
+		return 0;
+	return 1;
+}
+
+/*
+ * Find a block with a given NLM cookie.
+ */
+static inline struct nlm_block *
+nlmsvc_find_block(struct nlm_cookie *cookie,  struct sockaddr_in *sin)
+{
+	struct nlm_block *block;
+
+	for (block = nlm_blocked; block; block = block->b_next) {
+		dprintk("cookie: head of blocked queue %p, block %p\n", 
+			nlm_blocked, block);
+		if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie)
+				&& nlm_cmp_addr(sin, &block->b_host->h_addr))
+			break;
+	}
+
+	return block;
+}
+
+/*
+ * Create a block and initialize it.
+ *
+ * Note: we explicitly set the cookie of the grant reply to that of
+ * the blocked lock request. The spec explicitly mentions that the client
+ * should _not_ rely on the callback containing the same cookie as the
+ * request, but (as I found out later) that's because some implementations
+ * do just this. Never mind the standards committees, they support our
+ * logging industries.
+ */
+static inline struct nlm_block *
+nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
+				struct nlm_lock *lock, struct nlm_cookie *cookie)
+{
+	struct nlm_block	*block;
+	struct nlm_host		*host;
+	struct nlm_rqst		*call;
+
+	/* Create host handle for callback */
+	host = nlmclnt_lookup_host(&rqstp->rq_addr,
+				rqstp->rq_prot, rqstp->rq_vers);
+	if (host == NULL)
+		return NULL;
+
+	/* Allocate memory for block, and initialize arguments */
+	if (!(block = (struct nlm_block *) kmalloc(sizeof(*block), GFP_KERNEL)))
+		goto failed;
+	memset(block, 0, sizeof(*block));
+	locks_init_lock(&block->b_call.a_args.lock.fl);
+	locks_init_lock(&block->b_call.a_res.lock.fl);
+
+	if (!nlmclnt_setgrantargs(&block->b_call, lock))
+		goto failed_free;
+
+	/* Set notifier function for VFS, and init args */
+	block->b_call.a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
+	block->b_call.a_args.cookie = *cookie;	/* see above */
+
+	dprintk("lockd: created block %p...\n", block);
+
+	/* Create and initialize the block */
+	block->b_daemon = rqstp->rq_server;
+	block->b_host   = host;
+	block->b_file   = file;
+
+	/* Add to file's list of blocks */
+	block->b_fnext  = file->f_blocks;
+	file->f_blocks  = block;
+
+	/* Set up RPC arguments for callback */
+	call = &block->b_call;
+	call->a_host    = host;
+	call->a_flags   = RPC_TASK_ASYNC;
+
+	return block;
+
+failed_free:
+	kfree(block);
+failed:
+	nlm_release_host(host);
+	return NULL;
+}
+
+/*
+ * Delete a block. If the lock was cancelled or the grant callback
+ * failed, unlock is set to 1.
+ * It is the caller's responsibility to check whether the file
+ * can be closed hereafter.
+ */
+static void
+nlmsvc_delete_block(struct nlm_block *block, int unlock)
+{
+	struct file_lock	*fl = &block->b_call.a_args.lock.fl;
+	struct nlm_file		*file = block->b_file;
+	struct nlm_block	**bp;
+
+	dprintk("lockd: deleting block %p...\n", block);
+
+	/* Remove block from list */
+	nlmsvc_remove_block(block);
+	if (fl->fl_next)
+		posix_unblock_lock(file->f_file, fl);
+	if (unlock) {
+		fl->fl_type = F_UNLCK;
+		posix_lock_file(file->f_file, fl);
+		block->b_granted = 0;
+	}
+
+	/* If the block is in the middle of a GRANT callback,
+	 * don't kill it yet. */
+	if (block->b_incall) {
+		nlmsvc_insert_block(block, NLM_NEVER);
+		block->b_done = 1;
+		return;
+	}
+
+	/* Remove block from file's list of blocks */
+	for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) {
+		if (*bp == block) {
+			*bp = block->b_fnext;
+			break;
+		}
+	}
+
+	if (block->b_host)
+		nlm_release_host(block->b_host);
+	nlmclnt_freegrantargs(&block->b_call);
+	kfree(block);
+}
+
+/*
+ * Loop over all blocks and perform the action specified.
+ * (NLM_ACT_CHECK handled by nlmsvc_inspect_file).
+ */
+int
+nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	struct nlm_block	*block, *next;
+
+	down(&file->f_sema);
+	for (block = file->f_blocks; block; block = next) {
+		next = block->b_fnext;
+		if (action == NLM_ACT_MARK)
+			block->b_host->h_inuse = 1;
+		else if (action == NLM_ACT_UNLOCK) {
+			if (host == NULL || host == block->b_host)
+				nlmsvc_delete_block(block, 1);
+		}
+	}
+	up(&file->f_sema);
+	return 0;
+}
+
+/*
+ * Attempt to establish a lock, and if it can't be granted, block it
+ * if required.
+ */
+u32
+nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+			struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
+{
+	struct file_lock	*conflock;
+	struct nlm_block	*block;
+	int			error;
+
+	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_type, lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end,
+				wait);
+
+
+	/* Get existing block (in case client is busy-waiting) */
+	block = nlmsvc_lookup_block(file, lock, 0);
+
+	lock->fl.fl_flags |= FL_LOCKD;
+
+again:
+	/* Lock file against concurrent access */
+	down(&file->f_sema);
+
+	if (!(conflock = posix_test_lock(file->f_file, &lock->fl))) {
+		error = posix_lock_file(file->f_file, &lock->fl);
+
+		if (block)
+			nlmsvc_delete_block(block, 0);
+		up(&file->f_sema);
+
+		dprintk("lockd: posix_lock_file returned %d\n", -error);
+		switch(-error) {
+		case 0:
+			return nlm_granted;
+		case EDEADLK:
+			return nlm_deadlock;
+		case EAGAIN:
+			return nlm_lck_denied;
+		default:			/* includes ENOLCK */
+			return nlm_lck_denied_nolocks;
+		}
+	}
+
+	if (!wait) {
+		up(&file->f_sema);
+		return nlm_lck_denied;
+	}
+
+	if (posix_locks_deadlock(&lock->fl, conflock)) {
+		up(&file->f_sema);
+		return nlm_deadlock;
+	}
+
+	/* If we don't have a block, create and initialize it. Then
+	 * retry because we may have slept in kmalloc. */
+	/* We have to release f_sema as nlmsvc_create_block may try to
+	 * claim it while doing host garbage collection */
+	if (block == NULL) {
+		up(&file->f_sema);
+		dprintk("lockd: blocking on this lock (allocating).\n");
+		if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie)))
+			return nlm_lck_denied_nolocks;
+		goto again;
+	}
+
+	/* Append to list of blocked */
+	nlmsvc_insert_block(block, NLM_NEVER);
+
+	if (list_empty(&block->b_call.a_args.lock.fl.fl_block)) {
+		/* Now add block to block list of the conflicting lock
+		   if we haven't done so. */
+		dprintk("lockd: blocking on this lock.\n");
+		posix_block_lock(conflock, &block->b_call.a_args.lock.fl);
+	}
+
+	up(&file->f_sema);
+	return nlm_lck_blocked;
+}
+
+/*
+ * Test for presence of a conflicting lock.
+ */
+u32
+nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
+				       struct nlm_lock *conflock)
+{
+	struct file_lock	*fl;
+
+	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_type,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end);
+
+	if ((fl = posix_test_lock(file->f_file, &lock->fl)) != NULL) {
+		dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
+				fl->fl_type, (long long)fl->fl_start,
+				(long long)fl->fl_end);
+		conflock->caller = "somehost";	/* FIXME */
+		conflock->oh.len = 0;		/* don't return OH info */
+		conflock->fl = *fl;
+		return nlm_lck_denied;
+	}
+
+	return nlm_granted;
+}
+
+/*
+ * Remove a lock.
+ * This also acts as a CANCEL: consider the case where we send a GRANTED_MSG,
+ * the client's GRANTED_RES reply gets lost, and the client calls UNLOCK
+ * immediately afterwards. In that case the block will still be there, and
+ * hence must be removed.
+ */
+u32
+nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
+{
+	int	error;
+
+	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end);
+
+	/* First, cancel any lock that might be there */
+	nlmsvc_cancel_blocked(file, lock);
+
+	lock->fl.fl_type = F_UNLCK;
+	error = posix_lock_file(file->f_file, &lock->fl);
+
+	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
+}
+
+/*
+ * Cancel a previously blocked request.
+ *
+ * A cancel request always overrides any grant that may currently
+ * be in progress.
+ * The calling procedure must check whether the file can be closed.
+ */
+u32
+nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
+{
+	struct nlm_block	*block;
+
+	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end);
+
+	down(&file->f_sema);
+	if ((block = nlmsvc_lookup_block(file, lock, 1)) != NULL)
+		nlmsvc_delete_block(block, 1);
+	up(&file->f_sema);
+	return nlm_granted;
+}
+
+/*
+ * Unblock a blocked lock request. This is a callback invoked from the
+ * VFS layer when a lock on which we blocked is removed.
+ *
+ * This function doesn't grant the blocked lock instantly, but rather moves
+ * the block to the head of nlm_blocked where it can be picked up by lockd.
+ */
+static void
+nlmsvc_notify_blocked(struct file_lock *fl)
+{
+	struct nlm_block	**bp, *block;
+
+	dprintk("lockd: VFS unblock notification for block %p\n", fl);
+	for (bp = &nlm_blocked; (block = *bp) != 0; bp = &block->b_next) {
+		if (nlm_compare_locks(&block->b_call.a_args.lock.fl, fl)) {
+			nlmsvc_insert_block(block, 0);
+			svc_wake_up(block->b_daemon);
+			return;
+		}
+	}
+
+	printk(KERN_WARNING "lockd: notification for unknown block!\n");
+}
+
+static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
+{
+	return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
+}
+
+struct lock_manager_operations nlmsvc_lock_operations = {
+	.fl_compare_owner = nlmsvc_same_owner,
+	.fl_notify = nlmsvc_notify_blocked,
+};
+
+/*
+ * Try to claim a lock that was previously blocked.
+ *
+ * Note that we use both the RPC_GRANTED_MSG call _and_ an async
+ * RPC thread when notifying the client. This seems like overkill...
+ * Here's why:
+ *  -	we don't want to use a synchronous RPC thread, otherwise
+ *	we might find ourselves hanging on a dead portmapper.
+ *  -	Some lockd implementations (e.g. HP) don't react to
+ *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
+ */
+static void
+nlmsvc_grant_blocked(struct nlm_block *block)
+{
+	struct nlm_file		*file = block->b_file;
+	struct nlm_lock		*lock = &block->b_call.a_args.lock;
+	struct file_lock	*conflock;
+	int			error;
+
+	dprintk("lockd: grant blocked lock %p\n", block);
+
+	/* First thing is lock the file */
+	down(&file->f_sema);
+
+	/* Unlink block request from list */
+	nlmsvc_remove_block(block);
+
+	/* If b_granted is true this means we've been here before.
+	 * Just retry the grant callback, possibly refreshing the RPC
+	 * binding */
+	if (block->b_granted) {
+		nlm_rebind_host(block->b_host);
+		goto callback;
+	}
+
+	/* Try the lock operation again */
+	if ((conflock = posix_test_lock(file->f_file, &lock->fl)) != NULL) {
+		/* Bummer, we blocked again */
+		dprintk("lockd: lock still blocked\n");
+		nlmsvc_insert_block(block, NLM_NEVER);
+		posix_block_lock(conflock, &lock->fl);
+		up(&file->f_sema);
+		return;
+	}
+
+	/* Alright, no conflicting lock. Now lock it for real. If the
+	 * following yields an error, this is most probably due to low
+	 * memory. Retry the lock in a few seconds.
+	 */
+	if ((error = posix_lock_file(file->f_file, &lock->fl)) < 0) {
+		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
+				-error, __FUNCTION__);
+		nlmsvc_insert_block(block, 10 * HZ);
+		up(&file->f_sema);
+		return;
+	}
+
+callback:
+	/* Lock was granted by VFS. */
+	dprintk("lockd: GRANTing blocked lock.\n");
+	block->b_granted = 1;
+	block->b_incall  = 1;
+
+	/* Schedule next grant callback in 30 seconds */
+	nlmsvc_insert_block(block, 30 * HZ);
+
+	/* Call the client */
+	nlm_get_host(block->b_call.a_host);
+	if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
+						nlmsvc_grant_callback) < 0)
+		nlm_release_host(block->b_call.a_host);
+	up(&file->f_sema);
+}
+
+/*
+ * This is the callback from the RPC layer when the NLM_GRANTED_MSG
+ * RPC call has succeeded or timed out.
+ * Like all RPC callbacks, it is invoked by the rpciod process, so it
+ * had better not sleep. Therefore, we put the blocked lock on the nlm_blocked
+ * chain once more in order to have it removed by lockd itself (which can
+ * then sleep on the file semaphore without disrupting e.g. the nfs client).
+ */
+static void
+nlmsvc_grant_callback(struct rpc_task *task)
+{
+	struct nlm_rqst		*call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_block	*block;
+	unsigned long		timeout;
+	struct sockaddr_in	*peer_addr = RPC_PEERADDR(task->tk_client);
+
+	dprintk("lockd: GRANT_MSG RPC callback\n");
+	dprintk("callback: looking for cookie %s, host (%u.%u.%u.%u)\n",
+		nlmdbg_cookie2a(&call->a_args.cookie),
+		NIPQUAD(peer_addr->sin_addr.s_addr));
+	if (!(block = nlmsvc_find_block(&call->a_args.cookie, peer_addr))) {
+		dprintk("lockd: no block for cookie %s, host (%u.%u.%u.%u)\n",
+			nlmdbg_cookie2a(&call->a_args.cookie),
+			NIPQUAD(peer_addr->sin_addr.s_addr));
+		return;
+	}
+
+	/* Technically, we should down the file semaphore here. Since we
+	 * move the block towards the head of the queue only, no harm
+	 * can be done, though. */
+	if (task->tk_status < 0) {
+		/* RPC error: Re-insert for retransmission */
+		timeout = 10 * HZ;
+	} else if (block->b_done) {
+		/* Block already removed, kill it for real */
+		timeout = 0;
+	} else {
+		/* Call was successful, now wait for client callback */
+		timeout = 60 * HZ;
+	}
+	nlmsvc_insert_block(block, timeout);
+	svc_wake_up(block->b_daemon);
+	block->b_incall = 0;
+
+	nlm_release_host(call->a_host);
+}
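
The retransmission policy in the callback above is a pure function of the RPC status and the b_done flag. With HZ as a placeholder tick rate, it can be written down on its own as a small sketch:

#include <stdio.h>

#define HZ 100	/* placeholder clock-tick rate for the sketch */

/* Mirror of the three cases in nlmsvc_grant_callback(): returns how long
 * (in ticks) the block should sit on nlm_blocked before lockd looks again. */
static unsigned long grant_retry_timeout(int rpc_status, int block_done)
{
	if (rpc_status < 0)
		return 10 * HZ;		/* RPC error: retransmit soon */
	if (block_done)
		return 0;		/* already removed: reap immediately */
	return 60 * HZ;			/* call succeeded: wait for GRANTED_RES */
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       grant_retry_timeout(-5, 0),
	       grant_retry_timeout(0, 1),
	       grant_retry_timeout(0, 0));
	return 0;
}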
+
+/*
+ * We received a GRANT_RES callback. Try to find the corresponding
+ * block.
+ */
+void
+nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status)
+{
+	struct nlm_block	*block;
+	struct nlm_file		*file;
+
+	dprintk("grant_reply: looking for cookie %x, host (%08x), s=%d \n", 
+		*(unsigned int *)(cookie->data), 
+		ntohl(rqstp->rq_addr.sin_addr.s_addr), status);
+	if (!(block = nlmsvc_find_block(cookie, &rqstp->rq_addr)))
+		return;
+	file = block->b_file;
+
+	file->f_count++;
+	down(&file->f_sema);
+	if ((block = nlmsvc_find_block(cookie,&rqstp->rq_addr)) != NULL) {
+		if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
+			/* Try again in a couple of seconds */
+			nlmsvc_insert_block(block, 10 * HZ);
+			block = NULL;
+		} else {
+			/* Lock is now held by client, or has been rejected.
+			 * In both cases, the block should be removed. */
+			up(&file->f_sema);
+			if (status == NLM_LCK_GRANTED)
+				nlmsvc_delete_block(block, 0);
+			else
+				nlmsvc_delete_block(block, 1);
+		}
+	}
+	if (!block)
+		up(&file->f_sema);
+	nlm_release_file(file);
+}
+
+/*
+ * Retry all blocked locks that have been notified. This is where lockd
+ * picks up locks that can be granted, or grant notifications that must
+ * be retransmitted.
+ */
+unsigned long
+nlmsvc_retry_blocked(void)
+{
+	struct nlm_block	*block;
+
+	dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+			nlm_blocked,
+			nlm_blocked? nlm_blocked->b_when : 0);
+	while ((block = nlm_blocked) != 0) {
+		if (block->b_when == NLM_NEVER)
+			break;
+	        if (time_after(block->b_when,jiffies))
+			break;
+		dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
+			block, block->b_when, block->b_done);
+		if (block->b_done)
+			nlmsvc_delete_block(block, 0);
+		else
+			nlmsvc_grant_blocked(block);
+	}
+
+	if ((block = nlm_blocked) && block->b_when != NLM_NEVER)
+		return (block->b_when - jiffies);
+
+	return MAX_SCHEDULE_TIMEOUT;
+}
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
new file mode 100644
index 0000000..757e344
--- /dev/null
+++ b/fs/lockd/svcproc.c
@@ -0,0 +1,606 @@
+/*
+ * linux/fs/lockd/svcproc.c
+ *
+ * Lockd server procedures. We don't implement the NLM_*_RES 
+ * procedures because we don't use the async procedures.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+
+static u32	nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
+static void	nlmsvc_callback_exit(struct rpc_task *);
+
+#ifdef CONFIG_LOCKD_V4
+static u32
+cast_to_nlm(u32 status, u32 vers)
+{
+	/* Note: status is assumed to be in network byte order !!! */
+	if (vers != 4){
+		switch (status) {
+		case nlm_granted:
+		case nlm_lck_denied:
+		case nlm_lck_denied_nolocks:
+		case nlm_lck_blocked:
+		case nlm_lck_denied_grace_period:
+			break;
+		case nlm4_deadlock:
+			status = nlm_lck_denied;
+			break;
+		default:
+			status = nlm_lck_denied_nolocks;
+		}
+	}
+
+	return (status);
+}
+#define	cast_status(status) (cast_to_nlm(status, rqstp->rq_vers))
+#else
+#define cast_status(status) (status)
+#endif
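
Assuming the nlm/nlm4 status codes are small protocol constants (they are carried as network-order u32s in the real code), the downcast rule is: statuses known to v1/v3 pass through, the NLM4-only deadlock code degrades to a plain denial, and anything else collapses to "denied, no locks". A host-order toy version:

#include <stdio.h>

/* Hypothetical host-order stand-ins for the nlm/nlm4 status constants. */
enum {
	GRANTED = 0, DENIED = 1, DENIED_NOLOCKS = 2,
	BLOCKED = 3, DENIED_GRACE = 4, DEADLOCK = 5,
};

/* Map an NLMv4 status onto the smaller v1/v3 vocabulary. */
static int cast_to_nlm(int status, int vers)
{
	if (vers == 4)
		return status;
	switch (status) {
	case GRANTED:
	case DENIED:
	case DENIED_NOLOCKS:
	case BLOCKED:
	case DENIED_GRACE:
		return status;		/* known to v1/v3 clients */
	case DEADLOCK:
		return DENIED;		/* v4-only code, degrade to DENIED */
	default:
		return DENIED_NOLOCKS;	/* everything else */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       cast_to_nlm(DEADLOCK, 4),	/* 5: passed through */
	       cast_to_nlm(DEADLOCK, 1),	/* 1: degraded */
	       cast_to_nlm(42, 3));		/* 2: unknown -> no locks */
	return 0;
}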
+
+/*
+ * Obtain client and file from arguments
+ */
+static u32
+nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+			struct nlm_host **hostp, struct nlm_file **filp)
+{
+	struct nlm_host		*host = NULL;
+	struct nlm_file		*file = NULL;
+	struct nlm_lock		*lock = &argp->lock;
+	u32			error;
+
+	/* nfsd callbacks must have been installed for this procedure */
+	if (!nlmsvc_ops)
+		return nlm_lck_denied_nolocks;
+
+	/* Obtain host handle */
+	if (!(host = nlmsvc_lookup_host(rqstp))
+	 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
+		goto no_locks;
+	*hostp = host;
+
+	/* Obtain file pointer. Not used by FREE_ALL call. */
+	if (filp != NULL) {
+		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+			goto no_locks;
+		*filp = file;
+
+		/* Set up the missing parts of the file_lock structure */
+		lock->fl.fl_file  = file->f_file;
+		lock->fl.fl_owner = (fl_owner_t) host;
+		lock->fl.fl_lmops = &nlmsvc_lock_operations;
+	}
+
+	return 0;
+
+no_locks:
+	if (host)
+		nlm_release_host(host);
+	return nlm_lck_denied_nolocks;
+}
+
+/*
+ * NULL: Test for presence of service
+ */
+static int
+nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	dprintk("lockd: NULL          called\n");
+	return rpc_success;
+}
+
+/*
+ * TEST: Check for conflicting lock
+ */
+static int
+nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: TEST          called\n");
+	resp->cookie = argp->cookie;
+
+	/* Don't accept test requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now check for conflicting locks */
+	resp->status = cast_status(nlmsvc_testlock(file, &argp->lock, &resp->lock));
+
+	dprintk("lockd: TEST          status %d vers %d\n",
+		ntohl(resp->status), rqstp->rq_vers);
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: LOCK          called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+#if 0
+	/* If supplied state doesn't match current state, we assume it's
+	 * an old request that time-warped somehow. Any error return would
+	 * do in this case because it's irrelevant anyway.
+	 *
+	 * NB: We don't retrieve the remote host's state yet.
+	 */
+	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
+		resp->status = nlm_lck_denied_nolocks;
+	} else
+#endif
+
+	/* Now try to lock the file */
+	resp->status = cast_status(nlmsvc_lock(rqstp, file, &argp->lock,
+					       argp->block, &argp->cookie));
+
+	dprintk("lockd: LOCK          status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: CANCEL        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Try to cancel request. */
+	resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
+
+	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNLOCK: release a lock
+ */
+static int
+nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNLOCK        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to remove the lock */
+	resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
+
+	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * GRANTED: A server calls us back to tell us that a process' lock
+ * request was granted
+ */
+static int
+nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	resp->cookie = argp->cookie;
+
+	dprintk("lockd: GRANTED       called\n");
+	resp->status = nlmclnt_grant(&argp->lock);
+	dprintk("lockd: GRANTED       status %d\n", ntohl(resp->status));
+	return rpc_success;
+}
+
+/*
+ * `Async' versions of the above service routines. They aren't really,
+ * because we send the callback before the reply proper. I hope this
+ * doesn't break any clients.
+ */
+static int
+nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: TEST_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_test(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_TEST_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: LOCK_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_lock(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					       void	       *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: CANCEL_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_cancel(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                               void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: UNLOCK_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_unlock(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                                void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: GRANTED_MSG   called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_granted(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
+	return stat;
+}
+
+/*
+ * SHARE: create a DOS share or alter an existing share.
+ */
+static int
+nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+				          struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: SHARE         called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to create the share */
+	resp->status = cast_status(nlmsvc_share_file(host, file, argp));
+
+	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNSHARE: Release a DOS share.
+ */
+static int
+nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNSHARE       called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to unshare the file */
+	resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
+
+	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * NM_LOCK: Create an unmonitored lock
+ */
+static int
+nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	dprintk("lockd: NM_LOCK       called\n");
+
+	argp->monitor = 0;		/* just clear the monitor flag */
+	return nlmsvc_proc_lock(rqstp, argp, resp);
+}
+
+/*
+ * FREE_ALL: Release all locks and shares held by client
+ */
+static int
+nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void            *resp)
+{
+	struct nlm_host	*host;
+
+	/* Obtain client */
+	if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL))
+		return rpc_success;
+
+	nlmsvc_free_host_resources(host);
+	nlm_release_host(host);
+	return rpc_success;
+}
+
+/*
+ * SM_NOTIFY: private callback from statd (not part of official NLM proto)
+ */
+static int
+nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+					      void	        *resp)
+{
+	struct sockaddr_in	saddr = rqstp->rq_addr;
+	int			vers = argp->vers;
+	int			prot = argp->proto >> 1;
+	struct nlm_host		*host;
+
+	dprintk("lockd: SM_NOTIFY     called\n");
+	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
+	 || ntohs(saddr.sin_port) >= 1024) {
+		printk(KERN_WARNING
+			"lockd: rejected NSM callback from %08x:%d\n",
+			ntohl(rqstp->rq_addr.sin_addr.s_addr),
+			ntohs(rqstp->rq_addr.sin_port));
+		return rpc_system_err;
+	}
+
+	/* Obtain the host pointer for this NFS server and try to
+	 * reclaim all locks we hold on this server.
+	 */
+	saddr.sin_addr.s_addr = argp->addr;
+	if ((argp->proto & 1)==0) {
+		if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
+			nlmclnt_recovery(host, argp->state);
+			nlm_release_host(host);
+		}
+	} else {
+		/* If we run on an NFS server, delete all locks held by the client */
+		if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
+			nlmsvc_free_host_resources(host);
+			nlm_release_host(host);
+		}
+	}
+
+	return rpc_success;
+}
+
+/*
+ * client sent a GRANTED_RES, let's remove the associated block
+ */
+static int
+nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+                                                void            *resp)
+{
+	if (!nlmsvc_ops)
+		return rpc_success;
+
+	dprintk("lockd: GRANTED_RES   called\n");
+
+	nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status);
+	return rpc_success;
+}
+
+/*
+ * This is the generic lockd callback for async RPC calls
+ */
+static u32
+nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_rqst	*call;
+
+	if (!(call = nlmclnt_alloc_call()))
+		return rpc_system_err;
+
+	host = nlmclnt_lookup_host(&rqstp->rq_addr,
+				rqstp->rq_prot, rqstp->rq_vers);
+	if (!host) {
+		kfree(call);
+		return rpc_system_err;
+	}
+
+	call->a_flags = RPC_TASK_ASYNC;
+	call->a_host  = host;
+	memcpy(&call->a_args, resp, sizeof(*resp));
+
+	if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
+		goto error;
+
+	return rpc_success;
+ error:
+	nlm_release_host(host);
+	kfree(call);
+	return rpc_system_err;
+}
+
+static void
+nlmsvc_callback_exit(struct rpc_task *task)
+{
+	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: %4d callback failed (errno = %d)\n",
+					task->tk_pid, -task->tk_status);
+	}
+	nlm_release_host(call->a_host);
+	kfree(call);
+}
+
+/*
+ * NLM Server procedures.
+ */
+
+#define nlmsvc_encode_norep	nlmsvc_encode_void
+#define nlmsvc_decode_norep	nlmsvc_decode_void
+#define nlmsvc_decode_testres	nlmsvc_decode_void
+#define nlmsvc_decode_lockres	nlmsvc_decode_void
+#define nlmsvc_decode_unlockres	nlmsvc_decode_void
+#define nlmsvc_decode_cancelres	nlmsvc_decode_void
+#define nlmsvc_decode_grantedres	nlmsvc_decode_void
+
+#define nlmsvc_proc_none	nlmsvc_proc_null
+#define nlmsvc_proc_test_res	nlmsvc_proc_null
+#define nlmsvc_proc_lock_res	nlmsvc_proc_null
+#define nlmsvc_proc_cancel_res	nlmsvc_proc_null
+#define nlmsvc_proc_unlock_res	nlmsvc_proc_null
+
+struct nlm_void			{ int dummy; };
+
+#define PROC(name, xargt, xrest, argt, rest, respsize)	\
+ { .pc_func	= (svc_procfunc) nlmsvc_proc_##name,	\
+   .pc_decode	= (kxdrproc_t) nlmsvc_decode_##xargt,	\
+   .pc_encode	= (kxdrproc_t) nlmsvc_encode_##xrest,	\
+   .pc_release	= NULL,					\
+   .pc_argsize	= sizeof(struct nlm_##argt),		\
+   .pc_ressize	= sizeof(struct nlm_##rest),		\
+   .pc_xdrressize = respsize,				\
+ }
+
+#define	Ck	(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))	/* cookie */
+#define	St	1				/* status */
+#define	No	(1+1024/4)			/* Net Obj */
+#define	Rg	2				/* range - offset + size */
+
+struct svc_procedure		nlmsvc_procedures[] = {
+  PROC(null,		void,		void,		void,	void, 1),
+  PROC(test,		testargs,	testres,	args,	res, Ck+St+2+No+Rg),
+  PROC(lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(cancel,		cancargs,	res,		args,	res, Ck+St),
+  PROC(unlock,		unlockargs,	res,		args,	res, Ck+St),
+  PROC(granted,		testargs,	res,		args,	res, Ck+St),
+  PROC(test_msg,	testargs,	norep,		args,	void, 1),
+  PROC(lock_msg,	lockargs,	norep,		args,	void, 1),
+  PROC(cancel_msg,	cancargs,	norep,		args,	void, 1),
+  PROC(unlock_msg,	unlockargs,	norep,		args,	void, 1),
+  PROC(granted_msg,	testargs,	norep,		args,	void, 1),
+  PROC(test_res,	testres,	norep,		res,	void, 1),
+  PROC(lock_res,	lockres,	norep,		res,	void, 1),
+  PROC(cancel_res,	cancelres,	norep,		res,	void, 1),
+  PROC(unlock_res,	unlockres,	norep,		res,	void, 1),
+  PROC(granted_res,	res,		norep,		res,	void, 1),
+  /* statd callback */
+  PROC(sm_notify,	reboot,		void,		reboot,	void, 1),
+  PROC(none,		void,		void,		void,	void, 1),
+  PROC(none,		void,		void,		void,	void, 1),
+  PROC(none,		void,		void,		void,	void, 1),
+  PROC(share,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(unshare,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(nm_lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(free_all,	notify,		void,		args,	void, 0),
+
+};
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
new file mode 100644
index 0000000..4943fb7
--- /dev/null
+++ b/fs/lockd/svcshare.c
@@ -0,0 +1,111 @@
+/*
+ * linux/fs/lockd/svcshare.c
+ *
+ * Management of DOS shares.
+ *
+ * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/time.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+
+static inline int
+nlm_cmp_owner(struct nlm_share *share, struct xdr_netobj *oh)
+{
+	return share->s_owner.len == oh->len
+	    && !memcmp(share->s_owner.data, oh->data, oh->len);
+}
+
+u32
+nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
+			struct nlm_args *argp)
+{
+	struct nlm_share	*share;
+	struct xdr_netobj	*oh = &argp->lock.oh;
+	u8			*ohdata;
+
+	for (share = file->f_shares; share; share = share->s_next) {
+		if (share->s_host == host && nlm_cmp_owner(share, oh))
+			goto update;
+		if ((argp->fsm_access & share->s_mode)
+		 || (argp->fsm_mode   & share->s_access ))
+			return nlm_lck_denied;
+	}
+
+	share = (struct nlm_share *) kmalloc(sizeof(*share) + oh->len,
+						GFP_KERNEL);
+	if (share == NULL)
+		return nlm_lck_denied_nolocks;
+
+	/* Copy owner handle */
+	ohdata = (u8 *) (share + 1);
+	memcpy(ohdata, oh->data, oh->len);
+
+	share->s_file	    = file;
+	share->s_host       = host;
+	share->s_owner.data = ohdata;
+	share->s_owner.len  = oh->len;
+	share->s_next       = file->f_shares;
+	file->f_shares      = share;
+
+update:
+	share->s_access = argp->fsm_access;
+	share->s_mode   = argp->fsm_mode;
+	return nlm_granted;
+}
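
The core of the SHARE check above is the symmetric DOS deny-mode test: a request conflicts with an existing share if its access bits hit the other side's deny mask, or the other way round. A sketch with simple read/write bits:

#include <stdio.h>

#define ACC_READ	1
#define ACC_WRITE	2

struct share_req {
	unsigned int access;	/* fsm_access: what the caller wants to do */
	unsigned int deny;	/* fsm_mode: what it wants to deny others */
};

/* Two shares conflict if either side's access intersects the other's deny mask. */
static int shares_conflict(const struct share_req *a, const struct share_req *b)
{
	return (a->access & b->deny) || (a->deny & b->access);
}

int main(void)
{
	struct share_req writer = { ACC_WRITE, ACC_READ | ACC_WRITE };	/* deny all */
	struct share_req reader = { ACC_READ, 0 };			/* deny none */
	struct share_req reader2 = { ACC_READ, ACC_WRITE };		/* deny writers */

	printf("%d %d\n",
	       shares_conflict(&reader, &writer),	/* 1: reader hits writer's deny-all */
	       shares_conflict(&reader, &reader2));	/* 0: two readers coexist */
	return 0;
}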
+
+/*
+ * Delete a share.
+ */
+u32
+nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
+			struct nlm_args *argp)
+{
+	struct nlm_share	*share, **shpp;
+	struct xdr_netobj	*oh = &argp->lock.oh;
+
+	for (shpp = &file->f_shares; (share = *shpp) != 0; shpp = &share->s_next) {
+		if (share->s_host == host && nlm_cmp_owner(share, oh)) {
+			*shpp = share->s_next;
+			kfree(share);
+			return nlm_granted;
+		}
+	}
+
+	/* X/Open spec says return success even if there was no
+	 * corresponding share. */
+	return nlm_granted;
+}
+
+/*
+ * Traverse all shares for a given file (and host).
+ * NLM_ACT_CHECK is handled by nlmsvc_inspect_file.
+ */
+int
+nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	struct nlm_share	*share, **shpp;
+
+	shpp = &file->f_shares;
+	while ((share = *shpp) !=  NULL) {
+		if (action == NLM_ACT_MARK)
+			share->s_host->h_inuse = 1;
+		else if (action == NLM_ACT_UNLOCK) {
+			if (host == NULL || host == share->s_host) {
+				*shpp = share->s_next;
+				kfree(share);
+				continue;
+			}
+		}
+		shpp = &share->s_next;
+	}
+
+	return 0;
+}
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
new file mode 100644
index 0000000..de75363
--- /dev/null
+++ b/fs/lockd/svcsubs.c
@@ -0,0 +1,309 @@
+/*
+ * linux/fs/lockd/svcsubs.c
+ *
+ * Various support routines for the NLM server.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsfh.h>
+#include <linux/nfsd/export.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_SVCSUBS
+
+
+/*
+ * Global file hash table
+ */
+#define FILE_HASH_BITS		5
+#define FILE_NRHASH		(1<<FILE_HASH_BITS)
+static struct nlm_file *	nlm_files[FILE_NRHASH];
+static DECLARE_MUTEX(nlm_file_sema);
+
+static inline unsigned int file_hash(struct nfs_fh *f)
+{
+	unsigned int tmp=0;
+	int i;
+	for (i=0; i<NFS2_FHSIZE;i++)
+		tmp += f->data[i];
+	return tmp & (FILE_NRHASH - 1);
+}
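
The hash above just sums the filehandle bytes and masks with the table size (32 buckets), which only needs to spread handles roughly evenly. A standalone rendition, hard-coding the 32-byte NFSv2 handle size:

#include <stdio.h>

#define FHSIZE		32			/* NFSv2 file handle size in bytes */
#define FILE_HASH_BITS	5
#define FILE_NRHASH	(1 << FILE_HASH_BITS)	/* 32 buckets, as in svcsubs.c */

/* Additive byte hash, masked down to a bucket index. */
static unsigned int file_hash(const unsigned char fh[FHSIZE])
{
	unsigned int tmp = 0;
	int i;

	for (i = 0; i < FHSIZE; i++)
		tmp += fh[i];
	return tmp & (FILE_NRHASH - 1);
}

int main(void)
{
	unsigned char fh[FHSIZE] = { 0 };

	fh[0] = 0x42;
	printf("bucket %u of %d\n", file_hash(fh), FILE_NRHASH);
	return 0;
}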
+
+/*
+ * Lookup file info. If it doesn't exist, create a file info struct
+ * and open a (VFS) file for the given inode.
+ *
+ * FIXME:
+ * Note that we open the file O_RDONLY even when creating write locks.
+ * This is not quite right, but for now, we assume the client performs
+ * the proper R/W checking.
+ */
+u32
+nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
+					struct nfs_fh *f)
+{
+	struct nlm_file	*file;
+	unsigned int	hash;
+	u32		nfserr;
+	u32		*fhp = (u32*)f->data;
+
+	dprintk("lockd: nlm_file_lookup(%08x %08x %08x %08x %08x %08x)\n",
+		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);
+
+
+	hash = file_hash(f);
+
+	/* Lock file table */
+	down(&nlm_file_sema);
+
+	for (file = nlm_files[hash]; file; file = file->f_next)
+		if (!nfs_compare_fh(&file->f_handle, f))
+			goto found;
+
+	dprintk("lockd: creating file for (%08x %08x %08x %08x %08x %08x)\n",
+		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);
+
+	nfserr = nlm_lck_denied_nolocks;
+	file = (struct nlm_file *) kmalloc(sizeof(*file), GFP_KERNEL);
+	if (!file)
+		goto out_unlock;
+
+	memset(file, 0, sizeof(*file));
+	memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
+	file->f_hash = hash;
+	init_MUTEX(&file->f_sema);
+
+	/* Open the file. Note that this must not sleep for too long, else
+	 * we would lock up lockd:-) So no NFS re-exports, folks.
+	 *
+	 * We have to make sure we have the right credential to open
+	 * the file.
+	 */
+	if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) {
+		dprintk("lockd: open failed (nfserr %d)\n", ntohl(nfserr));
+		goto out_free;
+	}
+
+	file->f_next = nlm_files[hash];
+	nlm_files[hash] = file;
+
+found:
+	dprintk("lockd: found file %p (count %d)\n", file, file->f_count);
+	*result = file;
+	file->f_count++;
+	nfserr = 0;
+
+out_unlock:
+	up(&nlm_file_sema);
+	return nfserr;
+
+out_free:
+	kfree(file);
+#ifdef CONFIG_LOCKD_V4
+	if (nfserr == 1)
+		nfserr = nlm4_stale_fh;
+	else
+#endif
+	nfserr = nlm_lck_denied;
+	goto out_unlock;
+}
+
+/*
+ * Delete a file after having released all locks, blocks and shares
+ */
+static inline void
+nlm_delete_file(struct nlm_file *file)
+{
+	struct inode *inode = file->f_file->f_dentry->d_inode;
+	struct nlm_file	**fp, *f;
+
+	dprintk("lockd: closing file %s/%ld\n",
+		inode->i_sb->s_id, inode->i_ino);
+	fp = nlm_files + file->f_hash;
+	while ((f = *fp) != NULL) {
+		if (f == file) {
+			*fp = file->f_next;
+			nlmsvc_ops->fclose(file->f_file);
+			kfree(file);
+			return;
+		}
+		fp = &f->f_next;
+	}
+
+	printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
+}
+
+/*
+ * Loop over all locks on the given file and perform the specified
+ * action.
+ */
+static int
+nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	struct inode	 *inode = nlmsvc_file_inode(file);
+	struct file_lock *fl;
+	struct nlm_host	 *lockhost;
+
+again:
+	file->f_locks = 0;
+	for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+		if (!(fl->fl_flags & FL_LOCKD))
+			continue;
+
+		/* update current lock count */
+		file->f_locks++;
+		lockhost = (struct nlm_host *) fl->fl_owner;
+		if (action == NLM_ACT_MARK)
+			lockhost->h_inuse = 1;
+		else if (action == NLM_ACT_CHECK)
+			return 1;
+		else if (action == NLM_ACT_UNLOCK) {
+			struct file_lock lock = *fl;
+
+			if (host && lockhost != host)
+				continue;
+
+			lock.fl_type  = F_UNLCK;
+			lock.fl_start = 0;
+			lock.fl_end   = OFFSET_MAX;
+			if (posix_lock_file(file->f_file, &lock) < 0) {
+				printk("lockd: unlock failure in %s:%d\n",
+						__FILE__, __LINE__);
+				return 1;
+			}
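+			/* fs/locks.c may create, delete and split entries
+			 * behind our back, so restart the walk after each
+			 * unlock. */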
+			goto again;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Operate on a single file
+ */
+static inline int
+nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	if (action == NLM_ACT_CHECK) {
+		/* Fast path for mark and sweep garbage collection */
+		if (file->f_count || file->f_blocks || file->f_shares)
+			return 1;
+	} else {
+		if (nlmsvc_traverse_blocks(host, file, action)
+		 || nlmsvc_traverse_shares(host, file, action))
+			return 1;
+	}
+	return nlm_traverse_locks(host, file, action);
+}
+
+/*
+ * Loop over all files in the file table.
+ */
+static int
+nlm_traverse_files(struct nlm_host *host, int action)
+{
+	struct nlm_file	*file, **fp;
+	int		i;
+
+	down(&nlm_file_sema);
+	for (i = 0; i < FILE_NRHASH; i++) {
+		fp = nlm_files + i;
+		while ((file = *fp) != NULL) {
+			/* Traverse locks, blocks and shares of this file
+			 * and update file->f_locks count */
+			if (nlm_inspect_file(host, file, action)) {
+				up(&nlm_file_sema);
+				return 1;
+			}
+
+			/* No more references to this file. Let go of it. */
+			if (!file->f_blocks && !file->f_locks
+			 && !file->f_shares && !file->f_count) {
+				*fp = file->f_next;
+				nlmsvc_ops->fclose(file->f_file);
+				kfree(file);
+			} else {
+				fp = &file->f_next;
+			}
+		}
+	}
+	up(&nlm_file_sema);
+	return 0;
+}
+
+/*
+ * Release file. If there are no more remote locks on this file,
+ * close it and free the handle.
+ *
+ * Note that we can't do proper reference counting without major
+ * contortions because the code in fs/locks.c creates, deletes and
+ * splits locks without notification. Our only way is to walk the
+ * entire lock list each time we remove a lock.
+ */
+void
+nlm_release_file(struct nlm_file *file)
+{
+	dprintk("lockd: nlm_release_file(%p, ct = %d)\n",
+				file, file->f_count);
+
+	/* Lock file table */
+	down(&nlm_file_sema);
+
+	/* If there are no more locks etc, delete the file */
+	if (--file->f_count == 0) {
+		if (!nlm_inspect_file(NULL, file, NLM_ACT_CHECK))
+			nlm_delete_file(file);
+	}
+
+	up(&nlm_file_sema);
+}
+
+/*
+ * Mark all hosts that still hold resources
+ */
+void
+nlmsvc_mark_resources(void)
+{
+	dprintk("lockd: nlmsvc_mark_resources\n");
+
+	nlm_traverse_files(NULL, NLM_ACT_MARK);
+}
+
+/*
+ * Release all resources held by the given client
+ */
+void
+nlmsvc_free_host_resources(struct nlm_host *host)
+{
+	dprintk("lockd: nlmsvc_free_host_resources\n");
+
+	if (nlm_traverse_files(host, NLM_ACT_UNLOCK))
+		printk(KERN_WARNING
+			"lockd: couldn't remove all locks held by %s\n",
+			host->h_name);
+}
+
+/*
+ * Delete the host structures for all clients, releasing any resources
+ * they still hold.
+ */
+void
+nlmsvc_invalidate_all(void)
+{
+	struct nlm_host *host;
+	while ((host = nlm_find_client()) != NULL) {
+		nlmsvc_free_host_resources(host);
+		host->h_expires = 0;
+		host->h_killed = 1;
+		nlm_release_host(host);
+	}
+}
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
new file mode 100644
index 0000000..f01e9c0
--- /dev/null
+++ b/fs/lockd/xdr.c
@@ -0,0 +1,635 @@
+/*
+ * linux/fs/lockd/xdr.c
+ *
+ * XDR support for lockd and the lock client.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <linux/nfs.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+
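+/*
+ * NLM v1/v3 carry 32-bit offsets on the wire; clamp loff_t values to
+ * that range when converting.
+ */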
+static inline loff_t
+s32_to_loff_t(__s32 offset)
+{
+	return (loff_t)offset;
+}
+
+static inline __s32
+loff_t_to_s32(loff_t offset)
+{
+	__s32 res;
+	if (offset >= NLM_OFFSET_MAX)
+		res = NLM_OFFSET_MAX;
+	else if (offset <= -NLM_OFFSET_MAX)
+		res = -NLM_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+/*
+ * XDR functions for basic NLM types
+ */
+static inline u32 *
+nlm_decode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	unsigned int	len;
+
+	len = ntohl(*p++);
+
+	if (len == 0) {
+		c->len = 4;
+		memset(c->data, 0, 4);	/* hockeypux brain damage */
+	} else if (len <= NLM_MAXCOOKIELEN) {
+		c->len = len;
+		memcpy(c->data, p, len);
+		p += XDR_QUADLEN(len);
+	} else {
+		printk(KERN_NOTICE
+			"lockd: bad cookie size %d (only cookies of %d bytes or less are supported)\n",
+			len, NLM_MAXCOOKIELEN);
+		return NULL;
+	}
+	return p;
+}
+
+static inline u32 *
+nlm_encode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	*p++ = htonl(c->len);
+	memcpy(p, c->data, c->len);
+	p+=XDR_QUADLEN(c->len);
+	return p;
+}
+
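+/*
+ * NLM v1/v3 file handles are fixed-size NFSv2 handles (NFS2_FHSIZE bytes).
+ */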
+static inline u32 *
+nlm_decode_fh(u32 *p, struct nfs_fh *f)
+{
+	unsigned int	len;
+
+	if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
+		printk(KERN_NOTICE
+			"lockd: bad fhandle size %d (should be %d)\n",
+			len, NFS2_FHSIZE);
+		return NULL;
+	}
+	f->size = NFS2_FHSIZE;
+	memset(f->data, 0, sizeof(f->data));
+	memcpy(f->data, p, NFS2_FHSIZE);
+	return p + XDR_QUADLEN(NFS2_FHSIZE);
+}
+
+static inline u32 *
+nlm_encode_fh(u32 *p, struct nfs_fh *f)
+{
+	*p++ = htonl(NFS2_FHSIZE);
+	memcpy(p, f->data, NFS2_FHSIZE);
+	return p + XDR_QUADLEN(NFS2_FHSIZE);
+}
+
+/*
+ * Encode and decode owner handle
+ */
+static inline u32 *
+nlm_decode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_decode_netobj(p, oh);
+}
+
+static inline u32 *
+nlm_encode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_encode_netobj(p, oh);
+}
+
+static inline u32 *
+nlm_decode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	s32			start, len, end;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len,
+					    NLM_MAXSTRLEN))
+	 || !(p = nlm_decode_fh(p, &lock->fh))
+	 || !(p = nlm_decode_oh(p, &lock->oh)))
+		return NULL;
+
+	locks_init_lock(fl);
+	fl->fl_owner = current->files;
+	fl->fl_pid   = ntohl(*p++);
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = F_RDLCK;		/* as good as anything else */
+	start = ntohl(*p++);
+	len = ntohl(*p++);
+	end = start + len - 1;
+
+	fl->fl_start = s32_to_loff_t(start);
+
+	if (len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = s32_to_loff_t(end);
+	return p;
+}
+
+/*
+ * Encode a lock as part of an NLM call
+ */
+static u32 *
+nlm_encode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	__s32			start, len;
+
+	if (!(p = xdr_encode_string(p, lock->caller))
+	 || !(p = nlm_encode_fh(p, &lock->fh))
+	 || !(p = nlm_encode_oh(p, &lock->oh)))
+		return NULL;
+
+	if (fl->fl_start > NLM_OFFSET_MAX
+	 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
+		return NULL;
+
+	start = loff_t_to_s32(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		len = 0;
+	else
+		len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+
+	*p++ = htonl(fl->fl_pid);
+	*p++ = htonl(start);
+	*p++ = htonl(len);
+
+	return p;
+}
+
+/*
+ * Encode result of a TEST/TEST_MSG call
+ */
+static u32 *
+nlm_encode_testres(u32 *p, struct nlm_res *resp)
+{
+	s32		start, len;
+
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return NULL;
+	*p++ = resp->status;
+
+	if (resp->status == nlm_lck_denied) {
+		struct file_lock	*fl = &resp->lock.fl;
+
+		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
+		*p++ = htonl(fl->fl_pid);
+
+		/* Encode owner handle. */
+		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
+			return NULL;
+
+		start = loff_t_to_s32(fl->fl_start);
+		if (fl->fl_end == OFFSET_MAX)
+			len = 0;
+		else
+			len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+
+		*p++ = htonl(start);
+		*p++ = htonl(len);
+	}
+
+	return p;
+}
+
+
+/*
+ * First, the server side XDR functions
+ */
+int
+nlmsvc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+		return 0;
+
+	exclusive = ntohl(*p++);
+	if (!(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_testres(p, resp)))
+		return 0;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block  = ntohl(*p++);
+	exclusive    = ntohl(*p++);
+	if (!(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	argp->reclaim = ntohl(*p++);
+	argp->state   = ntohl(*p++);
+	argp->monitor = 1;		/* monitor client by default */
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block = ntohl(*p++);
+	exclusive = ntohl(*p++);
+	if (!(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	if (!(p = nlm_decode_cookie(p, &argp->cookie))
+	 || !(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	argp->lock.fl.fl_type = F_UNLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(&lock->fl);
+	lock->fl.fl_pid = ~(u32) 0;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie))
+	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN))
+	 || !(p = nlm_decode_fh(p, &lock->fh))
+	 || !(p = nlm_decode_oh(p, &lock->oh)))
+		return 0;
+	argp->fsm_mode = ntohl(*p++);
+	argp->fsm_access = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	*p++ = xdr_zero;		/* sequence argument */
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
+{
+	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	/* Preserve the address in network byte order */
+	argp->addr = *p++;
+	argp->vers = *p++;
+	argp->proto = *p++;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
+		return 0;
+	resp->status = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+/*
+ * Now, the client side XDR functions
+ */
+#ifdef NLMCLNT_SUPPORT_SHARES
+static int
+nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
+{
+	return 0;
+}
+#endif
+
+static int
+nlmclt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	if (resp->status == NLM_LCK_DENIED) {
+		struct file_lock	*fl = &resp->lock.fl;
+		u32			excl;
+		s32			start, len, end;
+
+		memset(&resp->lock, 0, sizeof(resp->lock));
+		locks_init_lock(fl);
+		excl = ntohl(*p++);
+		fl->fl_pid = ntohl(*p++);
+		if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
+			return -EIO;
+
+		fl->fl_flags = FL_POSIX;
+		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
+		start = ntohl(*p++);
+		len = ntohl(*p++);
+		end = start + len - 1;
+
+		fl->fl_start = s32_to_loff_t(start);
+		if (len == 0 || end < 0)
+			fl->fl_end = OFFSET_MAX;
+		else
+			fl->fl_end = s32_to_loff_t(end);
+	}
+	return 0;
+}
+
+
+static int
+nlmclt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	*p++ = argp->reclaim? xdr_one : xdr_zero;
+	*p++ = htonl(argp->state);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return -EIO;
+	*p++ = resp->status;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_testres(p, resp)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	return 0;
+}
+
+/*
+ * Buffer requirements for NLM
+ */
+#define NLM_void_sz		0
+#define NLM_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
+#define NLM_caller_sz		1+XDR_QUADLEN(sizeof(system_utsname.nodename))
+#define NLM_netobj_sz		1+XDR_QUADLEN(XDR_MAX_NETOBJ)
+/* #define NLM_owner_sz		1+XDR_QUADLEN(NLM_MAXOWNER) */
+#define NLM_fhandle_sz		1+XDR_QUADLEN(NFS2_FHSIZE)
+#define NLM_lock_sz		3+NLM_caller_sz+NLM_netobj_sz+NLM_fhandle_sz
+#define NLM_holder_sz		4+NLM_netobj_sz
+
+#define NLM_testargs_sz		NLM_cookie_sz+1+NLM_lock_sz
+#define NLM_lockargs_sz		NLM_cookie_sz+4+NLM_lock_sz
+#define NLM_cancargs_sz		NLM_cookie_sz+2+NLM_lock_sz
+#define NLM_unlockargs_sz	NLM_cookie_sz+NLM_lock_sz
+
+#define NLM_testres_sz		NLM_cookie_sz+1+NLM_holder_sz
+#define NLM_res_sz		NLM_cookie_sz+1
+#define NLM_norep_sz		0
+
+#ifndef MAX
+# define MAX(a, b)		(((a) > (b))? (a) : (b))
+#endif
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlmclt_decode_norep	NULL
+
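+/*
+ * Build an rpc_procinfo entry for an NLM procedure: wire up the client
+ * XDR encode/decode routines and size the RPC buffer in bytes.
+ */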
+#define PROC(proc, argtype, restype)	\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdrproc_t) nlmclt_encode_##argtype,		\
+	.p_decode    = (kxdrproc_t) nlmclt_decode_##restype,		\
+	.p_bufsiz    = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2	\
+	}
+
+static struct rpc_procinfo	nlm_procedures[] = {
+    PROC(TEST,		testargs,	testres),
+    PROC(LOCK,		lockargs,	res),
+    PROC(CANCEL,	cancargs,	res),
+    PROC(UNLOCK,	unlockargs,	res),
+    PROC(GRANTED,	testargs,	res),
+    PROC(TEST_MSG,	testargs,	norep),
+    PROC(LOCK_MSG,	lockargs,	norep),
+    PROC(CANCEL_MSG,	cancargs,	norep),
+    PROC(UNLOCK_MSG,	unlockargs,	norep),
+    PROC(GRANTED_MSG,	testargs,	norep),
+    PROC(TEST_RES,	testres,	norep),
+    PROC(LOCK_RES,	res,		norep),
+    PROC(CANCEL_RES,	res,		norep),
+    PROC(UNLOCK_RES,	res,		norep),
+    PROC(GRANTED_RES,	res,		norep),
+#ifdef NLMCLNT_SUPPORT_SHARES
+    PROC(SHARE,		shareargs,	shareres),
+    PROC(UNSHARE,	shareargs,	shareres),
+    PROC(NM_LOCK,	lockargs,	res),
+    PROC(FREE_ALL,	notify,		void),
+#endif
+};
+
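+/*
+ * NLM versions 1 and 3 share the same client procedure table; they
+ * differ only in the number of procedures defined.
+ */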
+static struct rpc_version	nlm_version1 = {
+		.number		= 1,
+		.nrprocs	= 16,
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	nlm_version3 = {
+		.number		= 3,
+		.nrprocs	= 24,
+		.procs		= nlm_procedures,
+};
+
+#ifdef CONFIG_LOCKD_V4
+extern struct rpc_version nlm_version4;
+#endif
+
+static struct rpc_version *	nlm_versions[] = {
+	[1] = &nlm_version1,
+	[3] = &nlm_version3,
+#ifdef CONFIG_LOCKD_V4
+	[4] = &nlm_version4,
+#endif
+};
+
+static struct rpc_stat		nlm_stats;
+
+struct rpc_program		nlm_program = {
+		.name		= "lockd",
+		.number		= NLM_PROGRAM,
+		.nrvers		= sizeof(nlm_versions) / sizeof(nlm_versions[0]),
+		.version	= nlm_versions,
+		.stats		= &nlm_stats,
+};
+
+#ifdef RPC_DEBUG
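+/*
+ * Render a cookie as a hex string for debug output.
+ */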
+const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
+{
+	/*
+	 * We can get away with a static buffer because we're only
+	 * called with BKL held.
+	 */
+	static char buf[2*NLM_MAXCOOKIELEN+1];
+	int i;
+	int len = sizeof(buf);
+	char *p = buf;
+
+	len--;	/* allow for trailing \0 */
+	if (len < 3)
+		return "???";
+	for (i = 0 ; i < cookie->len ; i++) {
+		if (len < 2) {
+			strcpy(p-3, "...");
+			break;
+		}
+		sprintf(p, "%02x", cookie->data[i]);
+		p += 2;
+		len -= 2;
+	}
+	*p = '\0';
+
+	return buf;
+}
+#endif
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
new file mode 100644
index 0000000..ae4d6b4
--- /dev/null
+++ b/fs/lockd/xdr4.c
@@ -0,0 +1,580 @@
+/*
+ * linux/fs/lockd/xdr4.c
+ *
+ * XDR support for lockd and the lock client.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ * Copyright (C) 1999, Trond Myklebust <trond.myklebust@fys.uio.no>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <linux/nfs.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
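+/*
+ * NLM v4 uses 64-bit offsets on the wire; clamp loff_t values to the
+ * protocol maximum when converting.
+ */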
+static inline loff_t
+s64_to_loff_t(__s64 offset)
+{
+	return (loff_t)offset;
+}
+
+
+static inline s64
+loff_t_to_s64(loff_t offset)
+{
+	s64 res;
+	if (offset > NLM4_OFFSET_MAX)
+		res = NLM4_OFFSET_MAX;
+	else if (offset < -NLM4_OFFSET_MAX)
+		res = -NLM4_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+/*
+ * XDR functions for basic NLM types
+ */
+static u32 *
+nlm4_decode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	unsigned int	len;
+
+	len = ntohl(*p++);
+
+	if (len == 0) {
+		c->len = 4;
+		memset(c->data, 0, 4);	/* hockeypux brain damage */
+	} else if (len <= NLM_MAXCOOKIELEN) {
+		c->len = len;
+		memcpy(c->data, p, len);
+		p += XDR_QUADLEN(len);
+	} else {
+		printk(KERN_NOTICE
+			"lockd: bad cookie size %d (only cookies of %d bytes or less are supported)\n",
+			len, NLM_MAXCOOKIELEN);
+		return NULL;
+	}
+	return p;
+}
+
+static u32 *
+nlm4_encode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	*p++ = htonl(c->len);
+	memcpy(p, c->data, c->len);
+	p+=XDR_QUADLEN(c->len);
+	return p;
+}
+
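+/*
+ * NLM v4 file handles are variable length, up to NFS_MAXFHSIZE bytes.
+ */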
+static u32 *
+nlm4_decode_fh(u32 *p, struct nfs_fh *f)
+{
+	memset(f->data, 0, sizeof(f->data));
+	f->size = ntohl(*p++);
+	if (f->size > NFS_MAXFHSIZE) {
+		printk(KERN_NOTICE
+			"lockd: bad fhandle size %d (should be <=%d)\n",
+			f->size, NFS_MAXFHSIZE);
+		return NULL;
+	}
+	memcpy(f->data, p, f->size);
+	return p + XDR_QUADLEN(f->size);
+}
+
+static u32 *
+nlm4_encode_fh(u32 *p, struct nfs_fh *f)
+{
+	*p++ = htonl(f->size);
+	if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
+	memcpy(p, f->data, f->size);
+	return p + XDR_QUADLEN(f->size);
+}
+
+/*
+ * Encode and decode owner handle
+ */
+static u32 *
+nlm4_decode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_decode_netobj(p, oh);
+}
+
+static u32 *
+nlm4_encode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_encode_netobj(p, oh);
+}
+
+static u32 *
+nlm4_decode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	__s64			len, start, end;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN))
+	 || !(p = nlm4_decode_fh(p, &lock->fh))
+	 || !(p = nlm4_decode_oh(p, &lock->oh)))
+		return NULL;
+
+	locks_init_lock(fl);
+	fl->fl_owner = current->files;
+	fl->fl_pid   = ntohl(*p++);
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = F_RDLCK;		/* as good as anything else */
+	p = xdr_decode_hyper(p, &start);
+	p = xdr_decode_hyper(p, &len);
+	end = start + len - 1;
+
+	fl->fl_start = s64_to_loff_t(start);
+
+	if (len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = s64_to_loff_t(end);
+	return p;
+}
+
+/*
+ * Encode a lock as part of an NLM call
+ */
+static u32 *
+nlm4_encode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	__s64			start, len;
+
+	if (!(p = xdr_encode_string(p, lock->caller))
+	 || !(p = nlm4_encode_fh(p, &lock->fh))
+	 || !(p = nlm4_encode_oh(p, &lock->oh)))
+		return NULL;
+
+	if (fl->fl_start > NLM4_OFFSET_MAX
+	 || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
+		return NULL;
+
+	*p++ = htonl(fl->fl_pid);
+
+	start = loff_t_to_s64(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		len = 0;
+	else
+		len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+
+	p = xdr_encode_hyper(p, start);
+	p = xdr_encode_hyper(p, len);
+
+	return p;
+}
+
+/*
+ * Encode result of a TEST/TEST_MSG call
+ */
+static u32 *
+nlm4_encode_testres(u32 *p, struct nlm_res *resp)
+{
+	s64		start, len;
+
+	dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp);
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return NULL;
+	*p++ = resp->status;
+
+	if (resp->status == nlm_lck_denied) {
+		struct file_lock	*fl = &resp->lock.fl;
+
+		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
+		*p++ = htonl(fl->fl_pid);
+
+		/* Encode owner handle. */
+		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
+			return NULL;
+
+		start = loff_t_to_s64(fl->fl_start);
+		if (fl->fl_end == OFFSET_MAX)
+			len = 0;
+		else
+			len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+
+		p = xdr_encode_hyper(p, start);
+		p = xdr_encode_hyper(p, len);
+		dprintk("xdr: encode_testres (status %d pid %d type %d start %Ld end %Ld)\n",
+			resp->status, fl->fl_pid, fl->fl_type,
+			(long long)fl->fl_start,  (long long)fl->fl_end);
+	}
+
+	dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp);
+	return p;
+}
+
+
+/*
+ * First, the server side XDR functions
+ */
+int
+nlm4svc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+		return 0;
+
+	exclusive = ntohl(*p++);
+	if (!(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_testres(p, resp)))
+		return 0;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block  = ntohl(*p++);
+	exclusive    = ntohl(*p++);
+	if (!(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	argp->reclaim = ntohl(*p++);
+	argp->state   = ntohl(*p++);
+	argp->monitor = 1;		/* monitor client by default */
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block = ntohl(*p++);
+	exclusive = ntohl(*p++);
+	if (!(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
+	 || !(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	argp->lock.fl.fl_type = F_UNLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(&lock->fl);
+	lock->fl.fl_pid = ~(u32) 0;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
+	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN))
+	 || !(p = nlm4_decode_fh(p, &lock->fh))
+	 || !(p = nlm4_decode_oh(p, &lock->oh)))
+		return 0;
+	argp->fsm_mode = ntohl(*p++);
+	argp->fsm_access = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	*p++ = xdr_zero;		/* sequence argument */
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
+{
+	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	/* Preserve the address in network byte order */
+	argp->addr = *p++;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
+		return 0;
+	resp->status = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+/*
+ * Now, the client side XDR functions
+ */
+#ifdef NLMCLNT_SUPPORT_SHARES
+static int
+nlm4clt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
+{
+	return 0;
+}
+#endif
+
+static int
+nlm4clt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	if (resp->status == NLM_LCK_DENIED) {
+		struct file_lock	*fl = &resp->lock.fl;
+		u32			excl;
+		s64			start, end, len;
+
+		memset(&resp->lock, 0, sizeof(resp->lock));
+		locks_init_lock(fl);
+		excl = ntohl(*p++);
+		fl->fl_pid = ntohl(*p++);
+		if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
+			return -EIO;
+
+		fl->fl_flags = FL_POSIX;
+		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
+		p = xdr_decode_hyper(p, &start);
+		p = xdr_decode_hyper(p, &len);
+		end = start + len - 1;
+
+		fl->fl_start = s64_to_loff_t(start);
+		if (len == 0 || end < 0)
+			fl->fl_end = OFFSET_MAX;
+		else
+			fl->fl_end = s64_to_loff_t(end);
+	}
+	return 0;
+}
+
+
+static int
+nlm4clt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	*p++ = argp->reclaim? xdr_one : xdr_zero;
+	*p++ = htonl(argp->state);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return -EIO;
+	*p++ = resp->status;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_testres(p, resp)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	return 0;
+}
+
+/*
+ * Buffer requirements for NLM
+ */
+#define NLM4_void_sz		0
+#define NLM4_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
+#define NLM4_caller_sz		1+XDR_QUADLEN(NLM_MAXSTRLEN)
+#define NLM4_netobj_sz		1+XDR_QUADLEN(XDR_MAX_NETOBJ)
+/* #define NLM4_owner_sz		1+XDR_QUADLEN(NLM4_MAXOWNER) */
+#define NLM4_fhandle_sz		1+XDR_QUADLEN(NFS3_FHSIZE)
+#define NLM4_lock_sz		5+NLM4_caller_sz+NLM4_netobj_sz+NLM4_fhandle_sz
+#define NLM4_holder_sz		6+NLM4_netobj_sz
+
+#define NLM4_testargs_sz	NLM4_cookie_sz+1+NLM4_lock_sz
+#define NLM4_lockargs_sz	NLM4_cookie_sz+4+NLM4_lock_sz
+#define NLM4_cancargs_sz	NLM4_cookie_sz+2+NLM4_lock_sz
+#define NLM4_unlockargs_sz	NLM4_cookie_sz+NLM4_lock_sz
+
+#define NLM4_testres_sz		NLM4_cookie_sz+1+NLM4_holder_sz
+#define NLM4_res_sz		NLM4_cookie_sz+1
+#define NLM4_norep_sz		0
+
+#ifndef MAX
+# define MAX(a,b)		(((a) > (b))? (a) : (b))
+#endif
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm4clt_decode_norep	NULL
+
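+/*
+ * Build an rpc_procinfo entry for an NLM v4 procedure, as in xdr.c.
+ */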
+#define PROC(proc, argtype, restype)					\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdrproc_t) nlm4clt_encode_##argtype,		\
+	.p_decode    = (kxdrproc_t) nlm4clt_decode_##restype,		\
+	.p_bufsiz    = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2	\
+	}
+
+static struct rpc_procinfo	nlm4_procedures[] = {
+    PROC(TEST,		testargs,	testres),
+    PROC(LOCK,		lockargs,	res),
+    PROC(CANCEL,	cancargs,	res),
+    PROC(UNLOCK,	unlockargs,	res),
+    PROC(GRANTED,	testargs,	res),
+    PROC(TEST_MSG,	testargs,	norep),
+    PROC(LOCK_MSG,	lockargs,	norep),
+    PROC(CANCEL_MSG,	cancargs,	norep),
+    PROC(UNLOCK_MSG,	unlockargs,	norep),
+    PROC(GRANTED_MSG,	testargs,	norep),
+    PROC(TEST_RES,	testres,	norep),
+    PROC(LOCK_RES,	res,		norep),
+    PROC(CANCEL_RES,	res,		norep),
+    PROC(UNLOCK_RES,	res,		norep),
+    PROC(GRANTED_RES,	res,		norep),
+#ifdef NLMCLNT_SUPPORT_SHARES
+    PROC(SHARE,		shareargs,	shareres),
+    PROC(UNSHARE,	shareargs,	shareres),
+    PROC(NM_LOCK,	lockargs,	res),
+    PROC(FREE_ALL,	notify,		void),
+#endif
+};
+
+struct rpc_version	nlm_version4 = {
+	.number		= 4,
+	.nrprocs	= 24,
+	.procs		= nlm4_procedures,
+};