svc: Move sk_flags to the svc_xprt structure

This functionally trivial change moves the transport-independent sk_flags
field from the svc_sock structure to the transport-independent svc_xprt
structure, renaming it to xpt_flags and the SK_* flag bits to XPT_*.
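
For reference, a minimal sketch of the resulting layout. The members
and bit positions shown here are illustrative only; the exact
definitions live in the sunrpc headers and may differ:

  /* Transport-independent state, embedded in each svc_sock. */
  struct svc_xprt {
  	struct svc_xprt_ops	*xpt_ops;
  	struct kref		xpt_ref;
  	unsigned long		xpt_flags;	/* was svc_sock->sk_flags */
  };

  /* Flag bits in xpt_flags (formerly SK_*); positions are assumed. */
  enum {
  	XPT_BUSY,	/* enqueued/receiving */
  	XPT_CONN,	/* connection pending */
  	XPT_CLOSE,	/* dead or dying */
  	XPT_DATA,	/* data pending */
  	XPT_TEMP,	/* connected transport */
  	XPT_DEAD,	/* transport closed */
  	XPT_CHNGBUF,	/* need to change snd/rcv buffer sizes */
  	XPT_DEFERRED,	/* deferred request pending */
  	XPT_OLD,	/* used for transport aging mark+sweep */
  	XPT_DETACHED,	/* detached from the tempsocks list */
  	XPT_LISTENER,	/* listening endpoint */
  };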

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index db589d1..0a71252 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -56,22 +56,23 @@
  *	BKL protects svc_serv->sv_nrthread.
  *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
  *             and the ->sk_info_authunix cache.
- *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
+ *	svc_sock->sk_xprt.xpt_flags.XPT_BUSY prevents a svc_sock being
+ *	enqueued multiple times.
  *
  *	Some flags can be set to certain values at any time
  *	providing that certain rules are followed:
  *
- *	SK_CONN, SK_DATA, can be set or cleared at any time.
+ *	XPT_CONN, XPT_DATA, can be set or cleared at any time.
  *		after a set, svc_sock_enqueue must be called.
  *		after a clear, the socket must be read/accepted
  *		 if this succeeds, it must be set again.
- *	SK_CLOSE can set at any time. It is never cleared.
- *      xpt_ref contains a bias of '1' until SK_DEAD is set.
+ *	XPT_CLOSE can be set at any time. It is never cleared.
+ *      xpt_ref contains a bias of '1' until XPT_DEAD is set.
  *             so when xprt_ref hits zero, we know the transport is dead
  *             and no-one is using it.
- *      SK_DEAD can only be set while SK_BUSY is held which ensures
+ *      XPT_DEAD can only be set while XPT_BUSY is held which ensures
  *             no other thread will be using the socket or will try to
- *	       set SK_DEAD.
+ *	       set XPT_DEAD.
  *
  */
 
@@ -219,10 +220,10 @@
 	struct svc_rqst	*rqstp;
 	int cpu;
 
-	if (!(svsk->sk_flags &
-	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
+	if (!(svsk->sk_xprt.xpt_flags &
+	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
 		return;
-	if (test_bit(SK_DEAD, &svsk->sk_flags))
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
 		return;
 
 	cpu = get_cpu();
@@ -236,7 +237,7 @@
 		printk(KERN_ERR
 			"svc_sock_enqueue: threads and sockets both waiting??\n");
 
-	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
 		/* Don't enqueue dead sockets */
 		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
 		goto out_unlock;
@@ -244,10 +245,10 @@
 
 	/* Mark socket as busy. It will remain in this state until the
 	 * server has processed all pending data and put the socket back
-	 * on the idle list.  We update SK_BUSY atomically because
+	 * on the idle list.  We update XPT_BUSY atomically because
 	 * it also guards against trying to enqueue the svc_sock twice.
 	 */
-	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
+	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
 		/* Don't enqueue socket while already enqueued */
 		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
 		goto out_unlock;
@@ -256,11 +257,11 @@
 	svsk->sk_pool = pool;
 
 	/* Handle pending connection */
-	if (test_bit(SK_CONN, &svsk->sk_flags))
+	if (test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags))
 		goto process;
 
 	/* Handle close in-progress */
-	if (test_bit(SK_CLOSE, &svsk->sk_flags))
+	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags))
 		goto process;
 
 	/* Check if we have space to reply to a request */
@@ -268,7 +269,7 @@
 		/* Don't enqueue while not enough space for reply */
 		dprintk("svc: no write space, socket %p  not enqueued\n", svsk);
 		svsk->sk_pool = NULL;
-		clear_bit(SK_BUSY, &svsk->sk_flags);
+		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 		goto out_unlock;
 	}
 
@@ -324,14 +325,14 @@
 /*
  * Having read something from a socket, check whether it
  * needs to be re-enqueued.
- * Note: SK_DATA only gets cleared when a read-attempt finds
+ * Note: XPT_DATA only gets cleared when a read-attempt finds
  * no (or insufficient) data.
  */
 static inline void
 svc_sock_received(struct svc_sock *svsk)
 {
 	svsk->sk_pool = NULL;
-	clear_bit(SK_BUSY, &svsk->sk_flags);
+	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	svc_sock_enqueue(svsk);
 }
 
@@ -680,8 +681,9 @@
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
-			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
-		set_bit(SK_DATA, &svsk->sk_flags);
+			svsk, sk, count,
+			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
@@ -698,7 +700,7 @@
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
-			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
+			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
 		svc_sock_enqueue(svsk);
 	}
 
@@ -748,7 +750,7 @@
 		.msg_flags = MSG_DONTWAIT,
 	};
 
-	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 	    /* udp sockets need large rcvbuf as all pending
 	     * requests are still in that buffer.  sndbuf must
 	     * also be large enough that there is enough space
@@ -766,7 +768,7 @@
 		return svc_deferred_recv(rqstp);
 	}
 
-	clear_bit(SK_DATA, &svsk->sk_flags);
+	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	skb = NULL;
 	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
 			     0, 0, MSG_PEEK | MSG_DONTWAIT);
@@ -777,7 +779,7 @@
 		if (err != -EAGAIN) {
 			/* possibly an icmp error */
 			dprintk("svc: recvfrom returned error %d\n", -err);
-			set_bit(SK_DATA, &svsk->sk_flags);
+			set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		}
 		svc_sock_received(svsk);
 		return -EAGAIN;
@@ -789,7 +791,7 @@
 		   need that much accuracy */
 	}
 	svsk->sk_sk->sk_stamp = skb->tstamp;
-	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 
 	/*
 	 * Maybe more packets - kick another thread ASAP.
@@ -936,8 +938,8 @@
 			    3 * svsk->sk_server->sv_max_mesg,
 			    3 * svsk->sk_server->sv_max_mesg);
 
-	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
-	set_bit(SK_CHNGBUF, &svsk->sk_flags);
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* might have come in before data_ready set up */
+	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
@@ -971,7 +973,7 @@
 	 */
 	if (sk->sk_state == TCP_LISTEN) {
 		if (svsk) {
-			set_bit(SK_CONN, &svsk->sk_flags);
+			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 			svc_sock_enqueue(svsk);
 		} else
 			printk("svc: socket %p: no user data\n", sk);
@@ -995,7 +997,7 @@
 	if (!svsk)
 		printk("svc: socket %p: no user data\n", sk);
 	else {
-		set_bit(SK_CLOSE, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
@@ -1010,7 +1012,7 @@
 	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
 		sk, sk->sk_user_data);
 	if (svsk) {
-		set_bit(SK_DATA, &svsk->sk_flags);
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
@@ -1050,7 +1052,7 @@
 	if (!sock)
 		return NULL;
 
-	clear_bit(SK_CONN, &svsk->sk_flags);
+	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	err = kernel_accept(sock, &newsock, O_NONBLOCK);
 	if (err < 0) {
 		if (err == -ENOMEM)
@@ -1061,8 +1063,7 @@
 				   serv->sv_name, -err);
 		return NULL;
 	}
-
-	set_bit(SK_CONN, &svsk->sk_flags);
+	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 
 	err = kernel_getpeername(newsock, sin, &slen);
 	if (err < 0) {
@@ -1127,16 +1128,16 @@
 	int pnum, vlen;
 
 	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
-		svsk, test_bit(SK_DATA, &svsk->sk_flags),
-		test_bit(SK_CONN, &svsk->sk_flags),
-		test_bit(SK_CLOSE, &svsk->sk_flags));
+		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
 	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
 		svc_sock_received(svsk);
 		return svc_deferred_recv(rqstp);
 	}
 
-	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* sndbuf needs to have room for one request
 		 * per thread, otherwise we can stall even when the
 		 * network isn't a bottleneck.
@@ -1153,7 +1154,7 @@
 				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
 				    3 * serv->sv_max_mesg);
 
-	clear_bit(SK_DATA, &svsk->sk_flags);
+	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	/* Receive data. If we haven't got the record length yet, get
 	 * the next four bytes. Otherwise try to gobble up as much as
@@ -1212,7 +1213,7 @@
 		return -EAGAIN;	/* record not complete */
 	}
 	len = svsk->sk_reclen;
-	set_bit(SK_DATA, &svsk->sk_flags);
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	vec = rqstp->rq_vec;
 	vec[0] = rqstp->rq_arg.head[0];
@@ -1255,7 +1256,7 @@
 	return len;
 
  err_delete:
-	set_bit(SK_CLOSE, &svsk->sk_flags);
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 	return -EAGAIN;
 
  error:
@@ -1288,7 +1289,7 @@
 	reclen = htonl(0x80000000|((xbufp->len ) - 4));
 	memcpy(xbufp->head[0].iov_base, &reclen, 4);
 
-	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
+	if (test_bit(XPT_DEAD, &rqstp->rq_sock->sk_xprt.xpt_flags))
 		return -ENOTCONN;
 
 	sent = svc_sendto(rqstp, &rqstp->rq_res);
@@ -1297,7 +1298,7 @@
 		       rqstp->rq_sock->sk_server->sv_name,
 		       (sent<0)?"got error":"sent only",
 		       sent, xbufp->len);
-		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
+		set_bit(XPT_CLOSE, &rqstp->rq_sock->sk_xprt.xpt_flags);
 		svc_sock_enqueue(rqstp->rq_sock);
 		sent = -EAGAIN;
 	}
@@ -1387,9 +1388,9 @@
 
 	if (sk->sk_state == TCP_LISTEN) {
 		dprintk("setting up TCP socket for listening\n");
-		set_bit(SK_LISTENER, &svsk->sk_flags);
+		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
 		sk->sk_data_ready = svc_tcp_listen_data_ready;
-		set_bit(SK_CONN, &svsk->sk_flags);
+		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	} else {
 		dprintk("setting up TCP socket for reading\n");
 		sk->sk_state_change = svc_tcp_state_change;
@@ -1409,10 +1410,10 @@
 				    3 * svsk->sk_server->sv_max_mesg,
 				    3 * svsk->sk_server->sv_max_mesg);
 
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
-		set_bit(SK_DATA, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		if (sk->sk_state != TCP_ESTABLISHED)
-			set_bit(SK_CLOSE, &svsk->sk_flags);
+			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 	}
 }
 
@@ -1429,12 +1430,12 @@
 	list_for_each(le, &serv->sv_permsocks) {
 		struct svc_sock *svsk =
 			list_entry(le, struct svc_sock, sk_list);
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	}
 	list_for_each(le, &serv->sv_tempsocks) {
 		struct svc_sock *svsk =
 			list_entry(le, struct svc_sock, sk_list);
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock_bh(&serv->sv_lock);
 }
@@ -1471,7 +1472,7 @@
 			svsk = list_entry(serv->sv_tempsocks.prev,
 					  struct svc_sock,
 					  sk_list);
-			set_bit(SK_CLOSE, &svsk->sk_flags);
+			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 			svc_xprt_get(&svsk->sk_xprt);
 		}
 		spin_unlock_bh(&serv->sv_lock);
@@ -1575,10 +1576,10 @@
 	spin_unlock_bh(&pool->sp_lock);
 
 	len = 0;
-	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
-		dprintk("svc_recv: found SK_CLOSE\n");
+	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
+		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_socket(svsk);
-	} else if (test_bit(SK_LISTENER, &svsk->sk_flags)) {
+	} else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
 		if (newxpt) {
@@ -1605,7 +1606,7 @@
 		return -EAGAIN;
 	}
 	svsk->sk_lastrecv = get_seconds();
-	clear_bit(SK_OLD, &svsk->sk_flags);
+	clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
 	rqstp->rq_chandle.defer = svc_defer;
@@ -1652,7 +1653,7 @@
 
 	/* Grab svsk->sk_mutex to serialize outgoing data. */
 	mutex_lock(&svsk->sk_mutex);
-	if (test_bit(SK_DEAD, &svsk->sk_flags))
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
 		len = -ENOTCONN;
 	else
 		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
@@ -1688,21 +1689,21 @@
 	list_for_each_safe(le, next, &serv->sv_tempsocks) {
 		svsk = list_entry(le, struct svc_sock, sk_list);
 
-		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
+		if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
 			continue;
 		if (atomic_read(&svsk->sk_xprt.xpt_ref.refcount) > 1
-		    || test_bit(SK_BUSY, &svsk->sk_flags))
+		    || test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
 			continue;
 		svc_xprt_get(&svsk->sk_xprt);
 		list_move(le, &to_be_aged);
-		set_bit(SK_CLOSE, &svsk->sk_flags);
-		set_bit(SK_DETACHED, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock_bh(&serv->sv_lock);
 
 	while (!list_empty(&to_be_aged)) {
 		le = to_be_aged.next;
-		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
+		/* fiddling the sk_list node is safe 'cos we're XPT_DETACHED */
 		list_del_init(le);
 		svsk = list_entry(le, struct svc_sock, sk_list);
 
@@ -1748,7 +1749,7 @@
 		return NULL;
 	}
 
-	set_bit(SK_BUSY, &svsk->sk_flags);
+	set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	inet->sk_user_data = svsk;
 	svsk->sk_sock = sock;
 	svsk->sk_sk = inet;
@@ -1770,7 +1771,7 @@
 
 	spin_lock_bh(&serv->sv_lock);
 	if (is_temporary) {
-		set_bit(SK_TEMP, &svsk->sk_flags);
+		set_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
 		list_add(&svsk->sk_list, &serv->sv_tempsocks);
 		serv->sv_tmpcnt++;
 		if (serv->sv_temptimer.function == NULL) {
@@ -1781,7 +1782,7 @@
 					jiffies + svc_conn_age_period * HZ);
 		}
 	} else {
-		clear_bit(SK_TEMP, &svsk->sk_flags);
+		clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
 		list_add(&svsk->sk_list, &serv->sv_permsocks);
 	}
 	spin_unlock_bh(&serv->sv_lock);
@@ -1931,7 +1932,7 @@
 
 	spin_lock_bh(&serv->sv_lock);
 
-	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
+	if (!test_and_set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags))
 		list_del_init(&svsk->sk_list);
 	/*
 	 * We used to delete the svc_sock from whichever list
@@ -1940,9 +1941,9 @@
 	 * while still attached to a queue, the queue itself
 	 * is about to be destroyed (in svc_destroy).
 	 */
-	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
+	if (!test_and_set_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
 		BUG_ON(atomic_read(&svsk->sk_xprt.xpt_ref.refcount) < 2);
-		if (test_bit(SK_TEMP, &svsk->sk_flags))
+		if (test_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags))
 			serv->sv_tmpcnt--;
 		svc_xprt_put(&svsk->sk_xprt);
 	}
@@ -1952,26 +1953,26 @@
 
 static void svc_close_socket(struct svc_sock *svsk)
 {
-	set_bit(SK_CLOSE, &svsk->sk_flags);
-	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
 		/* someone else will have to effect the close */
 		return;
 
 	svc_xprt_get(&svsk->sk_xprt);
 	svc_delete_socket(svsk);
-	clear_bit(SK_BUSY, &svsk->sk_flags);
+	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	svc_xprt_put(&svsk->sk_xprt);
 }
 
 void svc_force_close_socket(struct svc_sock *svsk)
 {
-	set_bit(SK_CLOSE, &svsk->sk_flags);
-	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+	if (test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
 		/* Waiting to be processed, but no threads left,
 		 * So just remove it from the waiting list
 		 */
 		list_del_init(&svsk->sk_ready);
-		clear_bit(SK_BUSY, &svsk->sk_flags);
+		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	}
 	svc_close_socket(svsk);
 }
@@ -1996,7 +1997,7 @@
 	spin_lock(&svsk->sk_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
 	spin_unlock(&svsk->sk_lock);
-	set_bit(SK_DEFERRED, &svsk->sk_flags);
+	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	svc_sock_enqueue(svsk);
 	svc_xprt_put(&svsk->sk_xprt);
 }
@@ -2059,16 +2060,16 @@
 {
 	struct svc_deferred_req *dr = NULL;
 
-	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
+	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
 		return NULL;
 	spin_lock(&svsk->sk_lock);
-	clear_bit(SK_DEFERRED, &svsk->sk_flags);
+	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(SK_DEFERRED, &svsk->sk_flags);
+		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock(&svsk->sk_lock);
 	return dr;
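
As an aside, the XPT_BUSY rule documented in the comment block at the
top of the patch reduces to the following pattern (a simplified sketch
of the guard in svc_sock_enqueue, with pool handling and error paths
elided):

  /* Only one caller at a time may enqueue a given transport. */
  if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
  	return;		/* already enqueued; nothing to do */
  /* ... hand svsk to a pool thread; the thread later clears
   * XPT_BUSY (via svc_sock_received) and may re-enqueue ... */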