[NET]: Introduce inet_connection_sock

This creates struct inet_connection_sock, moving out of struct
tcp_sock the members that are shareable with other INET
connection-oriented protocols, such as DCCP, which in my private tree
already uses most of these members.

The functions that operate on these members were renamed with an
inet_csk_ prefix, but have not yet been moved to a new file, so as to
ease the review of these changes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
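
For reviewers less familiar with the sock layering this relies on: struct
inet_sock must be the first member of inet_connection_sock (and struct sock
the first member of inet_sock), so the inet_csk() accessor added below is a
plain pointer cast. The following is a minimal, userspace-compilable sketch
of that layering only, not code from this patch; the dccp_sock shape and its
field names are hypothetical stand-ins for the real kernel types.

	/*
	 * Illustration only -- not part of this patch.  Shows why the
	 * inet_csk() downcast is valid: every layer is the first member
	 * of the layer built on top of it.  The surrounding structs are
	 * simplified stand-ins for the kernel ones.
	 */
	#include <stdio.h>

	struct sock { int sk_state; };            /* stand-in for struct sock      */
	struct inet_sock { struct sock sk; };     /* stand-in for struct inet_sock */

	struct inet_connection_sock {
		struct inet_sock icsk_inet;       /* must be the first member */
		unsigned long    icsk_timeout;
		unsigned int     icsk_rto;
		unsigned char    icsk_retransmits;
	};

	/* Hypothetical connection-oriented protocol built on the new layer. */
	struct dccp_sock {
		struct inet_connection_sock dccps_inet_connection; /* first member */
		unsigned int                dccps_service;         /* protocol-private */
	};

	/* Same trick as the kernel's inet_csk(): just a cast. */
	static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
	{
		return (struct inet_connection_sock *)sk;
	}

	int main(void)
	{
		struct dccp_sock ds = { .dccps_inet_connection = { .icsk_rto = 3 } };
		struct sock *sk = (struct sock *)&ds; /* what the stack passes around */

		/* Generic code reaches the shared members without knowing
		 * whether sk is TCP, DCCP, or something else. */
		printf("icsk_rto = %u\n", inet_csk(sk)->icsk_rto);
		return 0;
	}

With this in place, helpers such as inet_csk_reset_xmit_timer() and
inet_csk_reqsk_queue_add() below operate purely on the shared icsk_* state
and can later be moved out of TCP-specific files.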
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
new file mode 100644
index 0000000..ef60939
--- /dev/null
+++ b/include/net/inet_connection_sock.h
@@ -0,0 +1,86 @@
+/*
+ * NET		Generic infrastructure for INET connection oriented protocols.
+ *
+ *		Definitions for inet_connection_sock 
+ *
+ * Authors:	Many people, see the TCP sources
+ *
+ * 		From code originally in TCP
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#ifndef _INET_CONNECTION_SOCK_H
+#define _INET_CONNECTION_SOCK_H
+
+#include <linux/ip.h>
+#include <linux/timer.h>
+#include <net/request_sock.h>
+
+struct inet_bind_bucket;
+struct inet_hashinfo;
+
+/** inet_connection_sock - INET connection oriented sock
+ *
+ * @icsk_accept_queue:	   FIFO of established children 
+ * @icsk_bind_hash:	   Bind node
+ * @icsk_timeout:	   Timeout
+ * @icsk_retransmit_timer: Resend (no ack)
+ * @icsk_rto:		   Retransmit timeout
+ * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
+ * @icsk_pending:	   Scheduled timer event
+ * @icsk_backoff:	   Backoff
+ * @icsk_syn_retries:      Number of allowed SYN (or equivalent) retries
+ * @icsk_ack:		   Delayed ACK control data
+ */
+struct inet_connection_sock {
+	/* inet_sock has to be the first member! */
+	struct inet_sock	  icsk_inet;
+	struct request_sock_queue icsk_accept_queue;
+	struct inet_bind_bucket	  *icsk_bind_hash;
+	unsigned long		  icsk_timeout;
+ 	struct timer_list	  icsk_retransmit_timer;
+ 	struct timer_list	  icsk_delack_timer;
+	__u32			  icsk_rto;
+	__u8			  icsk_retransmits;
+	__u8			  icsk_pending;
+	__u8			  icsk_backoff;
+	__u8			  icsk_syn_retries;
+	struct {
+		__u8		  pending;	 /* ACK is pending			   */
+		__u8		  quick;	 /* Scheduled number of quick acks	   */
+		__u8		  pingpong;	 /* The session is interactive		   */
+		__u8		  blocked;	 /* Delayed ACK was blocked by socket lock */
+		__u32		  ato;		 /* Predicted tick of soft clock	   */
+		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
+		__u32		  lrcvtime;	 /* timestamp of last received data packet */
+		__u16		  last_seg_size; /* Size of last incoming segment	   */
+		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions	   */ 
+	} icsk_ack;
+};
+
+static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
+{
+	return (struct inet_connection_sock *)sk;
+}
+
+extern void inet_csk_init_xmit_timers(struct sock *sk,
+				      void (*retransmit_handler)(unsigned long),
+				      void (*delack_handler)(unsigned long),
+				      void (*keepalive_handler)(unsigned long));
+extern void inet_csk_clear_xmit_timers(struct sock *sk);
+
+extern struct request_sock *inet_csk_search_req(const struct sock *sk,
+						struct request_sock ***prevp,
+						const __u16 rport,
+						const __u32 raddr,
+						const __u32 laddr);
+extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
+			     struct sock *sk, unsigned short snum);
+
+extern struct dst_entry* inet_csk_route_req(struct sock *sk,
+					    const struct request_sock *req);
+
+#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b5c0d64..f0c21c0 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -17,7 +17,6 @@
 #include <linux/config.h>
 
 #include <linux/interrupt.h>
-#include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -26,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/wait.h>
 
+#include <net/inet_connection_sock.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 
@@ -185,9 +185,9 @@
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = inet_sk(sk)->bind_hash;
+	tb = inet_csk(sk)->icsk_bind_hash;
 	sk_add_bind_node(child, &tb->owners);
-	inet_sk(child)->bind_hash = tb;
+	inet_csk(child)->icsk_bind_hash = tb;
 	spin_unlock(&head->lock);
 }
 
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 334717bf..b7c7eec 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -224,17 +224,17 @@
 	return prev_qlen;
 }
 
-static inline int reqsk_queue_len(struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
 }
 
-static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt->qlen_young;
 }
 
-static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
 }
diff --git a/include/net/sock.h b/include/net/sock.h
index 828dc08..48cc337 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -493,9 +493,6 @@
 
 struct request_sock_ops;
 
-/* Here is the right place to enable sock refcounting debugging */
-//#define SOCK_REFCNT_DEBUG
-
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf8e664..a943c79 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -19,10 +19,11 @@
 #define _TCP_H
 
 #define TCP_DEBUG 1
+#define INET_CSK_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
 /* Cancel timers, when they are not required. */
-#undef TCP_CLEAR_TIMERS
+#undef INET_CSK_CLEAR_TIMERS
 
 #include <linux/config.h>
 #include <linux/list.h>
@@ -205,10 +206,10 @@
 #define TCPOLEN_SACK_BASE_ALIGNED	4
 #define TCPOLEN_SACK_PERBLOCK		8
 
-#define TCP_TIME_RETRANS	1	/* Retransmit timer */
-#define TCP_TIME_DACK		2	/* Delayed ack timer */
-#define TCP_TIME_PROBE0		3	/* Zero window probe timer */
-#define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */
+#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
+#define ICSK_TIME_DACK		2	/* Delayed ack timer */
+#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
+#define ICSK_TIME_KEEPOPEN	4	/* Keepalive timer */
 
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
@@ -257,9 +258,9 @@
 extern int tcp_memory_pressure;
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
+#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
-#define TCP_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) 1
 #endif
 
 /*
@@ -372,41 +373,42 @@
 
 extern void			tcp_rcv_space_adjust(struct sock *sk);
 
-enum tcp_ack_state_t
-{
-	TCP_ACK_SCHED = 1,
-	TCP_ACK_TIMER = 2,
-	TCP_ACK_PUSHED= 4
+enum inet_csk_ack_state_t {
+	ICSK_ACK_SCHED	= 1,
+	ICSK_ACK_TIMER  = 2,
+	ICSK_ACK_PUSHED = 4
 };
 
-static inline void tcp_schedule_ack(struct tcp_sock *tp)
+static inline void inet_csk_schedule_ack(struct sock *sk)
 {
-	tp->ack.pending |= TCP_ACK_SCHED;
+	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
 }
 
-static inline int tcp_ack_scheduled(struct tcp_sock *tp)
+static inline int inet_csk_ack_scheduled(const struct sock *sk)
 {
-	return tp->ack.pending&TCP_ACK_SCHED;
+	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk,
+					 const unsigned int pkts)
 {
-	if (tp->ack.quick) {
-		if (pkts >= tp->ack.quick) {
-			tp->ack.quick = 0;
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
+	if (icsk->icsk_ack.quick) {
+		if (pkts >= icsk->icsk_ack.quick) {
+			icsk->icsk_ack.quick = 0;
 			/* Leaving quickack mode we deflate ATO. */
-			tp->ack.ato = TCP_ATO_MIN;
+			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 		} else
-			tp->ack.quick -= pkts;
+			icsk->icsk_ack.quick -= pkts;
 	}
 }
 
-extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
+extern void tcp_enter_quickack_mode(struct sock *sk);
 
-static __inline__ void tcp_delack_init(struct tcp_sock *tp)
+static inline void inet_csk_delack_init(struct sock *sk)
 {
-	memset(&tp->ack, 0, sizeof(tp->ack));
+	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -440,7 +442,7 @@
 
 extern void			tcp_close(struct sock *sk, 
 					  long timeout);
-extern struct sock *		tcp_accept(struct sock *sk, int flags, int *err);
+extern struct sock *		inet_csk_accept(struct sock *sk, int flags, int *err);
 extern unsigned int		tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
 
 extern int			tcp_getsockopt(struct sock *sk, int level, 
@@ -534,15 +536,18 @@
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
-extern void tcp_clear_xmit_timers(struct sock *);
+static inline void tcp_clear_xmit_timers(struct sock *sk)
+{
+	inet_csk_clear_xmit_timers(sk);
+}
 
-extern void tcp_delete_keepalive_timer(struct sock *);
-extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
+extern void inet_csk_delete_keepalive_timer(struct sock *sk);
+extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 extern unsigned int tcp_current_mss(struct sock *sk, int large);
 
-#ifdef TCP_DEBUG
-extern const char tcp_timer_bug_msg[];
+#ifdef INET_CSK_DEBUG
+extern const char inet_csk_timer_bug_msg[];
 #endif
 
 /* tcp_diag.c */
@@ -554,70 +559,58 @@
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			 sk_read_actor_t recv_actor);
 
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->retransmit_timer);
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 #endif
-		break;
-	case TCP_TIME_DACK:
-		tp->ack.blocked = 0;
-		tp->ack.pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->delack_timer);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
-		break;
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
 #endif
-		return;
-	};
-
 }
 
 /*
  *	Reset the retransmission timer
  */
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+					     unsigned long when)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (when > TCP_RTO_MAX) {
-#ifdef TCP_DEBUG
-		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
+#ifdef INET_CSK_DEBUG
+		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
+			 sk, what, when, current_text_addr());
 #endif
 		when = TCP_RTO_MAX;
 	}
 
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = what;
-		tp->timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
-		break;
-
-	case TCP_TIME_DACK:
-		tp->ack.pending |= TCP_ACK_TIMER;
-		tp->ack.timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
-		break;
-
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = what;
+		icsk->icsk_timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+		icsk->icsk_ack.timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
 #endif
-		return;
-	};
 }
 
 /* Initialize RCV_MSS value.
@@ -637,7 +630,7 @@
 	hint = min(hint, TCP_MIN_RCVMSS);
 	hint = max(hint, TCP_MIN_MSS);
 
-	tp->ack.rcv_mss = hint;
+	inet_csk(sk)->icsk_ack.rcv_mss = hint;
 }
 
 static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -772,7 +765,7 @@
 
 	tp->packets_out += tcp_skb_pcount(skb);
 	if (!orig)
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 }
 
 static inline void tcp_packets_out_dec(struct tcp_sock *tp, 
@@ -939,8 +932,9 @@
 
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
-	if (!tp->packets_out && !tp->pending)
-		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	if (!tp->packets_out && !icsk->icsk_pending)
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto);
 }
 
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
@@ -1021,8 +1015,9 @@
 			tp->ucopy.memory = 0;
 		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
 			wake_up_interruptible(sk->sk_sleep);
-			if (!tcp_ack_scheduled(tp))
-				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
+			if (!inet_csk_ack_scheduled(sk))
+				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+						          (3 * TCP_RTO_MIN) / 4);
 		}
 		return 1;
 	}
@@ -1055,7 +1050,7 @@
 			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
 
 		sk->sk_prot->unhash(sk);
-		if (inet_sk(sk)->bind_hash &&
+		if (inet_csk(sk)->icsk_bind_hash &&
 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
 			inet_put_port(&tcp_hashinfo, sk);
 		/* fall through */
@@ -1186,51 +1181,55 @@
 	return tcp_win_from_space(sk->sk_rcvbuf); 
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
-					 struct sock *child)
+static inline void inet_csk_reqsk_queue_add(struct sock *sk,
+					    struct request_sock *req,
+					    struct sock *child)
 {
-	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
+	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-static inline void
-tcp_synq_removed(struct sock *sk, struct request_sock *req)
+static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
+						struct request_sock *req)
 {
-	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
-		tcp_delete_keepalive_timer(sk);
+	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
+		inet_csk_delete_keepalive_timer(sk);
 }
 
-static inline void tcp_synq_added(struct sock *sk)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk,
+					      const unsigned long timeout)
 {
-	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
-		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
+		inet_csk_reset_keepalive_timer(sk, timeout);
 }
 
-static inline int tcp_synq_len(struct sock *sk)
+static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 {
-	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_young(struct sock *sk)
+static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
 {
-	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_is_full(struct sock *sk)
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
-				   struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
+					       struct request_sock *req,
+					       struct request_sock **prev)
 {
-	reqsk_queue_unlink(&tp->accept_queue, req, prev);
+	reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
-				     struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
+					     struct request_sock *req,
+					     struct request_sock **prev)
 {
-	tcp_synq_unlink(tcp_sk(sk), req, prev);
-	tcp_synq_removed(sk, req);
+	inet_csk_reqsk_queue_unlink(sk, req, prev);
+	inet_csk_reqsk_queue_removed(sk, req);
 	reqsk_free(req);
 }
 
@@ -1265,12 +1264,13 @@
 	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
 }
 
-static inline int tcp_fin_time(const struct tcp_sock *tp)
+static inline int tcp_fin_time(const struct sock *sk)
 {
-	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+	const int rto = inet_csk(sk)->icsk_rto;
 
-	if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
-		fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+	if (fin_timeout < (rto << 2) - (rto >> 1))
+		fin_timeout = (rto << 2) - (rto >> 1);
 
 	return fin_timeout;
 }
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index 64980ee..c6b8439 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -88,7 +88,7 @@
 		 * it is surely retransmit. It is not in ECN RFC,
 		 * but Linux follows this rule. */
 		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
-			tcp_enter_quickack_mode(tp);
+			tcp_enter_quickack_mode((struct sock *)tp);
 	}
 }