tipc: Combine port structure with tipc_port structure

Merge two distinct structures containing information about a TIPC port
into a single structure. The structures were previously kept separate
so that public information about a port could be made available to
applications using TIPC's native API, while the remaining information
was kept private for use by TIPC itself. However, now that the native
API has been removed, there is no longer any need for this somewhat
confusing arrangement.

Since the public structure (struct tipc_port) was already embedded within
the private structure (struct port), the change largely involves replacing
instances of "publ.foo" with "foo".
The changes do not otherwise alter the operation of TIPC ports.
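
For reference, a rough before-and-after sketch of the layout. The field
list is trimmed for illustration and the types are inferred from how the
fields are used in the hunks below; the real structures carry more fields:

	/* Before: public part embedded within the private structure */
	struct tipc_port {			/* visible via the native API */
		u32 ref;
		u32 max_pkt;
		int congested;
		spinlock_t *lock;
		struct tipc_msg phdr;
		/* ... */
	};

	struct port {				/* private to TIPC */
		struct tipc_port publ;		/* embedded public part */
		struct list_head wait_list;
		u32 waiting_pkts;
		void (*wakeup)(struct tipc_port *);
		/* ... */
	};

	/* After: one structure, so "p_ptr->publ.foo" becomes "p_ptr->foo" */
	struct tipc_port {
		u32 ref;
		u32 max_pkt;
		int congested;
		spinlock_t *lock;
		struct tipc_msg phdr;
		struct list_head wait_list;
		u32 waiting_pkts;
		void (*wakeup)(struct tipc_port *);
		/* ... */
	};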

Signed-off-by: Allan Stephens <Allan.Stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 18702f5..e30770d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2,7 +2,7 @@
  * net/tipc/link.c: TIPC link code
  *
  * Copyright (c) 1996-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,7 +90,7 @@
 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
 static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
-static int  link_send_sections_long(struct port *sender,
+static int  link_send_sections_long(struct tipc_port *sender,
 				    struct iovec const *msg_sect,
 				    u32 num_sect, u32 destnode);
 static void link_check_defragm_bufs(struct link *l_ptr);
@@ -406,7 +406,7 @@
 
 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
 {
-	struct port *p_ptr;
+	struct tipc_port *p_ptr;
 
 	spin_lock_bh(&tipc_port_list_lock);
 	p_ptr = tipc_port_lock(origport);
@@ -415,7 +415,7 @@
 			goto exit;
 		if (!list_empty(&p_ptr->wait_list))
 			goto exit;
-		p_ptr->publ.congested = 1;
+		p_ptr->congested = 1;
 		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
 		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
 		l_ptr->stats.link_congs++;
@@ -428,8 +428,8 @@
 
 void tipc_link_wakeup_ports(struct link *l_ptr, int all)
 {
-	struct port *p_ptr;
-	struct port *temp_p_ptr;
+	struct tipc_port *p_ptr;
+	struct tipc_port *temp_p_ptr;
 	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
 
 	if (all)
@@ -445,11 +445,11 @@
 		if (win <= 0)
 			break;
 		list_del_init(&p_ptr->wait_list);
-		spin_lock_bh(p_ptr->publ.lock);
-		p_ptr->publ.congested = 0;
-		p_ptr->wakeup(&p_ptr->publ);
+		spin_lock_bh(p_ptr->lock);
+		p_ptr->congested = 0;
+		p_ptr->wakeup(p_ptr);
 		win -= p_ptr->waiting_pkts;
-		spin_unlock_bh(p_ptr->publ.lock);
+		spin_unlock_bh(p_ptr->lock);
 	}
 
 exit:
@@ -1027,12 +1027,12 @@
  * except for total message length.
  * Returns user data length or errno.
  */
-int tipc_link_send_sections_fast(struct port *sender,
+int tipc_link_send_sections_fast(struct tipc_port *sender,
 				 struct iovec const *msg_sect,
 				 const u32 num_sect,
 				 u32 destaddr)
 {
-	struct tipc_msg *hdr = &sender->publ.phdr;
+	struct tipc_msg *hdr = &sender->phdr;
 	struct link *l_ptr;
 	struct sk_buff *buf;
 	struct tipc_node *node;
@@ -1045,7 +1045,7 @@
 	 * (Must not hold any locks while building message.)
 	 */
 
-	res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
+	res = tipc_msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
 			!sender->user_port, &buf);
 
 	read_lock_bh(&tipc_net_lock);
@@ -1056,7 +1056,7 @@
 		if (likely(l_ptr)) {
 			if (likely(buf)) {
 				res = link_send_buf_fast(l_ptr, buf,
-							 &sender->publ.max_pkt);
+							 &sender->max_pkt);
 				if (unlikely(res < 0))
 					buf_discard(buf);
 exit:
@@ -1075,7 +1075,7 @@
 			if (link_congested(l_ptr) ||
 			    !list_empty(&l_ptr->b_ptr->cong_links)) {
 				res = link_schedule_port(l_ptr,
-							 sender->publ.ref, res);
+							 sender->ref, res);
 				goto exit;
 			}
 
@@ -1084,12 +1084,12 @@
 			 * then re-try fast path or fragment the message
 			 */
 
-			sender->publ.max_pkt = l_ptr->max_pkt;
+			sender->max_pkt = l_ptr->max_pkt;
 			tipc_node_unlock(node);
 			read_unlock_bh(&tipc_net_lock);
 
 
-			if ((msg_hdr_sz(hdr) + res) <= sender->publ.max_pkt)
+			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
 				goto again;
 
 			return link_send_sections_long(sender, msg_sect,
@@ -1123,14 +1123,14 @@
  *
  * Returns user data length or errno.
  */
-static int link_send_sections_long(struct port *sender,
+static int link_send_sections_long(struct tipc_port *sender,
 				   struct iovec const *msg_sect,
 				   u32 num_sect,
 				   u32 destaddr)
 {
 	struct link *l_ptr;
 	struct tipc_node *node;
-	struct tipc_msg *hdr = &sender->publ.phdr;
+	struct tipc_msg *hdr = &sender->phdr;
 	u32 dsz = msg_data_sz(hdr);
 	u32 max_pkt, fragm_sz, rest;
 	struct tipc_msg fragm_hdr;
@@ -1142,7 +1142,7 @@
 
 again:
 	fragm_no = 1;
-	max_pkt = sender->publ.max_pkt - INT_H_SIZE;
+	max_pkt = sender->max_pkt - INT_H_SIZE;
 		/* leave room for tunnel header in case of link changeover */
 	fragm_sz = max_pkt - INT_H_SIZE;
 		/* leave room for fragmentation header in each fragment */
@@ -1157,7 +1157,7 @@
 
 	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
 		 INT_H_SIZE, msg_destnode(hdr));
-	msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+	msg_set_link_selector(&fragm_hdr, sender->ref);
 	msg_set_size(&fragm_hdr, max_pkt);
 	msg_set_fragm_no(&fragm_hdr, 1);
 
@@ -1238,13 +1238,13 @@
 	node = tipc_node_find(destaddr);
 	if (likely(node)) {
 		tipc_node_lock(node);
-		l_ptr = node->active_links[sender->publ.ref & 1];
+		l_ptr = node->active_links[sender->ref & 1];
 		if (!l_ptr) {
 			tipc_node_unlock(node);
 			goto reject;
 		}
 		if (l_ptr->max_pkt < max_pkt) {
-			sender->publ.max_pkt = l_ptr->max_pkt;
+			sender->max_pkt = l_ptr->max_pkt;
 			tipc_node_unlock(node);
 			for (; buf_chain; buf_chain = buf) {
 				buf = buf_chain->next;