ibmveth: Enable TCP checksum offload

This patch enables TCP checksum offload support for IPv4
on ibmveth. It eliminates checksum generation and verification
for packets that are purely virtual and never touch a physical
network. A simple TCP_STREAM netperf run on a virtual network
with the maximum MTU set yielded a ~30% increase in throughput.
The feature is enabled by default on systems that support it,
but can be disabled with a module option.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
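---
Notes:

With skb->ip_summed == CHECKSUM_PARTIAL, the stack has filled in only the
pseudo-header checksum, stored at skb_transport_header(skb) +
skb->csum_offset, and expects the device to complete the sum. Since these
packets never leave the hypervisor's virtual switch, the driver instead
zeroes that field and sets the no_csum/csum_good descriptor bits so the
checksum is skipped end to end; skb_checksum_help() remains the software
fallback for non-TCP protocols.

The module option mentioned above is outside this excerpt. A minimal
sketch of how such a knob is typically exposed follows; the parameter
name, type, and permissions here are illustrative assumptions, not the
exact code from the full patch:

	/* Sketch only: requires <linux/moduleparam.h>. The real option
	 * name and wiring in ibmveth may differ. */
	static int csum_offload = 1;	/* feature enabled by default */
	module_param(csum_offload, int, 0444);
	MODULE_PARM_DESC(csum_offload,
			 "Enable checksum offload (0=disable, 1=enable, default 1)");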
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 0c35d72..9353890 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -47,6 +47,8 @@
 #include <linux/mm.h>
 #include <linux/ethtool.h>
 #include <linux/proc_fs.h>
+#include <linux/in.h>
+#include <linux/ip.h>
 #include <net/net_namespace.h>
 #include <asm/semaphore.h>
 #include <asm/hvcall.h>
@@ -132,6 +134,11 @@
 	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
 }
 
+static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
+{
+	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].csum_good);
+}
+
 /* setup the initial settings for a buffer pool */
 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
 {
@@ -695,6 +702,24 @@
 					desc[0].fields.length, DMA_TO_DEVICE);
 	desc[0].fields.valid   = 1;
 
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
+		ibmveth_error_printk("tx: failed to checksum packet\n");
+		tx_dropped++;
+		goto out;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;
+
+		desc[0].fields.no_csum = 1;
+		desc[0].fields.csum_good = 1;
+
+		/* Need to zero out the checksum */
+		buf[0] = 0;
+		buf[1] = 0;
+	}
+
 	if(dma_mapping_error(desc[0].fields.address)) {
 		ibmveth_error_printk("tx: unable to map initial fragment\n");
 		tx_map_failed++;
@@ -713,6 +738,10 @@
 				frag->size, DMA_TO_DEVICE);
 		desc[curfrag+1].fields.length = frag->size;
 		desc[curfrag+1].fields.valid  = 1;
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			desc[curfrag+1].fields.no_csum = 1;
+			desc[curfrag+1].fields.csum_good = 1;
+		}
 
 		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
 			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
@@ -801,7 +830,11 @@
 		} else {
 			int length = ibmveth_rxq_frame_length(adapter);
 			int offset = ibmveth_rxq_frame_offset(adapter);
+			int csum_good = ibmveth_rxq_csum_good(adapter);
+
 			skb = ibmveth_rxq_get_buffer(adapter);
+			if (csum_good)
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 			ibmveth_rxq_harvest_buffer(adapter);
 
@@ -962,8 +995,10 @@
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
+	long ret;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
+	union ibmveth_illan_attributes set_attr, ret_attr;
 
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
@@ -1057,6 +1092,24 @@
 
 	ibmveth_debug_printk("registering netdev...\n");
 
+	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr.desc);
+
+	if (ret == H_SUCCESS && !ret_attr.fields.active_trunk &&
+	    !ret_attr.fields.trunk_priority &&
+	    ret_attr.fields.csum_offload_padded_pkt_support) {
+		set_attr.desc = 0;
+		set_attr.fields.tcp_csum_offload_ipv4 = 1;
+
+		ret = h_illan_attributes(dev->unit_address, 0, set_attr.desc,
+					 &ret_attr.desc);
+
+		if (ret == H_SUCCESS)
+			netdev->features |= NETIF_F_IP_CSUM;
+		else
+			ret = h_illan_attributes(dev->unit_address, set_attr.desc,
+						 0, &ret_attr.desc);
+	}
+
 	rc = register_netdev(netdev);
 
 	if(rc) {
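The probe hunk above follows the H_ILLAN_ATTRIBUTES calling convention of
(unit_address, reset_mask, set_mask, &result): the first call with two
zero masks queries the current attributes, the second sets the
tcp_csum_offload_ipv4 bit, and the third, taken only if the set fails,
passes the same bits as the reset mask to back the change out. Offload is
attempted only on non-trunk adapters that support checksum offload for
padded packets.

For readers without ibmveth.h at hand, set_attr/ret_attr overlay the raw
64-bit word passed to the hcall with named bitfields. The sketch below
illustrates the pattern only; the bit positions and ordering are invented
and do not match the real header:

	/* Hypothetical layout -- field order and bit positions are
	 * assumptions, not the actual ibmveth.h definition. */
	union example_illan_attributes {
		u64 desc;	/* raw value for h_illan_attributes() */
		struct {
			u64 active_trunk:1;
			u64 trunk_priority:4;
			u64 csum_offload_padded_pkt_support:1;
			u64 tcp_csum_offload_ipv4:1;
			u64 reserved:57;
		} fields;
	};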