enic: Add Accelerated RFS support

This patch adds support for Accelerated Receive Flow Steering (aRFS).

When the desired receive queue (rq) for a flow differs from the flow's current
rq, the kernel calls the driver's ndo_rx_flow_steer handler,
enic_rx_flow_steer(), which adds an IPv4 TCP/UDP hardware filter steering the
flow to the desired queue.
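
The handler is published to the stack through net_device_ops. A minimal
sketch of the hookup, assuming the enic_netdev_ops structure in enic_main.c
(only the aRFS-relevant field is shown):

	static const struct net_device_ops enic_netdev_ops = {
		/* ... existing enic ndo callbacks ... */
	#ifdef CONFIG_RFS_ACCEL
		.ndo_rx_flow_steer	= enic_rx_flow_steer,
	#endif
	};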

The driver registers a timer function, enic_flow_may_expire(), which runs
every HZ/4 jiffies (i.e. every quarter of a second). It checks whether each
added filter has expired by calling rps_may_expire_flow(); if a flow has
expired, the corresponding hardware filter is removed.
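
For reference, the expiry check relies on this helper, declared in
include/linux/netdevice.h under CONFIG_RFS_ACCEL:

	bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				 u32 flow_id, u16 filter_id);

It returns true once the stack no longer steers the flow through that
filter, i.e. the hardware filter is safe to remove.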

As of now the adapter supports only IPv4 TCP/UDP filters.

Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index f6703c4..7f27a4c7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -64,3 +64,216 @@
 
 	return ret;
 }
+
+#ifdef CONFIG_RFS_ACCEL
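+/* enic_flow_may_expire - timer handler that expires unused hw filters
+ *	@data: enic device, cast to unsigned long
+ *
+ * Scans ENIC_CLSF_EXPIRE_COUNT hash buckets per run. Any filter that
+ * rps_may_expire_flow() reports as no longer in use is deleted from
+ * hardware and removed from the flow table. Re-arms itself for HZ/4.
+ */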
+void enic_flow_may_expire(unsigned long data)
+{
+	struct enic *enic = (struct enic *)data;
+	bool res;
+	int j;
+
+	spin_lock(&enic->rfs_h.lock);
+	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
+		struct hlist_head *hhead;
+		struct hlist_node *tmp;
+		struct enic_rfs_fltr_node *n;
+
+		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
+		hlist_for_each_entry_safe(n, tmp, hhead, node) {
+			res = rps_may_expire_flow(enic->netdev, n->rq_id,
+						  n->flow_id, n->fltr_id);
+			if (res) {
+				res = enic_delfltr(enic, n->fltr_id);
+				if (unlikely(res))
+					continue;
+				hlist_del(&n->node);
+				kfree(n);
+				enic->rfs_h.free++;
+			}
+		}
+	}
+	spin_unlock(&enic->rfs_h.lock);
+	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
+ *	@enic: enic data
+ */
+void enic_rfs_flw_tbl_init(struct enic *enic)
+{
+	int i;
+
+	spin_lock_init(&enic->rfs_h.lock);
+	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
+		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
+	enic->rfs_h.max = enic->config.num_arfs;
+	enic->rfs_h.free = enic->rfs_h.max;
+	enic->rfs_h.toclean = 0;
+	init_timer(&enic->rfs_h.rfs_may_expire);
+	enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
+	enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
+	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
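+/* enic_rfs_flw_tbl_free - remove all hw filters and empty the flow table
+ *	@enic: enic data
+ */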
+void enic_rfs_flw_tbl_free(struct enic *enic)
+{
+	int i, res;
+
+	del_timer_sync(&enic->rfs_h.rfs_may_expire);
+	spin_lock(&enic->rfs_h.lock);
+	enic->rfs_h.free = 0;
+	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
+		struct hlist_head *hhead;
+		struct hlist_node *tmp;
+		struct enic_rfs_fltr_node *n;
+
+		hhead = &enic->rfs_h.ht_head[i];
+		hlist_for_each_entry_safe(n, tmp, hhead, node) {
+			enic_delfltr(enic, n->fltr_id);
+			hlist_del(&n->node);
+			kfree(n);
+		}
+	}
+	spin_unlock(&enic->rfs_h.lock);
+}
+
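+/* htbl_key_search - find the filter node matching the dissected flow keys
+ *	@h: hash bucket to search
+ *	@k: flow keys (addresses, ports and protocols) to match
+ */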
+static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
+						  struct flow_keys *k)
+{
+	struct enic_rfs_fltr_node *tpos;
+
+	hlist_for_each_entry(tpos, h, node)
+		if (tpos->keys.src == k->src &&
+		    tpos->keys.dst == k->dst &&
+		    tpos->keys.ports == k->ports &&
+		    tpos->keys.ip_proto == k->ip_proto &&
+		    tpos->keys.n_proto == k->n_proto)
+			return tpos;
+	return NULL;
+}
+
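+/* enic_rx_flow_steer - .ndo_rx_flow_steer handler
+ *	@dev: net device
+ *	@skb: packet of the flow being steered
+ *	@rxq_index: rq to which the flow should be steered
+ *	@flow_id: flow id assigned to the flow by the stack
+ *
+ * Adds (or moves) an IPv4 TCP/UDP 5-tuple hw filter for the flow.
+ * Returns the hw filter id on success or a negative errno.
+ */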
+int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+		       u16 rxq_index, u32 flow_id)
+{
+	struct flow_keys keys;
+	struct enic_rfs_fltr_node *n;
+	struct enic *enic;
+	u16 tbl_idx;
+	int res, i;
+
+	enic = netdev_priv(dev);
+	res = skb_flow_dissect(skb, &keys);
+	if (!res || keys.n_proto != htons(ETH_P_IP) ||
+	    (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+		return -EPROTONOSUPPORT;
+
+	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
+	spin_lock(&enic->rfs_h.lock);
+	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
+
+	if (n) { /* entry already present */
+		if (rxq_index == n->rq_id) {
+			res = -EEXIST;
+			goto ret_unlock;
+		}
+
+		/* The desired rq changed for the flow; we need to delete
+		 * the old fltr and add a new one.
+		 *
+		 * The moment we delete the fltr, incoming pkts are steered
+		 * to the default rq based on RSS. Once we add the new
+		 * filter, incoming pkts are put in the desired queue. This
+		 * could cause out-of-order pkts.
+		 *
+		 * Let's first try adding the new fltr, then delete the old one.
+		 */
+		i = --enic->rfs_h.free;
+		/* clsf tbl is full, we have to delete the old fltr first */
+		if (unlikely(i < 0)) {
+			enic->rfs_h.free++;
+			res = enic_delfltr(enic, n->fltr_id);
+			if (unlikely(res < 0))
+				goto ret_unlock;
+			res = enic_addfltr_5t(enic, &keys, rxq_index);
+			if (res < 0) {
+				hlist_del(&n->node);
+				kfree(n);
+				enic->rfs_h.free++;
+				goto ret_unlock;
+			}
+		/* add the new fltr first, then delete the old fltr */
+		} else {
+			int ret;
+
+			res = enic_addfltr_5t(enic, &keys, rxq_index);
+			if (res < 0) {
+				enic->rfs_h.free++;
+				goto ret_unlock;
+			}
+			ret = enic_delfltr(enic, n->fltr_id);
+			/* Deleting the old fltr failed. Put it back on the
+			 * list so enic_flow_may_expire() can delete it later.
+			 */
+			if (unlikely(ret < 0)) {
+				struct enic_rfs_fltr_node *d;
+				struct hlist_head *head;
+
+				head = &enic->rfs_h.ht_head[tbl_idx];
+				d = kmalloc(sizeof(*d), GFP_ATOMIC);
+				if (d) {
+					/* keep old ids for the expiry check */
+					d->fltr_id = n->fltr_id;
+					d->flow_id = n->flow_id;
+					d->rq_id = n->rq_id;
+					INIT_HLIST_NODE(&d->node);
+					hlist_add_head(&d->node, head);
+				}
+			} else {
+				enic->rfs_h.free++;
+			}
+		}
+		n->rq_id = rxq_index;
+		n->fltr_id = res;
+		n->flow_id = flow_id;
+	/* entry not present */
+	} else {
+		i = --enic->rfs_h.free;
+		if (i <= 0) {
+			enic->rfs_h.free++;
+			res = -EBUSY;
+			goto ret_unlock;
+		}
+
+		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		if (!n) {
+			res = -ENOMEM;
+			enic->rfs_h.free++;
+			goto ret_unlock;
+		}
+
+		res = enic_addfltr_5t(enic, &keys, rxq_index);
+		if (res < 0) {
+			kfree(n);
+			enic->rfs_h.free++;
+			goto ret_unlock;
+		}
+		n->rq_id = rxq_index;
+		n->fltr_id = res;
+		n->flow_id = flow_id;
+		n->keys = keys;
+		INIT_HLIST_NODE(&n->node);
+		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
+	}
+
+ret_unlock:
+	spin_unlock(&enic->rfs_h.lock);
+	return res;
+}
+
+#else
+
+void enic_rfs_flw_tbl_init(struct enic *enic)
+{
+}
+
+void enic_rfs_flw_tbl_free(struct enic *enic)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */