net: rmnet_data: Use hrtimer for UL aggregation timer

The delay argument in the schedule_delayed_work(struct delayed_work
*dwork, unsigned long delay) API is in jiffies. The system tick
seems to be 100Hz, so the minimum time resolution for the work to
be scheduled is 10ms.

Switch to hrtimer to achieve 1 ms granularity with a current timer
of 3 ms for the flush thread. A workqueue is immediately scheduled
in the same context after this timer expiry to do the tx work.

CRs-Fixed: 2157214
Change-Id: I29cbbee417e84b101ed34c0d29c2731bd52c3ec5
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index 1c0f1060..669a890 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -49,7 +49,7 @@
 MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
 
 struct agg_work {
-	struct delayed_work work;
+	struct work_struct work;
 	struct rmnet_phys_ep_config *config;
 };
 
@@ -165,25 +165,18 @@
 	return skbn;
 }
 
-/* rmnet_map_flush_packet_queue() - Transmits aggregeted frame on timeout
- * @work:        struct agg_work containing delayed work and skb to flush
- *
- * This function is scheduled to run in a specified number of jiffies after
- * the last frame transmitted by the network stack. When run, the buffer
- * containing aggregated packets is finally transmitted on the underlying link.
- *
- */
-static void rmnet_map_flush_packet_queue(struct work_struct *work)
+static void rmnet_map_flush_packet_work(struct work_struct *work)
 {
-	struct agg_work *real_work;
 	struct rmnet_phys_ep_config *config;
+	struct agg_work *real_work;
+	int rc, agg_count = 0;
 	unsigned long flags;
 	struct sk_buff *skb;
-	int rc, agg_count = 0;
 
-	skb = 0;
 	real_work = (struct agg_work *)work;
 	config = real_work->config;
+	skb = NULL;
+
 	LOGD("%s", "Entering flush thread");
 	spin_lock_irqsave(&config->agg_lock, flags);
 	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
@@ -194,7 +187,7 @@
 				LOGL("Agg count: %d", config->agg_count);
 			skb = config->agg_skb;
 			agg_count = config->agg_count;
-			config->agg_skb = 0;
+			config->agg_skb = NULL;
 			config->agg_count = 0;
 			memset(&config->agg_time, 0, sizeof(struct timespec));
 		}
@@ -211,9 +204,37 @@
 		rc = dev_queue_xmit(skb);
 		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
 	}
+
 	kfree(work);
 }
 
+/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
+ *
+ * This function is scheduled to run in a specified number of ns after
+ * the last frame transmitted by the network stack. When run, the buffer
+ * containing aggregated packets is finally transmitted on the underlying link.
+ *
+ */
+enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
+{
+	struct rmnet_phys_ep_config *config;
+	struct agg_work *work;
+
+	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		config->agg_state = RMNET_MAP_AGG_IDLE;
+
+		return HRTIMER_NORESTART;
+	}
+
+	INIT_WORK(&work->work, rmnet_map_flush_packet_work);
+	work->config = config;
+	schedule_work((struct work_struct *)work);
+	return HRTIMER_NORESTART;
+}
+
 /* rmnet_map_aggregate() - Software aggregates multiple packets.
  * @skb:        current packet being transmitted
  * @config:     Physical endpoint configuration of the ingress device
@@ -226,7 +247,6 @@
 void rmnet_map_aggregate(struct sk_buff *skb,
 			 struct rmnet_phys_ep_config *config) {
 	u8 *dest_buff;
-	struct agg_work *work;
 	unsigned long flags;
 	struct sk_buff *agg_skb;
 	struct timespec diff, last;
@@ -290,7 +310,9 @@
 		config->agg_skb = 0;
 		config->agg_count = 0;
 		memset(&config->agg_time, 0, sizeof(struct timespec));
+		config->agg_state = RMNET_MAP_AGG_IDLE;
 		spin_unlock_irqrestore(&config->agg_lock, flags);
+		hrtimer_cancel(&config->hrtimer);
 		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
 		     diff.tv_nsec, agg_count);
 		trace_rmnet_map_aggregate(skb, agg_count);
@@ -307,19 +329,9 @@
 
 schedule:
 	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
-		work = kmalloc(sizeof(*work), GFP_ATOMIC);
-		if (!work) {
-			LOGE("Failed to allocate work item for packet %s",
-			     "transfer. DATA PATH LIKELY BROKEN!");
-			config->agg_state = RMNET_MAP_AGG_IDLE;
-			spin_unlock_irqrestore(&config->agg_lock, flags);
-			return;
-		}
-		INIT_DELAYED_WORK((struct delayed_work *)work,
-				  rmnet_map_flush_packet_queue);
-		work->config = config;
 		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
-		schedule_delayed_work((struct delayed_work *)work, 1);
+		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
+			      HRTIMER_MODE_REL);
 	}
 	spin_unlock_irqrestore(&config->agg_lock, flags);
 }