net: rmnet_data: Go back to worker thread for UL aggregation
A crash was seen when running the hrtimer solution, so revert the
change till it is resolved. The crash occurs due to interrupts
being enabled by a later function in the call stack within
interrupt context.
[ffffff8cc22b157c] __local_bh_enable_ip+0xa8/0x114
[ffffff8cc33495d0] _raw_spin_unlock_bh+0x30/0x3c
[ffffff8cc2e57ed8] ipa3_send+0x2a0/0xaa0
[ffffff8cc2e5c544] ipa3_tx_dp+0x59c/0x990
[ffffff8cc2ea7ef0] ipa3_wwan_xmit+0x204/0x338
[ffffff8cc30f0160] dev_hard_start_xmit+0xc4/0x29c
[ffffff8cc311d0f4] sch_direct_xmit+0x100/0x1cc
[ffffff8cc30f0890] __dev_queue_xmit+0x1f4/0x704
[ffffff8cc30f0dc8] dev_queue_xmit+0x28/0x34
[ffffff8cc333285c] rmnet_map_flush_packet_queue+0xc0/0x218
[ffffff8cc2344f9c] __hrtimer_run_queues+0x158/0x36c
[ffffff8cc2345f38] hrtimer_interrupt+0xb0/0x1f4
CRs-fixed: 2147503
Change-Id: I7b3617577cbf5e166380d4361b73e2f57a4bd042
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index f24b157..1c0f1060 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
@@ -47,6 +48,11 @@
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
+struct agg_work {
+ struct delayed_work work;
+ struct rmnet_phys_ep_config *config;
+};
+
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
@@ -160,21 +166,24 @@
}
/* rmnet_map_flush_packet_queue() - Transmits aggregeted frame on timeout
+ * @work: struct agg_work containing delayed work and skb to flush
*
- * This function is scheduled to run in a specified number of ns after
+ * This function is scheduled to run in a specified number of jiffies after
* the last frame transmitted by the network stack. When run, the buffer
* containing aggregated packets is finally transmitted on the underlying link.
*
*/
-enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
+static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
+ struct agg_work *real_work;
struct rmnet_phys_ep_config *config;
unsigned long flags;
struct sk_buff *skb;
int rc, agg_count = 0;
- config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
skb = 0;
+ real_work = (struct agg_work *)work;
+ config = real_work->config;
LOGD("%s", "Entering flush thread");
spin_lock_irqsave(&config->agg_lock, flags);
if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
@@ -202,8 +211,7 @@
rc = dev_queue_xmit(skb);
rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
}
-
- return HRTIMER_NORESTART;
+ kfree(work);
}
/* rmnet_map_aggregate() - Software aggregates multiple packets.
@@ -218,6 +226,7 @@
void rmnet_map_aggregate(struct sk_buff *skb,
struct rmnet_phys_ep_config *config) {
u8 *dest_buff;
+ struct agg_work *work;
unsigned long flags;
struct sk_buff *agg_skb;
struct timespec diff, last;
@@ -281,9 +290,7 @@
config->agg_skb = 0;
config->agg_count = 0;
memset(&config->agg_time, 0, sizeof(struct timespec));
- config->agg_state = RMNET_MAP_AGG_IDLE;
spin_unlock_irqrestore(&config->agg_lock, flags);
- hrtimer_cancel(&config->hrtimer);
LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
diff.tv_nsec, agg_count);
trace_rmnet_map_aggregate(skb, agg_count);
@@ -300,9 +307,19 @@
schedule:
if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ LOGE("Failed to allocate work item for packet %s",
+ "transfer. DATA PATH LIKELY BROKEN!");
+ config->agg_state = RMNET_MAP_AGG_IDLE;
+ spin_unlock_irqrestore(&config->agg_lock, flags);
+ return;
+ }
+ INIT_DELAYED_WORK((struct delayed_work *)work,
+ rmnet_map_flush_packet_queue);
+ work->config = config;
config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
- hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
- HRTIMER_MODE_REL);
+ schedule_delayed_work((struct delayed_work *)work, 1);
}
spin_unlock_irqrestore(&config->agg_lock, flags);
}