msm: bam_dmux: save irqs when grabbing tx_pool lock
The tx_pool spinlock is taken from both interrupt (atomic) and process
(non-atomic) context. If an interrupt that also needs the lock fires on
a CPU that is already holding it in process context, that CPU spins
forever on its own lock and deadlocks. Take the lock with
spin_lock_irqsave()/spin_unlock_irqrestore() so local interrupts are
disabled for the duration of the critical section.
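
For context, a minimal sketch of the locking pattern this patch moves to
(not part of the patch; demo_lock, demo_pkt, demo_enqueue and demo_isr
are hypothetical names used only for illustration):

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/interrupt.h>

    /* Hypothetical lock and list shared between process context and an IRQ handler. */
    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_list);

    struct demo_pkt {
    	struct list_head list_node;
    };

    /*
     * Process-context producer: local IRQs must be disabled while the lock
     * is held, otherwise an interrupt taken on this CPU that also grabs
     * demo_lock spins forever (same-CPU deadlock).
     */
    static void demo_enqueue(struct demo_pkt *pkt)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&demo_lock, flags);
    	list_add_tail(&pkt->list_node, &demo_list);
    	spin_unlock_irqrestore(&demo_lock, flags);
    }

    /* Interrupt-context consumer: uses the same irqsave variant so the
     * locking rule is uniform regardless of calling context. */
    static irqreturn_t demo_isr(int irq, void *dev_id)
    {
    	unsigned long flags;
    	struct demo_pkt *pkt = NULL;

    	spin_lock_irqsave(&demo_lock, flags);
    	if (!list_empty(&demo_list)) {
    		pkt = list_first_entry(&demo_list, struct demo_pkt, list_node);
    		list_del(&pkt->list_node);
    	}
    	spin_unlock_irqrestore(&demo_lock, flags);

    	return pkt ? IRQ_HANDLED : IRQ_NONE;
    }

The irqsave/irqrestore pair is used rather than unconditionally enabling
interrupts on unlock, so callers that already run with interrupts
disabled have their previous state preserved.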
Change-Id: I689543aa6c1c1652c5bfe3e4b3b579a4077b2545
Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index e3ca6d1..fc188ed 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -360,6 +360,7 @@
int rc;
struct tx_pkt_info *pkt;
dma_addr_t dma_address;
+ unsigned long flags;
pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
if (pkt == NULL) {
@@ -380,17 +381,17 @@
pkt->dma_address = dma_address;
pkt->is_cmd = 1;
INIT_WORK(&pkt->work, bam_mux_write_done);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_add_tail(&pkt->list_node, &bam_tx_pool);
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
if (rc) {
DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_del(&pkt->list_node);
DBG_INC_TX_SPS_FAILURE_CNT();
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
kfree(pkt);
}
@@ -409,10 +410,10 @@
if (in_global_reset)
return;
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
node = bam_tx_pool.next;
list_del(node);
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
info = container_of(work, struct tx_pkt_info, work);
if (info->is_cmd) {
kfree(info->skb);
@@ -524,17 +525,17 @@
pkt->dma_address = dma_address;
pkt->is_cmd = 0;
INIT_WORK(&pkt->work, bam_mux_write_done);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_add_tail(&pkt->list_node, &bam_tx_pool);
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
if (rc) {
DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_del(&pkt->list_node);
DBG_INC_TX_SPS_FAILURE_CNT();
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
kfree(pkt);
} else {
spin_lock_irqsave(&bam_ch[id].lock, flags);
@@ -1089,6 +1090,7 @@
struct list_head *node;
struct tx_pkt_info *info;
int temp_remote_status;
+ unsigned long flags;
if (code != SUBSYS_AFTER_SHUTDOWN)
return NOTIFY_DONE;
@@ -1107,7 +1109,7 @@
}
}
/*cleanup UL*/
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
while (!list_empty(&bam_tx_pool)) {
node = bam_tx_pool.next;
list_del(node);
@@ -1126,7 +1128,7 @@
}
kfree(info);
}
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
return NOTIFY_DONE;