Merge changes If9c6d64e,Ice2ac474,I38bcd9d6,I167d09c1 into msm-3.0

* changes:
  msm: bam_dmux: remove obsolete mutex
  msm: bam_dmux: change mutex to spinlock
  msm: bam_dmux: use GFP_ATOMIC for kmalloc() in atomic context
  msm: bam_dmux: release read lock in error scenarios
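
Taken together, the four changes make the bam_dmux TX path safe to run in
atomic context: the coarse bam_mux_lock serializer is dropped, the TX packet
pool trades its mutex for a spinlock, an allocation performed while
ul_wakeup_lock is held switches to GFP_ATOMIC, and every early error return
now releases the read lock it acquired. A minimal sketch of the
list-under-spinlock pattern the TX pool moves to (names here are
illustrative, not the driver's):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct pkt {
            struct list_head list_node;
            /* payload omitted */
    };

    static LIST_HEAD(pkt_pool);
    static DEFINE_SPINLOCK(pkt_pool_lock);  /* never held across a sleep */

    static void pkt_pool_add(struct pkt *p)
    {
            spin_lock(&pkt_pool_lock);
            list_add_tail(&p->list_node, &pkt_pool);
            spin_unlock(&pkt_pool_lock);
    }

    static struct pkt *pkt_pool_pop(void)
    {
            struct pkt *p = NULL;

            spin_lock(&pkt_pool_lock);
            if (!list_empty(&pkt_pool)) {
                    p = list_first_entry(&pkt_pool, struct pkt, list_node);
                    list_del(&p->list_node);
            }
            spin_unlock(&pkt_pool_lock);
            return p;
    }
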
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index a9b1a77..317248e 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -139,9 +139,9 @@
 static int polling_mode;
 
 static LIST_HEAD(bam_rx_pool);
-static DEFINE_MUTEX(bam_rx_pool_lock);
+static DEFINE_MUTEX(bam_rx_pool_mutexlock);
 static LIST_HEAD(bam_tx_pool);
-static DEFINE_MUTEX(bam_tx_pool_lock);
+static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
 
 struct bam_mux_hdr {
 	uint16_t magic_num;
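
The renames encode the lock type in the name, and the TX pool's lock becomes
a spinlock because bam_mux_write() manipulates bam_tx_pool while holding
read_lock(&ul_wakeup_lock); an rwlock_t is a spinning lock, so sleeping
primitives such as mutex_lock() must not be taken under it. Note that the
plain spin_lock() variant is sufficient only as long as bam_tx_pool is never
touched from hard-IRQ context; if it ever were, the spin_lock_irqsave()
family would be needed.
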
@@ -157,7 +157,6 @@
 static void handle_bam_mux_cmd(struct work_struct *work);
 static void rx_timer_work_func(struct work_struct *work);
 
-static DEFINE_MUTEX(bam_mux_lock);
 static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
 
 static struct workqueue_struct *bam_mux_rx_workqueue;
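
With the TX pool protected by its own spinlock, bam_mux_lock no longer guards
anything: as the hunks below show, its only users wrapped per-packet
allocation, DMA mapping, and queuing, which need no global serialization once
the pool has a dedicated lock, so the mutex is removed outright.
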
@@ -226,9 +225,9 @@
 	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
 	ptr = skb_put(info->skb, BUFFER_SIZE);
 
-	mutex_lock(&bam_rx_pool_lock);
+	mutex_lock(&bam_rx_pool_mutexlock);
 	list_add_tail(&info->list_node, &bam_rx_pool);
-	mutex_unlock(&bam_rx_pool_lock);
+	mutex_unlock(&bam_rx_pool_mutexlock);
 
 	/* need a way to handle error case */
 	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
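
The RX pool, by contrast, keeps its mutex: it is filled and drained from
workqueue (process) context, where sleeping is allowed, so only the name
changes to make the lock type explicit at each call site.
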
@@ -339,12 +338,10 @@
 	struct tx_pkt_info *pkt;
 	dma_addr_t dma_address;
 
-	mutex_lock(&bam_mux_lock);
 	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_KERNEL);
 	if (pkt == NULL) {
 		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
 		rc = -ENOMEM;
-		mutex_unlock(&bam_mux_lock);
 		return rc;
 	}
 
@@ -353,7 +350,6 @@
 	if (!dma_address) {
 		pr_err("%s: dma_map_single() failed\n", __func__);
 		rc = -ENOMEM;
-		mutex_unlock(&bam_mux_lock);
 		return rc;
 	}
 	pkt->skb = (struct sk_buff *)(data);
@@ -361,13 +357,12 @@
 	pkt->dma_address = dma_address;
 	pkt->is_cmd = 1;
 	INIT_WORK(&pkt->work, bam_mux_write_done);
-	mutex_lock(&bam_tx_pool_lock);
+	spin_lock(&bam_tx_pool_spinlock);
 	list_add_tail(&pkt->list_node, &bam_tx_pool);
-	mutex_unlock(&bam_tx_pool_lock);
+	spin_unlock(&bam_tx_pool_spinlock);
 	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
 				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
 
-	mutex_unlock(&bam_mux_lock);
 	ul_packet_written = 1;
 	return rc;
 }
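
In bam_mux_write_cmd() the command packet now rides on bam_tx_pool_spinlock
alone. One subtlety worth noting: sps_transfer_one() is called after the
spinlock is dropped, so the lock protects only the list structure itself;
correctness further relies on bam_mux_write_done() seeing completions in
submission order, as the next hunk shows.
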
@@ -382,10 +377,10 @@
 
 	if (in_global_reset)
 		return;
-	mutex_lock(&bam_tx_pool_lock);
+	spin_lock(&bam_tx_pool_spinlock);
 	node = bam_tx_pool.next;
 	list_del(node);
-	mutex_unlock(&bam_tx_pool_lock);
+	spin_unlock(&bam_tx_pool_spinlock);
 	info = container_of(work, struct tx_pkt_info, work);
 	if (info->is_cmd) {
 		kfree(info->skb);
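
bam_mux_write_done() pops the pool head but recovers its own packet from the
work pointer, which is consistent only if completions arrive in the order
packets were queued. A sketch of what that invariant would look like if made
explicit (illustrative only, not part of this patch):

    struct tx_pkt_info *info = container_of(work, struct tx_pkt_info, work);

    spin_lock(&bam_tx_pool_spinlock);
    /* FIFO assumption: the completing packet is the oldest one queued */
    WARN_ON(bam_tx_pool.next != &info->list_node);
    list_del(&info->list_node);
    spin_unlock(&bam_tx_pool_spinlock);
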
@@ -446,7 +441,7 @@
 					  4 - (skb->len & 0x3), GFP_ATOMIC);
 		if (new_skb == NULL) {
 			pr_err("%s: cannot allocate skb\n", __func__);
-			return -ENOMEM;
+			goto write_fail;
 		}
 		dev_kfree_skb_any(skb);
 		skb = new_skb;
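
This is the first of the read-lock fixes: bam_mux_write() runs with
read_lock(&ul_wakeup_lock) held, and the old return -ENOMEM left that lock
held forever. Routing the failure to a common write_fail label (added below)
guarantees the unlock happens on every path.
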
@@ -474,32 +469,36 @@
 	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
 	if (pkt == NULL) {
 		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
-		if (new_skb)
-			dev_kfree_skb_any(new_skb);
-		return -ENOMEM;
+		goto write_fail2;
 	}
 
 	dma_address = dma_map_single(NULL, skb->data, skb->len,
 					DMA_TO_DEVICE);
 	if (!dma_address) {
 		pr_err("%s: dma_map_single() failed\n", __func__);
-		if (new_skb)
-			dev_kfree_skb_any(new_skb);
-		kfree(pkt);
-		return -ENOMEM;
+		goto write_fail3;
 	}
 	pkt->skb = skb;
 	pkt->dma_address = dma_address;
 	pkt->is_cmd = 0;
 	INIT_WORK(&pkt->work, bam_mux_write_done);
-	mutex_lock(&bam_tx_pool_lock);
+	spin_lock(&bam_tx_pool_spinlock);
 	list_add_tail(&pkt->list_node, &bam_tx_pool);
-	mutex_unlock(&bam_tx_pool_lock);
+	spin_unlock(&bam_tx_pool_spinlock);
 	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
 				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
 	ul_packet_written = 1;
 	read_unlock(&ul_wakeup_lock);
 	return rc;
+
+write_fail3:
+	kfree(pkt);
+write_fail2:
+	if (new_skb)
+		dev_kfree_skb_any(new_skb);
+write_fail:
+	read_unlock(&ul_wakeup_lock);
+	return -ENOMEM;
 }
 
 int msm_bam_dmux_open(uint32_t id, void *priv,
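
The new tail of bam_mux_write() is the standard kernel unwind ladder: labels
are laid out in reverse order of acquisition so each failure releases exactly
what had been taken before it. A generic, self-contained sketch of the
pattern (hypothetical names, not the driver's):

    #include <linux/slab.h>

    struct demo {
            void *buf;
            void *pkt;
    };

    /* Stub standing in for the hardware submit call. */
    static int demo_submit(struct demo *d)
    {
            return 0;
    }

    static int demo_setup(struct demo *d)
    {
            int rc = -ENOMEM;

            d->buf = kmalloc(64, GFP_ATOMIC);
            if (!d->buf)
                    goto fail_buf;

            d->pkt = kmalloc(128, GFP_ATOMIC);
            if (!d->pkt)
                    goto fail_pkt;

            if (demo_submit(d)) {
                    rc = -EIO;
                    goto fail_submit;
            }
            return 0;

    fail_submit:
            kfree(d->pkt);
    fail_pkt:
            kfree(d->buf);
    fail_buf:
            return rc;
    }
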
@@ -597,9 +596,10 @@
 		return 0;
 	}
 
-	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
+	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
 	if (hdr == NULL) {
 		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
+		read_unlock(&ul_wakeup_lock);
 		return -ENOMEM;
 	}
 	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
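
Two fixes in one hunk: this allocation happens under
read_lock(&ul_wakeup_lock), where GFP_KERNEL could sleep (a bug while holding
a spinning lock), so it becomes GFP_ATOMIC, which fails instead of sleeping;
and the failure path previously returned with the read lock still held, so a
read_unlock() is added before the -ENOMEM return. Distilled, the shape is:

    read_lock(&ul_wakeup_lock);
    hdr = kmalloc(sizeof(*hdr), GFP_ATOMIC); /* must not sleep under an rwlock */
    if (hdr == NULL) {
            read_unlock(&ul_wakeup_lock);    /* don't leak the lock on error */
            return -ENOMEM;
    }
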
@@ -634,10 +634,10 @@
 			if (iov.addr == 0)
 				break;
 			inactive_cycles = 0;
-			mutex_lock(&bam_rx_pool_lock);
+			mutex_lock(&bam_rx_pool_mutexlock);
 			node = bam_rx_pool.next;
 			list_del(node);
-			mutex_unlock(&bam_rx_pool_lock);
+			mutex_unlock(&bam_rx_pool_mutexlock);
 			info = container_of(node, struct rx_pkt_info,
 							list_node);
 			handle_bam_mux_cmd(&info->work);
@@ -680,10 +680,10 @@
 			if (iov.addr == 0)
 				return;
 			inactive_cycles = 0;
-			mutex_lock(&bam_rx_pool_lock);
+			mutex_lock(&bam_rx_pool_mutexlock);
 			node = bam_rx_pool.next;
 			list_del(node);
-			mutex_unlock(&bam_rx_pool_lock);
+			mutex_unlock(&bam_rx_pool_mutexlock);
 			info = container_of(node, struct rx_pkt_info,
 							list_node);
 			handle_bam_mux_cmd(&info->work);
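
Both RX drain loops (the one above and this one) pop bam_rx_pool identically;
they run in contexts where sleeping is permitted, so the mutex stays and only
its name changes, matching the rename at the definition.
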
@@ -971,7 +971,7 @@
 		}
 	}
 	/*cleanup UL*/
-	mutex_lock(&bam_tx_pool_lock);
+	spin_lock(&bam_tx_pool_spinlock);
 	while (!list_empty(&bam_tx_pool)) {
 		node = bam_tx_pool.next;
 		list_del(node);
@@ -990,7 +990,7 @@
 		}
 		kfree(info);
 	}
-	mutex_unlock(&bam_tx_pool_lock);
+	spin_unlock(&bam_tx_pool_spinlock);
 	smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
 
 	return NOTIFY_DONE;
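
The subsystem-restart cleanup drains bam_tx_pool under the new spinlock as
well. Holding a spinlock across the frees is fine here since kfree() (and,
for data packets, dev_kfree_skb_any()) never sleeps, and taking the same lock
as the writers ensures no packet can be queued mid-drain before the A2 is
signalled via smsm_change_state().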