usb: musb: maintain three states for buffer mappings instead of two

If DMA buffers are mapped by a higher layer, a boolean musb_request.mapped
cannot record that a buffer has already been unmapped: it is still
possible to call dma_sync_single_for_device() from musb_g_giveback() even
though txstate()/rxstate() has already called unmap_dma_buffer() before
falling back to PIO mode. Maintain three buffer map states instead:
UN_MAPPED, PRE_MAPPED (mapped by a higher layer) and MUSB_MAPPED (mapped
by this driver), so unmap_dma_buffer() can safely be called
unconditionally.

Moreover, the check for musb_ep->dma is moved into map_dma_buffer(), so
the corresponding checks at its call sites are removed. And where
possible, checks for is_dma_capable() are merged with the buffer map
state check via the new is_buffer_mapped() helper.

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
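---
Only the musb_gadget.c hunk is shown below. The three map states it uses
are assumed to come from a companion musb_gadget.h change, roughly along
these lines (a sketch inferred from the states referenced in the diff,
not the verbatim header hunk):

	enum buffer_map_state {
		UN_MAPPED = 0,	/* no mapping owned or synced */
		PRE_MAPPED,	/* buffer mapped by a higher layer */
		MUSB_MAPPED	/* buffer mapped by this driver */
	};

	/* struct musb_request is assumed to carry the new field,
	 * replacing the old boolean 'mapped':
	 *	enum buffer_map_state map_state;
	 */
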
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 23dad4c..6577503 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,19 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+					(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-				struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +113,7 @@
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +121,7 @@
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
@@ -121,11 +129,14 @@
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +144,15 @@
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +182,7 @@
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p,  %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +344,7 @@
 			csr);
 
 #ifndef	CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +445,7 @@
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +635,7 @@
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
@@ -658,7 +666,7 @@
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +750,7 @@
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +770,7 @@
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			 if (is_dma_capable() && musb_ep->dma) {
+			 if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -1222,10 +1230,7 @@
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
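
For illustration, a minimal standalone model of the unmap logic above
(not driver code: names mirror the patch, the printf bodies stand in for
the real DMA API calls, and the DMA_ADDR_INVALID check is omitted). It
shows the property the patch relies on: once unmap_dma_buffer() has run,
map_state is UN_MAPPED and a second call is a no-op, which the single
'mapped' boolean could not express.

	#include <stdio.h>

	enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

	struct req { enum buffer_map_state map_state; };

	static void unmap_dma_buffer(struct req *r)
	{
		if (r->map_state == UN_MAPPED) {
			printf("already unmapped: nothing to do\n");
			return;
		}
		if (r->map_state == MUSB_MAPPED)
			/* we created the mapping, so tear it down */
			printf("dma_unmap_single()\n");
		else /* PRE_MAPPED: a higher layer owns the mapping */
			printf("dma_sync_single_for_cpu()\n");
		r->map_state = UN_MAPPED;
	}

	int main(void)
	{
		struct req r = { .map_state = PRE_MAPPED };

		unmap_dma_buffer(&r);	/* txstate() falls back to PIO */
		unmap_dma_buffer(&r);	/* musb_g_giveback(): now a no-op */
		return 0;
	}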