USB: ci13xxx_udc: Add support for larger (> 16K) length USB requests

The maximum recommended transfer size for a device transfer descriptor (dTD)
is 16K bytes.  The current code limits the USB request length to 16K bytes.
Add support for larger length USB requests by splitting them across
multiple dTDs.  Remove the special handling of zero-length packet
termination and use the multi-dTD logic to achieve the same.
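For example, a 100K byte request is now queued as a chain of seven dTDs:
six carrying 16K bytes each plus one for the remaining 4K bytes.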

The "QUEUE" and "DONE" event messages are slightly modified to include the
request pointer, actual length, and original length.  Update the sysfs file
implementation to print all dTDs for any given USB request.

CRs-Fixed: 429212
Change-Id: If3fbb986c84ac9d138429c624a4c73ecee943ce5
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 9d231d6..8c04863 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -980,16 +980,16 @@
  * dbg_done: prints a DONE event
  * @addr:   endpoint address
- * @td:     transfer descriptor
- * @status: status
+ * @req:    usb request
  */
-static void dbg_done(u8 addr, const u32 token, int status)
+static void dbg_done(u8 addr, const struct usb_request *req)
 {
 	char msg[DBG_DATA_MSG];
 
-	scnprintf(msg, sizeof(msg), "%d %02X",
-		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
-		  (int)(token & TD_STATUS)      >> ffs_nr(TD_STATUS));
-	dbg_print(addr, "DONE", status, msg);
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%p %d %d", req, req->actual, req->length);
+		dbg_print(addr, "DONE", req->status, msg);
+	}
 }
 
 /**
@@ -1016,7 +1016,7 @@
 
 	if (req != NULL) {
 		scnprintf(msg, sizeof(msg),
-			  "%d %d", !req->no_interrupt, req->length);
+			  "%p %d %d", req, !req->no_interrupt, req->length);
 		dbg_print(addr, "QUEUE", status, msg);
 	}
 }
@@ -1049,6 +1049,7 @@
 {
 	char msg[DBG_DATA_MSG];
 	struct ci13xxx_req *req;
+	struct ci13xxx_td_wrapper *td;
 	struct list_head *ptr = NULL;
 
 	if (mep != NULL) {
@@ -1065,15 +1066,14 @@
 
 		list_for_each(ptr, &mep->qh.queue) {
 			req = list_entry(ptr, struct ci13xxx_req, queue);
-			scnprintf(msg, sizeof(msg),
-					"%08X:%08X:%08X\n",
-					req->dma, req->ptr->next,
-					req->ptr->token);
-			dbg_print(addr, "REQ", 0, msg);
-			scnprintf(msg, sizeof(msg), "%08X:%d\n",
-					req->ptr->page[0],
-					req->req.status);
-			dbg_print(addr, "REQPAGE", 0, msg);
+			list_for_each_entry(td, &req->td_list, list) {
+				scnprintf(msg, sizeof(msg),
+						"%08X:%08X:%08X:%08X\n",
+						td->dma, td->ptr->next,
+						td->ptr->token,
+						td->ptr->page[0]);
+				dbg_print(addr, "TD", 0, msg);
+			}
 		}
 	}
 }
@@ -1444,6 +1444,7 @@
 	unsigned long flags;
 	struct list_head   *ptr = NULL;
 	struct ci13xxx_req *req = NULL;
+	struct ci13xxx_td_wrapper *td;
 	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
 
 	dbg_trace("[%s] %p\n", __func__, buf);
@@ -1458,15 +1459,18 @@
 		{
 			req = list_entry(ptr, struct ci13xxx_req, queue);
 
-			n += scnprintf(buf + n, PAGE_SIZE - n,
-					"EP=%02i: TD=%08X %s\n",
-					i % hw_ep_max/2, (u32)req->dma,
-					((i < hw_ep_max/2) ? "RX" : "TX"));
-
-			for (j = 0; j < qSize; j++)
+			list_for_each_entry(td, &req->td_list, list) {
 				n += scnprintf(buf + n, PAGE_SIZE - n,
-						" %04X:    %08X\n", j,
-						*((u32 *)req->ptr + j));
+						"EP=%02i: TD=%08X %s\n",
+						i % hw_ep_max/2, (u32)td->dma,
+						((i < hw_ep_max/2) ?
+						 "RX" : "TX"));
+
+				for (j = 0; j < qSize; j++)
+					n += scnprintf(buf + n, PAGE_SIZE - n,
+							" %04X:    %08X\n", j,
+							*((u32 *)td->ptr + j));
+			}
 		}
 	spin_unlock_irqrestore(udc->lock, flags);
 
@@ -1484,6 +1488,7 @@
 	unsigned int ep_num, dir;
 	int n;
 	struct ci13xxx_req *mReq = NULL;
+	struct ci13xxx_td_wrapper *td;
 
 	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
 		dev_err(dev, "<ep_num> <dir>: prime the ep");
@@ -1497,7 +1502,8 @@
 
 	n = hw_ep_bit(mEp->num, mEp->dir);
 	mReq =  list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue);
-	mEp->qh.ptr->td.next   = mReq->dma;
+	td = list_first_entry(&mReq->td_list, struct ci13xxx_td_wrapper, list);
+	mEp->qh.ptr->td.next   = td->dma;
 	mEp->qh.ptr->td.token &= ~TD_STATUS;
 
 	wmb();
@@ -1527,12 +1533,15 @@
 	int n;
 	struct list_head   *ptr = NULL;
 	struct ci13xxx_req *req = NULL;
+	struct ci13xxx_td_wrapper *td;
+	unsigned long flags;
 
 	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
 		dev_err(dev, "<ep_num> <dir>: to print dtds");
 		goto done;
 	}
 
+	spin_lock_irqsave(udc->lock, flags);
 	if (dir)
 		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
 	else
@@ -1556,11 +1565,12 @@
 
 	list_for_each(ptr, &mEp->qh.queue) {
 		req = list_entry(ptr, struct ci13xxx_req, queue);
-
-		pr_info("\treq:%08x next:%08x token:%08x page0:%08x status:%d\n",
-				req->dma, req->ptr->next, req->ptr->token,
-				req->ptr->page[0], req->req.status);
+		list_for_each_entry(td, &req->td_list, list)
+			pr_info("\ttd:%08x next:%08x token:%08x page0:%08x\n",
+				td->dma, td->ptr->next, td->ptr->token,
+				td->ptr->page[0]);
 	}
+	spin_unlock_irqrestore(udc->lock, flags);
 done:
 	return count;
 
@@ -1740,6 +1750,7 @@
 {
 	struct ci13xxx_ep *mep = (struct ci13xxx_ep *)data;
 	struct ci13xxx_req *req;
+	struct ci13xxx_td_wrapper *td;
 	struct list_head *ptr = NULL;
 	int n = hw_ep_bit(mep->num, mep->dir);
 	unsigned long flags;
@@ -1753,9 +1764,10 @@
 		goto out;
 
 	req = list_entry(mep->qh.queue.next, struct ci13xxx_req, queue);
+	td = list_first_entry(&req->td_list, struct ci13xxx_td_wrapper, list);
 
 	mb();
-	if (!(TD_STATUS_ACTIVE & req->ptr->token))
+	if (!(TD_STATUS_ACTIVE & td->ptr->token))
 		goto out;
 
 	mep->prime_timer_count++;
@@ -1767,10 +1779,10 @@
 				mep->qh.ptr->td.next, mep->qh.ptr->td.token);
 		list_for_each(ptr, &mep->qh.queue) {
 			req = list_entry(ptr, struct ci13xxx_req, queue);
-			pr_info("\treq:%08xnext:%08xtkn:%08xpage0:%08xsts:%d\n",
-					req->dma, req->ptr->next,
-					req->ptr->token, req->ptr->page[0],
-					req->req.status);
+			list_for_each_entry(td, &req->td_list, list)
+				pr_info("\ttd:%08x next:%08x tkn:%08x page0:%08x\n",
+					td->dma, td->ptr->next,
+					td->ptr->token, td->ptr->page[0]);
 		}
 		dbg_usb_op_fail(0xFF, "PRIMEF", mep);
 		mep->prime_fail_count++;
@@ -1784,7 +1796,69 @@
 out:
 	mep->prime_timer_count = 0;
 	spin_unlock_irqrestore(mep->lock, flags);
+}
 
+static int prepare_dtds(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	int i;
+	unsigned len;
+	unsigned remaining_len = mReq->req.length;
+	dma_addr_t dma = mReq->req.dma;
+	struct ci13xxx_td_wrapper *td;
+	struct ci13xxx_td_wrapper *prev_td = NULL;
+	bool zlp = mReq->req.zero && remaining_len &&
+			(remaining_len % mEp->ep.maxpacket == 0);
+
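+
+	/*
+	 * Build a chain of dTDs covering the request, each carrying at
+	 * most CI13XXX_MAX_REQ_SIZE bytes.  When the request length is a
+	 * multiple of maxpacket and req.zero is set, one extra
+	 * zero-length dTD terminates the transfer.
+	 */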
+	do {
+		td = kmalloc(sizeof(*td), GFP_ATOMIC);
+		if (td == NULL) {
+			pr_err("td wrapper allocation failed\n");
+			goto free;
+		}
+		td->ptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+				&td->dma);
+		if (td->ptr == NULL) {
+			kfree(td);
+			pr_err("td allocation failed\n");
+			goto free;
+		}
+
+		list_add_tail(&td->list, &mReq->td_list);
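+		/* remaining_len is 0 only on the extra ZLP iteration */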
+		if (zlp && !remaining_len)
+			zlp = false;
+
+		len = min_t(unsigned, remaining_len, CI13XXX_MAX_REQ_SIZE);
+		remaining_len -= len;
+		memset(td->ptr, 0, sizeof(*td->ptr));
+		td->ptr->token    = len << ffs_nr(TD_TOTAL_BYTES);
+		td->ptr->token   &= TD_TOTAL_BYTES;
+		td->ptr->token   |= TD_STATUS_ACTIVE;
+
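+		/* Five page pointers cover this dTD's slice of the buffer */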
+		td->ptr->page[0]  = dma;
+		for (i = 1; i < 5; i++)
+			td->ptr->page[i] = (dma + i * CI13XXX_PAGE_SIZE) &
+					~TD_RESERVED_MASK;
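+		/* Chain this dTD after the previous one in the list */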
+		if (prev_td)
+			prev_td->ptr->next = td->dma;
+
+		dma += len;
+		prev_td = td;
+
+	} while (remaining_len || zlp);
+
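+	/* Only the last dTD terminates the chain; IOC is set there unless no_interrupt */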
+	td->ptr->next = TD_TERMINATE;
+	if (!mReq->req.no_interrupt)
+		td->ptr->token |= TD_IOC;
+
+	return 0;
+free:
+	list_for_each_entry_safe(td, prev_td, &mReq->td_list, list) {
+		list_del(&td->list);
+		dma_pool_free(mEp->td_pool, td->ptr, td->dma);
+		kfree(td);
+	}
+	return -ENOMEM;
 }
 
 /**
@@ -1799,7 +1873,7 @@
-	unsigned i;
 	int ret = 0;
 	unsigned length = mReq->req.length;
-	struct ci13xxx *udc = _udc;
+	struct ci13xxx_td_wrapper *td;
 
 	trace("%p, %p", mEp, mReq);
 
@@ -1819,40 +1893,23 @@
 		mReq->map = 1;
 	}
 
-	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
-		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
-					   &mReq->zdma);
-		if (mReq->zptr == NULL) {
-			if (mReq->map) {
-				dma_unmap_single(mEp->device, mReq->req.dma,
-					length, mEp->dir ? DMA_TO_DEVICE :
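+	/*
+	 * MSM-specific: clear req.dma up front so the dTD page pointers
+	 * built below stay zero for the proprietary DMA engine.
+	 */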
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
+		mReq->req.dma = 0;
+
+	ret = prepare_dtds(mEp, mReq);
+	if (ret) {
+		pr_err("dTD preparation failed\n");
+		if (mReq->map) {
+			dma_unmap_single(mEp->device, mReq->req.dma,
+					mReq->req.length,
+					mEp->dir ? DMA_TO_DEVICE :
 					DMA_FROM_DEVICE);
-				mReq->req.dma = DMA_ADDR_INVALID;
-				mReq->map     = 0;
-			}
-			return -ENOMEM;
+			mReq->req.dma = DMA_ADDR_INVALID;
+			mReq->map     = 0;
 		}
-		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
-		mReq->zptr->next    = TD_TERMINATE;
-		mReq->zptr->token   = TD_STATUS_ACTIVE;
-		if (!mReq->req.no_interrupt)
-			mReq->zptr->token   |= TD_IOC;
+		return ret;
 	}
-	/*
-	 * TD configuration
-	 * TODO - handle requests which spawns into several TDs
-	 */
-	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
-	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
-	mReq->ptr->token   &= TD_TOTAL_BYTES;
-	mReq->ptr->token   |= TD_STATUS_ACTIVE;
-	if (mReq->zptr) {
-		mReq->ptr->next    = mReq->zdma;
-	} else {
-		mReq->ptr->next    = TD_TERMINATE;
-		if (!mReq->req.no_interrupt)
-			mReq->ptr->token  |= TD_IOC;
-	}
+	td = list_first_entry(&mReq->td_list, struct ci13xxx_td_wrapper, list);
 
 	/* MSM Specific: updating the request as required for
 	 * SPS mode. Enable MSM proprietary DMA engine accordingly.
@@ -1860,46 +1917,28 @@
 	 */
 	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
 		if (mReq->req.udc_priv & MSM_SPS_MODE) {
-			mReq->ptr->token = TD_STATUS_ACTIVE;
+			td->ptr->token = TD_STATUS_ACTIVE;
 			if (mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER)
-				mReq->ptr->next = TD_TERMINATE;
+				td->ptr->next = TD_TERMINATE;
 			else
-				mReq->ptr->next = MSM_ETD_TYPE | mReq->dma;
+				td->ptr->next = MSM_ETD_TYPE | td->dma;
 			if (!mReq->req.no_interrupt)
-				mReq->ptr->token |= MSM_ETD_IOC;
+				td->ptr->token |= MSM_ETD_IOC;
 		}
-		mReq->req.dma = 0;
-	}
-
-	mReq->ptr->page[0]  = mReq->req.dma;
-	for (i = 1; i < 5; i++)
-		mReq->ptr->page[i] =
-			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
-
-	/* Remote Wakeup */
-	if (udc->suspended) {
-		if (!udc->remote_wakeup) {
-			mReq->req.status = -EAGAIN;
-			dev_dbg(mEp->device, "%s: queue failed (suspend) ept #%d\n",
-				__func__, mEp->num);
-			return -EAGAIN;
-		}
-		usb_phy_set_suspend(udc->transceiver, 0);
-		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
 	}
 
 	if (!list_empty(&mEp->qh.queue)) {
 		struct ci13xxx_req *mReqPrev;
+		struct ci13xxx_td_wrapper *prev_td;
 		int n = hw_ep_bit(mEp->num, mEp->dir);
 		int tmp_stat;
 		ktime_t start, diff;
 
 		mReqPrev = list_entry(mEp->qh.queue.prev,
 				struct ci13xxx_req, queue);
-		if (mReqPrev->zptr)
-			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
-		else
-			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
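+		/* Hook our first dTD onto the last dTD of the previous request */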
+		prev_td = list_entry(mReqPrev->td_list.prev,
+				struct ci13xxx_td_wrapper, list);
+		prev_td->ptr->next = td->dma & TD_ADDR_MASK;
 		wmb();
 		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
 			goto done;
@@ -1928,15 +1967,17 @@
 		struct ci13xxx_req *mReq = \
 			list_entry(mEp->qh.queue.next,
 				   struct ci13xxx_req, queue);
+		struct ci13xxx_td_wrapper *td = list_first_entry(&mReq->td_list,
+				struct ci13xxx_td_wrapper, list);
 
-		if (TD_STATUS_ACTIVE & mReq->ptr->token) {
-			mEp->qh.ptr->td.next   = mReq->dma;
+		if (TD_STATUS_ACTIVE & td->ptr->token) {
+			mEp->qh.ptr->td.next   = td->dma;
 			mEp->qh.ptr->td.token &= ~TD_STATUS;
 			goto prime;
 		}
 	}
 
-	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+	mEp->qh.ptr->td.next   = td->dma;    /* TERMINATE = 0 */
 
 	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
 		if (mReq->req.udc_priv & MSM_SPS_MODE) {
@@ -1993,27 +2034,27 @@
  */
 static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
 {
+	struct ci13xxx_td_wrapper *td, *temp_td;
+	unsigned rem, total_rem = 0;
+	int status;
+
 	trace("%p, %p", mEp, mReq);
 
 	if (mReq->req.status != -EALREADY)
 		return -EINVAL;
 
-	/* clean speculative fetches on req->ptr->token */
+	/* clean speculative fetches on td->ptr->token */
 	mb();
 
-	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
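+	/*
+	 * dTDs retire in order, so the request is finished only when the
+	 * last dTD in the chain goes inactive.
+	 */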
+	td = list_entry(mReq->td_list.prev, struct ci13xxx_td_wrapper, list);
+
+	if ((TD_STATUS_ACTIVE & td->ptr->token) != 0)
 		return -EBUSY;
 
 	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
 		if ((mReq->req.udc_priv & MSM_SPS_MODE) &&
 			(mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER))
 			return -EBUSY;
-	if (mReq->zptr) {
-		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
-			return -EBUSY;
-		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
-		mReq->zptr = NULL;
-	}
 
 	mReq->req.status = 0;
 
@@ -2024,18 +2065,27 @@
 		mReq->map     = 0;
 	}
 
-	mReq->req.status = mReq->ptr->token & TD_STATUS;
-	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
-		mReq->req.status = -1;
-	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
-		mReq->req.status = -1;
-	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
-		mReq->req.status = -1;
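+	/*
+	 * Walk the whole chain: accumulate the leftover byte count and
+	 * error status of each dTD, then return it to the pool.
+	 */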
+	list_for_each_entry_safe(td, temp_td, &mReq->td_list, list) {
 
-	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
-	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
-	mReq->req.actual   = mReq->req.length - mReq->req.actual;
-	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+		status = td->ptr->token & TD_STATUS;
+		if ((status & TD_STATUS_HALTED) != 0)
+			mReq->req.status = -1;
+		else if ((status & TD_STATUS_DT_ERR) != 0)
+			mReq->req.status = -1;
+		else if ((status & TD_STATUS_TR_ERR) != 0)
+			mReq->req.status = -1;
+
+		rem = td->ptr->token & TD_TOTAL_BYTES;
+		rem >>= ffs_nr(TD_TOTAL_BYTES);
+		total_rem += rem;
+
+		list_del(&td->list);
+		dma_pool_free(mEp->td_pool, td->ptr, td->dma);
+		kfree(td);
+	}
+
+	mReq->req.actual = mReq->req.status ? 0 :
+			(mReq->req.length - total_rem);
 
 	return mReq->req.actual;
 }
@@ -2052,6 +2102,7 @@
 __acquires(mEp->lock)
 {
 	struct ci13xxx_ep *mEpTemp = mEp;
+	struct ci13xxx_td_wrapper *td, *temp_td;
 	unsigned val;
 
 	trace("%p", mEp);
@@ -2093,10 +2144,16 @@
 			mReq->map     = 0;
 		}
 
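+		/* Free the request's dTD chain before completing it */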
+		list_for_each_entry_safe(td, temp_td, &mReq->td_list, list) {
+			list_del(&td->list);
+			dma_pool_free(mEp->td_pool, td->ptr, td->dma);
+			kfree(td);
+		}
+
 		if (mReq->req.complete != NULL) {
 			spin_unlock(mEp->lock);
 			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-				mReq->req.length)
+					mReq->req.length)
 				mEpTemp = &_udc->ep0in;
 			mReq->req.complete(&mEpTemp->ep, &mReq->req);
 			if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
@@ -2442,7 +2499,7 @@
 		}
 		req_dequeue = 0;
 		list_del_init(&mReq->queue);
-		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+		dbg_done(_usb_addr(mEp), &mReq->req);
 		if (mReq->req.complete != NULL) {
 			spin_unlock(mEp->lock);
 			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
@@ -2810,18 +2867,14 @@
 	}
 
 	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
-	if (mReq != NULL) {
-		INIT_LIST_HEAD(&mReq->queue);
-		mReq->req.dma = DMA_ADDR_INVALID;
+	if (mReq == NULL)
+		goto out;
 
-		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
-					   &mReq->dma);
-		if (mReq->ptr == NULL) {
-			kfree(mReq);
-			mReq = NULL;
-		}
-	}
+	INIT_LIST_HEAD(&mReq->queue);
+	mReq->req.dma = DMA_ADDR_INVALID;
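+	/* dTDs are now allocated on demand in prepare_dtds() at enqueue time */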
+	INIT_LIST_HEAD(&mReq->td_list);
 
+out:
 	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
 
 	return (mReq == NULL) ? NULL : &mReq->req;
@@ -2850,8 +2903,6 @@
 
 	spin_lock_irqsave(mEp->lock, flags);
 
-	if (mReq->ptr)
-		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
 	kfree(mReq);
 
 	dbg_event(_usb_addr(mEp), "FREE", 0);
@@ -2889,6 +2940,17 @@
 		return -ESHUTDOWN;
 	}
 
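+	/* A suspended bus must be woken up via remote wakeup before queueing */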
+	if (udc->suspended) {
+		if (!udc->remote_wakeup) {
+			mReq->req.status = -EAGAIN;
+			dev_dbg(mEp->device, "%s: queue failed (suspend) ept #%d\n",
+				__func__, mEp->num);
+			return -EAGAIN;
+		}
+		usb_phy_set_suspend(udc->transceiver, 0);
+		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
+	}
+
 	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
 		if (req->length)
 			mEp = (_udc->ep0_dir == RX) ?
@@ -2907,11 +2969,9 @@
 		goto done;
 	}
 
-	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
-		req->length = (4 * CI13XXX_PAGE_SIZE);
-		retval = -EMSGSIZE;
-		warn("request length truncated");
-	}
+	/* REMOVE ME: temporary warning to flag gadget drivers issuing > 16K requests */
+	if (req->length > CI13XXX_MAX_REQ_SIZE)
+		pr_warn_once("request length exceeds 16K, using multiple dTDs\n");
 
 	dbg_queue(_usb_addr(mEp), req, retval);
 
@@ -2943,6 +3003,7 @@
 	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
 	struct ci13xxx_ep *mEpTemp = mEp;
 	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	struct ci13xxx_td_wrapper *td, *temp_td;
 	unsigned long flags;
 
 	trace("%p, %p", ep, req);
@@ -2973,6 +3034,12 @@
 	}
 	req->status = -ECONNRESET;
 
+	list_for_each_entry_safe(td, temp_td, &mReq->td_list, list) {
+		list_del(&td->list);
+		dma_pool_free(mEp->td_pool, td->ptr, td->dma);
+		kfree(td);
+	}
+
 	if (mReq->req.complete != NULL) {
 		spin_unlock(mEp->lock);
 		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&